You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@cassandra.apache.org by bl...@apache.org on 2022/10/21 10:11:26 UTC

[cassandra] 04/05: Adds a trie-based memtable implementation

This is an automated email from the ASF dual-hosted git repository.

blambov pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/cassandra.git

commit 7c55c73825e341315e520381968338d57afbb67a
Author: Branimir Lambov <br...@datastax.com>
AuthorDate: Wed Jan 20 15:42:36 2021 +0200

    Adds a trie-based memtable implementation
    
    patch by Branimir Lambov; reviewed by Jason Rutherglen, Jacek Lewandowski, Andres de la Peña and Caleb Rackliffe for CASSANDRA-17240
---
 .circleci/config-2_1.yml                           |  51 ++
 .circleci/config-2_1.yml.high_res.patch            |   8 +-
 .circleci/config-2_1.yml.mid_res.patch             |  36 +-
 .circleci/config.yml                               | 246 ++++++-
 .circleci/config.yml.HIGHRES                       | 256 +++++++-
 .circleci/config.yml.LOWRES                        | 256 +++++++-
 .circleci/config.yml.MIDRES                        | 256 +++++++-
 .circleci/generate.sh                              |   1 +
 CHANGES.txt                                        |   2 +
 NEWS.txt                                           |   3 +
 build.xml                                          |  37 +-
 .../db/AbstractArrayClusteringPrefix.java          |   3 +-
 .../db/AbstractBufferClusteringPrefix.java         |  13 +-
 .../apache/cassandra/db/BufferDecoratedKey.java    |   6 +
 .../org/apache/cassandra/db/ClusteringPrefix.java  |   8 +-
 .../org/apache/cassandra/db/ColumnFamilyStore.java |  52 +-
 src/java/org/apache/cassandra/db/DecoratedKey.java |   1 +
 .../org/apache/cassandra/db/NativeClustering.java  |  22 +-
 .../apache/cassandra/db/NativeDecoratedKey.java    |   6 +
 .../db/memtable/AbstractAllocatorMemtable.java     |   8 +-
 .../db/memtable/AbstractShardedMemtable.java       | 103 +++
 .../apache/cassandra/db/memtable/Memtable_API.md   |  21 +-
 .../cassandra/db/memtable/ShardBoundaries.java     |   3 +-
 .../db/memtable/ShardedMemtableConfigMXBean.java   |  32 +-
 .../db/memtable/ShardedSkipListMemtable.java       |  58 +-
 .../cassandra/db/memtable/SkipListMemtable.java    |  17 +-
 .../apache/cassandra/db/memtable/TrieMemtable.java | 722 +++++++++++++++++++++
 .../db/partitions/AbstractBTreePartition.java      |  78 +--
 .../db/partitions/AtomicBTreePartition.java        | 257 ++------
 .../db/partitions/BTreePartitionData.java          | 101 +++
 .../db/partitions/BTreePartitionUpdater.java       | 184 ++++++
 .../db/partitions/CachedBTreePartition.java        |   6 +-
 .../db/partitions/ImmutableBTreePartition.java     |   8 +-
 .../cassandra/db/partitions/PartitionUpdate.java   |  32 +-
 .../cassandra/db/tries/MemtableReadTrie.java       |   8 +-
 .../apache/cassandra/db/tries/MemtableTrie.java    |  28 +-
 .../org/apache/cassandra/db/tries/MergeTrie.java   |   1 -
 .../io/sstable/metadata/MetadataCollector.java     |   8 +-
 .../apache/cassandra/metrics/MinMaxAvgMetric.java  | 106 +++
 .../cassandra/metrics/TrieMemtableMetricsView.java |  90 +++
 .../org/apache/cassandra/utils/ByteBufferUtil.java |   5 +-
 .../utils/bytecomparable/ByteComparable.md         |   1 +
 .../cassandra/utils/memory/EnsureOnHeap.java       |   2 +
 .../apache/cassandra/utils/memory/HeapPool.java    |   6 +-
 test/conf/cassandra.yaml                           |  11 +-
 test/conf/trie_memtable.yaml                       |  20 +
 .../btree/AtomicBTreePartitionUpdateBench.java     |  15 +-
 .../test/microbench/instance/ReadTest.java         |  10 +
 .../microbench/tries/MemtableTrieWriteBench.java   |   2 +-
 .../cql3/validation/operations/AlterTest.java      |  17 +-
 .../cql3/validation/operations/CreateTest.java     |   5 +-
 .../apache/cassandra/db/ClusteringPrefixTest.java  | 234 +++++++
 .../cassandra/db/memtable/MemtableQuickTest.java   |   3 +-
 .../db/memtable/MemtableSizeHeapBuffersTest.java   |   7 +-
 .../memtable/MemtableSizeOffheapBuffersTest.java   |   5 +-
 .../memtable/MemtableSizeOffheapObjectsTest.java   |   7 +-
 .../db/memtable/MemtableSizeTestBase.java          |  15 +-
 .../db/memtable/MemtableSizeUnslabbedTest.java     |   5 +-
 .../db/memtable/ShardedMemtableConfigTest.java     |  68 ++
 .../cassandra/db/tries/MemtableTriePutTest.java    |   4 +-
 .../cassandra/db/tries/MemtableTrieTestBase.java   |   2 +
 .../org/apache/cassandra/db/tries/TrieToDot.java   |   2 +-
 .../cassandra/io/sstable/SSTableMetadataTest.java  |  44 +-
 .../cassandra/metrics/TrieMemtableMetricsTest.java | 210 ++++++
 .../org/apache/cassandra/tools/BulkLoaderTest.java |  12 +-
 .../org/apache/cassandra/tools/GetVersionTest.java |   2 +-
 .../apache/cassandra/tools/OfflineToolUtils.java   |  24 +-
 .../tools/SSTableExpiredBlockersTest.java          |   2 +-
 .../tools/SSTableExportSchemaLoadingTest.java      |   2 +-
 .../apache/cassandra/tools/SSTableExportTest.java  |   2 +-
 .../cassandra/tools/SSTableLevelResetterTest.java  |   2 +-
 .../cassandra/tools/SSTableMetadataViewerTest.java |   4 +-
 .../cassandra/tools/SSTableOfflineRelevelTest.java |   2 +-
 .../tools/SSTableRepairedAtSetterTest.java         |   8 +-
 .../cassandra/tools/ToolsSchemaLoadingTest.java    |  10 +-
 75 files changed, 3331 insertions(+), 573 deletions(-)

diff --git a/.circleci/config-2_1.yml b/.circleci/config-2_1.yml
index a3e865089e..d9cd921dde 100644
--- a/.circleci/config-2_1.yml
+++ b/.circleci/config-2_1.yml
@@ -127,6 +127,7 @@ default_env_vars: &default_env_vars
     # REPEATED_ANT_TEST_TARGET: test-jvm-dtest-some
     # REPEATED_ANT_TEST_TARGET: test-cdc
     # REPEATED_ANT_TEST_TARGET: test-compression
+    # REPEATED_ANT_TEST_TARGET: test-trie
     # REPEATED_ANT_TEST_TARGET: test-system-keyspace-directory
     REPEATED_ANT_TEST_TARGET: testsome
     # The name of JUnit class to be run multiple times, for example:
@@ -323,6 +324,18 @@ j8_separate_jobs: &j8_separate_jobs
         requires:
           - start_utests_compression_repeat
           - j8_build
+    - start_utests_trie:
+        type: approval
+    - utests_trie:
+        requires:
+          - start_utests_trie
+          - j8_build
+    - start_utests_trie_repeat:
+        type: approval
+    - utests_trie_repeat:
+        requires:
+          - start_utests_trie_repeat
+          - j8_build
     - start_utests_stress:
         type: approval
     - utests_stress:
@@ -559,6 +572,16 @@ j8_pre-commit_jobs: &j8_pre-commit_jobs
         requires:
           - start_utests_compression
           - j8_build
+    - start_utests_trie:
+        type: approval
+    - utests_trie:
+        requires:
+          - start_utests_trie
+          - j8_build
+    - utests_trie_repeat:
+        requires:
+          - start_utests_trie
+          - j8_build
     - start_utests_stress:
         type: approval
     - utests_stress:
@@ -1018,6 +1041,16 @@ jobs:
       - run_parallel_junit_tests:
           target: testclasslist-compression
 
+  utests_trie:
+    <<: *j8_par_executor
+    steps:
+      - attach_workspace:
+          at: /home/cassandra
+      - create_junit_containers
+      - log_environment
+      - run_parallel_junit_tests:
+          target: testclasslist-trie
+
   utests_stress:
     <<: *j8_seq_executor
     steps:
@@ -1264,6 +1297,14 @@ jobs:
       - log_environment
       - run_utests_compression_repeat
 
+  utests_trie_repeat:
+    <<: *j8_repeated_utest_executor
+    steps:
+      - attach_workspace:
+          at: /home/cassandra
+      - log_environment
+      - run_utests_trie_repeat
+
   utests_system_keyspace_directory_repeat:
     <<: *j8_repeated_utest_executor
     steps:
@@ -1843,6 +1884,14 @@ commands:
           count: ${REPEATED_UTESTS_COUNT}
           stop_on_failure: ${REPEATED_TESTS_STOP_ON_FAILURE}
 
+  run_utests_trie_repeat:
+    steps:
+      - run_repeated_utests:
+          target: test-trie
+          tests: ${REPEATED_UTESTS}
+          count: ${REPEATED_UTESTS_COUNT}
+          stop_on_failure: ${REPEATED_TESTS_STOP_ON_FAILURE}
+
   run_utests_system_keyspace_directory_repeat:
     steps:
       - run_repeated_utests:
@@ -1971,6 +2020,7 @@ commands:
                 if [[ $target == "test" || \
                       $target == "test-cdc" || \
                       $target == "test-compression" || \
+                      $target == "test-trie" || \
                       $target == "test-system-keyspace-directory" || \
                       $target == "fqltool-test" || \
                       $target == "long-test" || \
@@ -2097,6 +2147,7 @@ commands:
                 if [[ $target == "test" || \
                       $target == "test-cdc" || \
                       $target == "test-compression" || \
+                      $target == "test-trie" || \
                       $target == "test-system-keyspace-directory" || \
                       $target == "fqltool-test" || \
                       $target == "long-test" || \
diff --git a/.circleci/config-2_1.yml.high_res.patch b/.circleci/config-2_1.yml.high_res.patch
index feeab90daf..669f07c48b 100644
--- a/.circleci/config-2_1.yml.high_res.patch
+++ b/.circleci/config-2_1.yml.high_res.patch
@@ -1,6 +1,6 @@
---- config-2_1.yml	2022-10-15 12:44:53.884924328 +0100
-+++ config-2_1.yml.HIGHRES	2022-10-15 12:45:37.970163268 +0100
-@@ -148,14 +148,14 @@
+--- config-2_1.yml	2022-10-19 18:01:31.517748550 +0300
++++ config-2_1.yml.HIGHRES	2022-10-19 18:06:37.789192360 +0300
+@@ -149,14 +149,14 @@
  j8_par_executor: &j8_par_executor
    executor:
      name: java8-executor
@@ -19,7 +19,7 @@
  
  j8_small_executor: &j8_small_executor
    executor:
-@@ -166,62 +166,68 @@
+@@ -167,62 +167,68 @@
  j8_medium_par_executor: &j8_medium_par_executor
    executor:
      name: java8-executor
diff --git a/.circleci/config-2_1.yml.mid_res.patch b/.circleci/config-2_1.yml.mid_res.patch
index 6417c470b2..c1eef323c8 100644
--- a/.circleci/config-2_1.yml.mid_res.patch
+++ b/.circleci/config-2_1.yml.mid_res.patch
@@ -1,6 +1,6 @@
---- config-2_1.yml	2022-10-15 12:44:53.884924328 +0100
-+++ config-2_1.yml.MIDRES	2022-10-15 12:45:37.963422950 +0100
-@@ -148,14 +148,14 @@
+--- config-2_1.yml	2022-10-19 18:01:31.517748550 +0300
++++ config-2_1.yml.MIDRES	2022-10-19 18:06:26.777068521 +0300
+@@ -149,14 +149,14 @@
  j8_par_executor: &j8_par_executor
    executor:
      name: java8-executor
@@ -19,7 +19,7 @@
  
  j8_small_executor: &j8_small_executor
    executor:
-@@ -163,29 +163,41 @@
+@@ -164,29 +164,41 @@
      exec_resource_class: medium
    parallelism: 1
  
@@ -68,7 +68,7 @@
  
  j11_small_executor: &j11_small_executor
    executor:
-@@ -193,35 +205,47 @@
+@@ -194,35 +206,47 @@
      #exec_resource_class: medium
    parallelism: 1
  
@@ -122,7 +122,7 @@
  
  j8_separate_jobs: &j8_separate_jobs
    jobs:
-@@ -1045,7 +1069,7 @@
+@@ -1078,7 +1102,7 @@
            target: testclasslist-system-keyspace-directory
  
    j8_dtests_vnode:
@@ -131,7 +131,7 @@
      steps:
        - attach_workspace:
            at: /home/cassandra
-@@ -1059,7 +1083,7 @@
+@@ -1092,7 +1116,7 @@
            pytest_extra_args: '--use-vnodes --num-tokens=16 --skip-resource-intensive-tests'
  
    j11_dtests_vnode:
@@ -140,7 +140,7 @@
      steps:
      - attach_workspace:
          at: /home/cassandra
-@@ -1074,7 +1098,7 @@
+@@ -1107,7 +1131,7 @@
          pytest_extra_args: '--use-vnodes --num-tokens=16 --skip-resource-intensive-tests'
  
    j8_dtests:
@@ -149,7 +149,7 @@
      steps:
        - attach_workspace:
            at: /home/cassandra
-@@ -1088,7 +1112,7 @@
+@@ -1121,7 +1145,7 @@
            pytest_extra_args: '--skip-resource-intensive-tests'
  
    j11_dtests:
@@ -158,7 +158,7 @@
      steps:
      - attach_workspace:
          at: /home/cassandra
-@@ -1103,7 +1127,7 @@
+@@ -1136,7 +1160,7 @@
          pytest_extra_args: '--skip-resource-intensive-tests'
  
    j8_upgrade_dtests:
@@ -167,7 +167,7 @@
      steps:
        - attach_workspace:
            at: /home/cassandra
-@@ -1117,7 +1141,7 @@
+@@ -1150,7 +1174,7 @@
            pytest_extra_args: '--execute-upgrade-tests-only --upgrade-target-version-only --upgrade-version-selection all'
  
    j8_cqlsh_dtests_py3_vnode:
@@ -176,7 +176,7 @@
      steps:
        - attach_workspace:
            at: /home/cassandra
-@@ -1132,7 +1156,7 @@
+@@ -1165,7 +1189,7 @@
            extra_env_args: 'CQLSH_PYTHON=/usr/bin/python3.6'
  
    j8_cqlsh_dtests_py38_vnode:
@@ -185,7 +185,7 @@
      steps:
        - attach_workspace:
            at: /home/cassandra
-@@ -1150,7 +1174,7 @@
+@@ -1183,7 +1207,7 @@
            python_version: '3.8'
  
    j8_cqlsh_dtests_py3:
@@ -194,7 +194,7 @@
      steps:
        - attach_workspace:
            at: /home/cassandra
-@@ -1165,7 +1189,7 @@
+@@ -1198,7 +1222,7 @@
            extra_env_args: 'CQLSH_PYTHON=/usr/bin/python3.6'
  
    j8_cqlsh_dtests_py38:
@@ -203,7 +203,7 @@
      steps:
        - attach_workspace:
            at: /home/cassandra
-@@ -1183,7 +1207,7 @@
+@@ -1216,7 +1240,7 @@
            python_version: '3.8'
  
    j11_cqlsh_dtests_py3_vnode:
@@ -212,7 +212,7 @@
      steps:
        - attach_workspace:
            at: /home/cassandra
-@@ -1198,7 +1222,7 @@
+@@ -1231,7 +1255,7 @@
            extra_env_args: 'CQLSH_PYTHON=/usr/bin/python3.6'
  
    j11_cqlsh_dtests_py38_vnode:
@@ -221,7 +221,7 @@
      steps:
        - attach_workspace:
            at: /home/cassandra
-@@ -1216,7 +1240,7 @@
+@@ -1249,7 +1273,7 @@
            python_version: '3.8'
  
    j11_cqlsh_dtests_py3:
@@ -230,7 +230,7 @@
      steps:
        - attach_workspace:
            at: /home/cassandra
-@@ -1231,7 +1255,7 @@
+@@ -1264,7 +1288,7 @@
            extra_env_args: 'CQLSH_PYTHON=/usr/bin/python3.6'
  
    j11_cqlsh_dtests_py38:
diff --git a/.circleci/config.yml b/.circleci/config.yml
index f7fb72d98f..7cd748115f 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -170,7 +170,96 @@ jobs:
     - run:
        name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically de [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically de [...]
+    - store_test_results:
+        path: /tmp/results/repeated_utests/output
+    - store_artifacts:
+        path: /tmp/results/repeated_utests/stdout
+        destination: stdout
+    - store_artifacts:
+        path: /tmp/results/repeated_utests/output
+        destination: junitxml
+    - store_artifacts:
+        path: /tmp/results/repeated_utests/logs
+        destination: logs
+    environment:
+    - ANT_HOME: /usr/share/ant
+    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - LANG: en_US.UTF-8
+    - KEEP_TEST_DIR: true
+    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
+    - PYTHONIOENCODING: utf-8
+    - PYTHONUNBUFFERED: true
+    - CASS_DRIVER_NO_EXTENSIONS: true
+    - CASS_DRIVER_NO_CYTHON: true
+    - CASSANDRA_SKIP_SYNC: true
+    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
+    - DTEST_BRANCH: trunk
+    - CCM_MAX_HEAP_SIZE: 1024M
+    - CCM_HEAP_NEWSIZE: 256M
+    - REPEATED_TESTS_STOP_ON_FAILURE: false
+    - REPEATED_UTESTS: null
+    - REPEATED_UTESTS_COUNT: 500
+    - REPEATED_UTESTS_FQLTOOL: null
+    - REPEATED_UTESTS_FQLTOOL_COUNT: 500
+    - REPEATED_UTESTS_LONG: null
+    - REPEATED_UTESTS_LONG_COUNT: 100
+    - REPEATED_UTESTS_STRESS: null
+    - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
+    - REPEATED_JVM_DTESTS: null
+    - REPEATED_JVM_DTESTS_COUNT: 500
+    - REPEATED_JVM_UPGRADE_DTESTS: null
+    - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500
+    - REPEATED_DTESTS: null
+    - REPEATED_DTESTS_COUNT: 500
+    - REPEATED_UPGRADE_DTESTS: null
+    - REPEATED_UPGRADE_DTESTS_COUNT: 25
+    - REPEATED_ANT_TEST_TARGET: testsome
+    - REPEATED_ANT_TEST_CLASS: null
+    - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
+    - REPEATED_ANT_TEST_COUNT: 500
+    - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+  utests_trie_repeat:
+    docker:
+    - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
+    resource_class: medium
+    working_directory: ~/
+    shell: /bin/bash -eo pipefail -l
+    parallelism: 4
+    steps:
+    - attach_workspace:
+        at: /home/cassandra
+    - run:
+        name: Log Environment Information
+        command: |
+          echo '*** id ***'
+          id
+          echo '*** cat /proc/cpuinfo ***'
+          cat /proc/cpuinfo
+          echo '*** free -m ***'
+          free -m
+          echo '*** df -m ***'
+          df -m
+          echo '*** ifconfig -a ***'
+          ifconfig -a
+          echo '*** uname -a ***'
+          uname -a
+          echo '*** mount ***'
+          mount
+          echo '*** env ***'
+          env
+          echo '*** java ***'
+          which java
+          java -version
+    - run:
        name: Repeatedly run new or modified JUnit tests
+        no_output_timeout: 15m
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically de [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -414,7 +503,7 @@ jobs:
     - run:
        name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_STRESS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_STRESS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and au [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_STRESS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_STRESS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and au [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -983,6 +1072,7 @@ jobs:
               if [[ $target == "test" || \
                     $target == "test-cdc" || \
                     $target == "test-compression" || \
+                    $target == "test-trie" || \
                     $target == "test-system-keyspace-directory" || \
                     $target == "fqltool-test" || \
                     $target == "long-test" || \
@@ -1577,7 +1667,7 @@ jobs:
     - run:
        name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_LONG_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_LONG_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automa [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_LONG_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_LONG_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automa [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -1666,7 +1756,7 @@ jobs:
     - run:
        name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically de [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically de [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -1887,7 +1977,7 @@ jobs:
     - run:
        name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically de [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically de [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -2131,7 +2221,7 @@ jobs:
     - run:
        name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automati [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automati [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -2323,7 +2413,7 @@ jobs:
     - run:
        name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_SIMULATOR_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_SIMULATOR_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests  [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_SIMULATOR_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_SIMULATOR_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests  [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -2648,7 +2738,7 @@ jobs:
     - run:
        name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_FQLTOOL_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_FQLTOOL_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and  [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_FQLTOOL_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_FQLTOOL_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and  [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -2821,7 +2911,7 @@ jobs:
     - run:
        name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_UPGRADE_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_UPGRADE_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified te [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_UPGRADE_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_UPGRADE_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified te [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -3014,7 +3104,7 @@ jobs:
     - run:
        name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automati [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automati [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -3410,7 +3500,7 @@ jobs:
     - run:
        name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automati [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automati [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -3838,7 +3928,7 @@ jobs:
     - run:
        name: Repeatedly run new or modified JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automati [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automati [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -4072,6 +4162,7 @@ jobs:
               if [[ $target == "test" || \
                     $target == "test-cdc" || \
                     $target == "test-compression" || \
+                    $target == "test-trie" || \
                     $target == "test-system-keyspace-directory" || \
                     $target == "fqltool-test" || \
                     $target == "long-test" || \
@@ -4756,7 +4847,7 @@ jobs:
     - run:
         name: Repeatedly run new or modifed JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically de [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically de [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -4811,6 +4902,123 @@ jobs:
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - CASSANDRA_USE_JDK11: true
+  utests_trie:
+    docker:
+    - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
+    resource_class: medium
+    working_directory: ~/
+    shell: /bin/bash -eo pipefail -l
+    parallelism: 4
+    steps:
+    - attach_workspace:
+        at: /home/cassandra
+    - run:
+        name: Determine unit Tests to Run
+        command: |
+          # reminder: this code (along with all the steps) is independently executed on every circle container
+          # so the goal here is to get the circleci script to return the tests *this* container will run
+          # which we do via the `circleci` cli tool.
+
+          rm -fr ~/cassandra-dtest/upgrade_tests
+          echo "***java tests***"
+
+          # get all of our unit test filenames
+          set -eo pipefail && circleci tests glob "$HOME/cassandra/test/unit/**/*.java" > /tmp/all_java_unit_tests.txt
+
+          # split up the unit tests into groups based on the number of containers we have
+          set -eo pipefail && circleci tests split --split-by=timings --timings-type=filename --index=${CIRCLE_NODE_INDEX} --total=${CIRCLE_NODE_TOTAL} /tmp/all_java_unit_tests.txt > /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt
+          set -eo pipefail && cat /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt | sed "s;^/home/cassandra/cassandra/test/unit/;;g" | grep "Test\.java$"  > /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+          echo "** /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt"
+          cat /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+        no_output_timeout: 15m
+    - run:
+        name: Log Environment Information
+        command: |
+          echo '*** id ***'
+          id
+          echo '*** cat /proc/cpuinfo ***'
+          cat /proc/cpuinfo
+          echo '*** free -m ***'
+          free -m
+          echo '*** df -m ***'
+          df -m
+          echo '*** ifconfig -a ***'
+          ifconfig -a
+          echo '*** uname -a ***'
+          uname -a
+          echo '*** mount ***'
+          mount
+          echo '*** env ***'
+          env
+          echo '*** java ***'
+          which java
+          java -version
+    - run:
+        name: Run Unit Tests (testclasslist-trie)
+        command: |
+          set -x
+          export PATH=$JAVA_HOME/bin:$PATH
+          time mv ~/cassandra /tmp
+          cd /tmp/cassandra
+          if [ -d ~/dtest_jars ]; then
+            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
+          fi
+          test_timeout=$(grep 'name="test.unit.timeout"' build.xml | awk -F'"' '{print $4}' || true)
+          if [ -z "$test_timeout" ]; then
+            test_timeout=$(grep 'name="test.timeout"' build.xml | awk -F'"' '{print $4}')
+          fi
+          ant testclasslist-trie   -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt  -Dtest.classlistprefix=unit
+        no_output_timeout: 15m
+    - store_test_results:
+        path: /tmp/cassandra/build/test/output/
+    - store_artifacts:
+        path: /tmp/cassandra/build/test/output
+        destination: junitxml
+    - store_artifacts:
+        path: /tmp/cassandra/build/test/logs
+        destination: logs
+    environment:
+    - ANT_HOME: /usr/share/ant
+    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - LANG: en_US.UTF-8
+    - KEEP_TEST_DIR: true
+    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
+    - PYTHONIOENCODING: utf-8
+    - PYTHONUNBUFFERED: true
+    - CASS_DRIVER_NO_EXTENSIONS: true
+    - CASS_DRIVER_NO_CYTHON: true
+    - CASSANDRA_SKIP_SYNC: true
+    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
+    - DTEST_BRANCH: trunk
+    - CCM_MAX_HEAP_SIZE: 1024M
+    - CCM_HEAP_NEWSIZE: 256M
+    - REPEATED_TESTS_STOP_ON_FAILURE: false
+    - REPEATED_UTESTS: null
+    - REPEATED_UTESTS_COUNT: 500
+    - REPEATED_UTESTS_FQLTOOL: null
+    - REPEATED_UTESTS_FQLTOOL_COUNT: 500
+    - REPEATED_UTESTS_LONG: null
+    - REPEATED_UTESTS_LONG_COUNT: 100
+    - REPEATED_UTESTS_STRESS: null
+    - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
+    - REPEATED_JVM_DTESTS: null
+    - REPEATED_JVM_DTESTS_COUNT: 500
+    - REPEATED_JVM_UPGRADE_DTESTS: null
+    - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500
+    - REPEATED_DTESTS: null
+    - REPEATED_DTESTS_COUNT: 500
+    - REPEATED_UPGRADE_DTESTS: null
+    - REPEATED_UPGRADE_DTESTS_COUNT: 25
+    - REPEATED_ANT_TEST_TARGET: testsome
+    - REPEATED_ANT_TEST_CLASS: null
+    - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
+    - REPEATED_ANT_TEST_COUNT: 500
+    - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
   utests_system_keyspace_directory:
     docker:
     - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
@@ -5445,6 +5653,12 @@ workflows:
         requires:
         - start_utests_compression
         - j8_build
+    - start_utests_trie:
+        type: approval
+    - utests_trie:
+        requires:
+        - start_utests_trie
+        - j8_build
     - start_utests_stress:
         type: approval
     - utests_stress:
@@ -5578,6 +5792,12 @@ workflows:
         requires:
         - start_utests_compression
         - j8_build
+    - start_utests_trie:
+        type: approval
+    - utests_trie:
+        requires:
+        - start_utests_trie
+        - j8_build
     - start_utests_stress:
         type: approval
     - utests_stress:
diff --git a/.circleci/config.yml.HIGHRES b/.circleci/config.yml.HIGHRES
index 7e5c9d1359..d5f5d1c4a0 100644
--- a/.circleci/config.yml.HIGHRES
+++ b/.circleci/config.yml.HIGHRES
@@ -170,7 +170,96 @@ jobs:
     - run:
         name: Repeatedly run new or modifed JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically de [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically de [...]
+    - store_test_results:
+        path: /tmp/results/repeated_utests/output
+    - store_artifacts:
+        path: /tmp/results/repeated_utests/stdout
+        destination: stdout
+    - store_artifacts:
+        path: /tmp/results/repeated_utests/output
+        destination: junitxml
+    - store_artifacts:
+        path: /tmp/results/repeated_utests/logs
+        destination: logs
+    environment:
+    - ANT_HOME: /usr/share/ant
+    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - LANG: en_US.UTF-8
+    - KEEP_TEST_DIR: true
+    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
+    - PYTHONIOENCODING: utf-8
+    - PYTHONUNBUFFERED: true
+    - CASS_DRIVER_NO_EXTENSIONS: true
+    - CASS_DRIVER_NO_CYTHON: true
+    - CASSANDRA_SKIP_SYNC: true
+    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
+    - DTEST_BRANCH: trunk
+    - CCM_MAX_HEAP_SIZE: 1024M
+    - CCM_HEAP_NEWSIZE: 256M
+    - REPEATED_TESTS_STOP_ON_FAILURE: false
+    - REPEATED_UTESTS: null
+    - REPEATED_UTESTS_COUNT: 500
+    - REPEATED_UTESTS_FQLTOOL: null
+    - REPEATED_UTESTS_FQLTOOL_COUNT: 500
+    - REPEATED_UTESTS_LONG: null
+    - REPEATED_UTESTS_LONG_COUNT: 100
+    - REPEATED_UTESTS_STRESS: null
+    - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
+    - REPEATED_JVM_DTESTS: null
+    - REPEATED_JVM_DTESTS_COUNT: 500
+    - REPEATED_JVM_UPGRADE_DTESTS: null
+    - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500
+    - REPEATED_DTESTS: null
+    - REPEATED_DTESTS_COUNT: 500
+    - REPEATED_UPGRADE_DTESTS: null
+    - REPEATED_UPGRADE_DTESTS_COUNT: 25
+    - REPEATED_ANT_TEST_TARGET: testsome
+    - REPEATED_ANT_TEST_CLASS: null
+    - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
+    - REPEATED_ANT_TEST_COUNT: 500
+    - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+  utests_trie_repeat:
+    docker:
+    - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
+    resource_class: xlarge
+    working_directory: ~/
+    shell: /bin/bash -eo pipefail -l
+    parallelism: 100
+    steps:
+    - attach_workspace:
+        at: /home/cassandra
+    - run:
+        name: Log Environment Information
+        command: |
+          echo '*** id ***'
+          id
+          echo '*** cat /proc/cpuinfo ***'
+          cat /proc/cpuinfo
+          echo '*** free -m ***'
+          free -m
+          echo '*** df -m ***'
+          df -m
+          echo '*** ifconfig -a ***'
+          ifconfig -a
+          echo '*** uname -a ***'
+          uname -a
+          echo '*** mount ***'
+          mount
+          echo '*** env ***'
+          env
+          echo '*** java ***'
+          which java
+          java -version
+    - run:
+        name: Repeatedly run new or modifed JUnit tests
+        no_output_timeout: 15m
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically de [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -414,7 +503,7 @@ jobs:
     - run:
         name: Repeatedly run new or modifed JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_STRESS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_STRESS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and au [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_STRESS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_STRESS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and au [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -983,6 +1072,7 @@ jobs:
               if [[ $target == "test" || \
                     $target == "test-cdc" || \
                     $target == "test-compression" || \
+                    $target == "test-trie" || \
                     $target == "test-system-keyspace-directory" || \
                     $target == "fqltool-test" || \
                     $target == "long-test" || \
@@ -1577,7 +1667,7 @@ jobs:
     - run:
         name: Repeatedly run new or modifed JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_LONG_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_LONG_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automa [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_LONG_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_LONG_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automa [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -1666,7 +1756,7 @@ jobs:
     - run:
         name: Repeatedly run new or modifed JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically de [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically de [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -1887,7 +1977,7 @@ jobs:
     - run:
         name: Repeatedly run new or modifed JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically de [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically de [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -2131,7 +2221,7 @@ jobs:
     - run:
         name: Repeatedly run new or modifed JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automati [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automati [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -2323,7 +2413,7 @@ jobs:
     - run:
         name: Repeatedly run new or modifed JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_SIMULATOR_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_SIMULATOR_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests  [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_SIMULATOR_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_SIMULATOR_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests  [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -2648,7 +2738,7 @@ jobs:
     - run:
         name: Repeatedly run new or modifed JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_FQLTOOL_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_FQLTOOL_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and  [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_FQLTOOL_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_FQLTOOL_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and  [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -2821,7 +2911,7 @@ jobs:
     - run:
         name: Repeatedly run new or modifed JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_UPGRADE_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_UPGRADE_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified te [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_UPGRADE_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_UPGRADE_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified te [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -3014,7 +3104,7 @@ jobs:
     - run:
         name: Repeatedly run new or modifed JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automati [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automati [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -3410,7 +3500,7 @@ jobs:
     - run:
         name: Repeatedly run new or modifed JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automati [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automati [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -3838,7 +3928,7 @@ jobs:
     - run:
         name: Repeatedly run new or modifed JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automati [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automati [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -4072,6 +4162,7 @@ jobs:
               if [[ $target == "test" || \
                     $target == "test-cdc" || \
                     $target == "test-compression" || \
+                    $target == "test-trie" || \
                     $target == "test-system-keyspace-directory" || \
                     $target == "fqltool-test" || \
                     $target == "long-test" || \
@@ -4756,7 +4847,7 @@ jobs:
     - run:
         name: Repeatedly run new or modifed JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically de [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically de [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -4811,6 +4902,123 @@ jobs:
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - CASSANDRA_USE_JDK11: true
+  utests_trie:
+    docker:
+    - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
+    resource_class: xlarge
+    working_directory: ~/
+    shell: /bin/bash -eo pipefail -l
+    parallelism: 100
+    steps:
+    - attach_workspace:
+        at: /home/cassandra
+    - run:
+        name: Determine unit Tests to Run
+        command: |
+          # reminder: this code (along with all the steps) is independently executed on every circle container
+          # so the goal here is to get the circleci script to return the tests *this* container will run
+          # which we do via the `circleci` cli tool.
+
+          rm -fr ~/cassandra-dtest/upgrade_tests
+          echo "***java tests***"
+
+          # get all of our unit test filenames
+          set -eo pipefail && circleci tests glob "$HOME/cassandra/test/unit/**/*.java" > /tmp/all_java_unit_tests.txt
+
+          # split up the unit tests into groups based on the number of containers we have
+          set -eo pipefail && circleci tests split --split-by=timings --timings-type=filename --index=${CIRCLE_NODE_INDEX} --total=${CIRCLE_NODE_TOTAL} /tmp/all_java_unit_tests.txt > /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt
+          set -eo pipefail && cat /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt | sed "s;^/home/cassandra/cassandra/test/unit/;;g" | grep "Test\.java$"  > /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+          echo "** /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt"
+          cat /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+        no_output_timeout: 15m
+    - run:
+        name: Log Environment Information
+        command: |
+          echo '*** id ***'
+          id
+          echo '*** cat /proc/cpuinfo ***'
+          cat /proc/cpuinfo
+          echo '*** free -m ***'
+          free -m
+          echo '*** df -m ***'
+          df -m
+          echo '*** ifconfig -a ***'
+          ifconfig -a
+          echo '*** uname -a ***'
+          uname -a
+          echo '*** mount ***'
+          mount
+          echo '*** env ***'
+          env
+          echo '*** java ***'
+          which java
+          java -version
+    - run:
+        name: Run Unit Tests (testclasslist-trie)
+        command: |
+          set -x
+          export PATH=$JAVA_HOME/bin:$PATH
+          time mv ~/cassandra /tmp
+          cd /tmp/cassandra
+          if [ -d ~/dtest_jars ]; then
+            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
+          fi
+          test_timeout=$(grep 'name="test.unit.timeout"' build.xml | awk -F'"' '{print $4}' || true)
+          if [ -z "$test_timeout" ]; then
+            test_timeout=$(grep 'name="test.timeout"' build.xml | awk -F'"' '{print $4}')
+          fi
+          ant testclasslist-trie   -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt  -Dtest.classlistprefix=unit
+        no_output_timeout: 15m
+    - store_test_results:
+        path: /tmp/cassandra/build/test/output/
+    - store_artifacts:
+        path: /tmp/cassandra/build/test/output
+        destination: junitxml
+    - store_artifacts:
+        path: /tmp/cassandra/build/test/logs
+        destination: logs
+    environment:
+    - ANT_HOME: /usr/share/ant
+    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - LANG: en_US.UTF-8
+    - KEEP_TEST_DIR: true
+    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
+    - PYTHONIOENCODING: utf-8
+    - PYTHONUNBUFFERED: true
+    - CASS_DRIVER_NO_EXTENSIONS: true
+    - CASS_DRIVER_NO_CYTHON: true
+    - CASSANDRA_SKIP_SYNC: true
+    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
+    - DTEST_BRANCH: trunk
+    - CCM_MAX_HEAP_SIZE: 1024M
+    - CCM_HEAP_NEWSIZE: 256M
+    - REPEATED_TESTS_STOP_ON_FAILURE: false
+    - REPEATED_UTESTS: null
+    - REPEATED_UTESTS_COUNT: 500
+    - REPEATED_UTESTS_FQLTOOL: null
+    - REPEATED_UTESTS_FQLTOOL_COUNT: 500
+    - REPEATED_UTESTS_LONG: null
+    - REPEATED_UTESTS_LONG_COUNT: 100
+    - REPEATED_UTESTS_STRESS: null
+    - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
+    - REPEATED_JVM_DTESTS: null
+    - REPEATED_JVM_DTESTS_COUNT: 500
+    - REPEATED_JVM_UPGRADE_DTESTS: null
+    - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500
+    - REPEATED_DTESTS: null
+    - REPEATED_DTESTS_COUNT: 500
+    - REPEATED_UPGRADE_DTESTS: null
+    - REPEATED_UPGRADE_DTESTS_COUNT: 25
+    - REPEATED_ANT_TEST_TARGET: testsome
+    - REPEATED_ANT_TEST_CLASS: null
+    - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
+    - REPEATED_ANT_TEST_COUNT: 500
+    - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
   utests_system_keyspace_directory:
     docker:
     - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
@@ -5487,6 +5695,18 @@ workflows:
         requires:
         - start_utests_compression_repeat
         - j8_build
+    - start_utests_trie:
+        type: approval
+    - utests_trie:
+        requires:
+        - start_utests_trie
+        - j8_build
+    - start_utests_trie_repeat:
+        type: approval
+    - utests_trie_repeat:
+        requires:
+        - start_utests_trie_repeat
+        - j8_build
     - start_utests_stress:
         type: approval
     - utests_stress:
@@ -5709,6 +5929,16 @@ workflows:
         requires:
         - start_utests_compression
         - j8_build
+    - start_utests_trie:
+        type: approval
+    - utests_trie:
+        requires:
+        - start_utests_trie
+        - j8_build
+    - utests_trie_repeat:
+        requires:
+        - start_utests_trie
+        - j8_build
     - start_utests_stress:
         type: approval
     - utests_stress:
diff --git a/.circleci/config.yml.LOWRES b/.circleci/config.yml.LOWRES
index 329683f6c8..9702bd0881 100644
--- a/.circleci/config.yml.LOWRES
+++ b/.circleci/config.yml.LOWRES
@@ -170,7 +170,96 @@ jobs:
     - run:
         name: Repeatedly run new or modifed JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically de [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically de [...]
+    - store_test_results:
+        path: /tmp/results/repeated_utests/output
+    - store_artifacts:
+        path: /tmp/results/repeated_utests/stdout
+        destination: stdout
+    - store_artifacts:
+        path: /tmp/results/repeated_utests/output
+        destination: junitxml
+    - store_artifacts:
+        path: /tmp/results/repeated_utests/logs
+        destination: logs
+    environment:
+    - ANT_HOME: /usr/share/ant
+    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - LANG: en_US.UTF-8
+    - KEEP_TEST_DIR: true
+    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
+    - PYTHONIOENCODING: utf-8
+    - PYTHONUNBUFFERED: true
+    - CASS_DRIVER_NO_EXTENSIONS: true
+    - CASS_DRIVER_NO_CYTHON: true
+    - CASSANDRA_SKIP_SYNC: true
+    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
+    - DTEST_BRANCH: trunk
+    - CCM_MAX_HEAP_SIZE: 1024M
+    - CCM_HEAP_NEWSIZE: 256M
+    - REPEATED_TESTS_STOP_ON_FAILURE: false
+    - REPEATED_UTESTS: null
+    - REPEATED_UTESTS_COUNT: 500
+    - REPEATED_UTESTS_FQLTOOL: null
+    - REPEATED_UTESTS_FQLTOOL_COUNT: 500
+    - REPEATED_UTESTS_LONG: null
+    - REPEATED_UTESTS_LONG_COUNT: 100
+    - REPEATED_UTESTS_STRESS: null
+    - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
+    - REPEATED_JVM_DTESTS: null
+    - REPEATED_JVM_DTESTS_COUNT: 500
+    - REPEATED_JVM_UPGRADE_DTESTS: null
+    - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500
+    - REPEATED_DTESTS: null
+    - REPEATED_DTESTS_COUNT: 500
+    - REPEATED_UPGRADE_DTESTS: null
+    - REPEATED_UPGRADE_DTESTS_COUNT: 25
+    - REPEATED_ANT_TEST_TARGET: testsome
+    - REPEATED_ANT_TEST_CLASS: null
+    - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
+    - REPEATED_ANT_TEST_COUNT: 500
+    - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+  utests_trie_repeat:
+    docker:
+    - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
+    resource_class: medium
+    working_directory: ~/
+    shell: /bin/bash -eo pipefail -l
+    parallelism: 4
+    steps:
+    - attach_workspace:
+        at: /home/cassandra
+    - run:
+        name: Log Environment Information
+        command: |
+          echo '*** id ***'
+          id
+          echo '*** cat /proc/cpuinfo ***'
+          cat /proc/cpuinfo
+          echo '*** free -m ***'
+          free -m
+          echo '*** df -m ***'
+          df -m
+          echo '*** ifconfig -a ***'
+          ifconfig -a
+          echo '*** uname -a ***'
+          uname -a
+          echo '*** mount ***'
+          mount
+          echo '*** env ***'
+          env
+          echo '*** java ***'
+          which java
+          java -version
+    - run:
+        name: Repeatedly run new or modifed JUnit tests
+        no_output_timeout: 15m
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically de [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -414,7 +503,7 @@ jobs:
     - run:
         name: Repeatedly run new or modifed JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_STRESS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_STRESS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and au [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_STRESS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_STRESS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and au [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -983,6 +1072,7 @@ jobs:
               if [[ $target == "test" || \
                     $target == "test-cdc" || \
                     $target == "test-compression" || \
+                    $target == "test-trie" || \
                     $target == "test-system-keyspace-directory" || \
                     $target == "fqltool-test" || \
                     $target == "long-test" || \
@@ -1577,7 +1667,7 @@ jobs:
     - run:
         name: Repeatedly run new or modifed JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_LONG_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_LONG_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automa [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_LONG_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_LONG_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automa [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -1666,7 +1756,7 @@ jobs:
     - run:
         name: Repeatedly run new or modifed JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically de [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically de [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -1887,7 +1977,7 @@ jobs:
     - run:
         name: Repeatedly run new or modifed JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically de [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically de [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -2131,7 +2221,7 @@ jobs:
     - run:
         name: Repeatedly run new or modifed JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automati [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automati [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -2323,7 +2413,7 @@ jobs:
     - run:
         name: Repeatedly run new or modifed JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_SIMULATOR_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_SIMULATOR_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests  [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_SIMULATOR_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_SIMULATOR_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests  [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -2648,7 +2738,7 @@ jobs:
     - run:
         name: Repeatedly run new or modifed JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_FQLTOOL_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_FQLTOOL_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and  [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_FQLTOOL_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_FQLTOOL_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and  [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -2821,7 +2911,7 @@ jobs:
     - run:
         name: Repeatedly run new or modifed JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_UPGRADE_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_UPGRADE_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified te [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_UPGRADE_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_UPGRADE_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified te [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -3014,7 +3104,7 @@ jobs:
     - run:
         name: Repeatedly run new or modifed JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automati [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automati [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -3410,7 +3500,7 @@ jobs:
     - run:
         name: Repeatedly run new or modifed JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automati [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automati [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -3838,7 +3928,7 @@ jobs:
     - run:
         name: Repeatedly run new or modifed JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automati [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automati [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -4072,6 +4162,7 @@ jobs:
               if [[ $target == "test" || \
                     $target == "test-cdc" || \
                     $target == "test-compression" || \
+                    $target == "test-trie" || \
                     $target == "test-system-keyspace-directory" || \
                     $target == "fqltool-test" || \
                     $target == "long-test" || \
@@ -4756,7 +4847,7 @@ jobs:
     - run:
         name: Repeatedly run new or modifed JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically de [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically de [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -4811,6 +4902,123 @@ jobs:
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - CASSANDRA_USE_JDK11: true
+  utests_trie:
+    docker:
+    - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
+    resource_class: medium
+    working_directory: ~/
+    shell: /bin/bash -eo pipefail -l
+    parallelism: 4
+    steps:
+    - attach_workspace:
+        at: /home/cassandra
+    - run:
+        name: Determine unit Tests to Run
+        command: |
+          # reminder: this code (along with all the steps) is independently executed on every circle container
+          # so the goal here is to get the circleci script to return the tests *this* container will run
+          # which we do via the `circleci` cli tool.
+
+          rm -fr ~/cassandra-dtest/upgrade_tests
+          echo "***java tests***"
+
+          # get all of our unit test filenames
+          set -eo pipefail && circleci tests glob "$HOME/cassandra/test/unit/**/*.java" > /tmp/all_java_unit_tests.txt
+
+          # split up the unit tests into groups based on the number of containers we have
+          set -eo pipefail && circleci tests split --split-by=timings --timings-type=filename --index=${CIRCLE_NODE_INDEX} --total=${CIRCLE_NODE_TOTAL} /tmp/all_java_unit_tests.txt > /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt
+          set -eo pipefail && cat /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt | sed "s;^/home/cassandra/cassandra/test/unit/;;g" | grep "Test\.java$"  > /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+          echo "** /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt"
+          cat /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+        no_output_timeout: 15m
+    - run:
+        name: Log Environment Information
+        command: |
+          echo '*** id ***'
+          id
+          echo '*** cat /proc/cpuinfo ***'
+          cat /proc/cpuinfo
+          echo '*** free -m ***'
+          free -m
+          echo '*** df -m ***'
+          df -m
+          echo '*** ifconfig -a ***'
+          ifconfig -a
+          echo '*** uname -a ***'
+          uname -a
+          echo '*** mount ***'
+          mount
+          echo '*** env ***'
+          env
+          echo '*** java ***'
+          which java
+          java -version
+    - run:
+        name: Run Unit Tests (testclasslist-trie)
+        command: |
+          set -x
+          export PATH=$JAVA_HOME/bin:$PATH
+          time mv ~/cassandra /tmp
+          cd /tmp/cassandra
+          if [ -d ~/dtest_jars ]; then
+            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
+          fi
+          test_timeout=$(grep 'name="test.unit.timeout"' build.xml | awk -F'"' '{print $4}' || true)
+          if [ -z "$test_timeout" ]; then
+            test_timeout=$(grep 'name="test.timeout"' build.xml | awk -F'"' '{print $4}')
+          fi
+          ant testclasslist-trie   -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt  -Dtest.classlistprefix=unit
+        no_output_timeout: 15m
+    - store_test_results:
+        path: /tmp/cassandra/build/test/output/
+    - store_artifacts:
+        path: /tmp/cassandra/build/test/output
+        destination: junitxml
+    - store_artifacts:
+        path: /tmp/cassandra/build/test/logs
+        destination: logs
+    environment:
+    - ANT_HOME: /usr/share/ant
+    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - LANG: en_US.UTF-8
+    - KEEP_TEST_DIR: true
+    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
+    - PYTHONIOENCODING: utf-8
+    - PYTHONUNBUFFERED: true
+    - CASS_DRIVER_NO_EXTENSIONS: true
+    - CASS_DRIVER_NO_CYTHON: true
+    - CASSANDRA_SKIP_SYNC: true
+    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
+    - DTEST_BRANCH: trunk
+    - CCM_MAX_HEAP_SIZE: 1024M
+    - CCM_HEAP_NEWSIZE: 256M
+    - REPEATED_TESTS_STOP_ON_FAILURE: false
+    - REPEATED_UTESTS: null
+    - REPEATED_UTESTS_COUNT: 500
+    - REPEATED_UTESTS_FQLTOOL: null
+    - REPEATED_UTESTS_FQLTOOL_COUNT: 500
+    - REPEATED_UTESTS_LONG: null
+    - REPEATED_UTESTS_LONG_COUNT: 100
+    - REPEATED_UTESTS_STRESS: null
+    - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
+    - REPEATED_JVM_DTESTS: null
+    - REPEATED_JVM_DTESTS_COUNT: 500
+    - REPEATED_JVM_UPGRADE_DTESTS: null
+    - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500
+    - REPEATED_DTESTS: null
+    - REPEATED_DTESTS_COUNT: 500
+    - REPEATED_UPGRADE_DTESTS: null
+    - REPEATED_UPGRADE_DTESTS_COUNT: 25
+    - REPEATED_ANT_TEST_TARGET: testsome
+    - REPEATED_ANT_TEST_CLASS: null
+    - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
+    - REPEATED_ANT_TEST_COUNT: 500
+    - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
   utests_system_keyspace_directory:
     docker:
     - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
@@ -5487,6 +5695,18 @@ workflows:
         requires:
         - start_utests_compression_repeat
         - j8_build
+    - start_utests_trie:
+        type: approval
+    - utests_trie:
+        requires:
+        - start_utests_trie
+        - j8_build
+    - start_utests_trie_repeat:
+        type: approval
+    - utests_trie_repeat:
+        requires:
+        - start_utests_trie_repeat
+        - j8_build
     - start_utests_stress:
         type: approval
     - utests_stress:
@@ -5709,6 +5929,16 @@ workflows:
         requires:
         - start_utests_compression
         - j8_build
+    - start_utests_trie:
+        type: approval
+    - utests_trie:
+        requires:
+        - start_utests_trie
+        - j8_build
+    - utests_trie_repeat:
+        requires:
+        - start_utests_trie
+        - j8_build
     - start_utests_stress:
         type: approval
     - utests_stress:
diff --git a/.circleci/config.yml.MIDRES b/.circleci/config.yml.MIDRES
index ad2e4c6f53..2a58eecb8f 100644
--- a/.circleci/config.yml.MIDRES
+++ b/.circleci/config.yml.MIDRES
@@ -170,7 +170,96 @@ jobs:
     - run:
         name: Repeatedly run new or modifed JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically de [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically de [...]
+    - store_test_results:
+        path: /tmp/results/repeated_utests/output
+    - store_artifacts:
+        path: /tmp/results/repeated_utests/stdout
+        destination: stdout
+    - store_artifacts:
+        path: /tmp/results/repeated_utests/output
+        destination: junitxml
+    - store_artifacts:
+        path: /tmp/results/repeated_utests/logs
+        destination: logs
+    environment:
+    - ANT_HOME: /usr/share/ant
+    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - LANG: en_US.UTF-8
+    - KEEP_TEST_DIR: true
+    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
+    - PYTHONIOENCODING: utf-8
+    - PYTHONUNBUFFERED: true
+    - CASS_DRIVER_NO_EXTENSIONS: true
+    - CASS_DRIVER_NO_CYTHON: true
+    - CASSANDRA_SKIP_SYNC: true
+    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
+    - DTEST_BRANCH: trunk
+    - CCM_MAX_HEAP_SIZE: 1024M
+    - CCM_HEAP_NEWSIZE: 256M
+    - REPEATED_TESTS_STOP_ON_FAILURE: false
+    - REPEATED_UTESTS: null
+    - REPEATED_UTESTS_COUNT: 500
+    - REPEATED_UTESTS_FQLTOOL: null
+    - REPEATED_UTESTS_FQLTOOL_COUNT: 500
+    - REPEATED_UTESTS_LONG: null
+    - REPEATED_UTESTS_LONG_COUNT: 100
+    - REPEATED_UTESTS_STRESS: null
+    - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
+    - REPEATED_JVM_DTESTS: null
+    - REPEATED_JVM_DTESTS_COUNT: 500
+    - REPEATED_JVM_UPGRADE_DTESTS: null
+    - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500
+    - REPEATED_DTESTS: null
+    - REPEATED_DTESTS_COUNT: 500
+    - REPEATED_UPGRADE_DTESTS: null
+    - REPEATED_UPGRADE_DTESTS_COUNT: 25
+    - REPEATED_ANT_TEST_TARGET: testsome
+    - REPEATED_ANT_TEST_CLASS: null
+    - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
+    - REPEATED_ANT_TEST_COUNT: 500
+    - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+  utests_trie_repeat:
+    docker:
+    - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
+    resource_class: medium
+    working_directory: ~/
+    shell: /bin/bash -eo pipefail -l
+    parallelism: 25
+    steps:
+    - attach_workspace:
+        at: /home/cassandra
+    - run:
+        name: Log Environment Information
+        command: |
+          echo '*** id ***'
+          id
+          echo '*** cat /proc/cpuinfo ***'
+          cat /proc/cpuinfo
+          echo '*** free -m ***'
+          free -m
+          echo '*** df -m ***'
+          df -m
+          echo '*** ifconfig -a ***'
+          ifconfig -a
+          echo '*** uname -a ***'
+          uname -a
+          echo '*** mount ***'
+          mount
+          echo '*** env ***'
+          env
+          echo '*** java ***'
+          which java
+          java -version
+    - run:
+        name: Repeatedly run new or modifed JUnit tests
+        no_output_timeout: 15m
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically de [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -414,7 +503,7 @@ jobs:
     - run:
         name: Repeatedly run new or modifed JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_STRESS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_STRESS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and au [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_STRESS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_STRESS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and au [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -983,6 +1072,7 @@ jobs:
               if [[ $target == "test" || \
                     $target == "test-cdc" || \
                     $target == "test-compression" || \
+                    $target == "test-trie" || \
                     $target == "test-system-keyspace-directory" || \
                     $target == "fqltool-test" || \
                     $target == "long-test" || \
@@ -1577,7 +1667,7 @@ jobs:
     - run:
         name: Repeatedly run new or modifed JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_LONG_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_LONG_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automa [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_LONG_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_LONG_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automa [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -1666,7 +1756,7 @@ jobs:
     - run:
         name: Repeatedly run new or modifed JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically de [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically de [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -1887,7 +1977,7 @@ jobs:
     - run:
         name: Repeatedly run new or modifed JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically de [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically de [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -2131,7 +2221,7 @@ jobs:
     - run:
         name: Repeatedly run new or modifed JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automati [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automati [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -2323,7 +2413,7 @@ jobs:
     - run:
         name: Repeatedly run new or modifed JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_SIMULATOR_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_SIMULATOR_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests  [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_SIMULATOR_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_SIMULATOR_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests  [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -2648,7 +2738,7 @@ jobs:
     - run:
         name: Repeatedly run new or modifed JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_FQLTOOL_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_FQLTOOL_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and  [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_FQLTOOL_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_FQLTOOL_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and  [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -2821,7 +2911,7 @@ jobs:
     - run:
         name: Repeatedly run new or modifed JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_UPGRADE_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_UPGRADE_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified te [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_UPGRADE_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_UPGRADE_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified te [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -3014,7 +3104,7 @@ jobs:
     - run:
         name: Repeatedly run new or modifed JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automati [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automati [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -3410,7 +3500,7 @@ jobs:
     - run:
         name: Repeatedly run new or modifed JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automati [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automati [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -3838,7 +3928,7 @@ jobs:
     - run:
         name: Repeatedly run new or modifed JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automati [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_JVM_DTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_JVM_DTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automati [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -4072,6 +4162,7 @@ jobs:
               if [[ $target == "test" || \
                     $target == "test-cdc" || \
                     $target == "test-compression" || \
+                    $target == "test-trie" || \
                     $target == "test-system-keyspace-directory" || \
                     $target == "fqltool-test" || \
                     $target == "long-test" || \
@@ -4756,7 +4847,7 @@ jobs:
     - run:
         name: Repeatedly run new or modifed JUnit tests
         no_output_timeout: 15m
-        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically de [...]
+        command: "set -x\nexport PATH=$JAVA_HOME/bin:$PATH\ntime mv ~/cassandra /tmp\ncd /tmp/cassandra\nif [ -d ~/dtest_jars ]; then\n  cp ~/dtest_jars/dtest* /tmp/cassandra/build/\nfi\n\n# Calculate the number of test iterations to be run by the current parallel runner.\ncount=$((${REPEATED_UTESTS_COUNT} / CIRCLE_NODE_TOTAL))\nif (($CIRCLE_NODE_INDEX < (${REPEATED_UTESTS_COUNT} % CIRCLE_NODE_TOTAL))); then\n  count=$((count+1))\nfi\n\n# Put manually specified tests and automatically de [...]
     - store_test_results:
         path: /tmp/results/repeated_utests/output
     - store_artifacts:
@@ -4811,6 +4902,123 @@ jobs:
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - CASSANDRA_USE_JDK11: true
+  utests_trie:
+    docker:
+    - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
+    resource_class: medium
+    working_directory: ~/
+    shell: /bin/bash -eo pipefail -l
+    parallelism: 25
+    steps:
+    - attach_workspace:
+        at: /home/cassandra
+    - run:
+        name: Determine unit Tests to Run
+        command: |
+          # reminder: this code (along with all the steps) is independently executed on every circle container
+          # so the goal here is to get the circleci script to return the tests *this* container will run
+          # which we do via the `circleci` cli tool.
+
+          rm -fr ~/cassandra-dtest/upgrade_tests
+          echo "***java tests***"
+
+          # get all of our unit test filenames
+          set -eo pipefail && circleci tests glob "$HOME/cassandra/test/unit/**/*.java" > /tmp/all_java_unit_tests.txt
+
+          # split up the unit tests into groups based on the number of containers we have
+          set -eo pipefail && circleci tests split --split-by=timings --timings-type=filename --index=${CIRCLE_NODE_INDEX} --total=${CIRCLE_NODE_TOTAL} /tmp/all_java_unit_tests.txt > /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt
+          set -eo pipefail && cat /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt | sed "s;^/home/cassandra/cassandra/test/unit/;;g" | grep "Test\.java$"  > /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+          echo "** /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt"
+          cat /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+        no_output_timeout: 15m
+    - run:
+        name: Log Environment Information
+        command: |
+          echo '*** id ***'
+          id
+          echo '*** cat /proc/cpuinfo ***'
+          cat /proc/cpuinfo
+          echo '*** free -m ***'
+          free -m
+          echo '*** df -m ***'
+          df -m
+          echo '*** ifconfig -a ***'
+          ifconfig -a
+          echo '*** uname -a ***'
+          uname -a
+          echo '*** mount ***'
+          mount
+          echo '*** env ***'
+          env
+          echo '*** java ***'
+          which java
+          java -version
+    - run:
+        name: Run Unit Tests (testclasslist-trie)
+        command: |
+          set -x
+          export PATH=$JAVA_HOME/bin:$PATH
+          time mv ~/cassandra /tmp
+          cd /tmp/cassandra
+          if [ -d ~/dtest_jars ]; then
+            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
+          fi
+          test_timeout=$(grep 'name="test.unit.timeout"' build.xml | awk -F'"' '{print $4}' || true)
+          if [ -z "$test_timeout" ]; then
+            test_timeout=$(grep 'name="test.timeout"' build.xml | awk -F'"' '{print $4}')
+          fi
+          ant testclasslist-trie   -Dtest.timeout="$test_timeout" -Dtest.classlistfile=/tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt  -Dtest.classlistprefix=unit
+        no_output_timeout: 15m
+    - store_test_results:
+        path: /tmp/cassandra/build/test/output/
+    - store_artifacts:
+        path: /tmp/cassandra/build/test/output
+        destination: junitxml
+    - store_artifacts:
+        path: /tmp/cassandra/build/test/logs
+        destination: logs
+    environment:
+    - ANT_HOME: /usr/share/ant
+    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - LANG: en_US.UTF-8
+    - KEEP_TEST_DIR: true
+    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
+    - PYTHONIOENCODING: utf-8
+    - PYTHONUNBUFFERED: true
+    - CASS_DRIVER_NO_EXTENSIONS: true
+    - CASS_DRIVER_NO_CYTHON: true
+    - CASSANDRA_SKIP_SYNC: true
+    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
+    - DTEST_BRANCH: trunk
+    - CCM_MAX_HEAP_SIZE: 1024M
+    - CCM_HEAP_NEWSIZE: 256M
+    - REPEATED_TESTS_STOP_ON_FAILURE: false
+    - REPEATED_UTESTS: null
+    - REPEATED_UTESTS_COUNT: 500
+    - REPEATED_UTESTS_FQLTOOL: null
+    - REPEATED_UTESTS_FQLTOOL_COUNT: 500
+    - REPEATED_UTESTS_LONG: null
+    - REPEATED_UTESTS_LONG_COUNT: 100
+    - REPEATED_UTESTS_STRESS: null
+    - REPEATED_UTESTS_STRESS_COUNT: 500
+    - REPEATED_SIMULATOR_DTESTS: null
+    - REPEATED_SIMULATOR_DTESTS_COUNT: 500
+    - REPEATED_JVM_DTESTS: null
+    - REPEATED_JVM_DTESTS_COUNT: 500
+    - REPEATED_JVM_UPGRADE_DTESTS: null
+    - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500
+    - REPEATED_DTESTS: null
+    - REPEATED_DTESTS_COUNT: 500
+    - REPEATED_UPGRADE_DTESTS: null
+    - REPEATED_UPGRADE_DTESTS_COUNT: 25
+    - REPEATED_ANT_TEST_TARGET: testsome
+    - REPEATED_ANT_TEST_CLASS: null
+    - REPEATED_ANT_TEST_METHODS: null
+    - REPEATED_ANT_TEST_VNODES: false
+    - REPEATED_ANT_TEST_COUNT: 500
+    - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
   utests_system_keyspace_directory:
     docker:
     - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
@@ -5487,6 +5695,18 @@ workflows:
         requires:
         - start_utests_compression_repeat
         - j8_build
+    - start_utests_trie:
+        type: approval
+    - utests_trie:
+        requires:
+        - start_utests_trie
+        - j8_build
+    - start_utests_trie_repeat:
+        type: approval
+    - utests_trie_repeat:
+        requires:
+        - start_utests_trie_repeat
+        - j8_build
     - start_utests_stress:
         type: approval
     - utests_stress:
@@ -5709,6 +5929,16 @@ workflows:
         requires:
         - start_utests_compression
         - j8_build
+    - start_utests_trie:
+        type: approval
+    - utests_trie:
+        requires:
+        - start_utests_trie
+        - j8_build
+    - utests_trie_repeat:
+        requires:
+        - start_utests_trie
+        - j8_build
     - start_utests_stress:
         type: approval
     - utests_stress:
diff --git a/.circleci/generate.sh b/.circleci/generate.sh
index 241b84092e..3697e089f0 100755
--- a/.circleci/generate.sh
+++ b/.circleci/generate.sh
@@ -259,6 +259,7 @@ if (! (echo "$env_vars" | grep -q "REPEATED_UTESTS=" )); then
   delete_job "j8_unit_tests_repeat"
   delete_job "j11_unit_tests_repeat"
   delete_job "utests_compression_repeat"
+  delete_job "utests_trie_repeat"
   delete_job "utests_system_keyspace_directory_repeat"
 fi
 if (! (echo "$env_vars" | grep -q "REPEATED_UTESTS_LONG=")); then
diff --git a/CHANGES.txt b/CHANGES.txt
index 1dd8917f17..ee01325b8b 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -1,4 +1,6 @@
 4.2
+ * Adds a trie-based memtable implementation (CASSANDRA-17240)
+ * Further improves precision of memtable heap tracking (CASSANDRA-17240)
  * Fix formatting of metrics documentation (CASSANDRA-17961)
  * Keep sstable level when streaming for decommission and move (CASSANDRA-17969)
  * Add Unavailables metric for CASWrite in the docs (CASSANDRA-16357)
diff --git a/NEWS.txt b/NEWS.txt
index 2442572d7e..1f48643a81 100644
--- a/NEWS.txt
+++ b/NEWS.txt
@@ -57,6 +57,9 @@ using the provided 'sstableupgrade' tool.
 
 New features
 ------------
+    - Added a trie-based memtable implementation, which improves memory use, garbage collection efficiency and lookup
+      performance. The new memtable is implemented by the TrieMemtable class and can be selected using the memtable
+      API, see src/java/org/apache/cassandra/db/memtable/Memtable_API.md.
     - Added a new configuration cdc_on_repair_enabled to toggle whether CDC mutations are replayed through the
       write path on streaming, e.g. repair. When enabled, CDC data streamed to the destination node will be written into
       commit log first. When disabled, the streamed CDC data is written into SSTables just the same as normal streaming.
diff --git a/build.xml b/build.xml
index f5088e97c8..90ccd09ed2 100644
--- a/build.xml
+++ b/build.xml
@@ -1190,7 +1190,27 @@
     </sequential>
   </macrodef>
 
-  <macrodef name="testlist-system-keyspace-directory">
+    <macrodef name="testlist-trie">
+        <attribute name="test.file.list" />
+        <sequential>
+            <property name="trie_yaml" value="${build.test.dir}/cassandra.trie.yaml"/>
+            <concat destfile="${trie_yaml}">
+                <fileset file="${test.conf}/cassandra.yaml"/>
+                <fileset file="${test.conf}/trie_memtable.yaml"/>
+            </concat>
+            <testmacrohelper inputdir="${test.unit.src}" filelist="@{test.file.list}"
+                             exclude="**/*.java" timeout="${test.timeout}" testtag="trie">
+                <jvmarg value="-Dlegacy-sstable-root=${test.data}/legacy-sstables"/>
+                <jvmarg value="-Dinvalid-legacy-sstable-root=${test.data}/invalid-legacy-sstables"/>
+                <jvmarg value="-Dcassandra.ring_delay_ms=1000"/>
+                <jvmarg value="-Dcassandra.tolerate_sstable_size=true"/>
+                <jvmarg value="-Dcassandra.config=file:///${trie_yaml}"/>
+                <jvmarg value="-Dcassandra.skip_sync=true" />
+            </testmacrohelper>
+        </sequential>
+    </macrodef>
+
+    <macrodef name="testlist-system-keyspace-directory">
     <attribute name="test.file.list" />
     <sequential>
       <property name="system_keyspaces_directory_yaml" value="${build.test.dir}/cassandra.system.yaml"/>
@@ -1298,6 +1318,14 @@
     <testhelper testdelegate="testlist-cdc" />
   </target>
 
+  <target name="test-trie" depends="build-test" description="Execute unit tests with trie memtables">
+    <path id="all-test-classes-path">
+      <fileset dir="${test.unit.src}" includes="**/${test.name}.java" />
+    </path>
+    <property name="all-test-classes" refid="all-test-classes-path"/>
+    <testhelper testdelegate="testlist-trie" />
+  </target>
+
   <target name="test-system-keyspace-directory" depends="build-test" description="Execute unit tests with a system keyspaces directory configured">
     <path id="all-test-classes-path">
       <fileset dir="${test.unit.src}" includes="**/${test.name}.java" />
@@ -1545,6 +1573,13 @@
       <property name="all-test-classes" refid="all-test-classes-path"/>
       <testhelper testdelegate="testlist-compression"/>
   </target>
+  <target name="testclasslist-trie" depends="build-test" description="Run tests given in file -Dtest.classlistfile (one-class-per-line, e.g. org/apache/cassandra/db/SomeTest.java)">
+    <path id="all-test-classes-path">
+        <fileset dir="${test.dir}/${test.classlistprefix}" includesfile="${test.classlistfile}"/>
+    </path>
+    <property name="all-test-classes" refid="all-test-classes-path"/>
+    <testhelper testdelegate="testlist-trie"/>
+  </target>
   <target name="testclasslist-cdc" depends="build-test" description="Run tests given in file -Dtest.classlistfile (one-class-per-line, e.g. org/apache/cassandra/db/SomeTest.java)">
       <path id="all-test-classes-path">
           <fileset dir="${test.dir}/${test.classlistprefix}" includesfile="${test.classlistfile}"/>
diff --git a/src/java/org/apache/cassandra/db/AbstractArrayClusteringPrefix.java b/src/java/org/apache/cassandra/db/AbstractArrayClusteringPrefix.java
index 211eeb0460..16fb0807a5 100644
--- a/src/java/org/apache/cassandra/db/AbstractArrayClusteringPrefix.java
+++ b/src/java/org/apache/cassandra/db/AbstractArrayClusteringPrefix.java
@@ -49,7 +49,8 @@ public abstract class AbstractArrayClusteringPrefix extends AbstractOnHeapCluste
         return out;
     }
 
-    public ClusteringPrefix<byte[]> minimize()
+    @Override
+    public ClusteringPrefix<byte[]> retainable()
     {
         return this;
     }
diff --git a/src/java/org/apache/cassandra/db/AbstractBufferClusteringPrefix.java b/src/java/org/apache/cassandra/db/AbstractBufferClusteringPrefix.java
index 457d0c4bef..b47a3a3c59 100644
--- a/src/java/org/apache/cassandra/db/AbstractBufferClusteringPrefix.java
+++ b/src/java/org/apache/cassandra/db/AbstractBufferClusteringPrefix.java
@@ -42,10 +42,19 @@ public abstract class AbstractBufferClusteringPrefix extends AbstractOnHeapClust
         return getRawValues();
     }
 
-    public ClusteringPrefix<ByteBuffer> minimize()
+    @Override
+    public ClusteringPrefix<ByteBuffer> retainable()
     {
         if (!ByteBufferUtil.canMinimize(values))
             return this;
-        return new BufferClustering(ByteBufferUtil.minimizeBuffers(values));
+
+        ByteBuffer[] minimizedValues = ByteBufferUtil.minimizeBuffers(this.values);
+        if (kind.isBoundary())
+            return accessor().factory().boundary(kind, minimizedValues);
+        if (kind.isBound())
+            return accessor().factory().bound(kind, minimizedValues);
+
+        assert kind() != Kind.STATIC_CLUSTERING;    // not minimizable
+        return accessor().factory().clustering(minimizedValues);
     }
 }
diff --git a/src/java/org/apache/cassandra/db/BufferDecoratedKey.java b/src/java/org/apache/cassandra/db/BufferDecoratedKey.java
index ae3e9d44e0..07f610fd7c 100644
--- a/src/java/org/apache/cassandra/db/BufferDecoratedKey.java
+++ b/src/java/org/apache/cassandra/db/BufferDecoratedKey.java
@@ -39,6 +39,12 @@ public class BufferDecoratedKey extends DecoratedKey
         return key;
     }
 
+    @Override
+    public int getKeyLength()
+    {
+        return key.remaining();
+    }
+
     /**
      * A factory method that translates the given byte-comparable representation to a {@link BufferDecoratedKey}
      * instance. If the given byte comparable doesn't represent the encoding of a buffer decorated key, anything from a
diff --git a/src/java/org/apache/cassandra/db/ClusteringPrefix.java b/src/java/org/apache/cassandra/db/ClusteringPrefix.java
index c7a2782ece..0320c0457d 100644
--- a/src/java/org/apache/cassandra/db/ClusteringPrefix.java
+++ b/src/java/org/apache/cassandra/db/ClusteringPrefix.java
@@ -34,7 +34,6 @@ import org.apache.cassandra.io.util.DataInputPlus;
 import org.apache.cassandra.io.util.DataOutputPlus;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.utils.ByteArrayUtil;
-import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.bytecomparable.ByteComparable.Version;
 import org.apache.cassandra.utils.bytecomparable.ByteSource;
 
@@ -361,10 +360,11 @@ public interface ClusteringPrefix<V> extends IMeasurableMemory, Clusterable<V>
     public ByteBuffer[] getBufferArray();
 
     /**
-     * If the prefix contains byte buffers that can be minimized (see {@link ByteBufferUtil#minimalBufferFor(ByteBuffer)}),
-     * this will return a copy of the prefix with minimized values, otherwise it returns itself.
+     * Return the key in a form that can be retained for longer-term use. This means extracting keys stored in shared
+     * memory (i.e. in memtables) to minimized on-heap versions.
+     * If the object is already in minimal form, no action will be taken.
      */
-    public ClusteringPrefix<V> minimize();
+    public ClusteringPrefix<V> retainable();
 
     public static class Serializer
     {
diff --git a/src/java/org/apache/cassandra/db/ColumnFamilyStore.java b/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
index 8ebf896efd..ebe4aeba8a 100644
--- a/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
+++ b/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
@@ -1451,41 +1451,63 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean, Memtable.Owner
     @Override
     public ShardBoundaries localRangeSplits(int shardCount)
     {
-        if (shardCount == 1 || !getPartitioner().splitter().isPresent() || SchemaConstants.isLocalSystemKeyspace(keyspace.getName()))
+        if (shardCount == 1 || !getPartitioner().splitter().isPresent())
             return ShardBoundaries.NONE;
 
         ShardBoundaries shardBoundaries = cachedShardBoundaries;
+
         if (shardBoundaries == null ||
             shardBoundaries.shardCount() != shardCount ||
-            shardBoundaries.ringVersion != StorageService.instance.getTokenMetadata().getRingVersion())
+            shardBoundaries.ringVersion != -1 && shardBoundaries.ringVersion != StorageService.instance.getTokenMetadata().getRingVersion())
         {
-            DiskBoundaryManager.VersionedRangesAtEndpoint versionedLocalRanges = DiskBoundaryManager.getVersionedLocalRanges(this);
-            Set<Range<Token>> localRanges = versionedLocalRanges.rangesAtEndpoint.ranges();
             List<Splitter.WeightedRange> weightedRanges;
-            if (localRanges.isEmpty())
-                weightedRanges = ImmutableList.of(new Splitter.WeightedRange(1.0, new Range<>(getPartitioner().getMinimumToken(), getPartitioner().getMaximumToken())));
-            else
+            long ringVersion;
+            if (!SchemaConstants.isLocalSystemKeyspace(keyspace.getName())
+                && getPartitioner() == StorageService.instance.getTokenMetadata().partitioner)
             {
-                weightedRanges = new ArrayList<>(localRanges.size());
-                for (Range<Token> r : localRanges)
+                DiskBoundaryManager.VersionedRangesAtEndpoint versionedLocalRanges = DiskBoundaryManager.getVersionedLocalRanges(this);
+                Set<Range<Token>> localRanges = versionedLocalRanges.rangesAtEndpoint.ranges();
+                ringVersion = versionedLocalRanges.ringVersion;
+
+                if (!localRanges.isEmpty())
+                {
+                    weightedRanges = new ArrayList<>(localRanges.size());
+                    for (Range<Token> r : localRanges)
+                    {
+                        // WeightedRange supports only unwrapped ranges as it relies
+                        // on right - left == num tokens equality
+                        for (Range<Token> u: r.unwrap())
+                            weightedRanges.add(new Splitter.WeightedRange(1.0, u));
+                    }
+                    weightedRanges.sort(Comparator.comparing(Splitter.WeightedRange::left));
+                }
+                else
                 {
-                    // WeightedRange supports only unwrapped ranges as it relies
-                    // on right - left == num tokens equality
-                    for (Range<Token> u: r.unwrap())
-                        weightedRanges.add(new Splitter.WeightedRange(1.0, u));
+                    weightedRanges = fullWeightedRange();
                 }
-                weightedRanges.sort(Comparator.comparing(Splitter.WeightedRange::left));
+            }
+            else
+            {
+                // Local tables need to cover the full token range and don't care about ring changes.
+                // We also end up here if the table's partitioner is not the database's, which can happen in tests.
+                weightedRanges = fullWeightedRange();
+                ringVersion = -1;
             }
 
             List<Token> boundaries = getPartitioner().splitter().get().splitOwnedRanges(shardCount, weightedRanges, false);
             shardBoundaries = new ShardBoundaries(boundaries.subList(0, boundaries.size() - 1),
-                                                  versionedLocalRanges.ringVersion);
+                                                  ringVersion);
             cachedShardBoundaries = shardBoundaries;
             logger.debug("Memtable shard boundaries for {}.{}: {}", keyspace.getName(), getTableName(), boundaries);
         }
         return shardBoundaries;
     }
 
+    private ImmutableList<Splitter.WeightedRange> fullWeightedRange()
+    {
+        return ImmutableList.of(new Splitter.WeightedRange(1.0, new Range<>(getPartitioner().getMinimumToken(), getPartitioner().getMaximumToken())));
+    }
+
     /**
      * @param sstables
      * @return sstables whose key range overlaps with that of the given sstables, not including itself.
diff --git a/src/java/org/apache/cassandra/db/DecoratedKey.java b/src/java/org/apache/cassandra/db/DecoratedKey.java
index 569c86d9d8..914566e962 100644
--- a/src/java/org/apache/cassandra/db/DecoratedKey.java
+++ b/src/java/org/apache/cassandra/db/DecoratedKey.java
@@ -198,6 +198,7 @@ public abstract class DecoratedKey implements PartitionPosition, FilterKey
     }
 
     public abstract ByteBuffer getKey();
+    public abstract int getKeyLength();
 
     public void filterHash(long[] dest)
     {
diff --git a/src/java/org/apache/cassandra/db/NativeClustering.java b/src/java/org/apache/cassandra/db/NativeClustering.java
index 0e4c19db17..1b6761d3d4 100644
--- a/src/java/org/apache/cassandra/db/NativeClustering.java
+++ b/src/java/org/apache/cassandra/db/NativeClustering.java
@@ -25,6 +25,7 @@ import org.apache.cassandra.db.marshal.ByteBufferAccessor;
 import org.apache.cassandra.db.marshal.ValueAccessor;
 import org.apache.cassandra.utils.ObjectSizes;
 import org.apache.cassandra.utils.concurrent.OpOrder;
+import org.apache.cassandra.utils.memory.HeapCloner;
 import org.apache.cassandra.utils.memory.MemoryUtil;
 import org.apache.cassandra.utils.memory.NativeAllocator;
 
@@ -36,11 +37,6 @@ public class NativeClustering implements Clustering<ByteBuffer>
 
     private NativeClustering() { peer = 0; }
 
-    public ClusteringPrefix<ByteBuffer> minimize()
-    {
-        return this;
-    }
-
     public NativeClustering(NativeAllocator allocator, OpOrder.Group writeOp, Clustering<?> clustering)
     {
         int count = clustering.size();
@@ -157,4 +153,20 @@ public class NativeClustering implements Clustering<ByteBuffer>
     {
         return ClusteringPrefix.equals(this, o);
     }
+
+    @Override
+    public ClusteringPrefix<ByteBuffer> retainable()
+    {
+        assert kind() == Kind.CLUSTERING; // tombstones are never stored natively
+
+        // always extract
+        ByteBuffer[] values = new ByteBuffer[size()];
+        for (int i = 0; i < values.length; ++i)
+        {
+            ByteBuffer value = get(i);
+            values[i] = value != null ? HeapCloner.instance.clone(value) : null;
+        }
+
+        return accessor().factory().clustering(values);
+    }
 }
diff --git a/src/java/org/apache/cassandra/db/NativeDecoratedKey.java b/src/java/org/apache/cassandra/db/NativeDecoratedKey.java
index 35f31150df..bc149084d8 100644
--- a/src/java/org/apache/cassandra/db/NativeDecoratedKey.java
+++ b/src/java/org/apache/cassandra/db/NativeDecoratedKey.java
@@ -72,6 +72,12 @@ public class NativeDecoratedKey extends DecoratedKey
         return MemoryUtil.getByteBuffer(address(), length(), ByteOrder.BIG_ENDIAN);
     }
 
+    @Override
+    public int getKeyLength()
+    {
+        return MemoryUtil.getInt(peer);
+    }
+
     @Override
     protected ByteSource keyComparableBytes(Version version)
     {
diff --git a/src/java/org/apache/cassandra/db/memtable/AbstractAllocatorMemtable.java b/src/java/org/apache/cassandra/db/memtable/AbstractAllocatorMemtable.java
index 66556a16e4..c39b561a16 100644
--- a/src/java/org/apache/cassandra/db/memtable/AbstractAllocatorMemtable.java
+++ b/src/java/org/apache/cassandra/db/memtable/AbstractAllocatorMemtable.java
@@ -67,6 +67,9 @@ public abstract class AbstractAllocatorMemtable extends AbstractMemtableWithComm
     // memtable was created with the new or old comparator.
     @Unmetered
     protected final ClusteringComparator initialComparator;
+    // As above, used to determine if the memtable needs to be flushed on schema change.
+    @Unmetered
+    public final Factory initialFactory;
 
     private final long creationNano = Clock.Global.nanoTime();
 
@@ -104,6 +107,7 @@ public abstract class AbstractAllocatorMemtable extends AbstractMemtableWithComm
         super(metadataRef, commitLogLowerBound);
         this.allocator = MEMORY_POOL.newAllocator(metadataRef.toString());
         this.initialComparator = metadata.get().comparator;
+        this.initialFactory = metadata().params.memtable.factory();
         this.owner = owner;
         scheduleFlush();
     }
@@ -120,7 +124,7 @@ public abstract class AbstractAllocatorMemtable extends AbstractMemtableWithComm
         {
         case SCHEMA_CHANGE:
             return initialComparator != metadata().comparator // If the CF comparator has changed, because our partitions reference the old one
-                   || metadata().params.memtable.factory() != factory(); // If a different type of memtable is requested
+                   || !initialFactory.equals(metadata().params.memtable.factory()); // If a different type of memtable is requested
         case OWNED_RANGES_CHANGE:
             return false; // by default we don't use the local ranges, thus this has no effect
         default:
@@ -145,8 +149,6 @@ public abstract class AbstractAllocatorMemtable extends AbstractMemtableWithComm
         throw new AssertionError("performSnapshot must be implemented if shouldSwitch(SNAPSHOT) can return false.");
     }
 
-    protected abstract Factory factory();
-
     public void switchOut(OpOrder.Barrier writeBarrier, AtomicReference<CommitLogPosition> commitLogUpperBound)
     {
         super.switchOut(writeBarrier, commitLogUpperBound);
diff --git a/src/java/org/apache/cassandra/db/memtable/AbstractShardedMemtable.java b/src/java/org/apache/cassandra/db/memtable/AbstractShardedMemtable.java
new file mode 100644
index 0000000000..f4948f089f
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/memtable/AbstractShardedMemtable.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.memtable;
+
+import java.util.concurrent.atomic.AtomicReference;
+
+import com.google.common.annotations.VisibleForTesting;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.db.commitlog.CommitLogPosition;
+import org.apache.cassandra.schema.TableMetadataRef;
+import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.MBeanWrapper;
+import org.github.jamm.Unmetered;
+
+public abstract class AbstractShardedMemtable extends AbstractAllocatorMemtable
+{
+    private static final Logger logger = LoggerFactory.getLogger(AbstractShardedMemtable.class);
+
+    public static final String SHARDS_OPTION = "shards";
+    @VisibleForTesting
+    public static final String DEFAULT_SHARD_COUNT_PROPERTY = "cassandra.memtable.shard.count";
+
+    public static final String SHARDED_MEMTABLE_CONFIG_OBJECT_NAME = "org.apache.cassandra.db:type=ShardedMemtableConfig";
+    static
+    {
+        MBeanWrapper.instance.registerMBean(new ShardedMemtableConfig(), SHARDED_MEMTABLE_CONFIG_OBJECT_NAME, MBeanWrapper.OnException.LOG);
+    }
+
+    // default shard count, used when a specific number of shards is not specified in the options
+    private static volatile int defaultShardCount = Integer.getInteger(DEFAULT_SHARD_COUNT_PROPERTY, FBUtilities.getAvailableProcessors());
+
+    // The boundaries for the keyspace as they were calculated when the memtable is created.
+    // The boundaries will be NONE for system keyspaces or if StorageService is not yet initialized.
+    // The fact this is fixed for the duration of the memtable lifetime, guarantees we'll always pick the same shard
+    // for a given key, even if we race with the StorageService initialization or with topology changes.
+    @Unmetered
+    protected final ShardBoundaries boundaries;
+
+    AbstractShardedMemtable(AtomicReference<CommitLogPosition> commitLogLowerBound,
+                            TableMetadataRef metadataRef,
+                            Owner owner,
+                            Integer shardCountOption)
+    {
+        super(commitLogLowerBound, metadataRef, owner);
+        int shardCount = shardCountOption != null ? shardCountOption : defaultShardCount;
+        this.boundaries = owner.localRangeSplits(shardCount);
+    }
+
+    private static class ShardedMemtableConfig implements ShardedMemtableConfigMXBean
+    {
+        @Override
+        public void setDefaultShardCount(String shardCount)
+        {
+            if ("auto".equalsIgnoreCase(shardCount))
+            {
+                defaultShardCount = FBUtilities.getAvailableProcessors();
+            }
+            else
+            {
+                try
+                {
+                    defaultShardCount = Integer.parseInt(shardCount);
+                }
+                catch (NumberFormatException ex)
+                {
+                    logger.warn("Unable to parse {} as valid value for shard count", shardCount);
+                    return;
+                }
+            }
+            logger.info("Requested setting shard count to {}; set to: {}", shardCount, defaultShardCount);
+        }
+
+        @Override
+        public String getDefaultShardCount()
+        {
+            return Integer.toString(defaultShardCount);
+        }
+    }
+
+    public static int getDefaultShardCount()
+    {
+        return defaultShardCount;
+    }
+}
diff --git a/src/java/org/apache/cassandra/db/memtable/Memtable_API.md b/src/java/org/apache/cassandra/db/memtable/Memtable_API.md
index 39f9b201af..e5399641c3 100644
--- a/src/java/org/apache/cassandra/db/memtable/Memtable_API.md
+++ b/src/java/org/apache/cassandra/db/memtable/Memtable_API.md
@@ -45,13 +45,32 @@ memtable:
           class_name: SkipListMemtable
         sharded:
           class_name: ShardedSkipListMemtable
+        trie:
+          class_name: TrieMemtable
         default:
-          inherits: sharded
+          inherits: trie
 ```
 
 Note that the database will only validate the memtable class and its parameters when a configuration needs to be
 instantiated for a table.
 
+## Implementations provided
+
+Cassandra currently comes with three memtable implementations:
+
+- `SkipListMemtable` is the default and matches the memtable format of Cassandra versions up to 4.1. It organizes
+  partitions into a single concurrent skip list.
+- `ShardedSkipListMemtable` splits the partition skip-list into several independent skip-lists each covering a roughly
+  equal part of the token space served by this node. This reduces congestion of the skip-list from concurrent writes and
+  can lead to improved write throughput. Its configuration takes two parameters:
+  - `shards`: the number of shards to split into, defaulting to the number of CPU cores on the machine.
+  - `serialize_writes`: if false (default), each shard may serve multiple writes in parallel; if true, writes to each
+    shard are synchronized.
+- `TrieMemtable` is a novel solution that organizes partitions into an in-memory trie which places the partition
+  indexing structure in a buffer, off-heap if desired, which significantly improves garbage collection efficiency. It
+  also improves the memtable's space efficiency and lookup performance. Its configuration can take a single parameter
+  `shards` as above.
+
 ## Memtable selection
 
 Once a configuration has been defined, it can be used by specifying it in the `memtable` parameter of a `CREATE TABLE`
diff --git a/src/java/org/apache/cassandra/db/memtable/ShardBoundaries.java b/src/java/org/apache/cassandra/db/memtable/ShardBoundaries.java
index fb9cc98426..864899f6a4 100644
--- a/src/java/org/apache/cassandra/db/memtable/ShardBoundaries.java
+++ b/src/java/org/apache/cassandra/db/memtable/ShardBoundaries.java
@@ -22,7 +22,6 @@ import java.util.List;
 
 import com.google.common.annotations.VisibleForTesting;
 
-import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.Keyspace;
 import org.apache.cassandra.db.PartitionPosition;
 import org.apache.cassandra.dht.Token;
@@ -83,7 +82,7 @@ public class ShardBoundaries
         if (boundaries.length == 0)
             return 0;
 
-        assert (key.getPartitioner() == DatabaseDescriptor.getPartitioner());
+        assert (key.getPartitioner() == boundaries[0].getPartitioner());
         return getShardForToken(key.getToken());
     }
 
diff --git a/test/unit/org/apache/cassandra/db/ClusteringBoundTest.java b/src/java/org/apache/cassandra/db/memtable/ShardedMemtableConfigMXBean.java
similarity index 51%
rename from test/unit/org/apache/cassandra/db/ClusteringBoundTest.java
rename to src/java/org/apache/cassandra/db/memtable/ShardedMemtableConfigMXBean.java
index 20fcc2086c..60ff10e8e1 100644
--- a/test/unit/org/apache/cassandra/db/ClusteringBoundTest.java
+++ b/src/java/org/apache/cassandra/db/memtable/ShardedMemtableConfigMXBean.java
@@ -16,28 +16,18 @@
  * limitations under the License.
  */
 
-package org.apache.cassandra.db;
+package org.apache.cassandra.db.memtable;
 
-import org.junit.Assert;
-import org.junit.Test;
-
-public class ClusteringBoundTest
+public interface ShardedMemtableConfigMXBean
 {
-    @Test
-    public void arrayTopAndBottom()
-    {
-        Assert.assertTrue(ArrayClusteringBound.BOTTOM.isBottom());
-        Assert.assertFalse(ArrayClusteringBound.BOTTOM.isTop());
-        Assert.assertTrue(ArrayClusteringBound.TOP.isTop());
-        Assert.assertFalse(ArrayClusteringBound.TOP.isBottom());
-    }
+    /**
+     * Adjust the shard count for sharded memtables that do not specify it explicitly in the memtable options.
+     * Changes will apply on the next memtable flush.
+     */
+    public void setDefaultShardCount(String numShards);
 
-    @Test
-    public void bufferTopAndBottom()
-    {
-        Assert.assertTrue(BufferClusteringBound.BOTTOM.isBottom());
-        Assert.assertFalse(BufferClusteringBound.BOTTOM.isTop());
-        Assert.assertTrue(BufferClusteringBound.TOP.isTop());
-        Assert.assertFalse(BufferClusteringBound.TOP.isBottom());
-    }
+    /**
+     * Returns the shard count for sharded memtables that do not specify it explicitly in the memtable options.
+     */
+    public String getDefaultShardCount();
 }
diff --git a/src/java/org/apache/cassandra/db/memtable/ShardedSkipListMemtable.java b/src/java/org/apache/cassandra/db/memtable/ShardedSkipListMemtable.java
index e1a192ed70..bb6c0cca36 100644
--- a/src/java/org/apache/cassandra/db/memtable/ShardedSkipListMemtable.java
+++ b/src/java/org/apache/cassandra/db/memtable/ShardedSkipListMemtable.java
@@ -41,6 +41,7 @@ import org.apache.cassandra.db.filter.ClusteringIndexFilter;
 import org.apache.cassandra.db.filter.ColumnFilter;
 import org.apache.cassandra.db.partitions.AbstractUnfilteredPartitionIterator;
 import org.apache.cassandra.db.partitions.AtomicBTreePartition;
+import org.apache.cassandra.db.partitions.BTreePartitionUpdater;
 import org.apache.cassandra.db.partitions.Partition;
 import org.apache.cassandra.db.partitions.PartitionUpdate;
 import org.apache.cassandra.db.partitions.UnfilteredPartitionIterator;
@@ -54,7 +55,6 @@ import org.apache.cassandra.index.transactions.UpdateTransaction;
 import org.apache.cassandra.io.sstable.format.SSTableReadsListener;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.schema.TableMetadataRef;
-import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.concurrent.OpOrder;
 import org.apache.cassandra.utils.memory.Cloner;
 import org.apache.cassandra.utils.memory.MemtableAllocator;
@@ -72,46 +72,26 @@ import org.github.jamm.Unmetered;
  *
  * Also see Memtable_API.md.
  */
-public class ShardedSkipListMemtable extends AbstractAllocatorMemtable
+public class ShardedSkipListMemtable extends AbstractShardedMemtable
 {
     private static final Logger logger = LoggerFactory.getLogger(ShardedSkipListMemtable.class);
 
-    public static final String SHARDS_OPTION = "shards";
     public static final String LOCKING_OPTION = "serialize_writes";
 
-    // The boundaries for the keyspace as they were calculated when the memtable is created.
-    // The boundaries will be NONE for system keyspaces or if StorageService is not yet initialized.
-    // The fact this is fixed for the duration of the memtable lifetime, guarantees we'll always pick the same shard
-    // for a given key, even if we race with the StorageService initialization or with topology changes.
-    @Unmetered
-    final ShardBoundaries boundaries;
-
     /**
-     * Core-specific memtable regions. All writes must go through the specific core. The data structures used
-     * are concurrent-read safe, thus reads can be carried out from any thread.
+     * Sharded memtable sections. Each is responsible for a contiguous range of the token space (between boundaries[i]
+     * and boundaries[i+1]) and is written to by one thread at a time, while reads are carried out concurrently
+     * (including with any write).
      */
     final MemtableShard[] shards;
 
-    @VisibleForTesting
-    public static final String SHARD_COUNT_PROPERTY = "cassandra.memtable.shard.count";
-
-    // default shard count, used when a specific number of shards is not specified in the parameters
-    private static final int SHARD_COUNT = Integer.getInteger(SHARD_COUNT_PROPERTY, FBUtilities.getAvailableProcessors());
-
-    private final Factory factory;
-
-    // only to be used by init(), to setup the very first memtable for the cfs
     ShardedSkipListMemtable(AtomicReference<CommitLogPosition> commitLogLowerBound,
                             TableMetadataRef metadataRef,
                             Owner owner,
-                            Integer shardCountOption,
-                            Factory factory)
+                            Integer shardCountOption)
     {
-        super(commitLogLowerBound, metadataRef, owner);
-        int shardCount = shardCountOption != null ? shardCountOption : SHARD_COUNT;
-        this.boundaries = owner.localRangeSplits(shardCount);
+        super(commitLogLowerBound, metadataRef, owner, shardCountOption);
         this.shards = generatePartitionShards(boundaries.shardCount(), allocator, metadataRef);
-        this.factory = factory;
     }
 
     private static MemtableShard[] generatePartitionShards(int splits,
@@ -128,17 +108,11 @@ public class ShardedSkipListMemtable extends AbstractAllocatorMemtable
     public boolean isClean()
     {
         for (MemtableShard shard : shards)
-            if (!shard.isEmpty())
+            if (!shard.isClean())
                 return false;
         return true;
     }
 
-    @Override
-    protected Memtable.Factory factory()
-    {
-        return factory;
-    }
-
     /**
      * Should only be called by ColumnFamilyStore.apply via Keyspace.apply, which supplies the appropriate
      * OpOrdering.
@@ -389,14 +363,14 @@ public class ShardedSkipListMemtable extends AbstractAllocatorMemtable
                 }
             }
 
-            long[] pair = previous.addAllWithSizeDelta(update, cloner, opGroup, indexer);
+            BTreePartitionUpdater updater = previous.addAll(update, cloner, opGroup, indexer);
             updateMin(minTimestamp, update.stats().minTimestamp);
             updateMin(minLocalDeletionTime, update.stats().minLocalDeletionTime);
-            liveDataSize.addAndGet(initialSize + pair[0]);
+            liveDataSize.addAndGet(initialSize + updater.dataSize);
             columnsCollector.update(update.columns());
             statsCollector.update(update.stats());
             currentOperations.addAndGet(update.operationCount());
-            return pair[1];
+            return updater.colUpdateTimeDelta;
         }
 
         private Map<PartitionPosition, AtomicBTreePartition> getPartitionsSubMap(PartitionPosition left,
@@ -425,7 +399,7 @@ public class ShardedSkipListMemtable extends AbstractAllocatorMemtable
             }
         }
 
-        public boolean isEmpty()
+        public boolean isClean()
         {
             return partitions.isEmpty();
         }
@@ -493,9 +467,9 @@ public class ShardedSkipListMemtable extends AbstractAllocatorMemtable
 
     static class Locking extends ShardedSkipListMemtable
     {
-        Locking(AtomicReference<CommitLogPosition> commitLogLowerBound, TableMetadataRef metadataRef, Owner owner, Integer shardCountOption, Factory factory)
+        Locking(AtomicReference<CommitLogPosition> commitLogLowerBound, TableMetadataRef metadataRef, Owner owner, Integer shardCountOption)
         {
-            super(commitLogLowerBound, metadataRef, owner, shardCountOption, factory);
+            super(commitLogLowerBound, metadataRef, owner, shardCountOption);
         }
 
         /**
@@ -540,8 +514,8 @@ public class ShardedSkipListMemtable extends AbstractAllocatorMemtable
                                Owner owner)
         {
             return isLocking
-                   ? new Locking(commitLogLowerBound, metadataRef, owner, shardCount, this)
-                   : new ShardedSkipListMemtable(commitLogLowerBound, metadataRef, owner, shardCount, this);
+                   ? new Locking(commitLogLowerBound, metadataRef, owner, shardCount)
+                   : new ShardedSkipListMemtable(commitLogLowerBound, metadataRef, owner, shardCount);
         }
 
         public boolean equals(Object o)
diff --git a/src/java/org/apache/cassandra/db/memtable/SkipListMemtable.java b/src/java/org/apache/cassandra/db/memtable/SkipListMemtable.java
index 99cb237250..3c8dfb3104 100644
--- a/src/java/org/apache/cassandra/db/memtable/SkipListMemtable.java
+++ b/src/java/org/apache/cassandra/db/memtable/SkipListMemtable.java
@@ -37,9 +37,10 @@ import org.apache.cassandra.db.Slices;
 import org.apache.cassandra.db.commitlog.CommitLogPosition;
 import org.apache.cassandra.db.filter.ClusteringIndexFilter;
 import org.apache.cassandra.db.filter.ColumnFilter;
-import org.apache.cassandra.db.partitions.AbstractBTreePartition;
 import org.apache.cassandra.db.partitions.AbstractUnfilteredPartitionIterator;
 import org.apache.cassandra.db.partitions.AtomicBTreePartition;
+import org.apache.cassandra.db.partitions.BTreePartitionData;
+import org.apache.cassandra.db.partitions.BTreePartitionUpdater;
 import org.apache.cassandra.db.partitions.Partition;
 import org.apache.cassandra.db.partitions.PartitionUpdate;
 import org.apache.cassandra.db.partitions.UnfilteredPartitionIterator;
@@ -90,12 +91,6 @@ public class SkipListMemtable extends AbstractAllocatorMemtable
         super(commitLogLowerBound, metadataRef, owner);
     }
 
-    @Override
-    protected Factory factory()
-    {
-        return FACTORY;
-    }
-
     @Override
     public boolean isClean()
     {
@@ -132,14 +127,14 @@ public class SkipListMemtable extends AbstractAllocatorMemtable
             }
         }
 
-        long[] pair = previous.addAllWithSizeDelta(update, cloner, opGroup, indexer);
+        BTreePartitionUpdater updater = previous.addAll(update, cloner, opGroup, indexer);
         updateMin(minTimestamp, update.stats().minTimestamp);
         updateMin(minLocalDeletionTime, update.stats().minLocalDeletionTime);
-        liveDataSize.addAndGet(initialSize + pair[0]);
+        liveDataSize.addAndGet(initialSize + updater.dataSize);
         columnsCollector.update(update.columns());
         statsCollector.update(update.stats());
         currentOperations.addAndGet(update.operationCount());
-        return pair[1];
+        return updater.colUpdateTimeDelta;
     }
 
     @Override
@@ -235,7 +230,7 @@ public class SkipListMemtable extends AbstractAllocatorMemtable
             rowOverhead = (int) ((avgSize - Math.floor(avgSize)) < 0.05 ? Math.floor(avgSize) : Math.ceil(avgSize));
             rowOverhead -= new LongToken(0).getHeapSize();
             rowOverhead += AtomicBTreePartition.EMPTY_SIZE;
-            rowOverhead += AbstractBTreePartition.HOLDER_UNSHARED_HEAP_SIZE;
+            rowOverhead += BTreePartitionData.UNSHARED_HEAP_SIZE;
             if (!(allocator instanceof NativeAllocator))
                 rowOverhead -= testBufferSize;  // measureDeepOmitShared includes the given number of bytes even for
                                                 // off-heap buffers, but not for direct memory.
diff --git a/src/java/org/apache/cassandra/db/memtable/TrieMemtable.java b/src/java/org/apache/cassandra/db/memtable/TrieMemtable.java
new file mode 100644
index 0000000000..4c59bbba6f
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/memtable/TrieMemtable.java
@@ -0,0 +1,722 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.db.memtable;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableSet;
+import java.util.Objects;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.concurrent.locks.ReentrantLock;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.Iterators;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.BufferDecoratedKey;
+import org.apache.cassandra.db.Clustering;
+import org.apache.cassandra.db.ColumnFamilyStore;
+import org.apache.cassandra.db.DataRange;
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.db.DeletionInfo;
+import org.apache.cassandra.db.PartitionPosition;
+import org.apache.cassandra.db.RegularAndStaticColumns;
+import org.apache.cassandra.db.Slices;
+import org.apache.cassandra.db.commitlog.CommitLogPosition;
+import org.apache.cassandra.db.filter.ClusteringIndexFilter;
+import org.apache.cassandra.db.filter.ColumnFilter;
+import org.apache.cassandra.db.partitions.AbstractUnfilteredPartitionIterator;
+import org.apache.cassandra.db.partitions.BTreePartitionData;
+import org.apache.cassandra.db.partitions.BTreePartitionUpdater;
+import org.apache.cassandra.db.partitions.ImmutableBTreePartition;
+import org.apache.cassandra.db.partitions.Partition;
+import org.apache.cassandra.db.partitions.PartitionUpdate;
+import org.apache.cassandra.db.partitions.UnfilteredPartitionIterator;
+import org.apache.cassandra.db.rows.EncodingStats;
+import org.apache.cassandra.db.rows.Row;
+import org.apache.cassandra.db.rows.UnfilteredRowIterator;
+import org.apache.cassandra.db.tries.MemtableTrie;
+import org.apache.cassandra.db.tries.Trie;
+import org.apache.cassandra.dht.AbstractBounds;
+import org.apache.cassandra.dht.Bounds;
+import org.apache.cassandra.dht.IncludingExcludingBounds;
+import org.apache.cassandra.dht.Range;
+import org.apache.cassandra.index.transactions.UpdateTransaction;
+import org.apache.cassandra.io.compress.BufferType;
+import org.apache.cassandra.io.sstable.format.SSTableReadsListener;
+import org.apache.cassandra.metrics.TableMetrics;
+import org.apache.cassandra.metrics.TrieMemtableMetricsView;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.schema.TableMetadataRef;
+import org.apache.cassandra.utils.Clock;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable;
+import org.apache.cassandra.utils.bytecomparable.ByteSource;
+import org.apache.cassandra.utils.concurrent.OpOrder;
+import org.apache.cassandra.utils.memory.EnsureOnHeap;
+import org.apache.cassandra.utils.memory.MemtableAllocator;
+import org.github.jamm.Unmetered;
+
+/**
+ * Trie memtable implementation. Improves memory usage, garbage collection efficiency and lookup performance.
+ * The implementation is described in detail in the paper:
+ *       https://www.vldb.org/pvldb/vol15/p3359-lambov.pdf
+ *
+ * The configuration takes a single parameter:
+ * - shards: the number of shards to split into, defaulting to the number of CPU cores.
+ *
+ * Also see Memtable_API.md.
+ */
+public class TrieMemtable extends AbstractShardedMemtable
+{
+    private static final Logger logger = LoggerFactory.getLogger(TrieMemtable.class);
+
+    /** Buffer type to use for memtable tries (on- vs off-heap) */
+    public static final BufferType BUFFER_TYPE;
+
+    static
+    {
+        switch (DatabaseDescriptor.getMemtableAllocationType())
+        {
+        case unslabbed_heap_buffers:
+        case heap_buffers:
+            BUFFER_TYPE = BufferType.ON_HEAP;
+            break;
+        case offheap_buffers:
+        case offheap_objects:
+            BUFFER_TYPE = BufferType.OFF_HEAP;
+            break;
+        default:
+            throw new AssertionError();
+        }
+    }
+
+    /** If a key is below this length, we will use a recursive procedure for inserting data in the memtable trie. */
+    @VisibleForTesting
+    public static final int MAX_RECURSIVE_KEY_LENGTH = 128;
+
+    /** The byte-ordering conversion version to use for memtables. */
+    public static final ByteComparable.Version BYTE_COMPARABLE_VERSION = ByteComparable.Version.OSS42;
+
+    // Set to true when the memtable requests a switch (e.g. for trie size limit being reached) to ensure only one
+    // thread calls cfs.switchMemtableIfCurrent.
+    private final AtomicBoolean switchRequested = new AtomicBoolean(false);
+
+    /**
+     * Sharded memtable sections. Each is responsible for a contiguous range of the token space (between boundaries[i]
+     * and boundaries[i+1]) and is written to by one thread at a time, while reads are carried out concurrently
+     * (including with any write).
+     */
+    private final MemtableShard[] shards;
+
+    /**
+     * A merged view of the memtable map. Used for partition range queries and flush.
+     * For efficiency we serve single partition requests off the shard which offers more direct MemtableTrie methods.
+     */
+    private final Trie<BTreePartitionData> mergedTrie;
+
+    @Unmetered
+    private final TrieMemtableMetricsView metrics;
+
+    TrieMemtable(AtomicReference<CommitLogPosition> commitLogLowerBound, TableMetadataRef metadataRef, Owner owner, Integer shardCountOption)
+    {
+        super(commitLogLowerBound, metadataRef, owner, shardCountOption);
+        this.metrics = new TrieMemtableMetricsView(metadataRef.keyspace, metadataRef.name);
+        this.shards = generatePartitionShards(boundaries.shardCount(), allocator, metadataRef, metrics);
+        this.mergedTrie = makeMergedTrie(shards);
+    }
+
+    private static MemtableShard[] generatePartitionShards(int splits,
+                                                           MemtableAllocator allocator,
+                                                           TableMetadataRef metadata,
+                                                           TrieMemtableMetricsView metrics)
+    {
+        MemtableShard[] partitionMapContainer = new MemtableShard[splits];
+        for (int i = 0; i < splits; i++)
+            partitionMapContainer[i] = new MemtableShard(metadata, allocator, metrics);
+
+        return partitionMapContainer;
+    }
+
+    private static Trie<BTreePartitionData> makeMergedTrie(MemtableShard[] shards)
+    {
+        List<Trie<BTreePartitionData>> tries = new ArrayList<>(shards.length);
+        for (MemtableShard shard : shards)
+            tries.add(shard.data);
+        return Trie.mergeDistinct(tries);
+    }
+
+    @Override
+    public boolean isClean()
+    {
+        for (MemtableShard shard : shards)
+            if (!shard.isClean())
+                return false;
+        return true;
+    }
+
+    @Override
+    public void discard()
+    {
+        super.discard();
+        // the metrics update here is not thread-safe, but we can live with that
+        metrics.lastFlushShardDataSizes.reset();
+        for (MemtableShard shard : shards)
+        {
+            metrics.lastFlushShardDataSizes.update(shard.liveDataSize());
+        }
+        // the buffer release is a longer-running process, do it in a separate loop to not make the metrics update wait
+        for (MemtableShard shard : shards)
+        {
+            shard.data.discardBuffers();
+        }
+    }
+
+    /**
+     * Should only be called by ColumnFamilyStore.apply via Keyspace.apply, which supplies the appropriate
+     * OpOrdering.
+     *
+     * commitLogSegmentPosition should only be null if this is a secondary index, in which case it is *expected* to be null
+     */
+    @Override
+    public long put(PartitionUpdate update, UpdateTransaction indexer, OpOrder.Group opGroup)
+    {
+        try
+        {
+            DecoratedKey key = update.partitionKey();
+            MemtableShard shard = shards[boundaries.getShardForKey(key)];
+            long colUpdateTimeDelta = shard.put(key, update, indexer, opGroup);
+
+            if (shard.data.reachedAllocatedSizeThreshold() && !switchRequested.getAndSet(true))
+            {
+                logger.info("Scheduling flush due to trie size limit reached.");
+                owner.signalFlushRequired(this, ColumnFamilyStore.FlushReason.MEMTABLE_LIMIT);
+            }
+
+            return colUpdateTimeDelta;
+        }
+        catch (MemtableTrie.SpaceExhaustedException e)
+        {
+            // This should never happen as {@link MemtableTrie#reachedAllocatedSizeThreshold} should become
+            // true and trigger a memtable switch long before this limit is reached.
+            throw new IllegalStateException(e);
+        }
+    }
+
+    /**
+     * Technically we should scatter-gather on all the core threads because the sizes in the following calls are not
+     * using volatile variables, but for metrics purposes this should be good enough.
+     */
+    @Override
+    public long getLiveDataSize()
+    {
+        long total = 0L;
+        for (MemtableShard shard : shards)
+            total += shard.liveDataSize();
+        return total;
+    }
+
+    @Override
+    public long operationCount()
+    {
+        long total = 0L;
+        for (MemtableShard shard : shards)
+            total += shard.currentOperations();
+        return total;
+    }
+
+    @Override
+    public long partitionCount()
+    {
+        int total = 0;
+        for (MemtableShard shard : shards)
+            total += shard.size();
+        return total;
+    }
+
+    @Override
+    public long getMinTimestamp()
+    {
+        long min = Long.MAX_VALUE;
+        for (MemtableShard shard : shards)
+            min =  Long.min(min, shard.minTimestamp());
+        return min;
+    }
+
+    @Override
+    public int getMinLocalDeletionTime()
+    {
+        int min = Integer.MAX_VALUE;
+        for (MemtableShard shard : shards)
+            min =  Integer.min(min, shard.minLocalDeletionTime());
+        return min;
+    }
+
+    @Override
+    RegularAndStaticColumns columns()
+    {
+        for (MemtableShard shard : shards)
+            columnsCollector.update(shard.columnsCollector);
+        return columnsCollector.get();
+    }
+
+    @Override
+    EncodingStats encodingStats()
+    {
+        for (MemtableShard shard : shards)
+            statsCollector.update(shard.statsCollector.get());
+        return statsCollector.get();
+    }
+
+    @Override
+    public MemtableUnfilteredPartitionIterator partitionIterator(final ColumnFilter columnFilter,
+                                                                 final DataRange dataRange,
+                                                                 SSTableReadsListener readsListener)
+    {
+        AbstractBounds<PartitionPosition> keyRange = dataRange.keyRange();
+
+        PartitionPosition left = keyRange.left;
+        PartitionPosition right = keyRange.right;
+        if (left.isMinimum())
+            left = null;
+        if (right.isMinimum())
+            right = null;
+
+        boolean isBound = keyRange instanceof Bounds;
+        boolean includeStart = isBound || keyRange instanceof IncludingExcludingBounds;
+        boolean includeStop = isBound || keyRange instanceof Range;
+
+        Trie<BTreePartitionData> subMap = mergedTrie.subtrie(left, includeStart, right, includeStop);
+
+        return new MemtableUnfilteredPartitionIterator(metadata(),
+                                                       allocator.ensureOnHeap(),
+                                                       subMap,
+                                                       columnFilter,
+                                                       dataRange);
+        // readsListener is ignored as it only accepts sstable signals
+    }
+
+    private Partition getPartition(DecoratedKey key)
+    {
+        int shardIndex = boundaries.getShardForKey(key);
+        BTreePartitionData data = shards[shardIndex].data.get(key);
+        if (data != null)
+            return createPartition(metadata(), allocator.ensureOnHeap(), key, data);
+        else
+            return null;
+    }
+
+    @Override
+    public UnfilteredRowIterator rowIterator(DecoratedKey key, Slices slices, ColumnFilter selectedColumns, boolean reversed, SSTableReadsListener listener)
+    {
+        Partition p = getPartition(key);
+        if (p == null)
+            return null;
+        else
+            return p.unfilteredIterator(selectedColumns, slices, reversed);
+    }
+
+    @Override
+    public UnfilteredRowIterator rowIterator(DecoratedKey key)
+    {
+        Partition p = getPartition(key);
+        return p != null ? p.unfilteredIterator() : null;
+    }
+
+    private static MemtablePartition createPartition(TableMetadata metadata, EnsureOnHeap ensureOnHeap, DecoratedKey key, BTreePartitionData data)
+    {
+        return new MemtablePartition(metadata, ensureOnHeap, key, data);
+    }
+
+    private static MemtablePartition getPartitionFromTrieEntry(TableMetadata metadata, EnsureOnHeap ensureOnHeap, Map.Entry<ByteComparable, BTreePartitionData> en)
+    {
+        DecoratedKey key = BufferDecoratedKey.fromByteComparable(en.getKey(),
+                                                                 BYTE_COMPARABLE_VERSION,
+                                                                 metadata.partitioner);
+        return createPartition(metadata, ensureOnHeap, key, en.getValue());
+    }
+
+
+    @Override
+    public FlushablePartitionSet<MemtablePartition> getFlushSet(PartitionPosition from, PartitionPosition to)
+    {
+        Trie<BTreePartitionData> toFlush = mergedTrie.subtrie(from, true, to, false);
+        long keySize = 0;
+        int keyCount = 0;
+
+        for (Iterator<Map.Entry<ByteComparable, BTreePartitionData>> it = toFlush.entryIterator(); it.hasNext(); )
+        {
+            Map.Entry<ByteComparable, BTreePartitionData> en = it.next();
+            byte[] keyBytes = DecoratedKey.keyFromByteSource(ByteSource.peekable(en.getKey().asComparableBytes(BYTE_COMPARABLE_VERSION)),
+                                                             BYTE_COMPARABLE_VERSION,
+                                                             metadata().partitioner);
+            keySize += keyBytes.length;
+            keyCount++;
+        }
+        long partitionKeySize = keySize;
+        int partitionCount = keyCount;
+
+        return new AbstractFlushablePartitionSet<MemtablePartition>()
+        {
+            public Memtable memtable()
+            {
+                return TrieMemtable.this;
+            }
+
+            public PartitionPosition from()
+            {
+                return from;
+            }
+
+            public PartitionPosition to()
+            {
+                return to;
+            }
+
+            public long partitionCount()
+            {
+                return partitionCount;
+            }
+
+            public Iterator<MemtablePartition> iterator()
+            {
+                return Iterators.transform(toFlush.entryIterator(),
+                                           // During flushing we are certain the memtable will remain at least until
+                                           // the flush completes. No copying to heap is necessary.
+                                           entry -> getPartitionFromTrieEntry(metadata(), EnsureOnHeap.NOOP, entry));
+            }
+
+            public long partitionKeysSize()
+            {
+                return partitionKeySize;
+            }
+        };
+    }
+
+    static class MemtableShard
+    {
+        // The following fields are volatile as we have to make sure that when we
+        // collect results from all sub-ranges, the thread accessing the value
+        // is guaranteed to see the changes to the values.
+
+        // The smallest timestamp for all partitions stored in this shard
+        private volatile long minTimestamp = Long.MAX_VALUE;
+
+        private volatile int minLocalDeletionTime = Integer.MAX_VALUE;
+
+        private volatile long liveDataSize = 0;
+
+        private volatile long currentOperations = 0;
+
+        @Unmetered
+        private final ReentrantLock writeLock = new ReentrantLock();
+
+        // Content map for the given shard. This is implemented as a memtable trie which uses the prefix-free
+        // byte-comparable ByteSource representations of the keys to address the partitions.
+        //
+        // This map is used in a single-producer, multi-consumer fashion: only one thread will insert items but
+        // several threads may read from it and iterate over it. Iterators (especially partition range iterators)
+        // may operate for a long period of time and thus iterators should not throw ConcurrentModificationExceptions
+        // if the underlying map is modified during iteration; they should provide a weakly consistent view of the map
+        // instead.
+        //
+        // Also, this data is backed by memtable memory, when accessing it callers must specify if it can be accessed
+        // unsafely, meaning that the memtable will not be discarded as long as the data is used, or whether the data
+        // should be copied on heap for off-heap allocators.
+        @VisibleForTesting
+        final MemtableTrie<BTreePartitionData> data;
+
+        private final ColumnsCollector columnsCollector;
+
+        private final StatsCollector statsCollector;
+
+        @Unmetered  // total pool size should not be included in memtable's deep size
+        private final MemtableAllocator allocator;
+
+        @Unmetered
+        private final TrieMemtableMetricsView metrics;
+
+        @VisibleForTesting
+        MemtableShard(TableMetadataRef metadata, MemtableAllocator allocator, TrieMemtableMetricsView metrics)
+        {
+            this.data = new MemtableTrie<>(BUFFER_TYPE);
+            this.columnsCollector = new AbstractMemtable.ColumnsCollector(metadata.get().regularAndStaticColumns());
+            this.statsCollector = new AbstractMemtable.StatsCollector();
+            this.allocator = allocator;
+            this.metrics = metrics;
+        }
+
+        public long put(DecoratedKey key, PartitionUpdate update, UpdateTransaction indexer, OpOrder.Group opGroup) throws MemtableTrie.SpaceExhaustedException
+        {
+            BTreePartitionUpdater updater = new BTreePartitionUpdater(allocator, allocator.cloner(opGroup), opGroup, indexer);
+            boolean locked = writeLock.tryLock();
+            if (locked)
+            {
+                metrics.uncontendedPuts.inc();
+            }
+            else
+            {
+                metrics.contendedPuts.inc();
+                long lockStartTime = Clock.Global.nanoTime();
+                writeLock.lock();
+                metrics.contentionTime.addNano(Clock.Global.nanoTime() - lockStartTime);
+            }
+            try
+            {
+                try
+                {
+                    long onHeap = data.sizeOnHeap();
+                    long offHeap = data.sizeOffHeap();
+                    // Use the fast recursive put if we know the key is small enough to not cause a stack overflow.
+                    data.putSingleton(key,
+                                      update,
+                                      updater::mergePartitions,
+                                      key.getKeyLength() < MAX_RECURSIVE_KEY_LENGTH);
+                    allocator.offHeap().adjust(data.sizeOffHeap() - offHeap, opGroup);
+                    allocator.onHeap().adjust(data.sizeOnHeap() - onHeap, opGroup);
+                }
+                finally
+                {
+                    minTimestamp = Math.min(minTimestamp, update.stats().minTimestamp);
+                    minLocalDeletionTime = Math.min(minLocalDeletionTime, update.stats().minLocalDeletionTime);
+                    liveDataSize += updater.dataSize;
+                    currentOperations += update.operationCount();
+
+                    columnsCollector.update(update.columns());
+                    statsCollector.update(update.stats());
+                }
+            }
+            finally
+            {
+                writeLock.unlock();
+            }
+            return updater.colUpdateTimeDelta;
+        }
+
+        public boolean isClean()
+        {
+            return data.isEmpty();
+        }
+
+        public int size()
+        {
+            return data.valuesCount();
+        }
+
+        long minTimestamp()
+        {
+            return minTimestamp;
+        }
+
+        long liveDataSize()
+        {
+            return liveDataSize;
+        }
+
+        long currentOperations()
+        {
+            return currentOperations;
+        }
+
+        int minLocalDeletionTime()
+        {
+            return minLocalDeletionTime;
+        }
+    }
+
+    static class MemtableUnfilteredPartitionIterator extends AbstractUnfilteredPartitionIterator implements UnfilteredPartitionIterator
+    {
+        private final TableMetadata metadata;
+        private final EnsureOnHeap ensureOnHeap;
+        private final Iterator<Map.Entry<ByteComparable, BTreePartitionData>> iter;
+        private final ColumnFilter columnFilter;
+        private final DataRange dataRange;
+
+        public MemtableUnfilteredPartitionIterator(TableMetadata metadata,
+                                                   EnsureOnHeap ensureOnHeap,
+                                                   Trie<BTreePartitionData> source,
+                                                   ColumnFilter columnFilter,
+                                                   DataRange dataRange)
+        {
+            this.metadata = metadata;
+            this.ensureOnHeap = ensureOnHeap;
+            this.iter = source.entryIterator();
+            this.columnFilter = columnFilter;
+            this.dataRange = dataRange;
+        }
+
+        public TableMetadata metadata()
+        {
+            return metadata;
+        }
+
+        public boolean hasNext()
+        {
+            return iter.hasNext();
+        }
+
+        public UnfilteredRowIterator next()
+        {
+            Partition partition = getPartitionFromTrieEntry(metadata(), ensureOnHeap, iter.next());
+            DecoratedKey key = partition.partitionKey();
+            ClusteringIndexFilter filter = dataRange.clusteringIndexFilter(key);
+
+            return filter.getUnfilteredRowIterator(columnFilter, partition);
+        }
+    }
+
+    static class MemtablePartition extends ImmutableBTreePartition
+    {
+
+        private final EnsureOnHeap ensureOnHeap;
+
+        private MemtablePartition(TableMetadata table, EnsureOnHeap ensureOnHeap, DecoratedKey key, BTreePartitionData data)
+        {
+            super(table, key, data);
+            this.ensureOnHeap = ensureOnHeap;
+        }
+
+        @Override
+        protected boolean canHaveShadowedData()
+        {
+            // The BTreePartitionData we store in the memtable are built iteratively by BTreePartitionData.add(), which
+            // doesn't make sure there isn't shadowed data, so we'll need to eliminate any.
+            return true;
+        }
+
+
+        @Override
+        public DeletionInfo deletionInfo()
+        {
+            return ensureOnHeap.applyToDeletionInfo(super.deletionInfo());
+        }
+
+        @Override
+        public Row staticRow()
+        {
+            return ensureOnHeap.applyToStatic(super.staticRow());
+        }
+
+        @Override
+        public DecoratedKey partitionKey()
+        {
+            return ensureOnHeap.applyToPartitionKey(super.partitionKey());
+        }
+
+        @Override
+        public Row getRow(Clustering<?> clustering)
+        {
+            return ensureOnHeap.applyToRow(super.getRow(clustering));
+        }
+
+        @Override
+        public Row lastRow()
+        {
+            return ensureOnHeap.applyToRow(super.lastRow());
+        }
+
+        @Override
+        public UnfilteredRowIterator unfilteredIterator(ColumnFilter selection, Slices slices, boolean reversed)
+        {
+            return unfilteredIterator(holder(), selection, slices, reversed);
+        }
+
+        @Override
+        public UnfilteredRowIterator unfilteredIterator(ColumnFilter selection, NavigableSet<Clustering<?>> clusteringsInQueryOrder, boolean reversed)
+        {
+            return ensureOnHeap.applyToPartition(super.unfilteredIterator(selection, clusteringsInQueryOrder, reversed));
+        }
+
+        @Override
+        public UnfilteredRowIterator unfilteredIterator()
+        {
+            return unfilteredIterator(ColumnFilter.selection(super.columns()), Slices.ALL, false);
+        }
+
+        @Override
+        public UnfilteredRowIterator unfilteredIterator(BTreePartitionData current, ColumnFilter selection, Slices slices, boolean reversed)
+        {
+            return ensureOnHeap.applyToPartition(super.unfilteredIterator(current, selection, slices, reversed));
+        }
+
+        @Override
+        public Iterator<Row> iterator()
+        {
+            return ensureOnHeap.applyToPartition(super.iterator());
+        }
+    }
+
+    public static Factory factory(Map<String, String> optionsCopy)
+    {
+        String shardsString = optionsCopy.remove(SHARDS_OPTION);
+        Integer shardCount = shardsString != null ? Integer.parseInt(shardsString) : null;
+        return new Factory(shardCount);
+    }
+
+    static class Factory implements Memtable.Factory
+    {
+        final Integer shardCount;
+
+        Factory(Integer shardCount)
+        {
+            this.shardCount = shardCount;
+        }
+
+        public Memtable create(AtomicReference<CommitLogPosition> commitLogLowerBound,
+                               TableMetadataRef metadaRef,
+                               Owner owner)
+        {
+            return new TrieMemtable(commitLogLowerBound, metadaRef, owner, shardCount);
+        }
+
+        @Override
+        public TableMetrics.ReleasableMetric createMemtableMetrics(TableMetadataRef metadataRef)
+        {
+            TrieMemtableMetricsView metrics = new TrieMemtableMetricsView(metadataRef.keyspace, metadataRef.name);
+            return metrics::release;
+        }
+
+        public boolean equals(Object o)
+        {
+            if (this == o)
+                return true;
+            if (o == null || getClass() != o.getClass())
+                return false;
+            Factory factory = (Factory) o;
+            return Objects.equals(shardCount, factory.shardCount);
+        }
+
+        public int hashCode()
+        {
+            return Objects.hash(shardCount);
+        }
+    }
+
+    @VisibleForTesting
+    public long unusedReservedMemory()
+    {
+        long size = 0;
+        for (MemtableShard shard : shards)
+            size += shard.data.unusedReservedMemory();
+        return size;
+    }
+}
diff --git a/src/java/org/apache/cassandra/db/partitions/AbstractBTreePartition.java b/src/java/org/apache/cassandra/db/partitions/AbstractBTreePartition.java
index 5926ced8b0..3327237573 100644
--- a/src/java/org/apache/cassandra/db/partitions/AbstractBTreePartition.java
+++ b/src/java/org/apache/cassandra/db/partitions/AbstractBTreePartition.java
@@ -22,14 +22,12 @@ import java.util.Collections;
 import java.util.Iterator;
 import java.util.NavigableSet;
 
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Iterators;
 
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.db.*;
 import org.apache.cassandra.db.filter.ColumnFilter;
 import org.apache.cassandra.db.rows.*;
-import org.apache.cassandra.utils.ObjectSizes;
 import org.apache.cassandra.utils.SearchIterator;
 import org.apache.cassandra.utils.btree.BTree;
 
@@ -37,12 +35,9 @@ import static org.apache.cassandra.utils.btree.BTree.Dir.desc;
 
 public abstract class AbstractBTreePartition implements Partition, Iterable<Row>
 {
-    protected static final Holder EMPTY = new Holder(RegularAndStaticColumns.NONE, BTree.empty(), DeletionInfo.LIVE, Rows.EMPTY_STATIC_ROW, EncodingStats.NO_STATS);
-    public static final long HOLDER_UNSHARED_HEAP_SIZE = ObjectSizes.measure(EMPTY);
-
     protected final DecoratedKey partitionKey;
 
-    protected abstract Holder holder();
+    protected abstract BTreePartitionData holder();
     protected abstract boolean canHaveShadowedData();
 
     protected AbstractBTreePartition(DecoratedKey partitionKey)
@@ -50,31 +45,6 @@ public abstract class AbstractBTreePartition implements Partition, Iterable<Row>
         this.partitionKey = partitionKey;
     }
 
-    @VisibleForTesting
-    public static final class Holder
-    {
-        public final RegularAndStaticColumns columns;
-        public final DeletionInfo deletionInfo;
-        // the btree of rows
-        public final Object[] tree;
-        public final Row staticRow;
-        public final EncodingStats stats;
-
-        Holder(RegularAndStaticColumns columns, Object[] tree, DeletionInfo deletionInfo, Row staticRow, EncodingStats stats)
-        {
-            this.columns = columns;
-            this.tree = tree;
-            this.deletionInfo = deletionInfo;
-            this.staticRow = staticRow == null ? Rows.EMPTY_STATIC_ROW : staticRow;
-            this.stats = stats;
-        }
-
-        protected Holder withColumns(RegularAndStaticColumns columns)
-        {
-            return new Holder(columns, this.tree, this.deletionInfo, this.staticRow, this.stats);
-        }
-    }
-
     public DeletionInfo deletionInfo()
     {
         return holder().deletionInfo;
@@ -87,13 +57,13 @@ public abstract class AbstractBTreePartition implements Partition, Iterable<Row>
 
     public boolean isEmpty()
     {
-        Holder holder = holder();
+        BTreePartitionData holder = holder();
         return holder.deletionInfo.isLive() && BTree.isEmpty(holder.tree) && holder.staticRow.isEmpty();
     }
 
     public boolean hasRows()
     {
-        Holder holder = holder();
+        BTreePartitionData holder = holder();
         return !BTree.isEmpty(holder.tree);
     }
 
@@ -122,7 +92,7 @@ public abstract class AbstractBTreePartition implements Partition, Iterable<Row>
     public Row getRow(Clustering<?> clustering)
     {
         ColumnFilter columns = ColumnFilter.selection(columns());
-        Holder holder = holder();
+        BTreePartitionData holder = holder();
 
         if (clustering == Clustering.STATIC_CLUSTERING)
         {
@@ -152,7 +122,7 @@ public abstract class AbstractBTreePartition implements Partition, Iterable<Row>
         return row.filter(columns, activeDeletion, true, metadata());
     }
 
-    private Row staticRow(Holder current, ColumnFilter columns, boolean setActiveDeletionToRow)
+    private Row staticRow(BTreePartitionData current, ColumnFilter columns, boolean setActiveDeletionToRow)
     {
         DeletionTime partitionDeletion = current.deletionInfo.getPartitionDeletion();
         if (columns.fetchedColumns().statics.isEmpty() || (current.staticRow.isEmpty() && partitionDeletion.isLive()))
@@ -185,7 +155,7 @@ public abstract class AbstractBTreePartition implements Partition, Iterable<Row>
         return unfilteredIterator(holder(), selection, slices, reversed);
     }
 
-    public UnfilteredRowIterator unfilteredIterator(Holder current, ColumnFilter selection, Slices slices, boolean reversed)
+    public UnfilteredRowIterator unfilteredIterator(BTreePartitionData current, ColumnFilter selection, Slices slices, boolean reversed)
     {
         Row staticRow = staticRow(current, selection, false);
         if (slices.size() == 0)
@@ -199,7 +169,7 @@ public abstract class AbstractBTreePartition implements Partition, Iterable<Row>
                : new SlicesIterator(selection, slices, reversed, current, staticRow);
     }
 
-    private UnfilteredRowIterator sliceIterator(ColumnFilter selection, Slice slice, boolean reversed, Holder current, Row staticRow)
+    private UnfilteredRowIterator sliceIterator(ColumnFilter selection, Slice slice, boolean reversed, BTreePartitionData current, Row staticRow)
     {
         ClusteringBound<?> start = slice.start().isBottom() ? null : slice.start();
         ClusteringBound<?> end = slice.end().isTop() ? null : slice.end();
@@ -209,7 +179,7 @@ public abstract class AbstractBTreePartition implements Partition, Iterable<Row>
     }
 
     private RowAndDeletionMergeIterator merge(Iterator<Row> rowIter, Iterator<RangeTombstone> deleteIter,
-                                              ColumnFilter selection, boolean reversed, Holder current, Row staticRow)
+                                              ColumnFilter selection, boolean reversed, BTreePartitionData current, Row staticRow)
     {
         return new RowAndDeletionMergeIterator(metadata(), partitionKey(), current.deletionInfo.getPartitionDeletion(),
                                                selection, staticRow, reversed, current.stats,
@@ -219,10 +189,10 @@ public abstract class AbstractBTreePartition implements Partition, Iterable<Row>
 
     private abstract class AbstractIterator extends AbstractUnfilteredRowIterator
     {
-        final Holder current;
+        final BTreePartitionData current;
         final ColumnFilter selection;
 
-        private AbstractIterator(Holder current, Row staticRow, ColumnFilter selection, boolean isReversed)
+        private AbstractIterator(BTreePartitionData current, Row staticRow, ColumnFilter selection, boolean isReversed)
         {
             super(AbstractBTreePartition.this.metadata(),
                   AbstractBTreePartition.this.partitionKey(),
@@ -245,7 +215,7 @@ public abstract class AbstractBTreePartition implements Partition, Iterable<Row>
         private int idx;
         private Iterator<Unfiltered> currentSlice;
 
-        private SlicesIterator(ColumnFilter selection, Slices slices, boolean isReversed, Holder current, Row staticRow)
+        private SlicesIterator(ColumnFilter selection, Slices slices, boolean isReversed, BTreePartitionData current, Row staticRow)
         {
             super(current, staticRow, selection, isReversed);
             this.slices = slices;
@@ -283,7 +253,7 @@ public abstract class AbstractBTreePartition implements Partition, Iterable<Row>
         private ClusteringsIterator(ColumnFilter selection,
                                     NavigableSet<Clustering<?>> clusteringsInQueryOrder,
                                     boolean isReversed,
-                                    Holder current,
+                                    BTreePartitionData current,
                                     Row staticRow)
         {
             super(current, staticRow, selection, isReversed);
@@ -326,12 +296,12 @@ public abstract class AbstractBTreePartition implements Partition, Iterable<Row>
         }
     }
 
-    protected static Holder build(UnfilteredRowIterator iterator, int initialRowCapacity)
+    protected static BTreePartitionData build(UnfilteredRowIterator iterator, int initialRowCapacity)
     {
         return build(iterator, initialRowCapacity, true);
     }
 
-    protected static Holder build(UnfilteredRowIterator iterator, int initialRowCapacity, boolean ordered)
+    protected static BTreePartitionData build(UnfilteredRowIterator iterator, int initialRowCapacity, boolean ordered)
     {
         TableMetadata metadata = iterator.metadata();
         RegularAndStaticColumns columns = iterator.columns();
@@ -353,12 +323,12 @@ public abstract class AbstractBTreePartition implements Partition, Iterable<Row>
         if (reversed)
             builder.reverse();
 
-        return new Holder(columns, builder.build(), deletionBuilder.build(), iterator.staticRow(), iterator.stats());
+        return new BTreePartitionData(columns, builder.build(), deletionBuilder.build(), iterator.staticRow(), iterator.stats());
     }
 
     // Note that when building with a RowIterator, deletion will generally be LIVE, but we allow to pass it nonetheless because PartitionUpdate
     // passes a MutableDeletionInfo that it mutates later.
-    protected static Holder build(RowIterator rows, DeletionInfo deletion, boolean buildEncodingStats)
+    protected static BTreePartitionData build(RowIterator rows, DeletionInfo deletion, boolean buildEncodingStats)
     {
         RegularAndStaticColumns columns = rows.columns();
         boolean reversed = rows.isReverseOrder();
@@ -375,7 +345,7 @@ public abstract class AbstractBTreePartition implements Partition, Iterable<Row>
             Row staticRow = rows.staticRow();
             EncodingStats stats = buildEncodingStats ? EncodingStats.Collector.collect(staticRow, BTree.iterator(tree), deletion)
                                                      : EncodingStats.NO_STATS;
-            return new Holder(columns, tree, deletion, staticRow, stats);
+            return new BTreePartitionData(columns, tree, deletion, staticRow, stats);
         }
     }
 
@@ -420,7 +390,7 @@ public abstract class AbstractBTreePartition implements Partition, Iterable<Row>
             return false;
 
         PartitionUpdate that = (PartitionUpdate) obj;
-        Holder a = this.holder(), b = that.holder();
+        BTreePartitionData a = this.holder(), b = that.holder();
         return partitionKey.equals(that.partitionKey)
                && metadata().id.equals(that.metadata().id)
                && a.deletionInfo.equals(b.deletionInfo)
@@ -446,16 +416,4 @@ public abstract class AbstractBTreePartition implements Partition, Iterable<Row>
 
         return BTree.findByIndex(tree, BTree.size(tree) - 1);
     }
-
-    @VisibleForTesting
-    public static Holder unsafeGetEmptyHolder()
-    {
-        return EMPTY;
-    }
-
-    @VisibleForTesting
-    public static Holder unsafeConstructHolder(RegularAndStaticColumns columns, Object[] tree, DeletionInfo deletionInfo, Row staticRow, EncodingStats stats)
-    {
-        return new Holder(columns, tree, deletionInfo, staticRow, stats);
-    }
 }
diff --git a/src/java/org/apache/cassandra/db/partitions/AtomicBTreePartition.java b/src/java/org/apache/cassandra/db/partitions/AtomicBTreePartition.java
index f3dbfc7daf..6cbb985f51 100644
--- a/src/java/org/apache/cassandra/db/partitions/AtomicBTreePartition.java
+++ b/src/java/org/apache/cassandra/db/partitions/AtomicBTreePartition.java
@@ -18,29 +18,25 @@
 package org.apache.cassandra.db.partitions;
 
 import java.nio.ByteBuffer;
-import java.util.ArrayList;
 import java.util.Iterator;
-import java.util.List;
 import java.util.NavigableSet;
 import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
 import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
 
+import com.google.common.annotations.VisibleForTesting;
+
+import org.apache.cassandra.index.transactions.UpdateTransaction;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.schema.TableMetadataRef;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.*;
 import org.apache.cassandra.db.filter.ColumnFilter;
 import org.apache.cassandra.db.rows.*;
-import org.apache.cassandra.index.transactions.UpdateTransaction;
 import org.apache.cassandra.utils.ObjectSizes;
-import org.apache.cassandra.utils.btree.BTree;
-import org.apache.cassandra.utils.btree.UpdateFunction;
 import org.apache.cassandra.utils.concurrent.OpOrder;
 import org.apache.cassandra.utils.memory.Cloner;
-import org.apache.cassandra.utils.memory.HeapCloner;
 import org.apache.cassandra.utils.memory.MemtableAllocator;
 import org.github.jamm.Unmetered;
-import com.google.common.annotations.VisibleForTesting;
 
 import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
@@ -72,7 +68,7 @@ public final class AtomicBTreePartition extends AbstractBTreePartition
     // CLOCK_GRANULARITY = 1^9ns >> CLOCK_SHIFT == 132us == (1/7.63)ms
 
     private static final AtomicIntegerFieldUpdater<AtomicBTreePartition> wasteTrackerUpdater = AtomicIntegerFieldUpdater.newUpdater(AtomicBTreePartition.class, "wasteTracker");
-    private static final AtomicReferenceFieldUpdater<AtomicBTreePartition, Holder> refUpdater = AtomicReferenceFieldUpdater.newUpdater(AtomicBTreePartition.class, Holder.class, "ref");
+    private static final AtomicReferenceFieldUpdater<AtomicBTreePartition, BTreePartitionData> refUpdater = AtomicReferenceFieldUpdater.newUpdater(AtomicBTreePartition.class, BTreePartitionData.class, "ref");
 
     /**
      * (clock + allocation) granularity are combined to give us an acceptable (waste) allocation rate that is defined by
@@ -87,7 +83,7 @@ public final class AtomicBTreePartition extends AbstractBTreePartition
     @Unmetered
     private final MemtableAllocator allocator;
 
-    private volatile Holder ref;
+    private volatile BTreePartitionData ref;
 
     @Unmetered
     private final TableMetadataRef metadata;
@@ -98,10 +94,10 @@ public final class AtomicBTreePartition extends AbstractBTreePartition
         super(partitionKey);
         this.metadata = metadata;
         this.allocator = allocator;
-        this.ref = EMPTY;
+        this.ref = BTreePartitionData.EMPTY;
     }
 
-    protected Holder holder()
+    protected BTreePartitionData holder()
     {
         return ref;
     }
@@ -116,92 +112,78 @@ public final class AtomicBTreePartition extends AbstractBTreePartition
         return true;
     }
 
-    private long[] addAllWithSizeDeltaInternal(RowUpdater updater, PartitionUpdate update, UpdateTransaction indexer)
+    /**
+     * Adds a given update to this in-memtable partition.
+     *
+     * @return the updater used for the merge, carrying the difference in data size seen after merging
+     * the updates and the minimum time delta between updates.
+     */
+    public BTreePartitionUpdater addAll(final PartitionUpdate update, Cloner cloner, OpOrder.Group writeOp, UpdateTransaction indexer)
     {
-        Holder current = ref;
-        updater.reset();
+        return new Updater(allocator, cloner, writeOp, indexer).addAll(update);
+    }
 
-        if (!update.deletionInfo().getPartitionDeletion().isLive())
-            indexer.onPartitionDeletion(update.deletionInfo().getPartitionDeletion());
+    @VisibleForTesting
+    public void unsafeSetHolder(BTreePartitionData holder)
+    {
+        ref = holder;
+    }
 
-        if (update.deletionInfo().hasRanges())
-            update.deletionInfo().rangeIterator(false).forEachRemaining(indexer::onRangeTombstone);
+    @VisibleForTesting
+    public BTreePartitionData unsafeGetHolder()
+    {
+        return ref;
+    }
 
-        DeletionInfo deletionInfo;
-        if (update.deletionInfo().mayModify(current.deletionInfo))
-        {
-            if (updater.inputDeletionInfoCopy == null)
-                updater.inputDeletionInfoCopy = update.deletionInfo().clone(HeapCloner.instance);
+    class Updater extends BTreePartitionUpdater
+    {
+        BTreePartitionData current;
 
-            deletionInfo = current.deletionInfo.mutableCopy().add(updater.inputDeletionInfoCopy);
-            updater.onAllocatedOnHeap(deletionInfo.unsharedHeapSize() - current.deletionInfo.unsharedHeapSize());
-        }
-        else
+        public Updater(MemtableAllocator allocator, Cloner cloner, OpOrder.Group writeOp, UpdateTransaction indexer)
         {
-            deletionInfo = current.deletionInfo;
+            super(allocator, cloner, writeOp, indexer);
         }
 
-        RegularAndStaticColumns columns = update.columns().mergeTo(current.columns);
-        updater.onAllocatedOnHeap(columns.unsharedHeapSize() - current.columns.unsharedHeapSize());
-        Row newStatic = update.staticRow();
-        Row staticRow = newStatic.isEmpty()
-                        ? current.staticRow
-                        : (current.staticRow.isEmpty() ? updater.insert(newStatic) : updater.merge(current.staticRow, newStatic));
-        Object[] tree = BTree.update(current.tree, update.holder().tree, update.metadata().comparator, updater);
-        EncodingStats newStats = current.stats.mergeWith(update.stats());
-        updater.onAllocatedOnHeap(newStats.unsharedHeapSize() - current.stats.unsharedHeapSize());
-
-        if (tree != null && refUpdater.compareAndSet(this, current, new Holder(columns, tree, deletionInfo, staticRow, newStats)))
-        {
-            updater.finish();
-            return new long[]{ updater.dataSize, updater.colUpdateTimeDelta };
-        }
-        else
-        {
-            return null;
-        }
-    }
-    /**
-     * Adds a given update to this in-memtable partition.
-     *
-     * @return an array containing first the difference in size seen after merging the updates, and second the minimum
-     * time detla between updates.
-     */
-    public long[] addAllWithSizeDelta(final PartitionUpdate update,
-                                      Cloner cloner,
-                                      OpOrder.Group writeOp,
-                                      UpdateTransaction indexer)
-    {
-        RowUpdater updater = new RowUpdater(allocator, cloner, writeOp, indexer);
-        try
+        Updater addAll(final PartitionUpdate update)
         {
-            boolean shouldLock = shouldLock(writeOp);
-            indexer.start();
-
-            while (true)
+            try
             {
-                if (shouldLock)
+                boolean shouldLock = shouldLock(writeOp);
+                indexer.start();
+
+                while (true)
                 {
-                    synchronized (this)
+                    if (shouldLock)
                     {
-                        long[] result = addAllWithSizeDeltaInternal(updater, update, indexer);
-                        if (result != null)
-                            return result;
+                        synchronized (this)
+                        {
+                            if (tryUpdateData(update))
+                                return this;
+                        }
                     }
-                }
-                else
-                {
-                    long[] result = addAllWithSizeDeltaInternal(updater, update, indexer);
-                    if (result != null)
-                        return result;
+                    else
+                    {
+                        if (tryUpdateData(update))
+                            return this;
 
-                    shouldLock = shouldLock(updater.heapSize, writeOp);
+                        shouldLock = shouldLock(heapSize, writeOp);
+                    }
                 }
             }
+            finally
+            {
+                indexer.commit();
+                reportAllocatedMemory();
+            }
         }
-        finally
+
+        private boolean tryUpdateData(PartitionUpdate update)
         {
-            indexer.commit();
+            current = ref;
+            this.dataSize = 0;
+            this.heapSize = 0;
+            BTreePartitionData result = makeMergedPartition(current, update);
+            return refUpdater.compareAndSet(AtomicBTreePartition.this, current, result);
         }
     }
 
@@ -254,7 +236,7 @@ public final class AtomicBTreePartition extends AbstractBTreePartition
     }
 
     @Override
-    public UnfilteredRowIterator unfilteredIterator(Holder current, ColumnFilter selection, Slices slices, boolean reversed)
+    public UnfilteredRowIterator unfilteredIterator(BTreePartitionData current, ColumnFilter selection, Slices slices, boolean reversed)
     {
         return allocator.ensureOnHeap().applyToPartition(super.unfilteredIterator(current, selection, slices, reversed));
     }
@@ -338,119 +320,4 @@ public final class AtomicBTreePartition extends AbstractBTreePartition
             return wasteTracker + 1;
         return wasteTracker;
     }
-
-    @VisibleForTesting
-    public void unsafeSetHolder(Holder holder)
-    {
-        ref = holder;
-    }
-
-    @VisibleForTesting
-    public Holder unsafeGetHolder()
-    {
-        return ref;
-    }
-
-    // the function we provide to the btree utilities to perform any column replacements
-    private static final class RowUpdater implements UpdateFunction<Row, Row>, ColumnData.PostReconciliationFunction
-    {
-        final MemtableAllocator allocator;
-        final OpOrder.Group writeOp;
-        final UpdateTransaction indexer;
-        final Cloner cloner;
-        long dataSize;
-        long heapSize;
-        long colUpdateTimeDelta = Long.MAX_VALUE;
-        List<Row> inserted; // TODO: replace with walk of aborted BTree
-
-        DeletionInfo inputDeletionInfoCopy = null;
-
-        private RowUpdater(MemtableAllocator allocator, Cloner cloner, OpOrder.Group writeOp, UpdateTransaction indexer)
-        {
-            this.allocator = allocator;
-            this.writeOp = writeOp;
-            this.indexer = indexer;
-            this.cloner = cloner;
-        }
-
-        @Override
-        public Row insert(Row insert)
-        {
-            Row data = insert.clone(cloner);
-            indexer.onInserted(insert);
-
-            this.dataSize += data.dataSize();
-            onAllocatedOnHeap(data.unsharedHeapSizeExcludingData());
-            if (inserted == null)
-                inserted = new ArrayList<>();
-            inserted.add(data);
-            return data;
-        }
-
-        public Row merge(Row existing, Row update)
-        {
-            Row reconciled = Rows.merge(existing, update, this);
-            indexer.onUpdated(existing, reconciled);
-
-            if (inserted == null)
-                inserted = new ArrayList<>();
-            inserted.add(reconciled);
-
-            return reconciled;
-        }
-
-        public Row retain(Row existing)
-        {
-            return existing;
-        }
-
-        protected void reset()
-        {
-            this.dataSize = 0;
-            this.heapSize = 0;
-            if (inserted != null)
-                inserted.clear();
-        }
-
-        public Cell<?> merge(Cell<?> previous, Cell<?> insert)
-        {
-            if (insert != previous)
-            {
-                long timeDelta = Math.abs(insert.timestamp() - previous.timestamp());
-                if (timeDelta < colUpdateTimeDelta)
-                    colUpdateTimeDelta = timeDelta;
-            }
-            if (cloner != null)
-                insert = cloner.clone(insert);
-            dataSize += insert.dataSize() - previous.dataSize();
-            heapSize += insert.unsharedHeapSizeExcludingData() - previous.unsharedHeapSizeExcludingData();
-            return insert;
-        }
-
-        public ColumnData insert(ColumnData insert)
-        {
-            if (cloner != null)
-                insert = insert.clone(cloner);
-            dataSize += insert.dataSize();
-            heapSize += insert.unsharedHeapSizeExcludingData();
-            return insert;
-        }
-
-        @Override
-        public void delete(ColumnData existing)
-        {
-            dataSize -= existing.dataSize();
-            heapSize -= existing.unsharedHeapSizeExcludingData();
-        }
-
-        public void onAllocatedOnHeap(long heapSize)
-        {
-            this.heapSize += heapSize;
-        }
-
-        protected void finish()
-        {
-            allocator.onHeap().adjust(heapSize, writeOp);
-        }
-    }
 }
diff --git a/src/java/org/apache/cassandra/db/partitions/BTreePartitionData.java b/src/java/org/apache/cassandra/db/partitions/BTreePartitionData.java
new file mode 100644
index 0000000000..aefcd1d5a9
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/partitions/BTreePartitionData.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.partitions;
+
+import java.util.Arrays;
+
+import com.google.common.annotations.VisibleForTesting;
+
+import org.apache.cassandra.db.DeletionInfo;
+import org.apache.cassandra.db.RegularAndStaticColumns;
+import org.apache.cassandra.db.rows.EncodingStats;
+import org.apache.cassandra.db.rows.Row;
+import org.apache.cassandra.db.rows.Rows;
+import org.apache.cassandra.utils.ObjectSizes;
+import org.apache.cassandra.utils.btree.BTree;
+
+/**
+ * Holder of the content of a partition, see {@link AbstractBTreePartition}.
+ * When updating a partition one holder is swapped for another atomically.
+ */
+public final class BTreePartitionData
+{
+    public static final BTreePartitionData EMPTY = new BTreePartitionData(RegularAndStaticColumns.NONE,
+                                                                          BTree.empty(),
+                                                                          DeletionInfo.LIVE,
+                                                                          Rows.EMPTY_STATIC_ROW,
+                                                                          EncodingStats.NO_STATS);
+    public static final long UNSHARED_HEAP_SIZE = ObjectSizes.measure(EMPTY);
+
+
+    final RegularAndStaticColumns columns;
+    final DeletionInfo deletionInfo;
+    // the btree of rows
+    final Object[] tree;
+    final Row staticRow;
+    public final EncodingStats stats;
+
+    BTreePartitionData(RegularAndStaticColumns columns,
+                       Object[] tree,
+                       DeletionInfo deletionInfo,
+                       Row staticRow,
+                       EncodingStats stats)
+    {
+        this.columns = columns;
+        this.tree = tree;
+        this.deletionInfo = deletionInfo;
+        this.staticRow = staticRow == null ? Rows.EMPTY_STATIC_ROW : staticRow;
+        this.stats = stats;
+    }
+
+    BTreePartitionData withColumns(RegularAndStaticColumns columns)
+    {
+        return new BTreePartitionData(columns, this.tree, this.deletionInfo, this.staticRow, this.stats);
+    }
+
+    @VisibleForTesting
+    public static BTreePartitionData unsafeGetEmpty()
+    {
+        return EMPTY;
+    }
+
+    @VisibleForTesting
+    public static BTreePartitionData unsafeConstruct(RegularAndStaticColumns columns,
+                                                     Object[] tree,
+                                                     DeletionInfo deletionInfo,
+                                                     Row staticRow,
+                                                     EncodingStats stats)
+    {
+        return new BTreePartitionData(columns, tree, deletionInfo, staticRow, stats);
+    }
+
+    @VisibleForTesting
+    public static void unsafeInvalidate(AtomicBTreePartition partition)
+    {
+        BTreePartitionData holder = partition.unsafeGetHolder();
+        if (!BTree.isEmpty(holder.tree))
+        {
+            partition.unsafeSetHolder(unsafeConstruct(holder.columns,
+                                                      Arrays.copyOf(holder.tree, holder.tree.length),
+                                                      holder.deletionInfo,
+                                                      holder.staticRow,
+                                                      holder.stats));
+        }
+    }
+}
diff --git a/src/java/org/apache/cassandra/db/partitions/BTreePartitionUpdater.java b/src/java/org/apache/cassandra/db/partitions/BTreePartitionUpdater.java
new file mode 100644
index 0000000000..7c09ae81f7
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/partitions/BTreePartitionUpdater.java
@@ -0,0 +1,184 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.partitions;
+
+import org.apache.cassandra.db.DeletionInfo;
+import org.apache.cassandra.db.RegularAndStaticColumns;
+import org.apache.cassandra.db.rows.Cell;
+import org.apache.cassandra.db.rows.ColumnData;
+import org.apache.cassandra.db.rows.EncodingStats;
+import org.apache.cassandra.db.rows.Row;
+import org.apache.cassandra.db.rows.Rows;
+import org.apache.cassandra.index.transactions.UpdateTransaction;
+import org.apache.cassandra.utils.btree.BTree;
+import org.apache.cassandra.utils.btree.UpdateFunction;
+import org.apache.cassandra.utils.concurrent.OpOrder;
+import org.apache.cassandra.utils.memory.Cloner;
+import org.apache.cassandra.utils.memory.HeapCloner;
+import org.apache.cassandra.utils.memory.MemtableAllocator;
+
+/**
+ * The function supplied to the trie and btree utilities to perform any row and column replacements.
+ */
+public class BTreePartitionUpdater implements UpdateFunction<Row, Row>, ColumnData.PostReconciliationFunction
+{
+    final MemtableAllocator allocator;
+    final OpOrder.Group writeOp;
+    final Cloner cloner;
+    final UpdateTransaction indexer;
+    public long dataSize;
+    long heapSize;
+    public long colUpdateTimeDelta = Long.MAX_VALUE;
+
+    public BTreePartitionUpdater(MemtableAllocator allocator, Cloner cloner, OpOrder.Group writeOp, UpdateTransaction indexer)
+    {
+        this.allocator = allocator;
+        this.cloner = cloner;
+        this.writeOp = writeOp;
+        this.indexer = indexer;
+        this.heapSize = 0;
+        this.dataSize = 0;
+    }
+
+    public BTreePartitionData mergePartitions(BTreePartitionData current, final PartitionUpdate update)
+    {
+        if (current == null)
+        {
+            current = BTreePartitionData.EMPTY;
+            onAllocatedOnHeap(BTreePartitionData.UNSHARED_HEAP_SIZE);
+        }
+
+        try
+        {
+            indexer.start();
+
+            return makeMergedPartition(current, update);
+        }
+        finally
+        {
+            indexer.commit();
+            reportAllocatedMemory();
+        }
+    }
+
+    protected BTreePartitionData makeMergedPartition(BTreePartitionData current, PartitionUpdate update)
+    {
+        DeletionInfo newDeletionInfo = merge(current.deletionInfo, update.deletionInfo());
+
+        RegularAndStaticColumns columns = current.columns;
+        RegularAndStaticColumns newColumns = update.columns().mergeTo(columns);
+        onAllocatedOnHeap(newColumns.unsharedHeapSize() - columns.unsharedHeapSize());
+        Row newStatic = mergeStatic(current.staticRow, update.staticRow());
+
+        Object[] tree = BTree.update(current.tree, update.holder().tree, update.metadata().comparator, this);
+        EncodingStats newStats = current.stats.mergeWith(update.stats());
+        onAllocatedOnHeap(newStats.unsharedHeapSize() - current.stats.unsharedHeapSize());
+
+        return new BTreePartitionData(newColumns, tree, newDeletionInfo, newStatic, newStats);
+    }
+
+    private Row mergeStatic(Row current, Row update)
+    {
+        if (update.isEmpty())
+            return current;
+        if (current.isEmpty())
+            return insert(update);
+
+        return merge(current, update);
+    }
+
+    private DeletionInfo merge(DeletionInfo existing, DeletionInfo update)
+    {
+        if (update.isLive() || !update.mayModify(existing))
+            return existing;
+
+        if (!update.getPartitionDeletion().isLive())
+            indexer.onPartitionDeletion(update.getPartitionDeletion());
+
+        if (update.hasRanges())
+            update.rangeIterator(false).forEachRemaining(indexer::onRangeTombstone);
+
+        // Like for rows, we have to clone the update in case internal buffers (when it has range tombstones) reference
+        // memory we shouldn't hold on to. But we don't ever store this off-heap currently, so we just default to
+        // HeapCloner (rather than using 'allocator').
+        DeletionInfo newInfo = existing.mutableCopy().add(update.clone(HeapCloner.instance));
+        onAllocatedOnHeap(newInfo.unsharedHeapSize() - existing.unsharedHeapSize());
+        return newInfo;
+    }
+
+    @Override
+    public Row insert(Row insert)
+    {
+        Row data = insert.clone(cloner);
+        indexer.onInserted(insert);
+
+        dataSize += data.dataSize();
+        onAllocatedOnHeap(data.unsharedHeapSizeExcludingData());
+        return data;
+    }
+
+    public Row merge(Row existing, Row update)
+    {
+        Row reconciled = Rows.merge(existing, update, this);
+        indexer.onUpdated(existing, reconciled);
+
+        return reconciled;
+    }
+
+    public Cell<?> merge(Cell<?> previous, Cell<?> insert)
+    {
+        if (insert != previous)
+        {
+            long timeDelta = Math.abs(insert.timestamp() - previous.timestamp());
+            if (timeDelta < colUpdateTimeDelta)
+                colUpdateTimeDelta = timeDelta;
+        }
+        if (cloner != null)
+            insert = cloner.clone(insert);
+        dataSize += insert.dataSize() - previous.dataSize();
+        heapSize += insert.unsharedHeapSizeExcludingData() - previous.unsharedHeapSizeExcludingData();
+        return insert;
+    }
+
+    public ColumnData insert(ColumnData insert)
+    {
+        if (cloner != null)
+            insert = insert.clone(cloner);
+        dataSize += insert.dataSize();
+        heapSize += insert.unsharedHeapSizeExcludingData();
+        return insert;
+    }
+
+    @Override
+    public void delete(ColumnData existing)
+    {
+        dataSize -= existing.dataSize();
+        heapSize -= existing.unsharedHeapSizeExcludingData();
+    }
+
+    public void onAllocatedOnHeap(long heapSize)
+    {
+        this.heapSize += heapSize;
+    }
+
+    public void reportAllocatedMemory()
+    {
+        allocator.onHeap().adjust(heapSize, writeOp);
+    }
+}
diff --git a/src/java/org/apache/cassandra/db/partitions/CachedBTreePartition.java b/src/java/org/apache/cassandra/db/partitions/CachedBTreePartition.java
index 2183a9852a..f09f75aa58 100644
--- a/src/java/org/apache/cassandra/db/partitions/CachedBTreePartition.java
+++ b/src/java/org/apache/cassandra/db/partitions/CachedBTreePartition.java
@@ -40,7 +40,7 @@ public class CachedBTreePartition extends ImmutableBTreePartition implements Cac
 
     private CachedBTreePartition(TableMetadata metadata,
                                  DecoratedKey partitionKey,
-                                 Holder holder,
+                                 BTreePartitionData holder,
                                  int createdAtInSec,
                                  int cachedLiveRows,
                                  int rowsWithNonExpiringCells)
@@ -80,7 +80,7 @@ public class CachedBTreePartition extends ImmutableBTreePartition implements Cac
      */
     public static CachedBTreePartition create(UnfilteredRowIterator iterator, int initialRowCapacity, int nowInSec)
     {
-        Holder holder = ImmutableBTreePartition.build(iterator, initialRowCapacity);
+        BTreePartitionData holder = ImmutableBTreePartition.build(iterator, initialRowCapacity);
 
         int cachedLiveRows = 0;
         int rowsWithNonExpiringCells = 0;
@@ -180,7 +180,7 @@ public class CachedBTreePartition extends ImmutableBTreePartition implements Cac
             UnfilteredRowIteratorSerializer.Header header = UnfilteredRowIteratorSerializer.serializer.deserializeHeader(metadata, null, in, version, DeserializationHelper.Flag.LOCAL);
             assert !header.isReversed && header.rowEstimate >= 0;
 
-            Holder holder;
+            BTreePartitionData holder;
             try (UnfilteredRowIterator partition = UnfilteredRowIteratorSerializer.serializer.deserialize(in, version, metadata, DeserializationHelper.Flag.LOCAL, header))
             {
                 holder = ImmutableBTreePartition.build(partition, header.rowEstimate);
diff --git a/src/java/org/apache/cassandra/db/partitions/ImmutableBTreePartition.java b/src/java/org/apache/cassandra/db/partitions/ImmutableBTreePartition.java
index 5139d40134..6617255660 100644
--- a/src/java/org/apache/cassandra/db/partitions/ImmutableBTreePartition.java
+++ b/src/java/org/apache/cassandra/db/partitions/ImmutableBTreePartition.java
@@ -27,7 +27,7 @@ import org.apache.cassandra.db.rows.*;
 public class ImmutableBTreePartition extends AbstractBTreePartition
 {
 
-    protected final Holder holder;
+    protected final BTreePartitionData holder;
     protected final TableMetadata metadata;
 
     public ImmutableBTreePartition(TableMetadata metadata,
@@ -40,12 +40,12 @@ public class ImmutableBTreePartition extends AbstractBTreePartition
     {
         super(partitionKey);
         this.metadata = metadata;
-        this.holder = new Holder(columns, tree, deletionInfo, staticRow, stats);
+        this.holder = new BTreePartitionData(columns, tree, deletionInfo, staticRow, stats);
     }
 
     protected ImmutableBTreePartition(TableMetadata metadata,
                                       DecoratedKey partitionKey,
-                                      Holder holder)
+                                      BTreePartitionData holder)
     {
         super(partitionKey);
         this.metadata = metadata;
@@ -119,7 +119,7 @@ public class ImmutableBTreePartition extends AbstractBTreePartition
         return metadata;
     }
 
-    protected Holder holder()
+    protected BTreePartitionData holder()
     {
         return holder;
     }
diff --git a/src/java/org/apache/cassandra/db/partitions/PartitionUpdate.java b/src/java/org/apache/cassandra/db/partitions/PartitionUpdate.java
index 6019d56fb8..935a7b968a 100644
--- a/src/java/org/apache/cassandra/db/partitions/PartitionUpdate.java
+++ b/src/java/org/apache/cassandra/db/partitions/PartitionUpdate.java
@@ -67,7 +67,7 @@ public class PartitionUpdate extends AbstractBTreePartition
 
     public static final PartitionUpdateSerializer serializer = new PartitionUpdateSerializer();
 
-    private final Holder holder;
+    private final BTreePartitionData holder;
     private final DeletionInfo deletionInfo;
     private final TableMetadata metadata;
 
@@ -75,7 +75,7 @@ public class PartitionUpdate extends AbstractBTreePartition
 
     private PartitionUpdate(TableMetadata metadata,
                             DecoratedKey key,
-                            Holder holder,
+                            BTreePartitionData holder,
                             MutableDeletionInfo deletionInfo,
                             boolean canHaveShadowedData)
     {
@@ -97,7 +97,7 @@ public class PartitionUpdate extends AbstractBTreePartition
     public static PartitionUpdate emptyUpdate(TableMetadata metadata, DecoratedKey key)
     {
         MutableDeletionInfo deletionInfo = MutableDeletionInfo.live();
-        Holder holder = new Holder(RegularAndStaticColumns.NONE, BTree.empty(), deletionInfo, Rows.EMPTY_STATIC_ROW, EncodingStats.NO_STATS);
+        BTreePartitionData holder = new BTreePartitionData(RegularAndStaticColumns.NONE, BTree.empty(), deletionInfo, Rows.EMPTY_STATIC_ROW, EncodingStats.NO_STATS);
         return new PartitionUpdate(metadata, key, holder, deletionInfo, false);
     }
 
@@ -114,7 +114,7 @@ public class PartitionUpdate extends AbstractBTreePartition
     public static PartitionUpdate fullPartitionDelete(TableMetadata metadata, DecoratedKey key, long timestamp, int nowInSec)
     {
         MutableDeletionInfo deletionInfo = new MutableDeletionInfo(timestamp, nowInSec);
-        Holder holder = new Holder(RegularAndStaticColumns.NONE, BTree.empty(), deletionInfo, Rows.EMPTY_STATIC_ROW, EncodingStats.NO_STATS);
+        BTreePartitionData holder = new BTreePartitionData(RegularAndStaticColumns.NONE, BTree.empty(), deletionInfo, Rows.EMPTY_STATIC_ROW, EncodingStats.NO_STATS);
         return new PartitionUpdate(metadata, key, holder, deletionInfo, false);
     }
 
@@ -131,7 +131,7 @@ public class PartitionUpdate extends AbstractBTreePartition
     public static PartitionUpdate singleRowUpdate(TableMetadata metadata, DecoratedKey key, Row row, Row staticRow)
     {
         MutableDeletionInfo deletionInfo = MutableDeletionInfo.live();
-        Holder holder = new Holder(
+        BTreePartitionData holder = new BTreePartitionData(
             new RegularAndStaticColumns(
                 staticRow == null ? Columns.NONE : Columns.from(staticRow),
                 row == null ? Columns.NONE : Columns.from(row)
@@ -187,7 +187,7 @@ public class PartitionUpdate extends AbstractBTreePartition
     public static PartitionUpdate fromIterator(UnfilteredRowIterator iterator, ColumnFilter filter)
     {
         iterator = UnfilteredRowIterators.withOnlyQueriedData(iterator, filter);
-        Holder holder = build(iterator, 16);
+        BTreePartitionData holder = build(iterator, 16);
         MutableDeletionInfo deletionInfo = (MutableDeletionInfo) holder.deletionInfo;
         return new PartitionUpdate(iterator.metadata(), iterator.partitionKey(), holder, deletionInfo, false);
     }
@@ -208,7 +208,7 @@ public class PartitionUpdate extends AbstractBTreePartition
     {
         iterator = RowIterators.withOnlyQueriedData(iterator, filter);
         MutableDeletionInfo deletionInfo = MutableDeletionInfo.live();
-        Holder holder = build(iterator, deletionInfo, true);
+        BTreePartitionData holder = build(iterator, deletionInfo, true);
         return new PartitionUpdate(iterator.metadata(), iterator.partitionKey(), holder, deletionInfo, false);
     }
 
@@ -373,7 +373,7 @@ public class PartitionUpdate extends AbstractBTreePartition
         return holder.columns;
     }
 
-    protected Holder holder()
+    protected BTreePartitionData holder()
     {
         return holder;
     }
@@ -494,7 +494,7 @@ public class PartitionUpdate extends AbstractBTreePartition
     @VisibleForTesting
     public static PartitionUpdate unsafeConstruct(TableMetadata metadata,
                                                   DecoratedKey key,
-                                                  Holder holder,
+                                                  BTreePartitionData holder,
                                                   MutableDeletionInfo deletionInfo,
                                                   boolean canHaveShadowedData)
     {
@@ -690,7 +690,7 @@ public class PartitionUpdate extends AbstractBTreePartition
             MutableDeletionInfo deletionInfo = deletionBuilder.build();
             return new PartitionUpdate(metadata,
                                        header.key,
-                                       new Holder(header.sHeader.columns(), rows, deletionInfo, header.staticRow, header.sHeader.stats()),
+                                       new BTreePartitionData(header.sHeader.columns(), rows, deletionInfo, header.staticRow, header.sHeader.stats()),
                                        deletionInfo,
                                        false);
         }
@@ -800,7 +800,7 @@ public class PartitionUpdate extends AbstractBTreePartition
                        RegularAndStaticColumns columns,
                        int initialRowCapacity,
                        boolean canHaveShadowedData,
-                       Holder holder)
+                       BTreePartitionData holder)
         {
             this(metadata, key, columns, initialRowCapacity, canHaveShadowedData, holder.staticRow, holder.deletionInfo, holder.tree);
         }
@@ -909,11 +909,11 @@ public class PartitionUpdate extends AbstractBTreePartition
             isBuilt = true;
             return new PartitionUpdate(metadata,
                                        partitionKey(),
-                                       new Holder(columns,
-                                                  merged,
-                                                  deletionInfo,
-                                                  staticRow,
-                                                  newStats),
+                                       new BTreePartitionData(columns,
+                                                              merged,
+                                                              deletionInfo,
+                                                              staticRow,
+                                                              newStats),
                                        deletionInfo,
                                        canHaveShadowedData);
         }
diff --git a/src/java/org/apache/cassandra/db/tries/MemtableReadTrie.java b/src/java/org/apache/cassandra/db/tries/MemtableReadTrie.java
index e9c1e150ec..5b7919c932 100644
--- a/src/java/org/apache/cassandra/db/tries/MemtableReadTrie.java
+++ b/src/java/org/apache/cassandra/db/tries/MemtableReadTrie.java
@@ -176,9 +176,6 @@ public class MemtableReadTrie<T> extends Trie<T>
     // Offset of the next pointer in a non-shared prefix node
     static final int PREFIX_POINTER_OFFSET = LAST_POINTER_OFFSET - PREFIX_OFFSET;
 
-    // Initial capacity for the node data buffer.
-    static final int INITIAL_BUFFER_CAPACITY = 256;
-
     /**
      * Value used as null for node pointers.
      * No node can use this address (we enforce this by not allowing chain nodes to grow to position 0).
@@ -214,6 +211,11 @@ public class MemtableReadTrie<T> extends Trie<T>
     static final int CONTENTS_START_SHIFT = 4;
     static final int CONTENTS_START_SIZE = 1 << CONTENTS_START_SHIFT;
 
+    static
+    {
+        assert BUF_START_SIZE % BLOCK_SIZE == 0 : "Initial buffer size must fit a full block.";
+    }
+
     final UnsafeBuffer[] buffers;
     final AtomicReferenceArray<T>[] contentArrays;
 
diff --git a/src/java/org/apache/cassandra/db/tries/MemtableTrie.java b/src/java/org/apache/cassandra/db/tries/MemtableTrie.java
index 45893fd247..f0dbe098f6 100644
--- a/src/java/org/apache/cassandra/db/tries/MemtableTrie.java
+++ b/src/java/org/apache/cassandra/db/tries/MemtableTrie.java
@@ -75,7 +75,7 @@ public class MemtableTrie<T> extends MemtableReadTrie<T>
     // constants for space calculations
     private static final long EMPTY_SIZE_ON_HEAP;
     private static final long EMPTY_SIZE_OFF_HEAP;
-    private static final long REFERENCE_ARRAY_ON_HEAP_SIZE = ObjectSizes.measureDeep(new AtomicReferenceArray(0));
+    private static final long REFERENCE_ARRAY_ON_HEAP_SIZE = ObjectSizes.measureDeep(new AtomicReferenceArray<>(0));
 
     static
     {
@@ -91,7 +91,6 @@ public class MemtableTrie<T> extends MemtableReadTrie<T>
               new AtomicReferenceArray[29 - CONTENTS_START_SHIFT],  // takes at least 4 bytes to write pointer to one content -> 4 times smaller than buffers
               NONE);
         this.bufferType = bufferType;
-        assert INITIAL_BUFFER_CAPACITY % BLOCK_SIZE == 0;
     }
 
     // Buffer, content list and block management
@@ -117,11 +116,6 @@ public class MemtableTrie<T> extends MemtableReadTrie<T>
         getChunk(pos).putInt(inChunkPointer(pos), value);
     }
 
-    final void putIntOrdered(int pos, int value)
-    {
-        getChunk(pos).putIntOrdered(inChunkPointer(pos), value);
-    }
-
     final void putIntVolatile(int pos, int value)
     {
         getChunk(pos).putIntVolatile(inChunkPointer(pos), value);
@@ -149,20 +143,16 @@ public class MemtableTrie<T> extends MemtableReadTrie<T>
         // close to the 2G limit.
         int v = allocatedPos;
         if (inChunkPointer(v) == 0)
-            {
+        {
             int leadBit = getChunkIdx(v, BUF_START_SHIFT, BUF_START_SIZE);
-            if (leadBit == 31)
-                        throw new SpaceExhaustedException();
+            if (leadBit + BUF_START_SHIFT == 31)
+                throw new SpaceExhaustedException();
 
-            assert buffers[leadBit] == null;
             ByteBuffer newBuffer = bufferType.allocate(BUF_START_SIZE << leadBit);
             buffers[leadBit] = new UnsafeBuffer(newBuffer);
-            // The above does not contain any happens-before enforcing writes, thus at this point the new buffer may be
-            // invisible to any concurrent readers. Touching the volatile root pointer (which any new read must go
-            // through) enforces a happens-before that makes it visible to all new reads (note: when the write completes
-            // it must do some volatile write, but that will be in the new buffer and without the line below could
-            // remain unreachable by other cores).
-            root = root;
+            // Note: Since we are not moving existing data to a new buffer, we are okay with no happens-before enforcing
+            // writes. Any reader that sees a pointer in the new buffer may only do so after reading the volatile write
+            // that attached the new path.
         }
 
         allocatedPos += BLOCK_SIZE;
@@ -177,11 +167,11 @@ public class MemtableTrie<T> extends MemtableReadTrie<T>
         AtomicReferenceArray<T> array = contentArrays[leadBit];
         if (array == null)
         {
-            assert ofs == 0;
+            assert ofs == 0 : "Error in content arrays configuration.";
             contentArrays[leadBit] = array = new AtomicReferenceArray<>(CONTENTS_START_SIZE << leadBit);
         }
         array.lazySet(ofs, value); // no need for a volatile set here; at this point the item is not referenced
-                                            // by any node in the trie, and a volatile set will be made to reference it.
+                                   // by any node in the trie, and a volatile set will be made to reference it.
         return index;
     }
 
diff --git a/src/java/org/apache/cassandra/db/tries/MergeTrie.java b/src/java/org/apache/cassandra/db/tries/MergeTrie.java
index 80abb5ed69..f280776984 100644
--- a/src/java/org/apache/cassandra/db/tries/MergeTrie.java
+++ b/src/java/org/apache/cassandra/db/tries/MergeTrie.java
@@ -118,7 +118,6 @@ class MergeTrie<T> extends Trie<T>
             int c2trans = c2.incomingTransition();
             atC1 = c1trans <= c2trans;
             atC2 = c1trans >= c2trans;
-            assert atC1 | atC2;
             return c1depth;
         }
 
diff --git a/src/java/org/apache/cassandra/io/sstable/metadata/MetadataCollector.java b/src/java/org/apache/cassandra/io/sstable/metadata/MetadataCollector.java
index 4786a1cbbc..7d4e26bd7d 100644
--- a/src/java/org/apache/cassandra/io/sstable/metadata/MetadataCollector.java
+++ b/src/java/org/apache/cassandra/io/sstable/metadata/MetadataCollector.java
@@ -253,8 +253,8 @@ public class MetadataCollector implements PartitionStatisticsCollector
 
     public MetadataCollector updateClusteringValues(ClusteringPrefix<?> clustering)
     {
-        minClustering = minClustering == null || comparator.compare(clustering, minClustering) < 0 ? clustering.minimize() : minClustering;
-        maxClustering = maxClustering == null || comparator.compare(clustering, maxClustering) > 0 ? clustering.minimize() : maxClustering;
+        minClustering = minClustering == null || comparator.compare(clustering, minClustering) < 0 ? clustering : minClustering;
+        maxClustering = maxClustering == null || comparator.compare(clustering, maxClustering) > 0 ? clustering : maxClustering;
         return this;
     }
 
@@ -267,8 +267,8 @@ public class MetadataCollector implements PartitionStatisticsCollector
     {
         Preconditions.checkState((minClustering == null && maxClustering == null)
                                  || comparator.compare(maxClustering, minClustering) >= 0);
-        ByteBuffer[] minValues = minClustering != null ? minClustering.getBufferArray() : EMPTY_CLUSTERING;
-        ByteBuffer[] maxValues = maxClustering != null ? maxClustering.getBufferArray() : EMPTY_CLUSTERING;
+        ByteBuffer[] minValues = minClustering != null ? minClustering.retainable().getBufferArray() : EMPTY_CLUSTERING;
+        ByteBuffer[] maxValues = maxClustering != null ? maxClustering.retainable().getBufferArray() : EMPTY_CLUSTERING;
         Map<MetadataType, MetadataComponent> components = new EnumMap<>(MetadataType.class);
         components.put(MetadataType.VALIDATION, new ValidationMetadata(partitioner, bloomFilterFPChance));
         components.put(MetadataType.STATS, new StatsMetadata(estimatedPartitionSize,
diff --git a/src/java/org/apache/cassandra/metrics/MinMaxAvgMetric.java b/src/java/org/apache/cassandra/metrics/MinMaxAvgMetric.java
new file mode 100644
index 0000000000..b65f52f486
--- /dev/null
+++ b/src/java/org/apache/cassandra/metrics/MinMaxAvgMetric.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.metrics;
+
+import com.codahale.metrics.Gauge;
+
+import static org.apache.cassandra.metrics.CassandraMetricsRegistry.Metrics;
+
+/**
+ * Tracks minimum, maximum, sum and sum-of-squares of a series of long samples and exposes
+ * min/max/average/standard-deviation/sample-count as registered gauges. The sample state is
+ * rebuilt between observation windows via {@link #reset()} followed by {@link #update(long)} calls.
+ * Not synchronized: gauges may observe a partially updated window.
+ */
+public class MinMaxAvgMetric
+{
+    private final MetricNameFactory factory;
+    private final String namePrefix;
+
+    final Gauge<Long> minGauge;
+    final Gauge<Long> maxGauge;
+    final Gauge<Double> avgGauge;
+    final Gauge<Double> stddevGauge;
+    final Gauge<Integer> numSamplesGauge;
+
+    // NOTE(review): these start at the Java default (0), not at the MIN/MAX sentinels that
+    // reset() installs; until reset() is first called, min/max read 0 and update() would keep
+    // max at 0 for all-negative samples. Confirm callers always reset() before the first update().
+    private long min;
+    private long max;
+    private long sum;
+    private long sumSquares;
+    private int numSamples;
+
+    /**
+     * Registers the five gauges under {@code namePrefix + {"Min","Max","Avg","StdDev","NumSamples"}}.
+     */
+    public MinMaxAvgMetric(MetricNameFactory factory, String namePrefix)
+    {
+        this.factory = factory;
+        this.namePrefix = namePrefix;
+
+        minGauge = Metrics.register(factory.createMetricName(namePrefix + "Min"), () -> min);
+        maxGauge = Metrics.register(factory.createMetricName(namePrefix + "Max"), () -> max);
+        avgGauge = Metrics.register(factory.createMetricName(namePrefix + "Avg"), () -> numSamples > 0 ? ((double) sum) / numSamples : 0);
+        stddevGauge = Metrics.register(factory.createMetricName(namePrefix + "StdDev"), () -> stddev());
+        numSamplesGauge = Metrics.register(factory.createMetricName(namePrefix + "NumSamples"), () -> numSamples);
+    }
+
+    /** Unregisters all five gauges; must mirror the names registered in the constructor. */
+    public void release()
+    {
+        Metrics.remove(factory.createMetricName(namePrefix + "Min"));
+        Metrics.remove(factory.createMetricName(namePrefix + "Max"));
+        Metrics.remove(factory.createMetricName(namePrefix + "Avg"));
+        Metrics.remove(factory.createMetricName(namePrefix + "StdDev"));
+        Metrics.remove(factory.createMetricName(namePrefix + "NumSamples"));
+    }
+
+    /** Clears the window: sentinels for min/max so the first update() establishes both. */
+    public void reset()
+    {
+        sum = 0;
+        sumSquares = 0;
+        max = Long.MIN_VALUE;
+        min = Long.MAX_VALUE;
+        numSamples = 0;
+    }
+
+    /**
+     * Folds one sample into the window.
+     * NOTE(review): sumSquares accumulates value*value in a long — may overflow for samples
+     * larger than ~3e9 or very long windows; consider whether the expected value range is safe.
+     */
+    public void update(long value)
+    {
+        max = max > value ? max : value;
+        min = min < value ? min : value;
+        sum += value;
+        sumSquares += value * value;
+        numSamples++;
+    }
+
+    /** Population standard deviation via E[X^2] - E[X]^2; 0.0 when the window is empty. */
+    private Double stddev()
+    {
+        if (numSamples > 0)
+        {
+            double avgSquare = ((double) sumSquares) / numSamples;
+            double avg = ((double) sum) / numSamples;
+            return Math.sqrt(avgSquare - avg * avg);
+        }
+        else
+        {
+            return 0.0;
+        }
+    }
+
+    // NOTE(review): with numSamples == 0 the avg term is 0.0/0 == NaN (stddev() guards, this does not).
+    @Override
+    public String toString()
+    {
+        return "{" +
+               "min=" + min +
+               ", max=" + max +
+               ", avg=" + (sum * 1.0 / numSamples) +
+               ", stdDev=" + stddev() +
+               ", numSamples=" + numSamples +
+               '}';
+    }
+}
diff --git a/src/java/org/apache/cassandra/metrics/TrieMemtableMetricsView.java b/src/java/org/apache/cassandra/metrics/TrieMemtableMetricsView.java
new file mode 100644
index 0000000000..9343503999
--- /dev/null
+++ b/src/java/org/apache/cassandra/metrics/TrieMemtableMetricsView.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.metrics;
+
+import com.codahale.metrics.Counter;
+
+import static org.apache.cassandra.metrics.CassandraMetricsRegistry.Metrics;
+
+/**
+ * Groups the metrics exposed per table by the trie-based memtable: put contention counters,
+ * contention latency, and the distribution of shard sizes observed at flush time.
+ * Metrics are registered on construction and must be unregistered via {@link #release()}.
+ */
+public class TrieMemtableMetricsView
+{
+    private static final String UNCONTENDED_PUTS = "Uncontended memtable puts";
+    private static final String CONTENDED_PUTS = "Contended memtable puts";
+    private static final String CONTENTION_TIME = "Contention time";
+    private static final String LAST_FLUSH_SHARD_SIZES = "Shard sizes during last flush";
+
+    // the number of memtable puts that did not need to wait on write lock
+    public final Counter uncontendedPuts;
+
+    // the number of memtable puts that needed to wait on write lock
+    public final Counter contendedPuts;
+
+    // shard put contention measurements
+    public final LatencyMetrics contentionTime;
+
+    // shard sizes distribution
+    public final MinMaxAvgMetric lastFlushShardDataSizes;
+
+    private final TrieMemtableMetricNameFactory factory;
+
+    /** Registers all metrics for the given keyspace/table under the TrieMemtable JMX type. */
+    public TrieMemtableMetricsView(String keyspace, String table)
+    {
+        factory = new TrieMemtableMetricNameFactory(keyspace, table);
+        
+        uncontendedPuts = Metrics.counter(factory.createMetricName(UNCONTENDED_PUTS));
+        contendedPuts = Metrics.counter(factory.createMetricName(CONTENDED_PUTS));
+        contentionTime = new LatencyMetrics(factory, CONTENTION_TIME);
+        lastFlushShardDataSizes = new MinMaxAvgMetric(factory, LAST_FLUSH_SHARD_SIZES);
+    }
+
+    /** Unregisters everything registered by the constructor (counters directly, composites via their own release). */
+    public void release()
+    {
+        Metrics.remove(factory.createMetricName(UNCONTENDED_PUTS));
+        Metrics.remove(factory.createMetricName(CONTENDED_PUTS));
+        contentionTime.release();
+        lastFlushShardDataSizes.release();
+    }
+
+    /**
+     * Builds metric/MBean names of the form
+     * {@code <package>:type=TrieMemtable,keyspace=<ks>,scope=<table>,name=<metric>}.
+     */
+    static class TrieMemtableMetricNameFactory implements MetricNameFactory
+    {
+        private final String keyspace;
+        private final String table;
+
+        TrieMemtableMetricNameFactory(String keyspace, String table)
+        {
+            this.keyspace = keyspace;
+            this.table = table;
+        }
+
+        public CassandraMetricsRegistry.MetricName createMetricName(String metricName)
+        {
+            // Group under the same package as the table metrics so the trie memtable metrics
+            // appear alongside them in JMX.
+            String groupName = TableMetrics.class.getPackage().getName();
+            String type = "TrieMemtable";
+
+            StringBuilder mbeanName = new StringBuilder();
+            mbeanName.append(groupName).append(":");
+            mbeanName.append("type=").append(type);
+            mbeanName.append(",keyspace=").append(keyspace);
+            mbeanName.append(",scope=").append(table);
+            mbeanName.append(",name=").append(metricName);
+
+            return new CassandraMetricsRegistry.MetricName(groupName, type, metricName, keyspace + "." + table, mbeanName.toString());
+        }
+    }
+}
diff --git a/src/java/org/apache/cassandra/utils/ByteBufferUtil.java b/src/java/org/apache/cassandra/utils/ByteBufferUtil.java
index ba7d1be24d..bd8451c32f 100644
--- a/src/java/org/apache/cassandra/utils/ByteBufferUtil.java
+++ b/src/java/org/apache/cassandra/utils/ByteBufferUtil.java
@@ -681,13 +681,14 @@ public class ByteBufferUtil
 
     public static boolean canMinimize(ByteBuffer buf)
     {
-        return buf != null && (buf.capacity() > buf.remaining() || !buf.hasArray());
+        return buf != null && (!buf.hasArray() || buf.array().length > buf.remaining());
+        // Note: buf.array().length is different from buf.capacity() for sliced buffers.
     }
 
     /** trims size of bytebuffer to exactly number of bytes in it, to do not hold too much memory */
     public static ByteBuffer minimalBufferFor(ByteBuffer buf)
     {
-        return buf.capacity() > buf.remaining() || !buf.hasArray() ? ByteBuffer.wrap(getArray(buf)) : buf;
+        return !buf.hasArray() || buf.array().length > buf.remaining() ? ByteBuffer.wrap(getArray(buf)) : buf;
     }
 
     public static ByteBuffer[] minimizeBuffers(ByteBuffer[] src)
diff --git a/src/java/org/apache/cassandra/utils/bytecomparable/ByteComparable.md b/src/java/org/apache/cassandra/utils/bytecomparable/ByteComparable.md
index f360635381..604a45c849 100644
--- a/src/java/org/apache/cassandra/utils/bytecomparable/ByteComparable.md
+++ b/src/java/org/apache/cassandra/utils/bytecomparable/ByteComparable.md
@@ -668,6 +668,7 @@ Examples:
 |-8.1e-2000 | 999  | -0.0810|. -09 90  |    42·03 E7·77 DA·00
 |8.1e-2000  | -999 | 0.0810 |.  08 10  |    BE·FC 19·88 8A·00
 |8.1e2000   | 1001 | 0.0810 |.  08 10  |    C2·03 E9·88 8A·00
+
 (mexp stands for “modulated exponent”, i.e. exponent * sign)
 
 The values are prefix-free, because no exponent’s encoding can be a prefix of another, and the mantissas can never have
diff --git a/src/java/org/apache/cassandra/utils/memory/EnsureOnHeap.java b/src/java/org/apache/cassandra/utils/memory/EnsureOnHeap.java
index d66c02c8e2..34b9eaacd2 100644
--- a/src/java/org/apache/cassandra/utils/memory/EnsureOnHeap.java
+++ b/src/java/org/apache/cassandra/utils/memory/EnsureOnHeap.java
@@ -32,6 +32,8 @@ import org.apache.cassandra.utils.SearchIterator;
 
 public abstract class EnsureOnHeap extends Transformation
 {
+    public static final EnsureOnHeap NOOP = new NoOp();
+
     public abstract DecoratedKey applyToPartitionKey(DecoratedKey key);
     public abstract UnfilteredRowIterator applyToPartition(UnfilteredRowIterator partition);
     public abstract SearchIterator<Clustering<?>, Row> applyToPartition(SearchIterator<Clustering<?>, Row> partition);
diff --git a/src/java/org/apache/cassandra/utils/memory/HeapPool.java b/src/java/org/apache/cassandra/utils/memory/HeapPool.java
index 532d11f3c3..659715de4a 100644
--- a/src/java/org/apache/cassandra/utils/memory/HeapPool.java
+++ b/src/java/org/apache/cassandra/utils/memory/HeapPool.java
@@ -30,8 +30,6 @@ import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
 
 public class HeapPool extends MemtablePool
 {
-    private static final EnsureOnHeap ENSURE_NOOP = new EnsureOnHeap.NoOp();
-
     public HeapPool(long maxOnHeapMemory, float cleanupThreshold, MemtableCleaner cleaner)
     {
         super(maxOnHeapMemory, 0, cleanupThreshold, cleaner);
@@ -59,7 +57,7 @@ public class HeapPool extends MemtablePool
 
         public EnsureOnHeap ensureOnHeap()
         {
-            return ENSURE_NOOP;
+            return EnsureOnHeap.NOOP;
         }
 
         public Cloner cloner(OpOrder.Group opGroup)
@@ -124,7 +122,7 @@ public class HeapPool extends MemtablePool
             @Override
             public EnsureOnHeap ensureOnHeap()
             {
-                return ENSURE_NOOP;
+                return EnsureOnHeap.NOOP;
             }
 
             public Cloner cloner(OpOrder.Group opGroup)
diff --git a/test/conf/cassandra.yaml b/test/conf/cassandra.yaml
index 53bc206664..3329ed9f36 100644
--- a/test/conf/cassandra.yaml
+++ b/test/conf/cassandra.yaml
@@ -68,13 +68,15 @@ row_index_read_size_fail_threshold: 8192KiB
 memtable:
     configurations:
         skiplist:
-            inherits: default
             class_name: SkipListMemtable
+        trie:
+            class_name: TrieMemtable
+            parameters:
+                shards: 4
         skiplist_sharded:
             class_name: ShardedSkipListMemtable
             parameters:
                 serialize_writes: false
-                shards: 4
         skiplist_sharded_locking:
             inherits: skiplist_sharded
             parameters:
@@ -82,7 +84,6 @@ memtable:
         skiplist_remapped:
             inherits: skiplist
         test_fullname:
-            inherits: default
             class_name: org.apache.cassandra.db.memtable.TestMemtable
         test_shortname:
             class_name: TestMemtable
@@ -106,3 +107,7 @@ memtable:
             class_name: org.apache.cassandra.cql3.validation.operations.CreateTest$InvalidMemtableFactoryMethod
         test_invalid_factory_field:
             class_name: org.apache.cassandra.cql3.validation.operations.CreateTest$InvalidMemtableFactoryField
+        test_memtable_metrics:
+            class_name: TrieMemtable
+# Note: keep the memtable configuration at the end of the file, so that the default mapping can be changed without
+# duplicating the whole section above.
diff --git a/test/conf/trie_memtable.yaml b/test/conf/trie_memtable.yaml
new file mode 100644
index 0000000000..182850de2c
--- /dev/null
+++ b/test/conf/trie_memtable.yaml
@@ -0,0 +1,20 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# Note: this attaches at the end of cassandra.yaml, where the memtable configuration setting must be.
+        default:
+            inherits: trie
diff --git a/test/microbench/org/apache/cassandra/test/microbench/btree/AtomicBTreePartitionUpdateBench.java b/test/microbench/org/apache/cassandra/test/microbench/btree/AtomicBTreePartitionUpdateBench.java
index 34ec29ac59..7ea0f32789 100644
--- a/test/microbench/org/apache/cassandra/test/microbench/btree/AtomicBTreePartitionUpdateBench.java
+++ b/test/microbench/org/apache/cassandra/test/microbench/btree/AtomicBTreePartitionUpdateBench.java
@@ -50,8 +50,8 @@ import org.apache.cassandra.db.marshal.BytesType;
 import org.apache.cassandra.db.marshal.CompositeType;
 import org.apache.cassandra.db.marshal.Int32Type;
 import org.apache.cassandra.db.marshal.MapType;
-import org.apache.cassandra.db.partitions.AbstractBTreePartition;
 import org.apache.cassandra.db.partitions.AtomicBTreePartition;
+import org.apache.cassandra.db.partitions.BTreePartitionData;
 import org.apache.cassandra.db.partitions.PartitionUpdate;
 import org.apache.cassandra.db.rows.BTreeRow;
 import org.apache.cassandra.db.rows.BufferCell;
@@ -240,7 +240,7 @@ public class AtomicBTreePartitionUpdateBench
             try (BulkIterator<Row> iter = BulkIterator.of(insertBuffer))
             {
                 Object[] tree = BTree.build(iter, rowCount, UpdateFunction.noOp());
-                return PartitionUpdate.unsafeConstruct(metadata, decoratedKey, AbstractBTreePartition.unsafeConstructHolder(partitionColumns, tree, DeletionInfo.LIVE, Rows.EMPTY_STATIC_ROW, EncodingStats.NO_STATS), NO_DELETION_INFO, false);
+                return PartitionUpdate.unsafeConstruct(metadata, decoratedKey, BTreePartitionData.unsafeConstruct(partitionColumns, tree, DeletionInfo.LIVE, Rows.EMPTY_STATIC_ROW, EncodingStats.NO_STATS), NO_DELETION_INFO, false);
             }
         }
 
@@ -342,12 +342,7 @@ public class AtomicBTreePartitionUpdateBench
                         public ByteBuffer allocate(int size)
                         {
                             if (invalidateOn > 0 && --invalidateOn == 0)
-                            {
-                                AbstractBTreePartition.Holder holder = update.unsafeGetHolder();
-                                if (!BTree.isEmpty(holder.tree))
-                                    update.unsafeSetHolder(AbstractBTreePartition.unsafeConstructHolder(
-                                        holder.columns, Arrays.copyOf(holder.tree, holder.tree.length), holder.deletionInfo, holder.staticRow, holder.stats));
-                            }
+                                BTreePartitionData.unsafeInvalidate(update);
                             return ByteBuffer.allocate(size);
                         }
                     };
@@ -397,7 +392,7 @@ public class AtomicBTreePartitionUpdateBench
                         ThreadLocalRandom.current().nextLong();
                 }
                 invokeBefore.accept(this);
-                update.addAllWithSizeDelta(insert[index], cloner, NO_ORDER.getCurrent(), UpdateTransaction.NO_OP);
+                update.addAll(insert[index], cloner, NO_ORDER.getCurrent(), UpdateTransaction.NO_OP);
                 return true;
             }
             finally
@@ -405,7 +400,7 @@ public class AtomicBTreePartitionUpdateBench
                 if (state.addAndGet(0x100000L) == ((((long)ifGeneration) << 40) | (((long)insert.length) << 20) | insert.length))
                 {
                     activeThreads.set(0);
-                    update.unsafeSetHolder(AbstractBTreePartition.unsafeGetEmptyHolder());
+                    update.unsafeSetHolder(BTreePartitionData.unsafeGetEmpty());
                     // reset the state and rollover the generation
                     state.set((ifGeneration + 1L) << 40);
                 }
diff --git a/test/microbench/org/apache/cassandra/test/microbench/instance/ReadTest.java b/test/microbench/org/apache/cassandra/test/microbench/instance/ReadTest.java
index a9fc57d8f5..5a86842d5c 100644
--- a/test/microbench/org/apache/cassandra/test/microbench/instance/ReadTest.java
+++ b/test/microbench/org/apache/cassandra/test/microbench/instance/ReadTest.java
@@ -80,11 +80,21 @@ public abstract class ReadTest extends SimpleTableWriter
             // don't flush
         }
 
+        System.err.format("%s sstables, total %s, %,d partitions. Mean write latency %.2f ms\n",
+                          cfs.getLiveSSTables().size(),
+                          FBUtilities.prettyPrintMemory(cfs.metric.liveDiskSpaceUsed.getCount()),
+                          cfs.metric.estimatedPartitionCount.getValue(),
+                          cfs.metric.writeLatency.latency.getSnapshot().getMean());
         // Needed to stabilize sstable count for off-cache sized tests (e.g. count = 100_000_000)
         while (cfs.getLiveSSTables().size() >= 15)
         {
             cfs.enableAutoCompaction(true);
             cfs.disableAutoCompaction();
+            System.err.format("%s sstables, total %s, %,d partitions. Mean write latency %.2f ms\n",
+                              cfs.getLiveSSTables().size(),
+                              FBUtilities.prettyPrintMemory(cfs.metric.liveDiskSpaceUsed.getCount()),
+                              cfs.metric.estimatedPartitionCount.getValue(),
+                              cfs.metric.writeLatency.latency.getSnapshot().getMean());
         }
     }
 
diff --git a/test/microbench/org/apache/cassandra/test/microbench/tries/MemtableTrieWriteBench.java b/test/microbench/org/apache/cassandra/test/microbench/tries/MemtableTrieWriteBench.java
index 28b2011e66..e473a220a9 100644
--- a/test/microbench/org/apache/cassandra/test/microbench/tries/MemtableTrieWriteBench.java
+++ b/test/microbench/org/apache/cassandra/test/microbench/tries/MemtableTrieWriteBench.java
@@ -45,7 +45,7 @@ public class MemtableTrieWriteBench
     @Param({"8"})
     int keyLength = 8;
 
-    final static MemtableTrie.UpsertTransformer<Byte, Byte> resolver = (x, y) -> x;
+    final static MemtableTrie.UpsertTransformer<Byte, Byte> resolver = (x, y) -> y;
 
     // Set this to true to print the trie sizes after insertions for sanity checking.
     // This might affect the timings, do not commit with this set to true.
diff --git a/test/unit/org/apache/cassandra/cql3/validation/operations/AlterTest.java b/test/unit/org/apache/cassandra/cql3/validation/operations/AlterTest.java
index fddd59bc4c..1881707623 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/operations/AlterTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/operations/AlterTest.java
@@ -22,6 +22,7 @@ import java.util.UUID;
 import org.junit.Assert;
 import org.junit.Test;
 
+import org.apache.cassandra.Util;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.cql3.CQLTester;
 import org.apache.cassandra.db.ColumnFamilyStore;
@@ -29,7 +30,6 @@ import org.apache.cassandra.db.Keyspace;
 import org.apache.cassandra.db.memtable.Memtable;
 import org.apache.cassandra.db.memtable.SkipListMemtable;
 import org.apache.cassandra.db.memtable.TestMemtable;
-import org.apache.cassandra.dht.OrderPreservingPartitioner;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.exceptions.InvalidRequestException;
 import org.apache.cassandra.exceptions.SyntaxException;
@@ -268,9 +268,9 @@ public class AlterTest extends CQLTester
         InetAddressAndPort local = FBUtilities.getBroadcastAddressAndPort();
         InetAddressAndPort remote = InetAddressAndPort.getByName("127.0.0.4");
         metadata.updateHostId(UUID.randomUUID(), local);
-        metadata.updateNormalToken(new OrderPreservingPartitioner.StringToken("A"), local);
+        metadata.updateNormalToken(Util.token("A"), local);
         metadata.updateHostId(UUID.randomUUID(), remote);
-        metadata.updateNormalToken(new OrderPreservingPartitioner.StringToken("B"), remote);
+        metadata.updateNormalToken(Util.token("B"), remote);
 
         // With two datacenters we should respect anything passed in as a manual override
         String ks1 = createKeyspace("CREATE KEYSPACE %s WITH replication={ 'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1, '" + DATA_CENTER_REMOTE + "': 3}");
@@ -326,9 +326,9 @@ public class AlterTest extends CQLTester
         InetAddressAndPort local = FBUtilities.getBroadcastAddressAndPort();
         InetAddressAndPort remote = InetAddressAndPort.getByName("127.0.0.4");
         metadata.updateHostId(UUID.randomUUID(), local);
-        metadata.updateNormalToken(new OrderPreservingPartitioner.StringToken("A"), local);
+        metadata.updateNormalToken(Util.token("A"), local);
         metadata.updateHostId(UUID.randomUUID(), remote);
-        metadata.updateNormalToken(new OrderPreservingPartitioner.StringToken("B"), remote);
+        metadata.updateNormalToken(Util.token("B"), remote);
 
         // Let's create a keyspace first with SimpleStrategy
         String ks1 = createKeyspace("CREATE KEYSPACE %s WITH replication={ 'class' : 'SimpleStrategy', 'replication_factor' : 2}");
@@ -363,9 +363,9 @@ public class AlterTest extends CQLTester
         InetAddressAndPort local = FBUtilities.getBroadcastAddressAndPort();
         InetAddressAndPort remote = InetAddressAndPort.getByName("127.0.0.4");
         metadata.updateHostId(UUID.randomUUID(), local);
-        metadata.updateNormalToken(new OrderPreservingPartitioner.StringToken("A"), local);
+        metadata.updateNormalToken(Util.token("A"), local);
         metadata.updateHostId(UUID.randomUUID(), remote);
-        metadata.updateNormalToken(new OrderPreservingPartitioner.StringToken("B"), remote);
+        metadata.updateNormalToken(Util.token("B"), remote);
 
         DatabaseDescriptor.setDefaultKeyspaceRF(3);
 
@@ -560,6 +560,7 @@ public class AlterTest extends CQLTester
         createTable("CREATE TABLE %s (a text, b int, c int, primary key (a, b))");
         assertSame(MemtableParams.DEFAULT.factory(), getCurrentColumnFamilyStore().metadata().params.memtable.factory());
         assertSchemaOption("memtable", null);
+        Class<? extends Memtable> defaultClass = getCurrentColumnFamilyStore().getTracker().getView().getCurrentMemtable().getClass();
 
         testMemtableConfig("skiplist", SkipListMemtable.FACTORY, SkipListMemtable.class);
         testMemtableConfig("test_fullname", TestMemtable.FACTORY, SkipListMemtable.class);
@@ -570,7 +571,7 @@ public class AlterTest extends CQLTester
                    + " WITH compression = {'class': 'LZ4Compressor'};");
         assertSchemaOption("memtable", "test_shortname");
 
-        testMemtableConfig("default", MemtableParams.DEFAULT.factory(), SkipListMemtable.class);
+        testMemtableConfig("default", MemtableParams.DEFAULT.factory(), defaultClass);
 
 
         assertAlterTableThrowsException(ConfigurationException.class,
diff --git a/test/unit/org/apache/cassandra/cql3/validation/operations/CreateTest.java b/test/unit/org/apache/cassandra/cql3/validation/operations/CreateTest.java
index b72fd81ed7..6091ab52e9 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/operations/CreateTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/operations/CreateTest.java
@@ -34,6 +34,7 @@ import org.apache.cassandra.db.Mutation;
 import org.apache.cassandra.db.memtable.Memtable;
 import org.apache.cassandra.db.memtable.SkipListMemtable;
 import org.apache.cassandra.db.memtable.TestMemtable;
+import org.apache.cassandra.db.memtable.TrieMemtable;
 import org.apache.cassandra.db.partitions.Partition;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.exceptions.InvalidRequestException;
@@ -596,14 +597,16 @@ public class CreateTest extends CQLTester
     {
         createTable("CREATE TABLE %s (a text, b int, c int, primary key (a, b))");
         assertSame(MemtableParams.DEFAULT.factory(), getCurrentColumnFamilyStore().metadata().params.memtable.factory());
+        Class<? extends Memtable> defaultClass = getCurrentColumnFamilyStore().getTracker().getView().getCurrentMemtable().getClass();
 
         assertSchemaOption("memtable", null);
 
         testMemtableConfig("skiplist", SkipListMemtable.FACTORY, SkipListMemtable.class);
+        testMemtableConfig("trie", MemtableParams.get("trie").factory(), TrieMemtable.class);
         testMemtableConfig("skiplist_remapped", SkipListMemtable.FACTORY, SkipListMemtable.class);
         testMemtableConfig("test_fullname", TestMemtable.FACTORY, SkipListMemtable.class);
         testMemtableConfig("test_shortname", SkipListMemtable.FACTORY, SkipListMemtable.class);
-        testMemtableConfig("default", MemtableParams.DEFAULT.factory(), SkipListMemtable.class);
+        testMemtableConfig("default", MemtableParams.DEFAULT.factory(), defaultClass);
 
         assertThrowsConfigurationException("The 'class_name' option must be specified.",
                                            "CREATE TABLE %s (a text, b int, c int, primary key (a, b))"
diff --git a/test/unit/org/apache/cassandra/db/ClusteringPrefixTest.java b/test/unit/org/apache/cassandra/db/ClusteringPrefixTest.java
new file mode 100644
index 0000000000..a295b22786
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/ClusteringPrefixTest.java
@@ -0,0 +1,234 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db;
+
+import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
+import java.util.Arrays;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.function.Function;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.apache.cassandra.db.marshal.ByteArrayAccessor;
+import org.apache.cassandra.db.marshal.ByteBufferAccessor;
+import org.apache.cassandra.db.marshal.ValueAccessor;
+import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.concurrent.ImmediateFuture;
+import org.apache.cassandra.utils.memory.MemtablePool;
+import org.apache.cassandra.utils.memory.NativeAllocator;
+import org.apache.cassandra.utils.memory.NativePool;
+import org.apache.cassandra.utils.memory.SlabAllocator;
+import org.apache.cassandra.utils.memory.SlabPool;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+public class ClusteringPrefixTest
+{
+    @Test
+    public void arrayTopAndBottom()
+    {
+        Assert.assertTrue(ArrayClusteringBound.BOTTOM.isBottom());
+        Assert.assertFalse(ArrayClusteringBound.BOTTOM.isTop());
+        Assert.assertTrue(ArrayClusteringBound.TOP.isTop());
+        Assert.assertFalse(ArrayClusteringBound.TOP.isBottom());
+    }
+
+    @Test
+    public void bufferTopAndBottom()
+    {
+        Assert.assertTrue(BufferClusteringBound.BOTTOM.isBottom());
+        Assert.assertFalse(BufferClusteringBound.BOTTOM.isTop());
+        Assert.assertTrue(BufferClusteringBound.TOP.isTop());
+        Assert.assertFalse(BufferClusteringBound.TOP.isBottom());
+    }
+
+    @Test
+    public void testRetainableArray()
+    {
+        testRetainable(ByteArrayAccessor.instance.factory(), x -> new byte[][] {x.getBytes(StandardCharsets.UTF_8)});
+    }
+
+    @Test
+    public void testRetainableOnHeap()
+    {
+        testRetainable(ByteBufferAccessor.instance.factory(), x -> new ByteBuffer[] {ByteBufferUtil.bytes(x)});
+    }
+
+    @Test
+    public void testRetainableOnHeapSliced()
+    {
+        for (int prepend = 0; prepend < 3; ++prepend)
+        {
+            for (int append = 0; append < 3; ++append)
+            {
+                testRetainable(ByteBufferAccessor.instance.factory(),
+                               slicingAllocator(prepend, append));
+            }
+        }
+    }
+
+    private Function<String, ByteBuffer[]> slicingAllocator(int prepend, int append)
+    {
+        return x ->
+        {
+            ByteBuffer bytes = ByteBufferUtil.bytes(x);
+            ByteBuffer sliced = ByteBuffer.allocate(bytes.remaining() + prepend + append);
+            for (int i = 0; i < prepend; ++i)
+                sliced.put((byte) ThreadLocalRandom.current().nextInt());
+            sliced.put(bytes);
+            bytes.flip();
+            for (int i = 0; i < append; ++i)
+                sliced.put((byte) ThreadLocalRandom.current().nextInt());
+            sliced.position(prepend).limit(prepend + bytes.remaining());
+            return new ByteBuffer[]{ sliced.slice() };
+        };
+    }
+
+    @Test
+    public void testRetainableOffHeap()
+    {
+        testRetainable(ByteBufferAccessor.instance.factory(), x ->
+        {
+            ByteBuffer h = ByteBufferUtil.bytes(x);
+            ByteBuffer v = ByteBuffer.allocateDirect(h.remaining());
+            v.put(h);
+            v.flip();
+            return new ByteBuffer[] {v};
+        });
+    }
+
+    @Test
+    public void testRetainableOnHeapSlab() throws InterruptedException, TimeoutException
+    {
+        testRetainableSlab(true);
+    }
+
+    @Test
+    public void testRetainableOffHeapSlab() throws InterruptedException, TimeoutException
+    {
+        testRetainableSlab(false);
+    }
+
+    public void testRetainableSlab(boolean onHeap) throws InterruptedException, TimeoutException
+    {
+        MemtablePool pool = new SlabPool(1L << 24, onHeap ? 0 : 1L << 24, 1.0f, () -> ImmediateFuture.success(false));
+        SlabAllocator allocator = ((SlabAllocator) pool.newAllocator("test"));
+        assert !allocator.allocate(1).isDirect() == onHeap;
+        try
+        {
+            testRetainable(ByteBufferAccessor.instance.factory(), x ->
+            {
+                ByteBuffer h = ByteBufferUtil.bytes(x);
+                ByteBuffer v = allocator.allocate(h.remaining());
+                v.put(h);
+                v.flip();
+                return new ByteBuffer[] {v};
+            });
+        }
+        finally
+        {
+            pool.shutdownAndWait(10, TimeUnit.SECONDS);
+        }
+    }
+
+    @Test
+    public void testRetainableNative() throws InterruptedException, TimeoutException
+    {
+        MemtablePool pool = new NativePool(1L << 24,1L << 24, 1.0f, () -> ImmediateFuture.success(false));
+        NativeAllocator allocator = (NativeAllocator) pool.newAllocator("test");
+        try
+        {
+            testRetainable(ByteBufferAccessor.instance.factory(),
+                           x -> new ByteBuffer[] {ByteBufferUtil.bytes(x)},
+                           x -> x.kind() == ClusteringPrefix.Kind.CLUSTERING
+                                ? new NativeClustering(allocator, null, (Clustering<?>) x)
+                                : x);
+        }
+        finally
+        {
+            pool.shutdownAndWait(10, TimeUnit.SECONDS);
+        }
+    }
+
+    public <V> void testRetainable(ValueAccessor.ObjectFactory<V> factory,
+                                   Function<String, V[]> allocator)
+    {
+        testRetainable(factory, allocator, null);
+    }
+
+    public <V> void testRetainable(ValueAccessor.ObjectFactory<V> factory,
+                                   Function<String, V[]> allocator,
+                                   Function<ClusteringPrefix<V>, ClusteringPrefix<V>> mapper)
+    {
+        ClusteringPrefix<V>[] clusterings = new ClusteringPrefix[]
+        {
+            factory.clustering(),
+            factory.staticClustering(),
+            factory.clustering(allocator.apply("test")),
+            factory.bound(ClusteringPrefix.Kind.INCL_START_BOUND, allocator.apply("testA")),
+            factory.bound(ClusteringPrefix.Kind.INCL_END_BOUND, allocator.apply("testB")),
+            factory.bound(ClusteringPrefix.Kind.EXCL_START_BOUND, allocator.apply("testC")),
+            factory.bound(ClusteringPrefix.Kind.EXCL_END_BOUND, allocator.apply("testD")),
+            factory.boundary(ClusteringPrefix.Kind.EXCL_END_INCL_START_BOUNDARY, allocator.apply("testE")),
+            factory.boundary(ClusteringPrefix.Kind.INCL_END_EXCL_START_BOUNDARY, allocator.apply("testF")),
+        };
+
+        if (mapper != null)
+            clusterings = Arrays.stream(clusterings)
+                                .map(mapper)
+                                .toArray(ClusteringPrefix[]::new);
+
+        testRetainable(clusterings);
+    }
+
+    public void testRetainable(ClusteringPrefix<?>[] clusterings)
+    {
+        for (ClusteringPrefix<?> clustering : clusterings)
+        {
+            ClusteringPrefix<?> retainable = clustering.retainable();
+            assertEquals(clustering, retainable);
+            assertClusteringIsRetainable(retainable);
+        }
+    }
+
+
+    public static void assertClusteringIsRetainable(ClusteringPrefix<?> clustering)
+    {
+        if (clustering instanceof AbstractArrayClusteringPrefix)
+            return; // has to be on-heap and minimized
+
+        assertTrue(clustering instanceof AbstractBufferClusteringPrefix);
+        AbstractBufferClusteringPrefix abcf = (AbstractBufferClusteringPrefix) clustering;
+        ByteBuffer[] buffers = abcf.getBufferArray();
+        for (ByteBuffer b : buffers)
+        {
+            assertFalse(b.isDirect());
+            assertTrue(b.hasArray());
+            assertEquals(b.capacity(), b.remaining());
+            assertEquals(0, b.arrayOffset());
+            assertEquals(b.capacity(), b.array().length);
+        }
+    }
+}
diff --git a/test/unit/org/apache/cassandra/db/memtable/MemtableQuickTest.java b/test/unit/org/apache/cassandra/db/memtable/MemtableQuickTest.java
index 0d539a80cc..b2cfa3e586 100644
--- a/test/unit/org/apache/cassandra/db/memtable/MemtableQuickTest.java
+++ b/test/unit/org/apache/cassandra/db/memtable/MemtableQuickTest.java
@@ -57,7 +57,8 @@ public class MemtableQuickTest extends CQLTester
     {
         return ImmutableList.of("skiplist",
                                 "skiplist_sharded",
-                                "skiplist_sharded_locking");
+                                "skiplist_sharded_locking",
+                                "trie");
     }
 
     @BeforeClass
diff --git a/test/unit/org/apache/cassandra/db/memtable/MemtableSizeHeapBuffersTest.java b/test/unit/org/apache/cassandra/db/memtable/MemtableSizeHeapBuffersTest.java
index b448b35ffb..4497e8cd83 100644
--- a/test/unit/org/apache/cassandra/db/memtable/MemtableSizeHeapBuffersTest.java
+++ b/test/unit/org/apache/cassandra/db/memtable/MemtableSizeHeapBuffersTest.java
@@ -20,14 +20,11 @@ package org.apache.cassandra.db.memtable;
 
 import org.junit.Assert;
 import org.junit.BeforeClass;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
 
 import org.apache.cassandra.config.Config;
 import org.apache.cassandra.utils.memory.MemtablePool;
 import org.apache.cassandra.utils.memory.SlabPool;
 
-@RunWith(Parameterized.class)
 public class MemtableSizeHeapBuffersTest extends MemtableSizeTestBase
 {
     // Overrides CQLTester.setUpClass to run before it
@@ -43,6 +40,6 @@ public class MemtableSizeHeapBuffersTest extends MemtableSizeTestBase
         MemtablePool memoryPool = AbstractAllocatorMemtable.MEMORY_POOL;
         logger.info("Memtable pool {} off-heap limit {}", memoryPool, memoryPool.offHeap.limit);
         Assert.assertTrue(memoryPool instanceof SlabPool);
-        Assert.assertTrue(memoryPool.offHeap.limit == 0);
+        Assert.assertEquals(0, memoryPool.offHeap.limit);
     }
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/db/memtable/MemtableSizeOffheapBuffersTest.java b/test/unit/org/apache/cassandra/db/memtable/MemtableSizeOffheapBuffersTest.java
index 64f77e2082..022f4f1792 100644
--- a/test/unit/org/apache/cassandra/db/memtable/MemtableSizeOffheapBuffersTest.java
+++ b/test/unit/org/apache/cassandra/db/memtable/MemtableSizeOffheapBuffersTest.java
@@ -20,14 +20,11 @@ package org.apache.cassandra.db.memtable;
 
 import org.junit.Assert;
 import org.junit.BeforeClass;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
 
 import org.apache.cassandra.config.Config;
 import org.apache.cassandra.utils.memory.MemtablePool;
 import org.apache.cassandra.utils.memory.SlabPool;
 
-@RunWith(Parameterized.class)
 public class MemtableSizeOffheapBuffersTest extends MemtableSizeTestBase
 {
     // Overrides CQLTester.setUpClass to run before it
@@ -46,4 +43,4 @@ public class MemtableSizeOffheapBuffersTest extends MemtableSizeTestBase
         Assert.assertTrue(memoryPool instanceof SlabPool);
         Assert.assertTrue(memoryPool.offHeap.limit > 0);
     }
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/db/memtable/MemtableSizeOffheapObjectsTest.java b/test/unit/org/apache/cassandra/db/memtable/MemtableSizeOffheapObjectsTest.java
index b2aa12cda0..559f456d14 100644
--- a/test/unit/org/apache/cassandra/db/memtable/MemtableSizeOffheapObjectsTest.java
+++ b/test/unit/org/apache/cassandra/db/memtable/MemtableSizeOffheapObjectsTest.java
@@ -20,14 +20,11 @@ package org.apache.cassandra.db.memtable;
 
 import org.junit.Assert;
 import org.junit.BeforeClass;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
 
 import org.apache.cassandra.config.Config;
 import org.apache.cassandra.utils.memory.MemtablePool;
 import org.apache.cassandra.utils.memory.NativePool;
 
-@RunWith(Parameterized.class)
 public class MemtableSizeOffheapObjectsTest extends MemtableSizeTestBase
 {
     // Overrides CQLTester.setUpClass to run before it
@@ -41,7 +38,7 @@ public class MemtableSizeOffheapObjectsTest extends MemtableSizeTestBase
     void checkMemtablePool()
     {
         MemtablePool memoryPool = AbstractAllocatorMemtable.MEMORY_POOL;
-        System.out.println("Memtable pool " + memoryPool + " off-heap limit " + memoryPool.offHeap.limit);
+        logger.info("Memtable pool {} off-heap limit {}", memoryPool, memoryPool.offHeap.limit);
         Assert.assertTrue(memoryPool instanceof NativePool);
     }
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/db/memtable/MemtableSizeTestBase.java b/test/unit/org/apache/cassandra/db/memtable/MemtableSizeTestBase.java
index 252d7bf14e..8325163ded 100644
--- a/test/unit/org/apache/cassandra/db/memtable/MemtableSizeTestBase.java
+++ b/test/unit/org/apache/cassandra/db/memtable/MemtableSizeTestBase.java
@@ -21,10 +21,10 @@ package org.apache.cassandra.db.memtable;
 import java.lang.reflect.Field;
 import java.util.List;
 
-import com.google.common.base.Throwables;
 import com.google.common.collect.ImmutableList;
 import org.junit.Assert;
 import org.junit.Test;
+import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 
 import org.slf4j.Logger;
@@ -41,6 +41,7 @@ import org.github.jamm.MemoryMeter;
 
 // Note: This test can be run in idea with the allocation type configured in the test yaml and memtable using the
 // value memtableClass is initialized with.
+@RunWith(Parameterized.class)
 public class MemtableSizeTestBase extends CQLTester
 {
     // Note: To see a printout of the usage for each object, add .enableDebug() here (most useful with smaller number of
@@ -63,7 +64,8 @@ public class MemtableSizeTestBase extends CQLTester
     public static List<Object> parameters()
     {
         return ImmutableList.of("skiplist",
-                                "skiplist_sharded");
+                                "skiplist_sharded",
+                                "trie");
     }
 
     // Must be within 3% of the real usage. We are actually more precise than this, but the threshold is set higher to
@@ -84,7 +86,7 @@ public class MemtableSizeTestBase extends CQLTester
         }
         catch (NoSuchFieldException | IllegalAccessException e)
         {
-            throw Throwables.propagate(e);
+            throw new RuntimeException(e);
         }
 
         CQLTester.setUpClass();
@@ -165,10 +167,15 @@ public class MemtableSizeTestBase extends CQLTester
 
             long expectedHeap = deepSizeAfter - deepSizeBefore;
             long max_difference = MAX_DIFFERENCE_PERCENT * expectedHeap / 100;
+            long trie_overhead = memtable instanceof TrieMemtable ? ((TrieMemtable) memtable).unusedReservedMemory() : 0;
             switch (DatabaseDescriptor.getMemtableAllocationType())
             {
                 case heap_buffers:
                     max_difference += SLAB_OVERHEAD;
+                    actualHeap += trie_overhead;    // adjust trie memory with unused buffer space if on-heap
+                    break;
+                case unslabbed_heap_buffers:
+                    actualHeap += trie_overhead;    // adjust trie memory with unused buffer space if on-heap
                     break;
             }
             String message = String.format("Expected heap usage close to %s, got %s, %s difference.\n",
@@ -196,4 +203,4 @@ public class MemtableSizeTestBase extends CQLTester
             execute(String.format("DROP KEYSPACE IF EXISTS %s", keyspace));
         }
     }
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/db/memtable/MemtableSizeUnslabbedTest.java b/test/unit/org/apache/cassandra/db/memtable/MemtableSizeUnslabbedTest.java
index 1d2d460660..b59a4749f0 100644
--- a/test/unit/org/apache/cassandra/db/memtable/MemtableSizeUnslabbedTest.java
+++ b/test/unit/org/apache/cassandra/db/memtable/MemtableSizeUnslabbedTest.java
@@ -20,14 +20,11 @@ package org.apache.cassandra.db.memtable;
 
 import org.junit.Assert;
 import org.junit.BeforeClass;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
 
 import org.apache.cassandra.config.Config;
 import org.apache.cassandra.utils.memory.HeapPool;
 import org.apache.cassandra.utils.memory.MemtablePool;
 
-@RunWith(Parameterized.class)
 public class MemtableSizeUnslabbedTest extends MemtableSizeTestBase
 {
     // Overrides CQLTester.setUpClass to run before it
@@ -44,4 +41,4 @@ public class MemtableSizeUnslabbedTest extends MemtableSizeTestBase
         logger.info("Memtable pool {} off-heap limit {}", memoryPool, memoryPool.offHeap.limit);
         Assert.assertTrue(memoryPool instanceof HeapPool);
     }
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/db/memtable/ShardedMemtableConfigTest.java b/test/unit/org/apache/cassandra/db/memtable/ShardedMemtableConfigTest.java
new file mode 100644
index 0000000000..ef5079b35e
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/memtable/ShardedMemtableConfigTest.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.memtable;
+
+import java.io.IOException;
+import javax.management.Attribute;
+import javax.management.AttributeNotFoundException;
+import javax.management.InstanceNotFoundException;
+import javax.management.InvalidAttributeValueException;
+import javax.management.MBeanException;
+import javax.management.MalformedObjectNameException;
+import javax.management.ObjectName;
+import javax.management.ReflectionException;
+
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.utils.FBUtilities;
+
+import static org.apache.cassandra.db.memtable.AbstractShardedMemtable.SHARDED_MEMTABLE_CONFIG_OBJECT_NAME;
+import static org.junit.Assert.assertEquals;
+
+public class ShardedMemtableConfigTest extends CQLTester
+{
+    @BeforeClass
+    public static void setup() throws Exception
+    {
+        startJMXServer();
+        createMBeanServerConnection();
+    }
+
+    @Test
+    public void testDefaultShardCountSetByJMX() throws MalformedObjectNameException, ReflectionException, AttributeNotFoundException, InstanceNotFoundException, MBeanException, IOException, InvalidAttributeValueException, InterruptedException
+    {
+        // check the default, but also make sure the class is initialized if the default memtable is not sharded
+        assertEquals(FBUtilities.getAvailableProcessors(), AbstractShardedMemtable.getDefaultShardCount());
+        jmxConnection.setAttribute(new ObjectName(SHARDED_MEMTABLE_CONFIG_OBJECT_NAME), new Attribute("DefaultShardCount", "7"));
+        assertEquals(7, AbstractShardedMemtable.getDefaultShardCount());
+        assertEquals("7", jmxConnection.getAttribute(new ObjectName(SHARDED_MEMTABLE_CONFIG_OBJECT_NAME), "DefaultShardCount"));
+    }
+
+    @Test
+    public void testAutoShardCount() throws MalformedObjectNameException, ReflectionException, AttributeNotFoundException, InstanceNotFoundException, MBeanException, IOException, InvalidAttributeValueException
+    {
+        AbstractShardedMemtable.getDefaultShardCount();    // initialize class
+        jmxConnection.setAttribute(new ObjectName(SHARDED_MEMTABLE_CONFIG_OBJECT_NAME), new Attribute("DefaultShardCount", "auto"));
+        assertEquals(FBUtilities.getAvailableProcessors(), AbstractShardedMemtable.getDefaultShardCount());
+        assertEquals(Integer.toString(FBUtilities.getAvailableProcessors()),
+                     jmxConnection.getAttribute(new ObjectName(SHARDED_MEMTABLE_CONFIG_OBJECT_NAME), "DefaultShardCount"));
+    }
+}
diff --git a/test/unit/org/apache/cassandra/db/tries/MemtableTriePutTest.java b/test/unit/org/apache/cassandra/db/tries/MemtableTriePutTest.java
index 6ff8871478..dfa18a55ed 100644
--- a/test/unit/org/apache/cassandra/db/tries/MemtableTriePutTest.java
+++ b/test/unit/org/apache/cassandra/db/tries/MemtableTriePutTest.java
@@ -77,7 +77,7 @@ public class MemtableTriePutTest extends MemtableTrieTestBase
         Assert.assertNull(trie.get(ByteComparable.of(t2)));
         Assert.assertFalse(trie.reachedAllocatedSizeThreshold());
 
-        trie.advanceAllocatedPos(0x40001000);  // over 1G
+        trie.advanceAllocatedPos(MemtableTrie.ALLOCATED_SIZE_THRESHOLD + 0x1000);
         trie.putRecursive(ByteComparable.of(t2), t2, (x, y) -> y);
         Assert.assertEquals(t1, trie.get(ByteComparable.of(t1)));
         Assert.assertEquals(t2, trie.get(ByteComparable.of(t2)));
@@ -119,5 +119,7 @@ public class MemtableTriePutTest extends MemtableTrieTestBase
         Assert.assertEquals(t2, trie.get(ByteComparable.of(t2)));
         Assert.assertNull(trie.get(ByteComparable.of(t3)));
         Assert.assertTrue(trie.reachedAllocatedSizeThreshold());
+
+        trie.discardBuffers();
     }
 }
diff --git a/test/unit/org/apache/cassandra/db/tries/MemtableTrieTestBase.java b/test/unit/org/apache/cassandra/db/tries/MemtableTrieTestBase.java
index 23143cdc35..251227e0ab 100644
--- a/test/unit/org/apache/cassandra/db/tries/MemtableTrieTestBase.java
+++ b/test/unit/org/apache/cassandra/db/tries/MemtableTrieTestBase.java
@@ -313,6 +313,8 @@ public abstract class MemtableTrieTestBase
 
         assertSameContent(trie, content);
         checkGet(trie, content);
+
+        trie.discardBuffers();
     }
 
     @Test
diff --git a/src/java/org/apache/cassandra/db/tries/TrieToDot.java b/test/unit/org/apache/cassandra/db/tries/TrieToDot.java
similarity index 97%
rename from src/java/org/apache/cassandra/db/tries/TrieToDot.java
rename to test/unit/org/apache/cassandra/db/tries/TrieToDot.java
index 26059d11c5..fd47c0be3d 100644
--- a/src/java/org/apache/cassandra/db/tries/TrieToDot.java
+++ b/test/unit/org/apache/cassandra/db/tries/TrieToDot.java
@@ -22,7 +22,7 @@ import java.util.function.Function;
 import org.agrona.DirectBuffer;
 
 /**
- * Simple utility class for dumping the structure of a trie to string.
+ * A utility class for dumping the structure of a trie to a graphviz/dot representation, for visualizing trie graphs.
  */
 class TrieToDot<T> extends TriePathReconstructor implements Trie.Walker<T, String>
 {
diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableMetadataTest.java b/test/unit/org/apache/cassandra/io/sstable/SSTableMetadataTest.java
index 2e5a17ad42..01ecb94b19 100644
--- a/test/unit/org/apache/cassandra/io/sstable/SSTableMetadataTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/SSTableMetadataTest.java
@@ -17,6 +17,7 @@
  */
 package org.apache.cassandra.io.sstable;
 
+import java.nio.ByteBuffer;
 import java.nio.charset.CharacterCodingException;
 import java.util.ArrayList;
 import java.util.List;
@@ -37,6 +38,7 @@ import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 public class SSTableMetadataTest
@@ -218,9 +220,9 @@ public class SSTableMetadataTest
         {
             assertEquals(ByteBufferUtil.string(sstable.getSSTableMetadata().minClusteringValues.get(0)), "0col100");
             assertEquals(ByteBufferUtil.string(sstable.getSSTableMetadata().maxClusteringValues.get(0)), "7col149");
-            // make sure the clustering values are minimised
-            assertTrue(sstable.getSSTableMetadata().minClusteringValues.get(0).capacity() < 50);
-            assertTrue(sstable.getSSTableMetadata().maxClusteringValues.get(0).capacity() < 50);
+            // make sure stats don't reference native or off-heap data
+            assertBuffersAreRetainable(sstable.getSSTableMetadata().minClusteringValues);
+            assertBuffersAreRetainable(sstable.getSSTableMetadata().maxClusteringValues);
         }
         String key = "row2";
 
@@ -240,9 +242,39 @@ public class SSTableMetadataTest
         {
             assertEquals(ByteBufferUtil.string(sstable.getSSTableMetadata().minClusteringValues.get(0)), "0col100");
             assertEquals(ByteBufferUtil.string(sstable.getSSTableMetadata().maxClusteringValues.get(0)), "9col298");
-            // and make sure the clustering values are still minimised after compaction
-            assertTrue(sstable.getSSTableMetadata().minClusteringValues.get(0).capacity() < 50);
-            assertTrue(sstable.getSSTableMetadata().maxClusteringValues.get(0).capacity() < 50);
+            // make sure stats don't reference native or off-heap data
+            assertBuffersAreRetainable(sstable.getSSTableMetadata().minClusteringValues);
+            assertBuffersAreRetainable(sstable.getSSTableMetadata().maxClusteringValues);
+        }
+
+        key = "row3";
+        new RowUpdateBuilder(store.metadata(), System.currentTimeMillis(), key)
+            .addRangeTombstone("0", "7")
+            .build()
+            .apply();
+
+        store.forceBlockingFlush(ColumnFamilyStore.FlushReason.UNIT_TESTS);
+        store.forceMajorCompaction();
+        assertEquals(1, store.getLiveSSTables().size());
+        for (SSTableReader sstable : store.getLiveSSTables())
+        {
+            assertEquals(ByteBufferUtil.string(sstable.getSSTableMetadata().minClusteringValues.get(0)), "0");
+            assertEquals(ByteBufferUtil.string(sstable.getSSTableMetadata().maxClusteringValues.get(0)), "9col298");
+            // make sure stats don't reference native or off-heap data
+            assertBuffersAreRetainable(sstable.getSSTableMetadata().minClusteringValues);
+            assertBuffersAreRetainable(sstable.getSSTableMetadata().maxClusteringValues);
+        }
+    }
+
+    public static void assertBuffersAreRetainable(List<ByteBuffer> buffers)
+    {
+        for (ByteBuffer b : buffers)
+        {
+            assertFalse(b.isDirect());
+            assertTrue(b.hasArray());
+            assertEquals(b.capacity(), b.remaining());
+            assertEquals(0, b.arrayOffset());
+            assertEquals(b.capacity(), b.array().length);
         }
     }
 
diff --git a/test/unit/org/apache/cassandra/metrics/TrieMemtableMetricsTest.java b/test/unit/org/apache/cassandra/metrics/TrieMemtableMetricsTest.java
new file mode 100644
index 0000000000..b92d2bdc51
--- /dev/null
+++ b/test/unit/org/apache/cassandra/metrics/TrieMemtableMetricsTest.java
@@ -0,0 +1,210 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.metrics;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
+import java.util.function.Supplier;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.datastax.driver.core.Cluster;
+import com.datastax.driver.core.Session;
+import org.apache.cassandra.SchemaLoader;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.config.OverrideConfigurationLoader;
+import org.apache.cassandra.db.ColumnFamilyStore;
+import org.apache.cassandra.db.memtable.AbstractShardedMemtable;
+import org.apache.cassandra.exceptions.ConfigurationException;
+import org.apache.cassandra.service.EmbeddedCassandraService;
+import org.apache.cassandra.service.StorageService;
+import org.jboss.byteman.contrib.bmunit.BMRule;
+import org.jboss.byteman.contrib.bmunit.BMRules;
+import org.jboss.byteman.contrib.bmunit.BMUnitRunner;
+
+import static org.hamcrest.Matchers.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
+
+@RunWith(BMUnitRunner.class)
+public class TrieMemtableMetricsTest extends SchemaLoader
+{
+    private static final int NUM_SHARDS = 13;
+
+    private static final Logger logger = LoggerFactory.getLogger(TrieMemtableMetricsTest.class);
+    private static Session session;
+
+    private static final String KEYSPACE = "triememtable";
+    private static final String TABLE = "metricstest";
+
+    @BeforeClass
+    public static void loadSchema() throws ConfigurationException
+    {
+        // Intentionally shadows the superclass @BeforeClass method; setup() calls
+        // SchemaLoader.loadSchema() directly after adjusting the Config.
+    }
+
+    @BeforeClass
+    public static void setup() throws ConfigurationException, IOException
+    {
+        OverrideConfigurationLoader.override((config) -> {
+            config.partitioner = "Murmur3Partitioner";
+        });
+        System.setProperty(AbstractShardedMemtable.DEFAULT_SHARD_COUNT_PROPERTY, "" + NUM_SHARDS);
+
+        SchemaLoader.loadSchema();
+
+        EmbeddedCassandraService cassandra = new EmbeddedCassandraService();
+        cassandra.start();
+
+        Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").withPort(DatabaseDescriptor.getNativeTransportPort()).build();
+        session = cluster.connect();
+
+        session.execute(String.format("CREATE KEYSPACE IF NOT EXISTS %s WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };", KEYSPACE));
+    }
+
+    private ColumnFamilyStore recreateTable()
+    {
+        return recreateTable(TABLE);
+    }
+
+    private ColumnFamilyStore recreateTable(String table)
+    {
+        session.execute(String.format("DROP TABLE IF EXISTS %s.%s", KEYSPACE, table));
+        session.execute(String.format("CREATE TABLE IF NOT EXISTS %s.%s (id int, val1 text, val2 text, PRIMARY KEY(id, val1)) WITH MEMTABLE = 'test_memtable_metrics';", KEYSPACE, table));
+        return ColumnFamilyStore.getIfExists(KEYSPACE, table);
+    }
+
+    @Test
+    public void testRegularStatementsAreCounted()
+    {
+        ColumnFamilyStore cfs = recreateTable();
+        TrieMemtableMetricsView metrics = getMemtableMetrics(cfs);
+        assertEquals(0, metrics.contendedPuts.getCount());
+        assertEquals(0, metrics.uncontendedPuts.getCount());
+
+        for (int i = 0; i < 10; i++)
+        {
+            session.execute(String.format("INSERT INTO %s.%s (id, val1, val2) VALUES (%d, '%s', '%s')", KEYSPACE, TABLE, i, "val" + i, "val" + i));
+        }
+
+        long allPuts = metrics.contendedPuts.getCount() + metrics.uncontendedPuts.getCount();
+        assertEquals(10, allPuts);
+    }
+
+    @Test
+    public void testFlushRelatedMetrics() throws IOException, ExecutionException, InterruptedException
+    {
+        ColumnFamilyStore cfs = recreateTable();
+        TrieMemtableMetricsView metrics = getMemtableMetrics(cfs);
+
+        StorageService.instance.forceKeyspaceFlush(KEYSPACE, TABLE);
+        assertEquals(0, metrics.contendedPuts.getCount() + metrics.uncontendedPuts.getCount());
+
+        writeAndFlush(10);
+        assertEquals(10, metrics.contendedPuts.getCount() + metrics.uncontendedPuts.getCount());
+
+        // verify that metrics survive flush / memtable switching
+        writeAndFlush(10);
+        assertEquals(20, metrics.contendedPuts.getCount() + metrics.uncontendedPuts.getCount());
+        assertEquals(metrics.lastFlushShardDataSizes.toString(), NUM_SHARDS, metrics.lastFlushShardDataSizes.numSamplesGauge.getValue().intValue());
+    }
+
+    @Test
+    @BMRules(rules = { @BMRule(name = "Delay memtable update",
+    targetClass = "MemtableTrie",
+    targetMethod = "putSingleton",
+    action = "java.lang.Thread.sleep(10)")})
+    public void testContentionMetrics() throws IOException, ExecutionException, InterruptedException
+    {
+        ColumnFamilyStore cfs = recreateTable();
+        TrieMemtableMetricsView metrics = getMemtableMetrics(cfs);
+        assertEquals(0, (int) metrics.lastFlushShardDataSizes.numSamplesGauge.getValue());
+
+        StorageService.instance.forceKeyspaceFlush(KEYSPACE, TABLE);
+
+        writeAndFlush(100);
+
+        ByteArrayOutputStream stream = new ByteArrayOutputStream();
+        metrics.contentionTime.latency.getSnapshot().dump(stream);
+
+        assertEquals(100, metrics.contendedPuts.getCount() + metrics.uncontendedPuts.getCount());
+        assertThat(metrics.contendedPuts.getCount(), greaterThan(0L));
+        assertThat(metrics.contentionTime.totalLatency.getCount(), greaterThan(0L));
+    }
+
+    @Test
+    public void testMetricsCleanupOnDrop()
+    {
+        String tableName = TABLE + "_metrics_cleanup";
+        CassandraMetricsRegistry registry = CassandraMetricsRegistry.Metrics;
+        Supplier<Stream<String>> metrics = () -> registry.getNames().stream().filter(m -> m.contains(tableName));
+
+        // no metrics before creating
+        assertEquals(0, metrics.get().count());
+
+        recreateTable(tableName);
+        // some metrics
+        assertTrue(metrics.get().count() > 0);
+
+        session.execute(String.format("DROP TABLE IF EXISTS %s.%s", KEYSPACE, tableName));
+        // no metrics after drop
+        assertEquals(metrics.get().collect(Collectors.joining(",")), 0, metrics.get().count());
+    }
+
+    private TrieMemtableMetricsView getMemtableMetrics(ColumnFamilyStore cfs)
+    {
+        return new TrieMemtableMetricsView(cfs.keyspace.getName(), cfs.name);
+    }
+
+    private void writeAndFlush(int rows) throws IOException, ExecutionException, InterruptedException
+    {
+        logger.info("writing {} rows", rows);
+        Future[] futures = new Future[rows];
+        for (int i = 0; i < rows; i++)
+        {
+            logger.info("writing {} row", i);
+            futures[i] = session.executeAsync(String.format("INSERT INTO %s.%s (id, val1, val2) VALUES (%d, '%s', '%s')", KEYSPACE, TABLE, i, "val" + i, "val" + i));
+        }
+        for (int i = 0; i < rows; i++)
+        {
+            futures[i].get();
+            logger.info("writing {} row completed", i);
+        }
+        logger.info("forcing flush");
+        StorageService.instance.forceKeyspaceFlush(KEYSPACE, TABLE);
+        logger.info("table flushed");
+    }
+
+    @AfterClass
+    public static void teardown()
+    {
+        session.close();
+    }
+}
diff --git a/test/unit/org/apache/cassandra/tools/BulkLoaderTest.java b/test/unit/org/apache/cassandra/tools/BulkLoaderTest.java
index 4b2090ffff..16bf32d2a3 100644
--- a/test/unit/org/apache/cassandra/tools/BulkLoaderTest.java
+++ b/test/unit/org/apache/cassandra/tools/BulkLoaderTest.java
@@ -39,7 +39,8 @@ public class BulkLoaderTest extends OfflineToolUtils
         
         assertNoUnexpectedThreadsStarted(new String[] { "ObjectCleanerThread",
                                                         "Shutdown-checker",
-                                                        "cluster[0-9]-connection-reaper-[0-9]" });
+                                                        "cluster[0-9]-connection-reaper-[0-9]" },
+                                         false);
         assertSchemaNotLoaded();
         assertCLSMNotLoaded();
         assertSystemKSNotLoaded();
@@ -67,7 +68,8 @@ public class BulkLoaderTest extends OfflineToolUtils
                                                         "Shutdown-checker",
                                                         "cluster[0-9]-connection-reaper-[0-9]",
                                                         "Attach Listener",
-                                                        "process reaper"});
+                                                        "process reaper"},
+                                         false);
         assertSchemaNotLoaded();
         assertCLSMNotLoaded();
         assertSystemKSNotLoaded();
@@ -97,7 +99,8 @@ public class BulkLoaderTest extends OfflineToolUtils
                                                         "Shutdown-checker",
                                                         "cluster[0-9]-connection-reaper-[0-9]",
                                                         "Attach Listener",
-                                                        "process reaper"});
+                                                        "process reaper"},
+                                         false);
     assertSchemaNotLoaded();
     assertCLSMNotLoaded();
     assertSystemKSNotLoaded();
@@ -127,7 +130,8 @@ public class BulkLoaderTest extends OfflineToolUtils
                                                         "Shutdown-checker",
                                                         "cluster[0-9]-connection-reaper-[0-9]",
                                                         "Attach Listener",
-                                                        "process reaper"});
+                                                        "process reaper"},
+                                         false);
     assertSchemaNotLoaded();
     assertCLSMNotLoaded();
     assertSystemKSNotLoaded();
diff --git a/test/unit/org/apache/cassandra/tools/GetVersionTest.java b/test/unit/org/apache/cassandra/tools/GetVersionTest.java
index 9eaf57f551..b3ddb58328 100644
--- a/test/unit/org/apache/cassandra/tools/GetVersionTest.java
+++ b/test/unit/org/apache/cassandra/tools/GetVersionTest.java
@@ -29,7 +29,7 @@ public class GetVersionTest extends OfflineToolUtils
     {
         ToolResult tool = ToolRunner.invokeClass(GetVersion.class);
         tool.assertOnCleanExit();
-        assertNoUnexpectedThreadsStarted(null);
+        assertNoUnexpectedThreadsStarted(null, false);
         assertSchemaNotLoaded();
         assertCLSMNotLoaded();
         assertSystemKSNotLoaded();
diff --git a/test/unit/org/apache/cassandra/tools/OfflineToolUtils.java b/test/unit/org/apache/cassandra/tools/OfflineToolUtils.java
index 3bb2825527..47681f1862 100644
--- a/test/unit/org/apache/cassandra/tools/OfflineToolUtils.java
+++ b/test/unit/org/apache/cassandra/tools/OfflineToolUtils.java
@@ -30,7 +30,11 @@ import java.util.Objects;
 import java.util.Set;
 import java.util.regex.Pattern;
 import java.util.stream.Collectors;
+import java.util.stream.StreamSupport;
 
+import com.google.common.collect.Iterables;
+
+import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.io.util.File;
 import org.apache.commons.io.FileUtils;
 import org.junit.BeforeClass;
@@ -74,7 +78,12 @@ public abstract class OfflineToolUtils
     "Attach Listener", // spawned in intellij IDEA
     };
 
-    public void assertNoUnexpectedThreadsStarted(String[] optionalThreadNames)
+    static final String[] NON_DEFAULT_MEMTABLE_THREADS =
+    {
+    "((Native|Slab|Heap)Pool|Logged)Cleaner"
+    };
+
+    public void assertNoUnexpectedThreadsStarted(String[] optionalThreadNames, boolean allowNonDefaultMemtableThreads)
     {
         ThreadMXBean threads = ManagementFactory.getThreadMXBean();
 
@@ -87,10 +96,15 @@ public abstract class OfflineToolUtils
                                     .filter(Objects::nonNull)
                                     .map(ThreadInfo::getThreadName)
                                     .collect(Collectors.toSet());
+        Iterable<String> optionalNames = optionalThreadNames != null
+                                         ? Arrays.asList(optionalThreadNames)
+                                         : Collections.emptyList();
+        if (allowNonDefaultMemtableThreads && DatabaseDescriptor.getMemtableConfigurations().containsKey("default"))
+            optionalNames = Iterables.concat(optionalNames, Arrays.asList(NON_DEFAULT_MEMTABLE_THREADS));
 
-        List<Pattern> optional = optionalThreadNames != null
-                                 ? Arrays.stream(optionalThreadNames).map(Pattern::compile).collect(Collectors.toList())
-                                 : Collections.emptyList();
+        List<Pattern> optional = StreamSupport.stream(optionalNames.spliterator(), false)
+                                              .map(Pattern::compile)
+                                              .collect(Collectors.toList());
 
         current.removeAll(initial);
 
@@ -219,7 +233,7 @@ public abstract class OfflineToolUtils
     
     protected void assertCorrectEnvPostTest()
     {
-        assertNoUnexpectedThreadsStarted(OPTIONAL_THREADS_WITH_SCHEMA);
+        assertNoUnexpectedThreadsStarted(OPTIONAL_THREADS_WITH_SCHEMA, true);
         assertSchemaLoaded();
         assertServerNotLoaded();
     }
diff --git a/test/unit/org/apache/cassandra/tools/SSTableExpiredBlockersTest.java b/test/unit/org/apache/cassandra/tools/SSTableExpiredBlockersTest.java
index e76769e0ac..10b3f5dabc 100644
--- a/test/unit/org/apache/cassandra/tools/SSTableExpiredBlockersTest.java
+++ b/test/unit/org/apache/cassandra/tools/SSTableExpiredBlockersTest.java
@@ -37,7 +37,7 @@ public class SSTableExpiredBlockersTest extends OfflineToolUtils
         Assertions.assertThat(tool.getCleanedStderr()).isEmpty();
         assertEquals(1, tool.getExitCode());
 
-        assertNoUnexpectedThreadsStarted(null);
+        assertNoUnexpectedThreadsStarted(null, false);
         assertSchemaNotLoaded();
         assertCLSMNotLoaded();
         assertSystemKSNotLoaded();
diff --git a/test/unit/org/apache/cassandra/tools/SSTableExportSchemaLoadingTest.java b/test/unit/org/apache/cassandra/tools/SSTableExportSchemaLoadingTest.java
index 1530a2eca8..c708cd42ea 100644
--- a/test/unit/org/apache/cassandra/tools/SSTableExportSchemaLoadingTest.java
+++ b/test/unit/org/apache/cassandra/tools/SSTableExportSchemaLoadingTest.java
@@ -182,7 +182,7 @@ public class SSTableExportSchemaLoadingTest extends OfflineToolUtils
      */
     private void assertPostTestEnv()
     {
-        assertNoUnexpectedThreadsStarted(OPTIONAL_THREADS_WITH_SCHEMA);
+        assertNoUnexpectedThreadsStarted(OPTIONAL_THREADS_WITH_SCHEMA, false);
         assertCLSMNotLoaded();
         assertSystemKSNotLoaded();
         assertKeyspaceNotLoaded();
diff --git a/test/unit/org/apache/cassandra/tools/SSTableExportTest.java b/test/unit/org/apache/cassandra/tools/SSTableExportTest.java
index a5b70a2f76..3a4ead493b 100644
--- a/test/unit/org/apache/cassandra/tools/SSTableExportTest.java
+++ b/test/unit/org/apache/cassandra/tools/SSTableExportTest.java
@@ -108,7 +108,7 @@ public class SSTableExportTest extends OfflineToolUtils
      */
     private void assertPostTestEnv()
     {
-        assertNoUnexpectedThreadsStarted(OPTIONAL_THREADS_WITH_SCHEMA);
+        assertNoUnexpectedThreadsStarted(OPTIONAL_THREADS_WITH_SCHEMA, false);
         assertSchemaNotLoaded();
         assertCLSMNotLoaded();
         assertSystemKSNotLoaded();
diff --git a/test/unit/org/apache/cassandra/tools/SSTableLevelResetterTest.java b/test/unit/org/apache/cassandra/tools/SSTableLevelResetterTest.java
index e6d7adc765..0d609d3605 100644
--- a/test/unit/org/apache/cassandra/tools/SSTableLevelResetterTest.java
+++ b/test/unit/org/apache/cassandra/tools/SSTableLevelResetterTest.java
@@ -36,7 +36,7 @@ public class SSTableLevelResetterTest extends OfflineToolUtils
         assertThat(tool.getStdout(), CoreMatchers.containsStringIgnoringCase("usage:"));
         Assertions.assertThat(tool.getCleanedStderr()).isEmpty();
         assertEquals(1, tool.getExitCode());
-        assertNoUnexpectedThreadsStarted(null);
+        assertNoUnexpectedThreadsStarted(null, false);
         assertSchemaNotLoaded();
         assertCLSMNotLoaded();
         assertSystemKSNotLoaded();
diff --git a/test/unit/org/apache/cassandra/tools/SSTableMetadataViewerTest.java b/test/unit/org/apache/cassandra/tools/SSTableMetadataViewerTest.java
index baf7c8c1cf..6728fdf391 100644
--- a/test/unit/org/apache/cassandra/tools/SSTableMetadataViewerTest.java
+++ b/test/unit/org/apache/cassandra/tools/SSTableMetadataViewerTest.java
@@ -55,7 +55,7 @@ public class SSTableMetadataViewerTest extends OfflineToolUtils
             assertThat(tool.getCleanedStderr(), CoreMatchers.containsStringIgnoringCase("Options:"));
             assertEquals(1, tool.getExitCode());
         }
-        assertNoUnexpectedThreadsStarted(null);
+        assertNoUnexpectedThreadsStarted(null, false);
         assertSchemaNotLoaded();
         assertCLSMNotLoaded();
         assertSystemKSNotLoaded();
@@ -218,7 +218,7 @@ public class SSTableMetadataViewerTest extends OfflineToolUtils
 
     private void assertGoodEnvPostTest()
     {
-        assertNoUnexpectedThreadsStarted(OPTIONAL_THREADS_WITH_SCHEMA);
+        assertNoUnexpectedThreadsStarted(OPTIONAL_THREADS_WITH_SCHEMA, false);
         assertSchemaNotLoaded();
         assertCLSMNotLoaded();
         assertSystemKSNotLoaded();
diff --git a/test/unit/org/apache/cassandra/tools/SSTableOfflineRelevelTest.java b/test/unit/org/apache/cassandra/tools/SSTableOfflineRelevelTest.java
index 3f4314c0d6..15f62978bb 100644
--- a/test/unit/org/apache/cassandra/tools/SSTableOfflineRelevelTest.java
+++ b/test/unit/org/apache/cassandra/tools/SSTableOfflineRelevelTest.java
@@ -36,7 +36,7 @@ public class SSTableOfflineRelevelTest extends OfflineToolUtils
         assertThat(tool.getStdout(), CoreMatchers.containsStringIgnoringCase("usage:"));
         Assertions.assertThat(tool.getCleanedStderr()).isEmpty();
         assertEquals(1, tool.getExitCode());
-        assertNoUnexpectedThreadsStarted(null);
+        assertNoUnexpectedThreadsStarted(null, false);
         assertSchemaNotLoaded();
         assertCLSMNotLoaded();
         assertSystemKSNotLoaded();
diff --git a/test/unit/org/apache/cassandra/tools/SSTableRepairedAtSetterTest.java b/test/unit/org/apache/cassandra/tools/SSTableRepairedAtSetterTest.java
index 3531075b1a..9d3a609d13 100644
--- a/test/unit/org/apache/cassandra/tools/SSTableRepairedAtSetterTest.java
+++ b/test/unit/org/apache/cassandra/tools/SSTableRepairedAtSetterTest.java
@@ -41,7 +41,7 @@ public class SSTableRepairedAtSetterTest extends OfflineToolUtils
         assertThat(tool.getStdout(), CoreMatchers.containsStringIgnoringCase("usage:"));
         Assertions.assertThat(tool.getCleanedStderr()).isEmpty();
         assertEquals(1, tool.getExitCode());
-        assertNoUnexpectedThreadsStarted(null);
+        assertNoUnexpectedThreadsStarted(null, false);
         assertSchemaNotLoaded();
         assertCLSMNotLoaded();
         assertSystemKSNotLoaded();
@@ -79,7 +79,7 @@ public class SSTableRepairedAtSetterTest extends OfflineToolUtils
                                                        "--is-repaired",
                                                        findOneSSTable("legacy_sstables", "legacy_ma_simple"));
         tool.assertOnCleanExit();
-        assertNoUnexpectedThreadsStarted(OPTIONAL_THREADS_WITH_SCHEMA);
+        assertNoUnexpectedThreadsStarted(OPTIONAL_THREADS_WITH_SCHEMA, false);
         assertSchemaNotLoaded();
         assertCLSMNotLoaded();
         assertSystemKSNotLoaded();
@@ -95,7 +95,7 @@ public class SSTableRepairedAtSetterTest extends OfflineToolUtils
                                                  "--is-unrepaired",
                                                  findOneSSTable("legacy_sstables", "legacy_ma_simple"));
         tool.assertOnCleanExit();
-        assertNoUnexpectedThreadsStarted(OPTIONAL_THREADS_WITH_SCHEMA);
+        assertNoUnexpectedThreadsStarted(OPTIONAL_THREADS_WITH_SCHEMA, false);
         assertSchemaNotLoaded();
         assertCLSMNotLoaded();
         assertSystemKSNotLoaded();
@@ -113,7 +113,7 @@ public class SSTableRepairedAtSetterTest extends OfflineToolUtils
         String file = tmpFile.absolutePath();
         ToolResult tool = ToolRunner.invokeClass(SSTableRepairedAtSetter.class, "--really-set", "--is-repaired", "-f", file);
         tool.assertOnCleanExit();
-        assertNoUnexpectedThreadsStarted(OPTIONAL_THREADS_WITH_SCHEMA);
+        assertNoUnexpectedThreadsStarted(OPTIONAL_THREADS_WITH_SCHEMA, false);
         assertSchemaNotLoaded();
         assertCLSMNotLoaded();
         assertSystemKSNotLoaded();
diff --git a/test/unit/org/apache/cassandra/tools/ToolsSchemaLoadingTest.java b/test/unit/org/apache/cassandra/tools/ToolsSchemaLoadingTest.java
index 1a99643674..d23832797a 100644
--- a/test/unit/org/apache/cassandra/tools/ToolsSchemaLoadingTest.java
+++ b/test/unit/org/apache/cassandra/tools/ToolsSchemaLoadingTest.java
@@ -34,7 +34,7 @@ public class ToolsSchemaLoadingTest extends OfflineToolUtils
         assertThat(tool.getStdout(), CoreMatchers.containsStringIgnoringCase("usage:"));
         assertThat(tool.getCleanedStderr(), CoreMatchers.containsStringIgnoringCase("Missing arguments"));
         assertEquals(1, tool.getExitCode());
-        assertNoUnexpectedThreadsStarted(null);
+        assertNoUnexpectedThreadsStarted(null, false);
         assertSchemaNotLoaded();
         assertCLSMNotLoaded();
         assertSystemKSNotLoaded();
@@ -49,7 +49,7 @@ public class ToolsSchemaLoadingTest extends OfflineToolUtils
         assertThat(tool.getStdout(), CoreMatchers.containsStringIgnoringCase("usage:"));
         assertThat(tool.getCleanedStderr(), CoreMatchers.containsStringIgnoringCase("Missing arguments"));
         assertEquals(1, tool.getExitCode());
-        assertNoUnexpectedThreadsStarted(null);
+        assertNoUnexpectedThreadsStarted(null, false);
         assertSchemaNotLoaded();
         assertCLSMNotLoaded();
         assertSystemKSNotLoaded();
@@ -64,7 +64,7 @@ public class ToolsSchemaLoadingTest extends OfflineToolUtils
         assertThat(tool.getStdout(), CoreMatchers.containsStringIgnoringCase("usage:"));
         assertThat(tool.getCleanedStderr(), CoreMatchers.containsStringIgnoringCase("No sstables to split"));
         assertEquals(1, tool.getExitCode());
-        assertNoUnexpectedThreadsStarted(null);
+        assertNoUnexpectedThreadsStarted(null, false);
         assertSchemaNotLoaded();
         assertCLSMNotLoaded();
         assertSystemKSNotLoaded();
@@ -79,7 +79,7 @@ public class ToolsSchemaLoadingTest extends OfflineToolUtils
         assertThat(tool.getStdout(), CoreMatchers.containsStringIgnoringCase("usage:"));
         assertThat(tool.getCleanedStderr(), CoreMatchers.containsStringIgnoringCase("Missing arguments"));
         assertEquals(1, tool.getExitCode());
-        assertNoUnexpectedThreadsStarted(null);
+        assertNoUnexpectedThreadsStarted(null, false);
         assertSchemaNotLoaded();
         assertCLSMNotLoaded();
         assertSystemKSNotLoaded();
@@ -94,7 +94,7 @@ public class ToolsSchemaLoadingTest extends OfflineToolUtils
         assertThat(tool.getStdout(), CoreMatchers.containsStringIgnoringCase("usage:"));
         assertThat(tool.getCleanedStderr(), CoreMatchers.containsStringIgnoringCase("Missing arguments"));
         assertEquals(1, tool.getExitCode());
-        assertNoUnexpectedThreadsStarted(null);
+        assertNoUnexpectedThreadsStarted(null, false);
         assertSchemaNotLoaded();
         assertCLSMNotLoaded();
         assertSystemKSNotLoaded();


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@cassandra.apache.org
For additional commands, e-mail: commits-help@cassandra.apache.org