Posted to commits@cassandra.apache.org by be...@apache.org on 2021/09/29 18:40:21 UTC

[cassandra] 01/01: [CASSANDRA-16923] CEP-10 Phase 1: Mockable System Clock

This is an automated email from the ASF dual-hosted git repository.

benedict pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/cassandra.git

commit 15a2fe00fc9817874f4c1600d56a373c21b1ad1c
Author: Benedict Elliott Smith <be...@apache.org>
AuthorDate: Mon Jan 18 13:36:58 2021 +0000

    [CASSANDRA-16923] CEP-10 Phase 1: Mockable System Clock
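
    Replaces direct calls to System.currentTimeMillis()/System.nanoTime()
    throughout the codebase with the static facade
    org.apache.cassandra.utils.Clock.Global, so that tests can substitute a
    mock clock. A new checkstyle rule (regexp "System\.(currentTimeMillis|nanoTime)")
    fails the build on new direct usages, with Clock.java itself suppressed.

    Typical call-site change (taken from the diff below):

        - long start = System.nanoTime();
        + long start = nanoTime();   // static import of Clock.Global.nanoTime

    A minimal sketch of the facade's shape (illustrative only: the actual
    implementation lives in the new src/java/org/apache/cassandra/utils/Clock.java,
    whose body is not shown in this excerpt, and the mechanism for installing a
    custom clock is an assumption):

        public interface Clock
        {
            long nanoTime();
            long currentTimeMillis();

            // default implementation backed by the JDK system clock
            class Default implements Clock
            {
                public long nanoTime() { return System.nanoTime(); }
                public long currentTimeMillis() { return System.currentTimeMillis(); }
            }

            // global holder; assumed to be replaceable (e.g. via a system
            // property) so tests can plug in a controllable clock
            class Global
            {
                private static volatile Clock instance = new Default();

                public static long nanoTime() { return instance.nanoTime(); }
                public static long currentTimeMillis() { return instance.currentTimeMillis(); }
            }
        }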
    
    Co-authored-by: Benedict Elliott Smith <be...@apache.org>
    Co-authored-by: Aleksey Yeschenko <al...@apache.org>
    Co-authored-by: Sam Tunnicliffe <sa...@apache.org>
---
 build.xml                                          |  30 +++-
 checkstyle.xml                                     |  48 ++++++
 checkstyle_suppressions.xml                        |  26 +++
 .../org/apache/cassandra/audit/AuditLogEntry.java  |   6 +-
 .../apache/cassandra/auth/CassandraAuthorizer.java |   9 +-
 .../cassandra/auth/CassandraNetworkAuthorizer.java |   5 +-
 .../cassandra/auth/CassandraRoleManager.java       |   5 +-
 .../cassandra/auth/PasswordAuthenticator.java      |   3 +-
 .../apache/cassandra/batchlog/BatchlogManager.java |   6 +-
 .../apache/cassandra/cache/AutoSavingCache.java    |  14 +-
 .../org/apache/cassandra/concurrent/SEPWorker.java |   5 +-
 .../cassandra/concurrent/SharedExecutorPool.java   |   5 +-
 src/java/org/apache/cassandra/cql3/Lists.java      |   3 +-
 .../org/apache/cassandra/cql3/QueryProcessor.java  |   5 +-
 .../apache/cassandra/cql3/UntypedResultSet.java    |   4 +-
 .../cassandra/cql3/functions/UDAggregate.java      |   5 +-
 .../cassandra/cql3/functions/UDFunction.java       |   9 +-
 .../cassandra/cql3/statements/BatchStatement.java  |   3 +-
 .../cql3/statements/ModificationStatement.java     |   3 +-
 .../cassandra/cql3/statements/SelectStatement.java |   3 +-
 .../cassandra/cql3/statements/UseStatement.java    |   4 +-
 .../org/apache/cassandra/db/ColumnFamilyStore.java |  25 +--
 .../org/apache/cassandra/db/CounterMutation.java   |   5 +-
 .../cassandra/db/CounterMutationVerbHandler.java   |   4 +-
 .../db/ExpirationDateOverflowHandling.java         |   4 +-
 src/java/org/apache/cassandra/db/Keyspace.java     |   9 +-
 src/java/org/apache/cassandra/db/Memtable.java     |   6 +-
 src/java/org/apache/cassandra/db/ReadCommand.java  |   5 +-
 .../org/apache/cassandra/db/RepairedDataInfo.java  |   6 +-
 .../apache/cassandra/db/SizeEstimatesRecorder.java |   6 +-
 .../org/apache/cassandra/db/SystemKeyspace.java    |   8 +-
 .../db/commitlog/AbstractCommitLogService.java     |   7 +-
 .../cassandra/db/commitlog/CommitLogSegment.java   |   3 +-
 .../db/commitlog/PeriodicCommitLogService.java     |   4 +-
 .../db/compaction/AbstractCompactionStrategy.java  |   5 +-
 .../cassandra/db/compaction/CompactionLogger.java  |   4 +-
 .../cassandra/db/compaction/CompactionManager.java |   9 +-
 .../cassandra/db/compaction/CompactionTask.java    |  23 +--
 .../compaction/DateTieredCompactionStrategy.java   |   5 +-
 .../db/compaction/LeveledGenerations.java          |   8 +-
 .../compaction/TimeWindowCompactionStrategy.java   |   5 +-
 .../org/apache/cassandra/db/lifecycle/LogFile.java |   5 +-
 .../apache/cassandra/db/marshal/TemporalType.java  |   4 +-
 .../db/partitions/AtomicBTreePartition.java        |   4 +-
 .../cassandra/db/repair/PendingAntiCompaction.java |   9 +-
 .../org/apache/cassandra/db/view/TableViews.java   |   8 +-
 .../apache/cassandra/db/view/ViewBuilderTask.java  |   4 +-
 .../cassandra/db/virtual/AbstractVirtualTable.java |   6 +-
 .../org/apache/cassandra/diag/DiagnosticEvent.java |   4 +-
 .../cassandra/diag/LastEventIdBroadcaster.java     |   6 +-
 .../org/apache/cassandra/gms/EndpointState.java    |   6 +-
 .../cassandra/gms/GossipDigestAckVerbHandler.java  |   3 +-
 src/java/org/apache/cassandra/gms/Gossiper.java    |  24 +--
 .../cassandra/hadoop/cql3/CqlInputFormat.java      |   4 +-
 src/java/org/apache/cassandra/hints/Hint.java      |   3 +-
 .../cassandra/hints/HintsCleanupTrigger.java       |   4 +-
 .../org/apache/cassandra/hints/HintsReader.java    |   6 +-
 .../org/apache/cassandra/hints/HintsStore.java     |   4 +-
 .../index/sasi/disk/PerSSTableIndexWriter.java     |  10 +-
 .../cassandra/index/sasi/plan/QueryController.java |   6 +-
 .../cassandra/io/sstable/CQLSSTableWriter.java     |   4 +-
 .../io/sstable/format/SSTableReaderBuilder.java    |  12 +-
 .../io/sstable/format/big/BigTableWriter.java      |   4 +-
 .../apache/cassandra/locator/TokenMetadata.java    |   5 +-
 .../org/apache/cassandra/metrics/TableMetrics.java |   5 +-
 .../org/apache/cassandra/net/AsyncPromise.java     |   5 +-
 .../org/apache/cassandra/net/MessagingService.java |   5 +-
 .../org/apache/cassandra/net/RequestCallbacks.java |   3 +-
 .../net/StartupClusterConnectivityChecker.java     |   9 +-
 .../org/apache/cassandra/repair/RepairJob.java     |  10 +-
 .../apache/cassandra/repair/RepairRunnable.java    |  14 +-
 src/java/org/apache/cassandra/repair/SyncTask.java |   6 +-
 .../apache/cassandra/repair/ValidationManager.java |   6 +-
 .../repair/consistent/CoordinatorSession.java      |  14 +-
 .../cassandra/schema/MigrationCoordinator.java     |   4 +-
 .../cassandra/serializers/TimestampSerializer.java |   4 +-
 .../service/AbstractWriteResponseHandler.java      |   5 +-
 .../cassandra/service/ActiveRepairService.java     |   3 +-
 .../org/apache/cassandra/service/ClientState.java  |   6 +-
 .../org/apache/cassandra/service/GCInspector.java  |   6 +-
 .../service/PendingRangeCalculatorService.java     |   6 +-
 .../apache/cassandra/service/StartupChecks.java    |   3 +-
 .../org/apache/cassandra/service/StorageProxy.java |  42 ++---
 .../apache/cassandra/service/StorageService.java   |   8 +-
 .../cassandra/service/TruncateResponseHandler.java |   6 +-
 .../service/pager/AggregationQueryPager.java       |   6 +-
 .../service/pager/MultiPartitionPager.java         |   4 +-
 .../service/paxos/AbstractPaxosCallback.java       |   3 +-
 .../apache/cassandra/service/paxos/PaxosState.java |  14 +-
 .../cassandra/service/reads/DigestResolver.java    |   5 +-
 .../cassandra/service/reads/ReadCallback.java      |   3 +-
 .../service/reads/range/RangeCommandIterator.java  |   6 +-
 .../reads/repair/BlockingPartitionRepair.java      |   5 +-
 .../apache/cassandra/streaming/StreamSession.java  |   5 +-
 .../async/NettyStreamingMessageSender.java         |   6 +-
 .../management/StreamEventJMXNotifier.java         |   6 +-
 .../apache/cassandra/tools/BootstrapMonitor.java   |   4 +-
 .../org/apache/cassandra/tools/BulkLoader.java     |   8 +-
 .../apache/cassandra/tools/JsonTransformer.java    |   6 +-
 .../org/apache/cassandra/tools/RepairRunner.java   |   4 +-
 .../cassandra/tools/SSTableExpiredBlockers.java    |   4 +-
 .../cassandra/tools/SSTableMetadataViewer.java     |   7 +-
 .../apache/cassandra/tools/StandaloneScrubber.java |   4 +-
 .../apache/cassandra/tools/StandaloneSplitter.java |   3 +-
 .../apache/cassandra/tools/nodetool/Snapshot.java  |   3 +-
 .../apache/cassandra/tracing/TraceStateImpl.java   |   6 +-
 .../org/apache/cassandra/tracing/TracingImpl.java  |   4 +-
 .../org/apache/cassandra/transport/Dispatcher.java |   3 +-
 .../transport/ProtocolVersionTracker.java          |   6 +-
 .../apache/cassandra/transport/SimpleClient.java   |   6 +-
 .../cassandra/transport/messages/BatchMessage.java |   4 +-
 .../transport/messages/ExecuteMessage.java         |   4 +-
 .../transport/messages/PrepareMessage.java         |   4 +-
 .../cassandra/transport/messages/QueryMessage.java |   4 +-
 .../apache/cassandra/utils/ApproximateTime.java    | 192 ---------------------
 src/java/org/apache/cassandra/utils/Clock.java     | 110 ++++++++++++
 .../cassandra/utils/DiagnosticSnapshotService.java |   5 +-
 .../org/apache/cassandra/utils/ExecutorUtils.java  |   5 +-
 .../cassandra/utils/ExpiringMemoizingSupplier.java |   4 +-
 .../org/apache/cassandra/utils/FBUtilities.java    |  14 +-
 .../org/apache/cassandra/utils/GuidGenerator.java  |   3 +-
 .../org/apache/cassandra/utils/MonotonicClock.java |  13 +-
 .../org/apache/cassandra/utils/NoSpamLogger.java   |   4 +-
 .../apache/cassandra/utils/SlidingTimeRate.java    | 171 ------------------
 .../apache/cassandra/utils/SystemTimeSource.java   |  54 ------
 .../org/apache/cassandra/utils/TimeSource.java     |  58 -------
 src/java/org/apache/cassandra/utils/UUIDGen.java   |   6 +-
 .../cassandra/utils/binlog/ExternalArchiver.java   |   7 +-
 .../cassandra/utils/concurrent/IntervalLock.java   |  69 --------
 .../utils/concurrent/SimpleCondition.java          |   4 +-
 .../cassandra/utils/concurrent/WaitQueue.java      |  10 +-
 .../utils/progress/jmx/JMXProgressSupport.java     |   4 +-
 .../concurrent/LongSharedExecutorPoolTest.java     |  18 +-
 .../apache/cassandra/net/ConnectionBurnTest.java   |   9 +-
 test/burn/org/apache/cassandra/net/Reporters.java  |   6 +-
 test/burn/org/apache/cassandra/net/Verifier.java   |   7 +-
 .../apache/cassandra/transport/DriverBurnTest.java |   5 +-
 .../cassandra/transport/SimpleClientPerfTest.java  |   5 +-
 .../cassandra/utils/memory/LongBufferPoolTest.java |   7 +-
 .../cassandra/distributed/impl/Coordinator.java    |   7 +-
 .../cassandra/distributed/impl/Instance.java       |  10 +-
 .../HostReplacementAbruptDownedInstanceTest.java   |   5 +-
 .../org/apache/cassandra/cql3/CachingBench.java    |   5 +-
 .../apache/cassandra/cql3/GcCompactionBench.java   |   6 +-
 .../db/compaction/LongCompactionsTest.java         |   6 +-
 .../io/compress/CompressorPerformance.java         |  10 +-
 .../cassandra/streaming/LongStreamingTest.java     |  17 +-
 .../test/microbench/BatchStatementBench.java       |   3 +-
 .../cassandra/test/microbench/MessageOutBench.java |   3 +-
 .../DebuggableThreadPoolExecutorTest.java          |   6 +-
 .../cassandra/cql3/CustomNowInSecondsTest.java     |   8 +-
 .../cassandra/cql3/PstmtPersistenceTest.java       |   5 +-
 .../cql3/validation/entities/CollectionsTest.java  |   3 +-
 .../cql3/validation/entities/JsonTest.java         |   3 +-
 .../validation/entities/SecondaryIndexTest.java    |   3 +-
 test/unit/org/apache/cassandra/db/CleanupTest.java |   5 +-
 .../org/apache/cassandra/db/DirectoriesTest.java   |   3 +-
 .../cassandra/db/RangeTombstoneListTest.java       |   3 +-
 .../cassandra/db/commitlog/BatchCommitLogTest.java |   9 +-
 .../CorruptedSSTablesCompactionsTest.java          |   3 +-
 .../db/lifecycle/RealTransactionsTest.java         |   7 +-
 .../db/monitoring/MonitoringTaskTest.java          |  48 +++---
 .../cassandra/io/sstable/IndexSummaryTest.java     |   5 +-
 .../io/sstable/SSTableCorruptionDetectionTest.java |   3 +-
 .../io/util/BufferedDataOutputStreamTest.java      |   3 +-
 .../io/util/FileSegmentInputStreamTest.java        |   3 +-
 .../cassandra/io/util/MmappedRegionsTest.java      |   3 +-
 .../cassandra/io/util/NIODataInputStreamTest.java  |   3 +-
 .../cassandra/io/util/RandomAccessReaderTest.java  |   3 +-
 .../cassandra/locator/TokenMetadataTest.java       |   2 +-
 .../DecayingEstimatedHistogramReservoirTest.java   |   3 +-
 .../apache/cassandra/net/AsyncOneResponseTest.java |   5 +-
 .../cassandra/net/AsyncStreamingInputPlusTest.java |   5 +-
 .../org/apache/cassandra/net/ConnectionTest.java   |  12 +-
 .../apache/cassandra/service/PaxosStateTest.java   |   5 +-
 .../service/WriteResponseHandlerTest.java          |   6 +-
 .../cassandra/service/reads/DataResolverTest.java  |  62 +++----
 .../cassandra/service/reads/ReadExecutorTest.java  |  13 +-
 .../reads/range/RangeCommandIteratorTest.java      |  11 +-
 .../service/reads/range/RangeCommandsTest.java     |   7 +-
 .../reads/repair/AbstractReadRepairTest.java       |   5 +-
 .../reads/repair/BlockingReadRepairTest.java       |   5 +-
 .../repair/DiagEventsBlockingReadRepairTest.java   |   5 +-
 .../service/reads/repair/ReadRepairTest.java       |   6 +-
 .../apache/cassandra/tools/OfflineToolUtils.java   |   5 +-
 .../cassandra/triggers/TriggersSchemaTest.java     |   7 +-
 .../apache/cassandra/triggers/TriggersTest.java    |   7 +-
 .../apache/cassandra/utils/MonotonicClockTest.java |   5 +-
 .../cassandra/utils/SlidingTimeRateTest.java       | 161 -----------------
 .../org/apache/cassandra/utils/TestTimeSource.java |  72 --------
 .../apache/cassandra/utils/binlog/BinLogTest.java  |   3 +-
 .../io/sstable/StressCQLSSTableWriter.java         |   4 +-
 .../org/apache/cassandra/stress/StressAction.java  |   6 +-
 .../cassandra/stress/report/StressMetrics.java     |  15 +-
 .../org/apache/cassandra/stress/report/Timer.java  |   6 +-
 195 files changed, 997 insertions(+), 1251 deletions(-)

diff --git a/build.xml b/build.xml
index 97ea512..dc3f547 100644
--- a/build.xml
+++ b/build.xml
@@ -534,6 +534,7 @@
           </dependency>
           <dependency groupId="org.apache.cassandra" artifactId="dtest-api" version="0.0.9" scope="test"/>
           <dependency groupId="org.reflections" artifactId="reflections" version="0.9.12" scope="test"/>
+          <dependency groupId="com.puppycrawl.tools" artifactId="checkstyle" version="8.40" scope="test"/>
           <dependency groupId="org.apache.hadoop" artifactId="hadoop-core" version="1.0.3" scope="provided">
             <exclusion groupId="org.mortbay.jetty" artifactId="servlet-api"/>
             <exclusion groupId="commons-logging" artifactId="commons-logging"/>
@@ -718,6 +719,7 @@
         <dependency groupId="junit" artifactId="junit"/>
         <dependency groupId="commons-io" artifactId="commons-io"/>
         <dependency groupId="org.mockito" artifactId="mockito-core"/>
+        <dependency groupId="com.puppycrawl.tools" artifactId="checkstyle" scope="test"/>
         <dependency groupId="org.quicktheories" artifactId="quicktheories"/>
         <dependency groupId="org.reflections" artifactId="reflections"/>
         <dependency groupId="com.google.code.java-allocation-instrumenter" artifactId="java-allocation-instrumenter" version="${allocation-instrumenter.version}"/>
@@ -869,7 +871,7 @@
     <!--
         The build target builds all the .class files
     -->
-    <target name="build" depends="resolver-retrieve-build,build-project" description="Compile Cassandra classes"/>
+    <target name="build" depends="resolver-retrieve-build,build-project,checkstyle" description="Compile Cassandra classes"/>
     <target name="codecoverage" depends="jacoco-run,jacoco-report" description="Create code coverage report"/>
 
     <target name="_build_java">
@@ -2100,6 +2102,32 @@
         </java>
   </target>
 
+  <target name="init-checkstyle" depends="maven-ant-tasks-retrieve-build,build-project">
+      <path id="checkstyle.lib.path">
+          <fileset dir="${test.lib}/jars" includes="*.jar"/>
+      </path>
+      <!-- Sevntu custom checks are retrieved by Ivy into lib folder
+         and will be accessible to checkstyle-->
+      <taskdef resource="com/puppycrawl/tools/checkstyle/ant/checkstyle-ant-task.properties"
+               classpathref="checkstyle.lib.path"/>
+  </target>
+
+  <target name="checkstyle" depends="init-checkstyle,maven-ant-tasks-retrieve-build,build-project" description="Run custom checkstyle code analysis" if="java.version.8">
+      <property name="checkstyle.log.dir" value="${build.dir}/checkstyle" />
+      <property name="checkstyle.report.file" value="${checkstyle.log.dir}/checkstyle_report.xml"/>
+      <mkdir  dir="${checkstyle.log.dir}" />
+
+      <property name="checkstyle.properties" value="${basedir}/checkstyle.xml" />
+      <property name="checkstyle.suppressions" value="${basedir}/checkstyle_suppressions.xml" />
+      <checkstyle config="${checkstyle.properties}"
+                  failureProperty="checkstyle.failure"
+                  failOnViolation="true">
+          <formatter type="plain"/>
+          <formatter type="xml" tofile="${checkstyle.report.file}"/>
+          <fileset dir="${build.src.java}" includes="**/*.java"/>
+      </checkstyle>
+  </target>
+
 
   <!-- Installs artifacts to local Maven repository -->
   <target name="mvn-install"
diff --git a/checkstyle.xml b/checkstyle.xml
new file mode 100644
index 0000000..8a90cc7
--- /dev/null
+++ b/checkstyle.xml
@@ -0,0 +1,48 @@
+<?xml version="1.0"?>
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+<!DOCTYPE module PUBLIC
+          "-//Checkstyle//DTD Checkstyle Configuration 1.3//EN"
+          "https://checkstyle.org/dtds/configuration_1_3.dtd">
+
+<module name="Checker">
+  <property name="severity" value="error"/>
+
+  <property name="fileExtensions" value="java, properties, xml"/>
+
+  <module name="BeforeExecutionExclusionFileFilter">
+    <property name="fileNamePattern" value="module\-info\.java$"/>
+  </module>
+
+  <!-- https://checkstyle.org/config_filters.html#SuppressionFilter -->
+  <module name="SuppressionFilter">
+    <property name="file" value="${checkstyle.suppressions}"
+              default="checkstyle-suppressions.xml" />
+    <property name="optional" value="false"/>
+  </module>
+
+  <module name="TreeWalker">
+
+    <module name="RegexpSinglelineJava">
+      <!-- To prevent static imports -->
+      <property name="format" value="System\.(currentTimeMillis|nanoTime)"/>
+      <property name="ignoreComments" value="true"/>
+    </module>
+  </module>
+
+</module>
diff --git a/checkstyle_suppressions.xml b/checkstyle_suppressions.xml
new file mode 100644
index 0000000..13ce561
--- /dev/null
+++ b/checkstyle_suppressions.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+
+<!DOCTYPE suppressions PUBLIC
+        "-//Checkstyle//DTD SuppressionFilter Configuration 1.1//EN"
+        "https://checkstyle.org/dtds/suppressions_1_1.dtd">
+
+<suppressions>
+  <suppress checks="RegexpSinglelineJava" files="Clock\.java"/>
+</suppressions>
diff --git a/src/java/org/apache/cassandra/audit/AuditLogEntry.java b/src/java/org/apache/cassandra/audit/AuditLogEntry.java
index 4d3b867..3a015c5 100644
--- a/src/java/org/apache/cassandra/audit/AuditLogEntry.java
+++ b/src/java/org/apache/cassandra/audit/AuditLogEntry.java
@@ -32,6 +32,8 @@ import org.apache.cassandra.service.ClientState;
 import org.apache.cassandra.service.QueryState;
 import org.apache.cassandra.utils.FBUtilities;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 public class AuditLogEntry
 {
     private final InetAddressAndPort host = FBUtilities.getBroadcastAddressAndPort();
@@ -214,7 +216,7 @@ public class AuditLogEntry
                 user = AuthenticatedUser.SYSTEM_USER.getName();
             }
 
-            timestamp = System.currentTimeMillis();
+            timestamp = currentTimeMillis();
         }
 
         public Builder(AuditLogEntry entry)
@@ -312,7 +314,7 @@ public class AuditLogEntry
 
         public AuditLogEntry build()
         {
-            timestamp = timestamp > 0 ? timestamp : System.currentTimeMillis();
+            timestamp = timestamp > 0 ? timestamp : currentTimeMillis();
             return new AuditLogEntry(type, source, user, timestamp, batch, keyspace, scope, operation, options, state);
         }
     }
diff --git a/src/java/org/apache/cassandra/auth/CassandraAuthorizer.java b/src/java/org/apache/cassandra/auth/CassandraAuthorizer.java
index 318b7f3..60d5a1f 100644
--- a/src/java/org/apache/cassandra/auth/CassandraAuthorizer.java
+++ b/src/java/org/apache/cassandra/auth/CassandraAuthorizer.java
@@ -40,6 +40,11 @@ import org.apache.cassandra.service.QueryState;
 import org.apache.cassandra.transport.messages.ResultMessage;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
+import static org.apache.cassandra.cql3.BatchQueryOptions.withoutPerStatementVariables;
+import static org.apache.cassandra.cql3.QueryOptions.DEFAULT;
+import static org.apache.cassandra.service.QueryState.forInternalCalls;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 /**
  * CassandraAuthorizer is an IAuthorizer implementation that keeps
  * user permissions internally in C* using the system_auth.role_permissions
@@ -352,7 +357,7 @@ public class CassandraAuthorizer implements IAuthorizer
 
     ResultMessage.Rows select(SelectStatement statement, QueryOptions options)
     {
-        return statement.execute(QueryState.forInternalCalls(), options, System.nanoTime());
+        return statement.execute(QueryState.forInternalCalls(), options, nanoTime());
     }
 
     UntypedResultSet process(String query, ConsistencyLevel cl) throws RequestExecutionException
@@ -366,7 +371,7 @@ public class CassandraAuthorizer implements IAuthorizer
         QueryProcessor.instance.processBatch(statement,
                                              QueryState.forInternalCalls(),
                                              BatchQueryOptions.withoutPerStatementVariables(options),
-                                             System.nanoTime());
+                                             nanoTime());
     }
 
     public static ConsistencyLevel authWriteConsistencyLevel()
diff --git a/src/java/org/apache/cassandra/auth/CassandraNetworkAuthorizer.java b/src/java/org/apache/cassandra/auth/CassandraNetworkAuthorizer.java
index 107cd85..4384f7a 100644
--- a/src/java/org/apache/cassandra/auth/CassandraNetworkAuthorizer.java
+++ b/src/java/org/apache/cassandra/auth/CassandraNetworkAuthorizer.java
@@ -35,6 +35,9 @@ import org.apache.cassandra.service.QueryState;
 import org.apache.cassandra.transport.messages.ResultMessage;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
+import static org.apache.cassandra.service.QueryState.forInternalCalls;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 public class CassandraNetworkAuthorizer implements INetworkAuthorizer
 {
     private SelectStatement authorizeUserStatement = null;
@@ -50,7 +53,7 @@ public class CassandraNetworkAuthorizer implements INetworkAuthorizer
     @VisibleForTesting
     ResultMessage.Rows select(SelectStatement statement, QueryOptions options)
     {
-        return statement.execute(QueryState.forInternalCalls(), options, System.nanoTime());
+        return statement.execute(forInternalCalls(), options, nanoTime());
     }
 
     @VisibleForTesting
diff --git a/src/java/org/apache/cassandra/auth/CassandraRoleManager.java b/src/java/org/apache/cassandra/auth/CassandraRoleManager.java
index f4c78ac..a79fb12 100644
--- a/src/java/org/apache/cassandra/auth/CassandraRoleManager.java
+++ b/src/java/org/apache/cassandra/auth/CassandraRoleManager.java
@@ -48,6 +48,9 @@ import org.apache.cassandra.transport.messages.ResultMessage;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.mindrot.jbcrypt.BCrypt;
 
+import static org.apache.cassandra.service.QueryState.forInternalCalls;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 /**
  * Responsible for the creation, maintenance and deletion of roles
  * for the purposes of authentication and authorization.
@@ -548,6 +551,6 @@ public class CassandraRoleManager implements IRoleManager
     @VisibleForTesting
     ResultMessage.Rows select(SelectStatement statement, QueryOptions options)
     {
-        return statement.execute(QueryState.forInternalCalls(), options, System.nanoTime());
+        return statement.execute(forInternalCalls(), options, nanoTime());
     }
 }
\ No newline at end of file
diff --git a/src/java/org/apache/cassandra/auth/PasswordAuthenticator.java b/src/java/org/apache/cassandra/auth/PasswordAuthenticator.java
index 50b86f7..098ed9f 100644
--- a/src/java/org/apache/cassandra/auth/PasswordAuthenticator.java
+++ b/src/java/org/apache/cassandra/auth/PasswordAuthenticator.java
@@ -45,6 +45,7 @@ import org.apache.cassandra.utils.ByteBufferUtil;
 import org.mindrot.jbcrypt.BCrypt;
 
 import static org.apache.cassandra.auth.CassandraRoleManager.consistencyForRoleRead;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 /**
  * PasswordAuthenticator is an IAuthenticator implementation
@@ -130,7 +131,7 @@ public class PasswordAuthenticator implements IAuthenticator
     @VisibleForTesting
     ResultMessage.Rows select(SelectStatement statement, QueryOptions options)
     {
-        return statement.execute(QueryState.forInternalCalls(), options, System.nanoTime());
+        return statement.execute(QueryState.forInternalCalls(), options, nanoTime());
     }
 
     public Set<DataResource> protectedResources()
diff --git a/src/java/org/apache/cassandra/batchlog/BatchlogManager.java b/src/java/org/apache/cassandra/batchlog/BatchlogManager.java
index 65ed71e..799acbc 100644
--- a/src/java/org/apache/cassandra/batchlog/BatchlogManager.java
+++ b/src/java/org/apache/cassandra/batchlog/BatchlogManager.java
@@ -83,6 +83,8 @@ import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static org.apache.cassandra.cql3.QueryProcessor.executeInternal;
 import static org.apache.cassandra.cql3.QueryProcessor.executeInternalWithPaging;
 import static org.apache.cassandra.net.Verb.MUTATION_REQ;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 public class BatchlogManager implements BatchlogManagerMBean
 {
@@ -213,7 +215,7 @@ public class BatchlogManager implements BatchlogManagerMBean
         }
         setRate(DatabaseDescriptor.getBatchlogReplayThrottleInKB());
 
-        UUID limitUuid = UUIDGen.maxTimeUUID(System.currentTimeMillis() - getBatchlogTimeout());
+        UUID limitUuid = UUIDGen.maxTimeUUID(currentTimeMillis() - getBatchlogTimeout());
         ColumnFamilyStore store = Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(SystemKeyspace.BATCHES);
         int pageSize = calculatePageSize(store);
         // There cannot be any live content where token(id) <= token(lastReplayedUuid) as every processed batch is
@@ -491,7 +493,7 @@ public class BatchlogManager implements BatchlogManagerMBean
 
             ReplicaPlan.ForTokenWrite replicaPlan = new ReplicaPlan.ForTokenWrite(keyspace, liveAndDown.replicationStrategy(),
                     ConsistencyLevel.ONE, liveRemoteOnly.pending(), liveRemoteOnly.all(), liveRemoteOnly.all(), liveRemoteOnly.all());
-            ReplayWriteResponseHandler<Mutation> handler = new ReplayWriteResponseHandler<>(replicaPlan, System.nanoTime());
+            ReplayWriteResponseHandler<Mutation> handler = new ReplayWriteResponseHandler<>(replicaPlan, nanoTime());
             Message<Mutation> message = Message.outWithFlag(MUTATION_REQ, mutation, MessageFlag.CALL_BACK_ON_FAILURE);
             for (Replica replica : liveRemoteOnly.all())
                 MessagingService.instance().sendWriteWithCallback(message, replica, handler, false);
diff --git a/src/java/org/apache/cassandra/cache/AutoSavingCache.java b/src/java/org/apache/cassandra/cache/AutoSavingCache.java
index 34f056a..d18baab 100644
--- a/src/java/org/apache/cassandra/cache/AutoSavingCache.java
+++ b/src/java/org/apache/cassandra/cache/AutoSavingCache.java
@@ -53,6 +53,8 @@ import org.apache.cassandra.utils.JVMStabilityInspector;
 import org.apache.cassandra.utils.Pair;
 import org.apache.cassandra.utils.UUIDGen;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 public class AutoSavingCache<K extends CacheKey, V> extends InstrumentingCache<K, V>
 {
     public interface IStreamFactory
@@ -158,7 +160,7 @@ public class AutoSavingCache<K extends CacheKey, V> extends InstrumentingCache<K
     public ListenableFuture<Integer> loadSavedAsync()
     {
         final ListeningExecutorService es = MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());
-        final long start = System.nanoTime();
+        final long start = nanoTime();
 
         ListenableFuture<Integer> cacheLoad = es.submit(new Callable<Integer>()
         {
@@ -175,7 +177,7 @@ public class AutoSavingCache<K extends CacheKey, V> extends InstrumentingCache<K
             {
                 if (size() > 0)
                     logger.info("Completed loading ({} ms; {} keys) {} cache",
-                            TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start),
+                            TimeUnit.NANOSECONDS.toMillis(nanoTime() - start),
                             CacheService.instance.keyCache.size(),
                             cacheType);
                 es.shutdown();
@@ -188,7 +190,7 @@ public class AutoSavingCache<K extends CacheKey, V> extends InstrumentingCache<K
     public int loadSaved()
     {
         int count = 0;
-        long start = System.nanoTime();
+        long start = nanoTime();
 
         // modern format, allows both key and value (so key cache load can be purely sequential)
         File dataPath = getCacheDataPath(CURRENT_VERSION);
@@ -276,7 +278,7 @@ public class AutoSavingCache<K extends CacheKey, V> extends InstrumentingCache<K
         }
         if (logger.isTraceEnabled())
             logger.trace("completed reading ({} ms; {} keys) saved cache {}",
-                    TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start), count, dataPath);
+                    TimeUnit.NANOSECONDS.toMillis(nanoTime() - start), count, dataPath);
         return count;
     }
 
@@ -347,7 +349,7 @@ public class AutoSavingCache<K extends CacheKey, V> extends InstrumentingCache<K
                 return;
             }
 
-            long start = System.nanoTime();
+            long start = nanoTime();
 
             Pair<File, File> cacheFilePaths = tempCacheFiles();
             try (WrappedDataOutputStreamPlus writer = new WrappedDataOutputStreamPlus(streamFactory.getOutputStream(cacheFilePaths.left, cacheFilePaths.right)))
@@ -401,7 +403,7 @@ public class AutoSavingCache<K extends CacheKey, V> extends InstrumentingCache<K
             if (!cacheFilePaths.right.renameTo(crcFile))
                 logger.error("Unable to rename {} to {}", cacheFilePaths.right, crcFile);
 
-            logger.info("Saved {} ({} items) in {} ms", cacheType, keysWritten, TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start));
+            logger.info("Saved {} ({} items) in {} ms", cacheType, keysWritten, TimeUnit.NANOSECONDS.toMillis(nanoTime() - start));
         }
 
         private Pair<File, File> tempCacheFiles()
diff --git a/src/java/org/apache/cassandra/concurrent/SEPWorker.java b/src/java/org/apache/cassandra/concurrent/SEPWorker.java
index de5185d..efb1884 100644
--- a/src/java/org/apache/cassandra/concurrent/SEPWorker.java
+++ b/src/java/org/apache/cassandra/concurrent/SEPWorker.java
@@ -30,6 +30,7 @@ import org.apache.cassandra.utils.JVMStabilityInspector;
 
 import static org.apache.cassandra.concurrent.SEPExecutor.TakeTaskPermitResult.RETURNED_WORK_PERMIT;
 import static org.apache.cassandra.concurrent.SEPExecutor.TakeTaskPermitResult.TOOK_PERMIT;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 final class SEPWorker extends AtomicReference<SEPWorker.Work> implements Runnable
 {
@@ -257,7 +258,7 @@ final class SEPWorker extends AtomicReference<SEPWorker.Work> implements Runnabl
         sleep *= ThreadLocalRandom.current().nextDouble();
         sleep = Math.max(10000, sleep);
 
-        long start = System.nanoTime();
+        long start = nanoTime();
 
         // place ourselves in the spinning collection; if we clash with another thread just exit
         Long target = start + sleep;
@@ -269,7 +270,7 @@ final class SEPWorker extends AtomicReference<SEPWorker.Work> implements Runnabl
         pool.spinning.remove(target, this);
 
         // finish timing and grab spinningTime (before we finish timing so it is under rather than overestimated)
-        long end = System.nanoTime();
+        long end = nanoTime();
         long spin = end - start;
         long stopCheck = pool.stopCheck.addAndGet(spin);
         maybeStop(stopCheck, end);
diff --git a/src/java/org/apache/cassandra/concurrent/SharedExecutorPool.java b/src/java/org/apache/cassandra/concurrent/SharedExecutorPool.java
index 7a07cf4..bba8e84 100644
--- a/src/java/org/apache/cassandra/concurrent/SharedExecutorPool.java
+++ b/src/java/org/apache/cassandra/concurrent/SharedExecutorPool.java
@@ -28,6 +28,7 @@ import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.LockSupport;
 
 import static org.apache.cassandra.concurrent.SEPWorker.Work;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 /**
  * A pool of worker threads that are shared between all Executors created with it. Each executor is treated as a distinct
@@ -128,10 +129,10 @@ public class SharedExecutorPool
 
         terminateWorkers();
 
-        long until = System.nanoTime() + unit.toNanos(timeout);
+        long until = nanoTime() + unit.toNanos(timeout);
         for (SEPExecutor executor : executors)
         {
-            executor.shutdown.await(until - System.nanoTime(), TimeUnit.NANOSECONDS);
+            executor.shutdown.await(until - nanoTime(), TimeUnit.NANOSECONDS);
             if (!executor.isTerminated())
                 throw new TimeoutException(executor.name + " not terminated");
         }
diff --git a/src/java/org/apache/cassandra/cql3/Lists.java b/src/java/org/apache/cassandra/cql3/Lists.java
index 1d94d69..ece822d 100644
--- a/src/java/org/apache/cassandra/cql3/Lists.java
+++ b/src/java/org/apache/cassandra/cql3/Lists.java
@@ -18,6 +18,7 @@
 package org.apache.cassandra.cql3;
 
 import static org.apache.cassandra.cql3.Constants.UNSET_VALUE;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
 
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
@@ -536,7 +537,7 @@ public abstract class Lists
             {
                 if (remainingInBatch == 0)
                 {
-                    long time = PrecisionTime.REFERENCE_TIME - (System.currentTimeMillis() - PrecisionTime.REFERENCE_TIME);
+                    long time = PrecisionTime.REFERENCE_TIME - (currentTimeMillis() - PrecisionTime.REFERENCE_TIME);
                     remainingInBatch = Math.min(PrecisionTime.MAX_NANOS, i) + 1;
                     pt = PrecisionTime.getNext(time, remainingInBatch);
                 }
diff --git a/src/java/org/apache/cassandra/cql3/QueryProcessor.java b/src/java/org/apache/cassandra/cql3/QueryProcessor.java
index 87829ab..dc1b7a7 100644
--- a/src/java/org/apache/cassandra/cql3/QueryProcessor.java
+++ b/src/java/org/apache/cassandra/cql3/QueryProcessor.java
@@ -58,6 +58,7 @@ import org.apache.cassandra.transport.messages.ResultMessage;
 import org.apache.cassandra.utils.*;
 
 import static org.apache.cassandra.cql3.statements.RequestValidations.checkTrue;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 public class QueryProcessor implements QueryHandler
 {
@@ -274,7 +275,7 @@ public class QueryProcessor implements QueryHandler
         QueryState queryState = QueryState.forInternalCalls();
         QueryOptions options = QueryOptions.forInternalCalls(cl, values);
         CQLStatement statement = instance.parse(query, queryState, options);
-        ResultMessage result = instance.process(statement, queryState, options, System.nanoTime());
+        ResultMessage result = instance.process(statement, queryState, options, nanoTime());
         if (result instanceof ResultMessage.Rows)
             return UntypedResultSet.create(((ResultMessage.Rows)result).result);
         else
@@ -339,7 +340,7 @@ public class QueryProcessor implements QueryHandler
         try
         {
             Prepared prepared = prepareInternal(query);
-            ResultMessage result = prepared.statement.execute(state, makeInternalOptions(prepared.statement, values, cl), System.nanoTime());
+            ResultMessage result = prepared.statement.execute(state, makeInternalOptions(prepared.statement, values, cl), nanoTime());
             if (result instanceof ResultMessage.Rows)
                 return UntypedResultSet.create(((ResultMessage.Rows)result).result);
             else
diff --git a/src/java/org/apache/cassandra/cql3/UntypedResultSet.java b/src/java/org/apache/cassandra/cql3/UntypedResultSet.java
index f4ac99f..05ebda9 100644
--- a/src/java/org/apache/cassandra/cql3/UntypedResultSet.java
+++ b/src/java/org/apache/cassandra/cql3/UntypedResultSet.java
@@ -37,6 +37,8 @@ import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.utils.AbstractIterator;
 import org.apache.cassandra.utils.FBUtilities;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 /** a utility for doing internal cql-based queries */
 public abstract class UntypedResultSet implements Iterable<UntypedResultSet.Row>
 {
@@ -269,7 +271,7 @@ public abstract class UntypedResultSet implements Iterable<UntypedResultSet.Row>
                         if (pager.isExhausted())
                             return endOfData();
 
-                        try (PartitionIterator iter = pager.fetchPage(pageSize, cl, clientState, System.nanoTime()))
+                        try (PartitionIterator iter = pager.fetchPage(pageSize, cl, clientState, nanoTime()))
                         {
                             currentPage = select.process(iter, nowInSec).rows.iterator();
                         }
diff --git a/src/java/org/apache/cassandra/cql3/functions/UDAggregate.java b/src/java/org/apache/cassandra/cql3/functions/UDAggregate.java
index b201f09..92185d9 100644
--- a/src/java/org/apache/cassandra/cql3/functions/UDAggregate.java
+++ b/src/java/org/apache/cassandra/cql3/functions/UDAggregate.java
@@ -39,6 +39,7 @@ import org.apache.cassandra.transport.ProtocolVersion;
 
 import static com.google.common.collect.Iterables.any;
 import static com.google.common.collect.Iterables.transform;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 /**
  * Base class for user-defined-aggregates.
@@ -182,7 +183,7 @@ public class UDAggregate extends AbstractFunction implements AggregateFunction,
             {
                 maybeInit(protocolVersion);
 
-                long startTime = System.nanoTime();
+                long startTime = nanoTime();
                 stateFunctionCount++;
                 if (stateFunction instanceof UDFunction)
                 {
@@ -194,7 +195,7 @@ public class UDAggregate extends AbstractFunction implements AggregateFunction,
                 {
                     throw new UnsupportedOperationException("UDAs only support UDFs");
                 }
-                stateFunctionDuration += (System.nanoTime() - startTime) / 1000;
+                stateFunctionDuration += (nanoTime() - startTime) / 1000;
             }
 
             private void maybeInit(ProtocolVersion protocolVersion)
diff --git a/src/java/org/apache/cassandra/cql3/functions/UDFunction.java b/src/java/org/apache/cassandra/cql3/functions/UDFunction.java
index b62c18f..550905f 100644
--- a/src/java/org/apache/cassandra/cql3/functions/UDFunction.java
+++ b/src/java/org/apache/cassandra/cql3/functions/UDFunction.java
@@ -61,6 +61,7 @@ import org.apache.cassandra.utils.JVMStabilityInspector;
 
 import static com.google.common.collect.Iterables.any;
 import static com.google.common.collect.Iterables.transform;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 /**
  * Base class for User Defined Functions.
@@ -356,7 +357,7 @@ public abstract class UDFunction extends AbstractFunction implements ScalarFunct
         if (!isCallableWrtNullable(parameters))
             return null;
 
-        long tStart = System.nanoTime();
+        long tStart = nanoTime();
         parameters = makeEmptyParametersNull(parameters);
 
         try
@@ -366,7 +367,7 @@ public abstract class UDFunction extends AbstractFunction implements ScalarFunct
                                 ? executeAsync(protocolVersion, parameters)
                                 : executeUserDefined(protocolVersion, parameters);
 
-            Tracing.trace("Executed UDF {} in {}\u03bcs", name(), (System.nanoTime() - tStart) / 1000);
+            Tracing.trace("Executed UDF {} in {}\u03bcs", name(), (nanoTime() - tStart) / 1000);
             return result;
         }
         catch (InvalidRequestException e)
@@ -395,7 +396,7 @@ public abstract class UDFunction extends AbstractFunction implements ScalarFunct
         if (!calledOnNullInput && firstParam == null || !isCallableWrtNullable(parameters))
             return null;
 
-        long tStart = System.nanoTime();
+        long tStart = nanoTime();
         parameters = makeEmptyParametersNull(parameters);
 
         try
@@ -404,7 +405,7 @@ public abstract class UDFunction extends AbstractFunction implements ScalarFunct
             Object result = DatabaseDescriptor.enableUserDefinedFunctionsThreads()
                                 ? executeAggregateAsync(protocolVersion, firstParam, parameters)
                                 : executeAggregateUserDefined(protocolVersion, firstParam, parameters);
-            Tracing.trace("Executed UDF {} in {}\u03bcs", name(), (System.nanoTime() - tStart) / 1000);
+            Tracing.trace("Executed UDF {} in {}\u03bcs", name(), (nanoTime() - tStart) / 1000);
             return result;
         }
         catch (InvalidRequestException e)
diff --git a/src/java/org/apache/cassandra/cql3/statements/BatchStatement.java b/src/java/org/apache/cassandra/cql3/statements/BatchStatement.java
index 80bd437..054541c 100644
--- a/src/java/org/apache/cassandra/cql3/statements/BatchStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/BatchStatement.java
@@ -51,6 +51,7 @@ import org.apache.cassandra.utils.Pair;
 import static java.util.function.Predicate.isEqual;
 
 import static org.apache.cassandra.cql3.statements.RequestValidations.checkFalse;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 /**
  * A <code>BATCH</code> statement parsed from a CQL query.
@@ -550,7 +551,7 @@ public class BatchStatement implements CQLStatement
         if (hasConditions)
             return executeInternalWithConditions(batchOptions, queryState);
 
-        executeInternalWithoutCondition(queryState, batchOptions, System.nanoTime());
+        executeInternalWithoutCondition(queryState, batchOptions, nanoTime());
         return new ResultMessage.Void();
     }
 
diff --git a/src/java/org/apache/cassandra/cql3/statements/ModificationStatement.java b/src/java/org/apache/cassandra/cql3/statements/ModificationStatement.java
index 4ff9928..ca7d6d0 100644
--- a/src/java/org/apache/cassandra/cql3/statements/ModificationStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/ModificationStatement.java
@@ -62,6 +62,7 @@ import org.apache.cassandra.utils.UUIDGen;
 
 import static org.apache.cassandra.cql3.statements.RequestValidations.checkFalse;
 import static org.apache.cassandra.cql3.statements.RequestValidations.checkNull;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 /*
  * Abstract parent class of individual modifications, i.e. INSERT, UPDATE and DELETE.
@@ -622,7 +623,7 @@ public abstract class ModificationStatement implements CQLStatement
     {
         return hasConditions()
                ? executeInternalWithCondition(queryState, options)
-               : executeInternalWithoutCondition(queryState, options, System.nanoTime());
+               : executeInternalWithoutCondition(queryState, options, nanoTime());
     }
 
     public ResultMessage executeInternalWithoutCondition(QueryState queryState, QueryOptions options, long queryStartNanoTime)
diff --git a/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java b/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java
index 25499b2..956e50b 100644
--- a/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java
@@ -80,6 +80,7 @@ import static org.apache.cassandra.cql3.statements.RequestValidations.checkNotNu
 import static org.apache.cassandra.cql3.statements.RequestValidations.checkNull;
 import static org.apache.cassandra.cql3.statements.RequestValidations.checkTrue;
 import static org.apache.cassandra.utils.ByteBufferUtil.UNSET_BYTE_BUFFER;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 /**
  * Encapsulates a completely parsed SELECT query, including the target
@@ -436,7 +437,7 @@ public class SelectStatement implements CQLStatement
 
     public ResultMessage.Rows executeLocally(QueryState state, QueryOptions options) throws RequestExecutionException, RequestValidationException
     {
-        return executeInternal(state, options, options.getNowInSeconds(state), System.nanoTime());
+        return executeInternal(state, options, options.getNowInSeconds(state), nanoTime());
     }
 
     public ResultMessage.Rows executeInternal(QueryState state, QueryOptions options, int nowInSec, long queryStartNanoTime) throws RequestExecutionException, RequestValidationException
diff --git a/src/java/org/apache/cassandra/cql3/statements/UseStatement.java b/src/java/org/apache/cassandra/cql3/statements/UseStatement.java
index 3013d9f..ae6eeb0 100644
--- a/src/java/org/apache/cassandra/cql3/statements/UseStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/UseStatement.java
@@ -29,6 +29,8 @@ import org.apache.cassandra.service.QueryState;
 import org.apache.commons.lang3.builder.ToStringBuilder;
 import org.apache.commons.lang3.builder.ToStringStyle;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 public class UseStatement extends CQLStatement.Raw implements CQLStatement
 {
     private final String keyspace;
@@ -62,7 +64,7 @@ public class UseStatement extends CQLStatement.Raw implements CQLStatement
     {
         // In production, internal queries are exclusively on the system keyspace and 'use' is thus useless
         // but for some unit tests we need to set the keyspace (e.g. for tests with DROP INDEX)
-        return execute(state, options, System.nanoTime());
+        return execute(state, options, nanoTime());
     }
     
     @Override
diff --git a/src/java/org/apache/cassandra/db/ColumnFamilyStore.java b/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
index 11fb56b..497466a 100644
--- a/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
+++ b/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
@@ -96,6 +96,8 @@ import org.apache.cassandra.utils.memory.MemtableAllocator;
 
 import static java.util.concurrent.TimeUnit.NANOSECONDS;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.apache.cassandra.utils.Throwables.maybeFail;
 import static org.apache.cassandra.utils.Throwables.merge;
 
@@ -1056,7 +1058,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean
             if (logger.isTraceEnabled())
                 logger.trace("Flush task {}@{} starts executing, waiting on barrier", hashCode(), name);
 
-            long start = System.nanoTime();
+            long start = nanoTime();
 
             // mark writes older than the barrier as blocking progress, permitting them to exceed our memory limit
             // if they are stuck waiting on it, then wait for them all to complete
@@ -1064,7 +1066,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean
             writeBarrier.await();
 
             if (logger.isTraceEnabled())
-                logger.trace("Flush task for task {}@{} waited {} ms at the barrier", hashCode(), name, TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start));
+                logger.trace("Flush task for task {}@{} waited {} ms at the barrier", hashCode(), name, TimeUnit.NANOSECONDS.toMillis(nanoTime() - start));
 
             // mark all memtables as flushing, removing them from the live memtable list
             for (Memtable memtable : memtables)
@@ -1331,7 +1333,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean
     public void apply(PartitionUpdate update, UpdateTransaction indexer, OpOrder.Group opGroup, CommitLogPosition commitLogPosition)
 
     {
-        long start = System.nanoTime();
+        long start = nanoTime();
         try
         {
             Memtable mt = data.getMemtableFor(opGroup, commitLogPosition);
@@ -1342,7 +1344,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean
             if (metric.topWritePartitionSize.isEnabled()) // dont compute datasize if not needed
                 metric.topWritePartitionSize.addSample(key.getKey(), update.dataSize());
             StorageHook.instance.reportWrite(metadata.id, update);
-            metric.writeLatency.addNano(System.nanoTime() - start);
+            metric.writeLatency.addNano(nanoTime() - start);
             // CASSANDRA-11117 - certain resolution paths on memtable put can result in very
             // large time deltas, either through a variety of sentinel timestamps (used for empty values, ensuring
             // a minimal write, etc). This limits the time delta to the max value the histogram
@@ -1526,8 +1528,9 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean
         // skip snapshot creation during scrub, SEE JIRA 5891
         if(!disableSnapshot)
         {
-            Instant creationTime = Instant.now();
-            String snapshotName = "pre-scrub-" + creationTime.toEpochMilli();
+            long epochMilli = currentTimeMillis();
+            Instant creationTime = Instant.ofEpochMilli(epochMilli);
+            String snapshotName = "pre-scrub-" + epochMilli;
             snapshotWithoutFlush(snapshotName, creationTime);
         }
 
@@ -1732,9 +1735,9 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean
                 return new RefViewFragment(view.sstables, view.memtables, refs);
             if (failingSince <= 0)
             {
-                failingSince = System.nanoTime();
+                failingSince = nanoTime();
             }
-            else if (System.nanoTime() - failingSince > TimeUnit.MILLISECONDS.toNanos(100))
+            else if (nanoTime() - failingSince > TimeUnit.MILLISECONDS.toNanos(100))
             {
                 List<SSTableReader> released = new ArrayList<>();
                 for (SSTableReader reader : view.sstables)
@@ -1742,7 +1745,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean
                         released.add(reader);
                 NoSpamLogger.log(logger, NoSpamLogger.Level.WARN, 1, TimeUnit.SECONDS,
                                  "Spinning trying to capture readers {}, released: {}, ", view.sstables, released);
-                failingSince = System.nanoTime();
+                failingSince = nanoTime();
             }
         }
     }
@@ -2314,7 +2317,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean
             }
         }
 
-        long now = System.currentTimeMillis();
+        long now = currentTimeMillis();
         // make sure none of our sstables are somehow in the future (clock drift, perhaps)
         for (ColumnFamilyStore cfs : concatWithIndexes())
             for (SSTableReader sstable : cfs.getLiveSSTables())
@@ -2791,7 +2794,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean
     {
         double allDroppable = 0;
         long allColumns = 0;
-        int localTime = (int)(System.currentTimeMillis()/1000);
+        int localTime = (int)(currentTimeMillis() / 1000);
 
         for (SSTableReader sstable : getSSTables(SSTableSet.LIVE))
         {
diff --git a/src/java/org/apache/cassandra/db/CounterMutation.java b/src/java/org/apache/cassandra/db/CounterMutation.java
index fe1e46e..3ac1c5d 100644
--- a/src/java/org/apache/cassandra/db/CounterMutation.java
+++ b/src/java/org/apache/cassandra/db/CounterMutation.java
@@ -50,6 +50,7 @@ import static java.util.concurrent.TimeUnit.*;
 import static org.apache.cassandra.net.MessagingService.VERSION_30;
 import static org.apache.cassandra.net.MessagingService.VERSION_3014;
 import static org.apache.cassandra.net.MessagingService.VERSION_40;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 public class CounterMutation implements IMutation
 {
@@ -150,12 +151,12 @@ public class CounterMutation implements IMutation
 
     private void grabCounterLocks(Keyspace keyspace, List<Lock> locks) throws WriteTimeoutException
     {
-        long startTime = System.nanoTime();
+        long startTime = nanoTime();
 
         AbstractReplicationStrategy replicationStrategy = keyspace.getReplicationStrategy();
         for (Lock lock : LOCKS.bulkGet(getCounterLockKeys()))
         {
-            long timeout = getTimeout(NANOSECONDS) - (System.nanoTime() - startTime);
+            long timeout = getTimeout(NANOSECONDS) - (nanoTime() - startTime);
             try
             {
                 if (!lock.tryLock(timeout, NANOSECONDS))
diff --git a/src/java/org/apache/cassandra/db/CounterMutationVerbHandler.java b/src/java/org/apache/cassandra/db/CounterMutationVerbHandler.java
index a30ce66..e4c7669 100644
--- a/src/java/org/apache/cassandra/db/CounterMutationVerbHandler.java
+++ b/src/java/org/apache/cassandra/db/CounterMutationVerbHandler.java
@@ -26,6 +26,8 @@ import org.apache.cassandra.net.Message;
 import org.apache.cassandra.net.MessagingService;
 import org.apache.cassandra.service.StorageProxy;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 public class CounterMutationVerbHandler implements IVerbHandler<CounterMutation>
 {
     public static final CounterMutationVerbHandler instance = new CounterMutationVerbHandler();
@@ -34,7 +36,7 @@ public class CounterMutationVerbHandler implements IVerbHandler<CounterMutation>
 
     public void doVerb(final Message<CounterMutation> message)
     {
-        long queryStartNanoTime = System.nanoTime();
+        long queryStartNanoTime = nanoTime();
         final CounterMutation cm = message.payload;
         logger.trace("Applying forwarded {}", cm);
 
diff --git a/src/java/org/apache/cassandra/db/ExpirationDateOverflowHandling.java b/src/java/org/apache/cassandra/db/ExpirationDateOverflowHandling.java
index 81e3d1e..7f81b5c 100644
--- a/src/java/org/apache/cassandra/db/ExpirationDateOverflowHandling.java
+++ b/src/java/org/apache/cassandra/db/ExpirationDateOverflowHandling.java
@@ -32,6 +32,8 @@ import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.service.ClientWarn;
 import org.apache.cassandra.utils.NoSpamLogger;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 public class ExpirationDateOverflowHandling
 {
     private static final Logger logger = LoggerFactory.getLogger(ExpirationDateOverflowHandling.class);
@@ -75,7 +77,7 @@ public class ExpirationDateOverflowHandling
             return;
 
         // Check for localExpirationTime overflow (CASSANDRA-14092)
-        int nowInSecs = (int)(System.currentTimeMillis() / 1000);
+        int nowInSecs = (int)(currentTimeMillis() / 1000);
         if (ttl + nowInSecs < 0)
         {
             switch (policy)
diff --git a/src/java/org/apache/cassandra/db/Keyspace.java b/src/java/org/apache/cassandra/db/Keyspace.java
index e5cf741..08acf6b 100644
--- a/src/java/org/apache/cassandra/db/Keyspace.java
+++ b/src/java/org/apache/cassandra/db/Keyspace.java
@@ -75,6 +75,7 @@ import org.apache.cassandra.utils.concurrent.OpOrder;
 
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static java.util.concurrent.TimeUnit.NANOSECONDS;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
 import static org.apache.cassandra.utils.MonotonicClock.approxTime;
 
 /**
@@ -285,7 +286,7 @@ public class Keyspace
      */
     public static String getTimestampedSnapshotName(String clientSuppliedName)
     {
-        String snapshotName = Long.toString(System.currentTimeMillis());
+        String snapshotName = Long.toString(currentTimeMillis());
         if (clientSuppliedName != null && !clientSuppliedName.equals(""))
         {
             snapshotName = snapshotName + "-" + clientSuppliedName;
@@ -547,7 +548,7 @@ public class Keyspace
 
         if (requiresViewUpdate)
         {
-            mutation.viewLockAcquireStart.compareAndSet(0L, System.currentTimeMillis());
+            mutation.viewLockAcquireStart.compareAndSet(0L, currentTimeMillis());
 
             // the order of lock acquisition doesn't matter (from a deadlock perspective) because we only use tryLock()
             Collection<TableId> tableIds = mutation.getTableIds();
@@ -625,7 +626,7 @@ public class Keyspace
                 }
             }
 
-            long acquireTime = System.currentTimeMillis() - mutation.viewLockAcquireStart.get();
+            long acquireTime = currentTimeMillis() - mutation.viewLockAcquireStart.get();
             // Metrics are only collected for droppable write operations
             // Bulk non-droppable operations (e.g. commitlog replay, hint delivery) are not measured
             if (isDroppable)
@@ -669,7 +670,7 @@ public class Keyspace
                 cfs.getWriteHandler().write(upd, ctx, indexTransaction);
 
                 if (requiresViewUpdate)
-                    baseComplete.set(System.currentTimeMillis());
+                    baseComplete.set(currentTimeMillis());
             }
 
             if (future != null) {
diff --git a/src/java/org/apache/cassandra/db/Memtable.java b/src/java/org/apache/cassandra/db/Memtable.java
index 73c6416..6e23cec 100644
--- a/src/java/org/apache/cassandra/db/Memtable.java
+++ b/src/java/org/apache/cassandra/db/Memtable.java
@@ -75,6 +75,8 @@ import org.apache.cassandra.utils.memory.MemtablePool;
 import org.apache.cassandra.utils.memory.NativePool;
 import org.apache.cassandra.utils.memory.SlabPool;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 public class Memtable implements Comparable<Memtable>
 {
     private static final Logger logger = LoggerFactory.getLogger(Memtable.class);
@@ -137,7 +139,7 @@ public class Memtable implements Comparable<Memtable>
     // actually only store DecoratedKey.
     private final ConcurrentNavigableMap<PartitionPosition, AtomicBTreePartition> partitions = new ConcurrentSkipListMap<>();
     public final ColumnFamilyStore cfs;
-    private final long creationNano = System.nanoTime();
+    private final long creationNano = nanoTime();
 
     // The smallest timestamp for all partitions stored in this memtable
     private long minTimestamp = Long.MAX_VALUE;
@@ -261,7 +263,7 @@ public class Memtable implements Comparable<Memtable>
     public boolean isExpired()
     {
         int period = cfs.metadata().params.memtableFlushPeriodInMs;
-        return period > 0 && (System.nanoTime() - creationNano >= TimeUnit.MILLISECONDS.toNanos(period));
+        return period > 0 && (nanoTime() - creationNano >= TimeUnit.MILLISECONDS.toNanos(period));
     }
 
     /**
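
The isExpired() hunk above keeps the subtract-then-compare form
(nanoTime() - creationNano >= threshold), which stays meaningful even if the nanosecond
counter wraps, and the clock substitution makes the check testable without waiting out a
real flush period. A small sketch under those assumptions, with the nano source injected
as a plain LongSupplier standing in for Clock.Global.nanoTime:

import java.util.concurrent.TimeUnit;
import java.util.function.LongSupplier;

// Sketch of the expiry check above with the nano source injected, so a test can
// drive time deterministically instead of sleeping.
final class ExpirySketch
{
    private final LongSupplier nanos;
    private final long creationNano;

    ExpirySketch(LongSupplier nanos)
    {
        this.nanos = nanos;
        this.creationNano = nanos.getAsLong();
    }

    boolean isExpired(int flushPeriodInMs)
    {
        // subtract first, then compare: robust to nanoTime() wrapping
        return flushPeriodInMs > 0
               && nanos.getAsLong() - creationNano >= TimeUnit.MILLISECONDS.toNanos(flushPeriodInMs);
    }

    public static void main(String[] args)
    {
        long[] now = { 0L };
        ExpirySketch memtable = new ExpirySketch(() -> now[0]);

        now[0] = TimeUnit.MILLISECONDS.toNanos(5);
        System.out.println(memtable.isExpired(10));   // false: 5ms elapsed < 10ms period

        now[0] = TimeUnit.MILLISECONDS.toNanos(15);
        System.out.println(memtable.isExpired(10));   // true: 15ms elapsed >= 10ms period
    }
}
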
diff --git a/src/java/org/apache/cassandra/db/ReadCommand.java b/src/java/org/apache/cassandra/db/ReadCommand.java
index 4ea589a..fd4636e 100644
--- a/src/java/org/apache/cassandra/db/ReadCommand.java
+++ b/src/java/org/apache/cassandra/db/ReadCommand.java
@@ -72,6 +72,7 @@ import org.apache.cassandra.utils.ObjectSizes;
 
 import static com.google.common.collect.Iterables.any;
 import static com.google.common.collect.Iterables.filter;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.apache.cassandra.utils.MonotonicClock.approxTime;
 import static org.apache.cassandra.db.partitions.UnfilteredPartitionIterators.MergeListener.NOOP;
 
@@ -397,7 +398,7 @@ public abstract class ReadCommand extends AbstractReadQuery
                                   // iterators created inside the try as long as we do close the original resultIterator), or by closing the result.
     public UnfilteredPartitionIterator executeLocally(ReadExecutionController executionController)
     {
-        long startTimeNanos = System.nanoTime();
+        long startTimeNanos = nanoTime();
 
         COMMAND.set(this);
         try
@@ -568,7 +569,7 @@ public abstract class ReadCommand extends AbstractReadQuery
             @Override
             public void onClose()
             {
-                recordLatency(metric, System.nanoTime() - startTimeNanos);
+                recordLatency(metric, nanoTime() - startTimeNanos);
 
                 metric.tombstoneScannedHistogram.update(tombstones);
                 metric.liveScannedHistogram.update(liveRows);
diff --git a/src/java/org/apache/cassandra/db/RepairedDataInfo.java b/src/java/org/apache/cassandra/db/RepairedDataInfo.java
index f80b113..32a4061 100644
--- a/src/java/org/apache/cassandra/db/RepairedDataInfo.java
+++ b/src/java/org/apache/cassandra/db/RepairedDataInfo.java
@@ -36,6 +36,8 @@ import org.apache.cassandra.metrics.TableMetrics;
 import org.apache.cassandra.tracing.Tracing;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 @NotThreadSafe
 class RepairedDataInfo
 {
@@ -281,7 +283,7 @@ class RepairedDataInfo
                     return null;
 
                 long countBeforeOverreads = repairedCounter.counted();
-                long overreadStartTime = System.nanoTime();
+                long overreadStartTime = nanoTime();
                 if (currentPartition != null)
                     consumePartition(currentPartition, repairedCounter);
 
@@ -291,7 +293,7 @@ class RepairedDataInfo
 
                 // we're not actually providing any more rows, just consuming the repaired data
                 long rows = repairedCounter.counted() - countBeforeOverreads;
-                long nanos = System.nanoTime() - overreadStartTime;
+                long nanos = nanoTime() - overreadStartTime;
                 metrics.repairedDataTrackingOverreadRows.update(rows);
                 metrics.repairedDataTrackingOverreadTime.update(nanos, TimeUnit.NANOSECONDS);
                Tracing.trace("Read {} additional rows of repaired data for tracking in {}us", rows, TimeUnit.NANOSECONDS.toMicros(nanos));
diff --git a/src/java/org/apache/cassandra/db/SizeEstimatesRecorder.java b/src/java/org/apache/cassandra/db/SizeEstimatesRecorder.java
index fe38d64..c5e9653 100644
--- a/src/java/org/apache/cassandra/db/SizeEstimatesRecorder.java
+++ b/src/java/org/apache/cassandra/db/SizeEstimatesRecorder.java
@@ -37,6 +37,8 @@ import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.Pair;
 import org.apache.cassandra.utils.concurrent.Refs;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 /**
  * A very simplistic/crude partition count/size estimator.
  *
@@ -89,7 +91,7 @@ public class SizeEstimatesRecorder extends SchemaChangeListener implements Runna
             boolean rangesAreEqual = primaryRanges.equals(localPrimaryRanges);
             for (ColumnFamilyStore table : keyspace.getColumnFamilyStores())
             {
-                long start = System.nanoTime();
+                long start = nanoTime();
 
                // compute estimates for primary ranges for backwards compatibility
                 Map<Range<Token>, Pair<Long, Long>> estimates = computeSizeEstimates(table, primaryRanges);
@@ -103,7 +105,7 @@ public class SizeEstimatesRecorder extends SchemaChangeListener implements Runna
                 }
                 SystemKeyspace.updateTableEstimates(table.metadata.keyspace, table.metadata.name, SystemKeyspace.TABLE_ESTIMATES_TYPE_LOCAL_PRIMARY, estimates);
 
-                long passed = System.nanoTime() - start;
+                long passed = nanoTime() - start;
                 if (logger.isTraceEnabled())
                     logger.trace("Spent {} milliseconds on estimating {}.{} size",
                                  TimeUnit.NANOSECONDS.toMillis(passed),
diff --git a/src/java/org/apache/cassandra/db/SystemKeyspace.java b/src/java/org/apache/cassandra/db/SystemKeyspace.java
index e07101c..a418c6d 100644
--- a/src/java/org/apache/cassandra/db/SystemKeyspace.java
+++ b/src/java/org/apache/cassandra/db/SystemKeyspace.java
@@ -73,6 +73,8 @@ import static java.util.Collections.singletonMap;
 
 import static org.apache.cassandra.cql3.QueryProcessor.executeInternal;
 import static org.apache.cassandra.cql3.QueryProcessor.executeOnceInternal;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 public final class SystemKeyspace
 {
@@ -1008,13 +1010,13 @@ public final class SystemKeyspace
             // seconds-since-epoch isn't a foolproof new generation
             // (where foolproof is "guaranteed to be larger than the last one seen at this ip address"),
             // but it's as close as sanely possible
-            generation = (int) (System.currentTimeMillis() / 1000);
+            generation = (int) (currentTimeMillis() / 1000);
         }
         else
         {
            // Other nodes will ignore gossip messages about a node that has a lower generation than previously seen.
             final int storedGeneration = result.one().getInt("gossip_generation") + 1;
-            final int now = (int) (System.currentTimeMillis() / 1000);
+            final int now = (int) (currentTimeMillis() / 1000);
             if (storedGeneration >= now)
             {
                 logger.warn("Using stored Gossip Generation {} as it is greater than current system time {}.  See CASSANDRA-3654 if you experience problems",
@@ -1177,7 +1179,7 @@ public final class SystemKeyspace
     public static PaxosState loadPaxosState(DecoratedKey key, TableMetadata metadata, int nowInSec)
     {
         String req = "SELECT * FROM system.%s WHERE row_key = ? AND cf_id = ?";
-        UntypedResultSet results = QueryProcessor.executeInternalWithNow(nowInSec, System.nanoTime(), format(req, PAXOS), key.getKey(), metadata.id.asUUID());
+        UntypedResultSet results = QueryProcessor.executeInternalWithNow(nowInSec, nanoTime(), format(req, PAXOS), key.getKey(), metadata.id.asUUID());
         if (results.isEmpty())
             return new PaxosState(key, metadata);
         UntypedResultSet.Row row = results.one();
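
The gossip_generation hunk above derives a generation from seconds-since-epoch but, when a
stored generation is already at or past that value, keeps the stored generation plus one so
the number never goes backwards (other nodes ignore lower generations). A small sketch of
that selection with the millis source injected; names here are illustrative only:

import java.util.function.LongSupplier;

// Rough sketch of the generation selection shown above: prefer wall-clock seconds,
// but never hand out a generation lower than the one already stored.
final class GenerationSketch
{
    static int nextGeneration(Integer stored, LongSupplier millis)
    {
        int now = (int) (millis.getAsLong() / 1000);
        if (stored == null)
            return now;                      // no previous generation recorded
        int bumped = stored + 1;
        return bumped >= now ? bumped : now; // keep the generation monotonic across restarts
    }

    public static void main(String[] args)
    {
        LongSupplier fixed = () -> 1_600_000_000_000L; // a fixed wall-clock time in millis
        System.out.println(nextGeneration(null, fixed));          // 1600000000
        System.out.println(nextGeneration(1_600_000_500, fixed)); // 1600000501
    }
}
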
diff --git a/src/java/org/apache/cassandra/db/commitlog/AbstractCommitLogService.java b/src/java/org/apache/cassandra/db/commitlog/AbstractCommitLogService.java
index a65ef00..24838a9 100644
--- a/src/java/org/apache/cassandra/db/commitlog/AbstractCommitLogService.java
+++ b/src/java/org/apache/cassandra/db/commitlog/AbstractCommitLogService.java
@@ -33,6 +33,9 @@ import org.apache.cassandra.utils.MonotonicClock;
 import org.apache.cassandra.utils.NoSpamLogger;
 import org.apache.cassandra.utils.concurrent.WaitQueue;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 public abstract class AbstractCommitLogService
 {
     /**
@@ -45,7 +48,7 @@ public abstract class AbstractCommitLogService
     private volatile boolean shutdown = false;
 
     // all Allocations written before this time will be synced
-    protected volatile long lastSyncedAt = System.currentTimeMillis();
+    protected volatile long lastSyncedAt = currentTimeMillis();
 
     // counts of total written, and pending, log messages
     private final AtomicLong written = new AtomicLong(0);
@@ -292,7 +295,7 @@ public abstract class AbstractCommitLogService
      */
     public void syncBlocking()
     {
-        long requestTime = System.nanoTime();
+        long requestTime = nanoTime();
         requestExtraSync();
         awaitSyncAt(requestTime, null);
     }
diff --git a/src/java/org/apache/cassandra/db/commitlog/CommitLogSegment.java b/src/java/org/apache/cassandra/db/commitlog/CommitLogSegment.java
index 3e7e3ab..55461a3 100644
--- a/src/java/org/apache/cassandra/db/commitlog/CommitLogSegment.java
+++ b/src/java/org/apache/cassandra/db/commitlog/CommitLogSegment.java
@@ -48,6 +48,7 @@ import org.apache.cassandra.utils.IntegerInterval;
 import org.apache.cassandra.utils.concurrent.OpOrder;
 import org.apache.cassandra.utils.concurrent.WaitQueue;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
 import static org.apache.cassandra.utils.FBUtilities.updateChecksumInt;
 
 /*
@@ -78,7 +79,7 @@ public abstract class CommitLogSegment
             if (CommitLogDescriptor.isValid(file.getName()))
                 maxId = Math.max(CommitLogDescriptor.fromFileName(file.getName()).id, maxId);
         }
-        replayLimitId = idBase = Math.max(System.currentTimeMillis(), maxId + 1);
+        replayLimitId = idBase = Math.max(currentTimeMillis(), maxId + 1);
     }
 
     // The commit log entry overhead in bytes (int: length + int: head checksum + int: tail checksum)
diff --git a/src/java/org/apache/cassandra/db/commitlog/PeriodicCommitLogService.java b/src/java/org/apache/cassandra/db/commitlog/PeriodicCommitLogService.java
index e94c616..0644f32 100644
--- a/src/java/org/apache/cassandra/db/commitlog/PeriodicCommitLogService.java
+++ b/src/java/org/apache/cassandra/db/commitlog/PeriodicCommitLogService.java
@@ -21,6 +21,8 @@ import java.util.concurrent.TimeUnit;
 
 import org.apache.cassandra.config.DatabaseDescriptor;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 class PeriodicCommitLogService extends AbstractCommitLogService
 {
     private static final long blockWhenSyncLagsNanos = TimeUnit.MILLISECONDS.toNanos(DatabaseDescriptor.getPeriodicCommitLogSyncBlock());
@@ -33,7 +35,7 @@ class PeriodicCommitLogService extends AbstractCommitLogService
 
     protected void maybeWaitForSync(CommitLogSegment.Allocation alloc)
     {
-        long expectedSyncTime = System.nanoTime() - blockWhenSyncLagsNanos;
+        long expectedSyncTime = nanoTime() - blockWhenSyncLagsNanos;
         if (lastSyncedAt < expectedSyncTime)
         {
             pending.incrementAndGet();
diff --git a/src/java/org/apache/cassandra/db/compaction/AbstractCompactionStrategy.java b/src/java/org/apache/cassandra/db/compaction/AbstractCompactionStrategy.java
index 0b37c22..c0c4a95 100644
--- a/src/java/org/apache/cassandra/db/compaction/AbstractCompactionStrategy.java
+++ b/src/java/org/apache/cassandra/db/compaction/AbstractCompactionStrategy.java
@@ -44,6 +44,9 @@ import org.apache.cassandra.io.sstable.metadata.MetadataCollector;
 import org.apache.cassandra.io.sstable.metadata.StatsMetadata;
 import org.apache.cassandra.schema.CompactionParams;
 
+import static org.apache.cassandra.io.sstable.Component.DATA;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 /**
  * Pluggable compaction strategy determines how SSTables get merged.
  *
@@ -388,7 +391,7 @@ public abstract class AbstractCompactionStrategy
        // since we use estimations to calculate, there is a chance that compaction will not actually drop tombstones.
        // if that happens we will end up in an infinite compaction loop, so first we check whether enough time has
        // elapsed since the SSTable was created.
-        if (System.currentTimeMillis() < sstable.getCreationTimeFor(Component.DATA) + tombstoneCompactionInterval * 1000)
+        if (currentTimeMillis() < sstable.getCreationTimeFor(DATA) + tombstoneCompactionInterval * 1000)
            return false;
 
         double droppableRatio = sstable.getEstimatedDroppableTombstoneRatio(gcBefore);
diff --git a/src/java/org/apache/cassandra/db/compaction/CompactionLogger.java b/src/java/org/apache/cassandra/db/compaction/CompactionLogger.java
index f473be7..a533f95 100644
--- a/src/java/org/apache/cassandra/db/compaction/CompactionLogger.java
+++ b/src/java/org/apache/cassandra/db/compaction/CompactionLogger.java
@@ -45,6 +45,8 @@ import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.utils.NoSpamLogger;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 public class CompactionLogger
 {
     public interface Strategy
@@ -220,7 +222,7 @@ public class CompactionLogger
             return;
         node.put("keyspace", cfs.keyspace.getName());
         node.put("table", cfs.getTableName());
-        node.put("time", System.currentTimeMillis());
+        node.put("time", currentTimeMillis());
     }
 
     private JsonNode startStrategies()
diff --git a/src/java/org/apache/cassandra/db/compaction/CompactionManager.java b/src/java/org/apache/cassandra/db/compaction/CompactionManager.java
index c3e4f59..f099fb6 100644
--- a/src/java/org/apache/cassandra/db/compaction/CompactionManager.java
+++ b/src/java/org/apache/cassandra/db/compaction/CompactionManager.java
@@ -86,6 +86,7 @@ import org.apache.cassandra.utils.concurrent.Refs;
 import static java.util.Collections.singleton;
 import static org.apache.cassandra.service.ActiveRepairService.NO_PENDING_REPAIR;
 import static org.apache.cassandra.service.ActiveRepairService.UNREPAIRED_SSTABLE;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 /**
  * <p>
@@ -1267,7 +1268,7 @@ public class CompactionManager implements CompactionManagerMBean
             return;
         }
 
-        long start = System.nanoTime();
+        long start = nanoTime();
 
         long totalkeysWritten = 0;
 
@@ -1325,7 +1326,7 @@ public class CompactionManager implements CompactionManagerMBean
         if (!finished.isEmpty())
         {
             String format = "Cleaned up to %s.  %s to %s (~%d%% of original) for %,d keys.  Time: %,dms.";
-            long dTime = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
+            long dTime = TimeUnit.NANOSECONDS.toMillis(nanoTime() - start);
             long startsize = sstable.onDiskLength();
             long endsize = 0;
             for (SSTableReader newSstable : finished)
@@ -2247,10 +2248,10 @@ public class CompactionManager implements CompactionManagerMBean
 
     public void waitForCessation(Iterable<ColumnFamilyStore> cfss, Predicate<SSTableReader> sstablePredicate)
     {
-        long start = System.nanoTime();
+        long start = nanoTime();
         long delay = TimeUnit.MINUTES.toNanos(1);
 
-        while (System.nanoTime() - start < delay)
+        while (nanoTime() - start < delay)
         {
             if (CompactionManager.instance.isCompacting(cfss, sstablePredicate))
                 Uninterruptibles.sleepUninterruptibly(1, TimeUnit.MILLISECONDS);
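
waitForCessation above is a bounded polling loop: it sleeps in 1 ms steps while compactions
are still running and gives up after a one-minute delay. With the clock routed through
Clock.Global, such a loop can be driven in a test without real sleeps; a generic sketch of
the same shape, with both the clock and the sleep injected (names are illustrative):

import java.util.function.BooleanSupplier;
import java.util.function.LongSupplier;

// Sketch of a bounded polling wait: poll a condition, backing off briefly,
// but give up once a fixed delay has elapsed on the injected clock.
final class BoundedWaitSketch
{
    interface Sleeper { void sleepMillis(long millis); }

    static boolean waitUntil(BooleanSupplier done, long delayNanos, LongSupplier nanos, Sleeper sleeper)
    {
        long start = nanos.getAsLong();
        while (nanos.getAsLong() - start < delayNanos)
        {
            if (done.getAsBoolean())
                return true;        // condition reached before the deadline
            sleeper.sleepMillis(1); // back off briefly, as the loop above does
        }
        return false;               // gave up after the delay
    }

    public static void main(String[] args)
    {
        long[] fakeNanos = { 0L };
        Sleeper fakeSleep = ms -> fakeNanos[0] += ms * 1_000_000L; // each fake sleep advances the fake clock
        int[] polls = { 0 };
        boolean ok = waitUntil(() -> ++polls[0] >= 5, 10_000_000L, () -> fakeNanos[0], fakeSleep);
        System.out.println(ok + " after " + polls[0] + " polls"); // true after 5 polls
    }
}
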
diff --git a/src/java/org/apache/cassandra/db/compaction/CompactionTask.java b/src/java/org/apache/cassandra/db/compaction/CompactionTask.java
index 19a0698..9f654e2 100644
--- a/src/java/org/apache/cassandra/db/compaction/CompactionTask.java
+++ b/src/java/org/apache/cassandra/db/compaction/CompactionTask.java
@@ -47,6 +47,9 @@ import org.apache.cassandra.service.ActiveRepairService;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.concurrent.Refs;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 public class CompactionTask extends AbstractCompactionTask
 {
     protected static final Logger logger = LoggerFactory.getLogger(CompactionTask.class);
@@ -117,11 +120,11 @@ public class CompactionTask extends AbstractCompactionTask
 
         if (DatabaseDescriptor.isSnapshotBeforeCompaction())
         {
-            Instant creationTime = Instant.now();
-            cfs.snapshotWithoutFlush(creationTime.toEpochMilli() + "-compact-" + cfs.name, creationTime);
+            long epochMilli = currentTimeMillis();
+            Instant creationTime = Instant.ofEpochMilli(epochMilli);
+            cfs.snapshotWithoutFlush(epochMilli + "-compact-" + cfs.name, creationTime);
         }
 
-
         try (CompactionController controller = getCompactionController(transaction.originals()))
         {
 
@@ -155,8 +158,8 @@ public class CompactionTask extends AbstractCompactionTask
             logger.info("Compacting ({}) {}", taskId, ssTableLoggerMsg);
 
             RateLimiter limiter = CompactionManager.instance.getRateLimiter();
-            long start = System.nanoTime();
-            long startTime = System.currentTimeMillis();
+            long start = nanoTime();
+            long startTime = currentTimeMillis();
             long totalKeysWritten = 0;
             long estimatedKeys = 0;
             long inputSizeBytes;
@@ -206,10 +209,10 @@ public class CompactionTask extends AbstractCompactionTask
 
                         lastBytesScanned = bytesScanned;
 
-                        if (System.nanoTime() - lastCheckObsoletion > TimeUnit.MINUTES.toNanos(1L))
+                        if (nanoTime() - lastCheckObsoletion > TimeUnit.MINUTES.toNanos(1L))
                         {
                             controller.maybeRefreshOverlaps();
-                            lastCheckObsoletion = System.nanoTime();
+                            lastCheckObsoletion = nanoTime();
                         }
                     }
 
@@ -232,7 +235,7 @@ public class CompactionTask extends AbstractCompactionTask
             {
                 // log a bunch of statistics about the result and save to system table compaction_history
 
-                long durationInNano = System.nanoTime() - start;
+                long durationInNano = nanoTime() - start;
                 long dTime = TimeUnit.NANOSECONDS.toMillis(durationInNano);
                 long startsize = inputSizeBytes;
                 long endsize = SSTableReader.getTotalBytes(newSStables);
@@ -267,7 +270,7 @@ public class CompactionTask extends AbstractCompactionTask
                     logger.trace("CF Total Bytes Compacted: {}", FBUtilities.prettyPrintMemory(CompactionTask.addToTotalBytesCompacted(endsize)));
                     logger.trace("Actual #keys: {}, Estimated #keys:{}, Err%: {}", totalKeysWritten, estimatedKeys, ((double)(totalKeysWritten - estimatedKeys)/totalKeysWritten));
                 }
-                cfs.getCompactionStrategyManager().compactionLogger.compaction(startTime, transaction.originals(), System.currentTimeMillis(), newSStables);
+                cfs.getCompactionStrategyManager().compactionLogger.compaction(startTime, transaction.originals(), currentTimeMillis(), newSStables);
 
                 // update the metrics
                 cfs.metric.compactionBytesWritten.inc(endsize);
@@ -298,7 +301,7 @@ public class CompactionTask extends AbstractCompactionTask
             mergeSummary.append(String.format("%d:%d, ", rows, count));
             mergedRows.put(rows, count);
         }
-        SystemKeyspace.updateCompactionHistory(keyspaceName, columnFamilyName, System.currentTimeMillis(), startSize, endSize, mergedRows);
+        SystemKeyspace.updateCompactionHistory(keyspaceName, columnFamilyName, currentTimeMillis(), startSize, endSize, mergedRows);
         return mergeSummary.toString();
     }
 
diff --git a/src/java/org/apache/cassandra/db/compaction/DateTieredCompactionStrategy.java b/src/java/org/apache/cassandra/db/compaction/DateTieredCompactionStrategy.java
index ab2b6ae..40f744b 100644
--- a/src/java/org/apache/cassandra/db/compaction/DateTieredCompactionStrategy.java
+++ b/src/java/org/apache/cassandra/db/compaction/DateTieredCompactionStrategy.java
@@ -38,6 +38,7 @@ import org.apache.cassandra.schema.CompactionParams;
 import org.apache.cassandra.utils.Pair;
 
 import static com.google.common.collect.Iterables.filter;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
 
 /**
  * @deprecated in favour of {@link TimeWindowCompactionStrategy}
@@ -116,11 +117,11 @@ public class DateTieredCompactionStrategy extends AbstractCompactionStrategy
 
         Set<SSTableReader> expired = Collections.emptySet();
         // we only check for expired sstables every 10 minutes (by default) due to it being an expensive operation
-        if (System.currentTimeMillis() - lastExpiredCheck > options.expiredSSTableCheckFrequency)
+        if (currentTimeMillis() - lastExpiredCheck > options.expiredSSTableCheckFrequency)
         {
             // Find fully expired SSTables. Those will be included no matter what.
             expired = CompactionController.getFullyExpiredSSTables(cfs, uncompacting, cfs.getOverlappingLiveSSTables(uncompacting), gcBefore);
-            lastExpiredCheck = System.currentTimeMillis();
+            lastExpiredCheck = currentTimeMillis();
         }
         Set<SSTableReader> candidates = Sets.newHashSet(filterSuspectSSTables(uncompacting));
 
diff --git a/src/java/org/apache/cassandra/db/compaction/LeveledGenerations.java b/src/java/org/apache/cassandra/db/compaction/LeveledGenerations.java
index 2cc1b1a..ff231e7 100644
--- a/src/java/org/apache/cassandra/db/compaction/LeveledGenerations.java
+++ b/src/java/org/apache/cassandra/db/compaction/LeveledGenerations.java
@@ -42,6 +42,8 @@ import org.apache.cassandra.config.Config;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.utils.FBUtilities;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 /**
  * Handles the leveled manifest generations
  *
@@ -68,7 +70,7 @@ class LeveledGenerations
      */
     private final Map<SSTableReader, SSTableReader> allSSTables = new HashMap<>();
     private final Set<SSTableReader> l0 = new HashSet<>();
-    private static long lastOverlapCheck = System.nanoTime();
+    private static long lastOverlapCheck = nanoTime();
     // note that since l0 is broken out, levels[0] represents L1:
     private final TreeSet<SSTableReader> [] levels = new TreeSet[MAX_LEVEL_COUNT - 1];
 
@@ -310,10 +312,10 @@ class LeveledGenerations
      */
     private void maybeVerifyLevels()
     {
-        if (!strictLCSChecksTest || System.nanoTime() - lastOverlapCheck <= TimeUnit.NANOSECONDS.convert(5, TimeUnit.SECONDS))
+        if (!strictLCSChecksTest || nanoTime() - lastOverlapCheck <= TimeUnit.NANOSECONDS.convert(5, TimeUnit.SECONDS))
             return;
         logger.info("LCS verifying levels");
-        lastOverlapCheck = System.nanoTime();
+        lastOverlapCheck = nanoTime();
         for (int i = 1; i < levelCount(); i++)
         {
             SSTableReader prev = null;
diff --git a/src/java/org/apache/cassandra/db/compaction/TimeWindowCompactionStrategy.java b/src/java/org/apache/cassandra/db/compaction/TimeWindowCompactionStrategy.java
index 968def3..d3b3021 100644
--- a/src/java/org/apache/cassandra/db/compaction/TimeWindowCompactionStrategy.java
+++ b/src/java/org/apache/cassandra/db/compaction/TimeWindowCompactionStrategy.java
@@ -45,6 +45,7 @@ import org.apache.cassandra.schema.CompactionParams;
 import org.apache.cassandra.utils.Pair;
 
 import static com.google.common.collect.Iterables.filter;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
 
 public class TimeWindowCompactionStrategy extends AbstractCompactionStrategy
 {
@@ -117,12 +118,12 @@ public class TimeWindowCompactionStrategy extends AbstractCompactionStrategy
         // Find fully expired SSTables. Those will be included no matter what.
         Set<SSTableReader> expired = Collections.emptySet();
 
-        if (System.currentTimeMillis() - lastExpiredCheck > options.expiredSSTableCheckFrequency)
+        if (currentTimeMillis() - lastExpiredCheck > options.expiredSSTableCheckFrequency)
         {
             logger.debug("TWCS expired check sufficiently far in the past, checking for fully expired SSTables");
             expired = CompactionController.getFullyExpiredSSTables(cfs, uncompacting, options.ignoreOverlaps ? Collections.emptySet() : cfs.getOverlappingLiveSSTables(uncompacting),
                                                                    gcBefore, options.ignoreOverlaps);
-            lastExpiredCheck = System.currentTimeMillis();
+            lastExpiredCheck = currentTimeMillis();
         }
         else
         {
diff --git a/src/java/org/apache/cassandra/db/lifecycle/LogFile.java b/src/java/org/apache/cassandra/db/lifecycle/LogFile.java
index a91af73..1fc371f 100644
--- a/src/java/org/apache/cassandra/db/lifecycle/LogFile.java
+++ b/src/java/org/apache/cassandra/db/lifecycle/LogFile.java
@@ -41,6 +41,7 @@ import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.io.sstable.format.big.BigFormat;
 import org.apache.cassandra.utils.Throwables;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
 import static org.apache.cassandra.utils.Throwables.merge;
 
 /**
@@ -274,12 +275,12 @@ final class LogFile implements AutoCloseable
 
     void commit()
     {
-        addRecord(LogRecord.makeCommit(System.currentTimeMillis()));
+        addRecord(LogRecord.makeCommit(currentTimeMillis()));
     }
 
     void abort()
     {
-        addRecord(LogRecord.makeAbort(System.currentTimeMillis()));
+        addRecord(LogRecord.makeAbort(currentTimeMillis()));
     }
 
     private boolean isLastRecordValidWithType(Type type)
diff --git a/src/java/org/apache/cassandra/db/marshal/TemporalType.java b/src/java/org/apache/cassandra/db/marshal/TemporalType.java
index 4e2ac5a..945dae0 100644
--- a/src/java/org/apache/cassandra/db/marshal/TemporalType.java
+++ b/src/java/org/apache/cassandra/db/marshal/TemporalType.java
@@ -21,6 +21,8 @@ import java.nio.ByteBuffer;
 
 import org.apache.cassandra.cql3.Duration;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 /**
  * Base type for temporal types (timestamp, date ...).
  *
@@ -38,7 +40,7 @@ public abstract class TemporalType<T> extends AbstractType<T>
      */
     public ByteBuffer now()
     {
-        return fromTimeInMillis(System.currentTimeMillis());
+        return fromTimeInMillis(currentTimeMillis());
     }
 
     /**
diff --git a/src/java/org/apache/cassandra/db/partitions/AtomicBTreePartition.java b/src/java/org/apache/cassandra/db/partitions/AtomicBTreePartition.java
index 801d9e2..c5e9fe4 100644
--- a/src/java/org/apache/cassandra/db/partitions/AtomicBTreePartition.java
+++ b/src/java/org/apache/cassandra/db/partitions/AtomicBTreePartition.java
@@ -39,6 +39,8 @@ import org.apache.cassandra.utils.concurrent.OpOrder;
 import org.apache.cassandra.utils.memory.HeapAllocator;
 import org.apache.cassandra.utils.memory.MemtableAllocator;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 /**
  * A thread-safe and atomic Partition implementation.
  *
@@ -305,7 +307,7 @@ public final class AtomicBTreePartition extends AbstractBTreePartition
             while (TRACKER_PESSIMISTIC_LOCKING != (oldTrackerValue = wasteTracker))
             {
                 // Note this time value has an arbitrary offset, but is a constant rate 32 bit counter (that may wrap)
-                int time = (int) (System.nanoTime() >>> CLOCK_SHIFT);
+                int time = (int) (nanoTime() >>> CLOCK_SHIFT);
                 int delta = oldTrackerValue - time;
                 if (oldTrackerValue == TRACKER_NEVER_WASTED || delta >= 0 || delta < -EXCESS_WASTE_OFFSET)
                     delta = -EXCESS_WASTE_OFFSET;
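
The wasteTracker hunk above relies on a coarse, constant-rate 32-bit counter derived by
shifting nanoTime() down, with deltas computed by subtraction so they remain meaningful
when the counter wraps. The sketch below only illustrates that counter; CLOCK_SHIFT's real
value is not shown in this hunk (17 here, roughly 131 microsecond ticks, is purely an
assumption), and the surrounding waste-tracking logic is not reproduced.

import java.util.function.LongSupplier;

// Sketch of a wrapping coarse tick counter; compare ticks via subtraction only.
final class CoarseTickSketch
{
    static final int CLOCK_SHIFT = 17; // illustrative value, not Cassandra's

    static int tick(LongSupplier nanos)
    {
        return (int) (nanos.getAsLong() >>> CLOCK_SHIFT);
    }

    public static void main(String[] args)
    {
        // pick two instants that straddle a 32-bit wrap of the shifted counter
        long wrapPoint = 1L << (32 + CLOCK_SHIFT);
        int before = tick(() -> wrapPoint - (50L << CLOCK_SHIFT));
        int after  = tick(() -> wrapPoint + (50L << CLOCK_SHIFT));
        System.out.println(after - before); // 100 ticks elapsed, despite the wrap
    }
}
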
diff --git a/src/java/org/apache/cassandra/db/repair/PendingAntiCompaction.java b/src/java/org/apache/cassandra/db/repair/PendingAntiCompaction.java
index e0ee68d..0d89282 100644
--- a/src/java/org/apache/cassandra/db/repair/PendingAntiCompaction.java
+++ b/src/java/org/apache/cassandra/db/repair/PendingAntiCompaction.java
@@ -57,6 +57,7 @@ import org.apache.cassandra.utils.concurrent.Refs;
 
 import static org.apache.cassandra.service.ActiveRepairService.NO_PENDING_REPAIR;
 import static org.apache.cassandra.service.ActiveRepairService.UNREPAIRED_SSTABLE;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
 
 /**
  * Performs an anti compaction on a set of tables and token ranges, isolating the unrepaired sstables
@@ -218,7 +219,7 @@ public class PendingAntiCompaction
             logger.debug("acquiring sstables for pending anti compaction on session {}", sessionID);
             // try to modify after cancelling running compactions. This will attempt to cancel in flight compactions including the given sstables for
             // up to a minute, after which point, null will be returned
-            long start = System.currentTimeMillis();
+            long start = currentTimeMillis();
             long delay = TimeUnit.SECONDS.toMillis(acquireRetrySeconds);
            // Note that it is `predicate` that throws SSTableAcquisitionException if it finds a conflicting sstable,
            // and we only retry when runWithCompactionsDisabled throws while using the predicate, not when acquireTuple does.
@@ -238,10 +239,10 @@ public class PendingAntiCompaction
                                 sessionID,
                                 e.getMessage(),
                                 acquireSleepMillis,
-                                TimeUnit.SECONDS.convert(delay + start - System.currentTimeMillis(), TimeUnit.MILLISECONDS));
+                                TimeUnit.SECONDS.convert(delay + start - currentTimeMillis(), TimeUnit.MILLISECONDS));
                     Uninterruptibles.sleepUninterruptibly(acquireSleepMillis, TimeUnit.MILLISECONDS);
 
-                    if (System.currentTimeMillis() - start > delay)
+                    if (currentTimeMillis() - start > delay)
                         logger.warn("{} Timed out waiting to acquire sstables", sessionID, e);
 
                 }
@@ -250,7 +251,7 @@ public class PendingAntiCompaction
                     logger.error("Got exception disabling compactions for session {}", sessionID, t);
                     throw t;
                 }
-            } while (System.currentTimeMillis() - start < delay);
+            } while (currentTimeMillis() - start < delay);
             return null;
         }
     }
diff --git a/src/java/org/apache/cassandra/db/view/TableViews.java b/src/java/org/apache/cassandra/db/view/TableViews.java
index cc58dc1..3afd128 100644
--- a/src/java/org/apache/cassandra/db/view/TableViews.java
+++ b/src/java/org/apache/cassandra/db/view/TableViews.java
@@ -41,6 +41,8 @@ import org.apache.cassandra.service.StorageProxy;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.btree.BTreeSet;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 
 /**
  * Groups all the views for a given table.
@@ -146,13 +148,13 @@ public class TableViews extends AbstractCollection<View>
 
         // Read modified rows
         int nowInSec = FBUtilities.nowInSeconds();
-        long queryStartNanoTime = System.nanoTime();
+        long queryStartNanoTime = nanoTime();
         SinglePartitionReadCommand command = readExistingRowsCommand(update, views, nowInSec);
         if (command == null)
             return;
 
         ColumnFamilyStore cfs = Keyspace.openAndGetStore(update.metadata());
-        long start = System.nanoTime();
+        long start = nanoTime();
         Collection<Mutation> mutations;
         try (ReadExecutionController orderGroup = command.executionController();
              UnfilteredRowIterator existings = UnfilteredPartitionIterators.getOnlyElement(command.executeLocally(orderGroup), command);
@@ -160,7 +162,7 @@ public class TableViews extends AbstractCollection<View>
         {
             mutations = Iterators.getOnlyElement(generateViewUpdates(views, updates, existings, nowInSec, false));
         }
-        Keyspace.openAndGetStore(update.metadata()).metric.viewReadTime.update(System.nanoTime() - start, TimeUnit.NANOSECONDS);
+        Keyspace.openAndGetStore(update.metadata()).metric.viewReadTime.update(nanoTime() - start, TimeUnit.NANOSECONDS);
 
         if (!mutations.isEmpty())
             StorageProxy.mutateMV(update.partitionKey().getKey(), mutations, writeCommitLog, baseComplete, queryStartNanoTime);
diff --git a/src/java/org/apache/cassandra/db/view/ViewBuilderTask.java b/src/java/org/apache/cassandra/db/view/ViewBuilderTask.java
index c84c697..3c9c037 100644
--- a/src/java/org/apache/cassandra/db/view/ViewBuilderTask.java
+++ b/src/java/org/apache/cassandra/db/view/ViewBuilderTask.java
@@ -63,6 +63,8 @@ import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.UUIDGen;
 import org.apache.cassandra.utils.concurrent.Refs;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 public class ViewBuilderTask extends CompactionInfo.Holder implements Callable<Long>
 {
     private static final Logger logger = LoggerFactory.getLogger(ViewBuilderTask.class);
@@ -115,7 +117,7 @@ public class ViewBuilderTask extends CompactionInfo.Holder implements Callable<L
                                                        .generateViewUpdates(Collections.singleton(view), data, empty, nowInSec, true);
 
             AtomicLong noBase = new AtomicLong(Long.MAX_VALUE);
-            mutations.forEachRemaining(m -> StorageProxy.mutateMV(key.getKey(), m, true, noBase, System.nanoTime()));
+            mutations.forEachRemaining(m -> StorageProxy.mutateMV(key.getKey(), m, true, noBase, nanoTime()));
         }
     }
 
diff --git a/src/java/org/apache/cassandra/db/virtual/AbstractVirtualTable.java b/src/java/org/apache/cassandra/db/virtual/AbstractVirtualTable.java
index 96fb7f9..b20ac43 100644
--- a/src/java/org/apache/cassandra/db/virtual/AbstractVirtualTable.java
+++ b/src/java/org/apache/cassandra/db/virtual/AbstractVirtualTable.java
@@ -38,6 +38,8 @@ import org.apache.cassandra.dht.AbstractBounds;
 import org.apache.cassandra.exceptions.InvalidRequestException;
 import org.apache.cassandra.schema.TableMetadata;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 /**
  * An abstract virtual table implementation that builds the resultset on demand.
  */
@@ -81,7 +83,7 @@ public abstract class AbstractVirtualTable implements VirtualTable
         if (null == partition)
             return EmptyIterators.unfilteredPartition(metadata);
 
-        long now = System.currentTimeMillis();
+        long now = currentTimeMillis();
         UnfilteredRowIterator rowIterator = partition.toRowIterator(metadata(), clusteringIndexFilter, columnFilter, now);
         return new SingletonUnfilteredPartitionIterator(rowIterator);
     }
@@ -96,7 +98,7 @@ public abstract class AbstractVirtualTable implements VirtualTable
 
         Iterator<Partition> iterator = data.getPartitions(dataRange);
 
-        long now = System.currentTimeMillis();
+        long now = currentTimeMillis();
 
         return new AbstractUnfilteredPartitionIterator()
         {
diff --git a/src/java/org/apache/cassandra/diag/DiagnosticEvent.java b/src/java/org/apache/cassandra/diag/DiagnosticEvent.java
index 5de703b..229710c 100644
--- a/src/java/org/apache/cassandra/diag/DiagnosticEvent.java
+++ b/src/java/org/apache/cassandra/diag/DiagnosticEvent.java
@@ -20,6 +20,8 @@ package org.apache.cassandra.diag;
 import java.io.Serializable;
 import java.util.Map;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 /**
  * Base class for internally emitted events used for diagnostics and testing.
  */
@@ -28,7 +30,7 @@ public abstract class DiagnosticEvent
     /**
      * Event creation time.
      */
-    public final long timestamp = System.currentTimeMillis();
+    public final long timestamp = currentTimeMillis();
 
     /**
      * Name of allocating thread.
diff --git a/src/java/org/apache/cassandra/diag/LastEventIdBroadcaster.java b/src/java/org/apache/cassandra/diag/LastEventIdBroadcaster.java
index 8e991e6..e82b40c 100644
--- a/src/java/org/apache/cassandra/diag/LastEventIdBroadcaster.java
+++ b/src/java/org/apache/cassandra/diag/LastEventIdBroadcaster.java
@@ -33,6 +33,8 @@ import org.apache.cassandra.concurrent.ScheduledExecutors;
 import org.apache.cassandra.utils.MBeanWrapper;
 import org.apache.cassandra.utils.progress.jmx.JMXBroadcastExecutor;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 /**
  * Broadcaster for notifying JMX clients on newly available data. Periodically sends {@link Notification}s
  * containing a list of event types and greatest event IDs. Consumers may use this information to
@@ -100,7 +102,7 @@ final class LastEventIdBroadcaster extends NotificationBroadcasterSupport implem
     {
         // ensure monotonic properties of ids
         if (summary.compute(key, (k, v) -> v == null ? id : id.compareTo(v) > 0 ? id : v) == id) {
-            summary.put("last_updated_at", System.currentTimeMillis());
+            summary.put("last_updated_at", currentTimeMillis());
             scheduleBroadcast();
         }
     }
@@ -132,7 +134,7 @@ final class LastEventIdBroadcaster extends NotificationBroadcasterSupport implem
         Notification notification = new Notification("event_last_id_summary",
                                                      "LastEventIdBroadcaster",
                                                      notificationSerialNumber.incrementAndGet(),
-                                                     System.currentTimeMillis(),
+                                                     currentTimeMillis(),
                                                      "Event last IDs summary");
         notification.setUserData(summary);
         sendNotification(notification);
diff --git a/src/java/org/apache/cassandra/gms/EndpointState.java b/src/java/org/apache/cassandra/gms/EndpointState.java
index b8d5626..9a69de3 100644
--- a/src/java/org/apache/cassandra/gms/EndpointState.java
+++ b/src/java/org/apache/cassandra/gms/EndpointState.java
@@ -33,6 +33,8 @@ import org.apache.cassandra.io.util.DataInputPlus;
 import org.apache.cassandra.io.util.DataOutputPlus;
 import org.apache.cassandra.utils.CassandraVersion;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 /**
  * This abstraction represents both the HeartBeatState and the ApplicationState in an EndpointState
  * instance. Any state for a given endpoint can be retrieved from this instance.
@@ -66,7 +68,7 @@ public class EndpointState
     {
         hbState = initialHbState;
         applicationState = new AtomicReference<Map<ApplicationState, VersionedValue>>(new EnumMap<>(states));
-        updateTimestamp = System.nanoTime();
+        updateTimestamp = nanoTime();
         isAlive = true;
     }
 
@@ -173,7 +175,7 @@ public class EndpointState
 
     void updateTimestamp()
     {
-        updateTimestamp = System.nanoTime();
+        updateTimestamp = nanoTime();
     }
 
     public boolean isAlive()
diff --git a/src/java/org/apache/cassandra/gms/GossipDigestAckVerbHandler.java b/src/java/org/apache/cassandra/gms/GossipDigestAckVerbHandler.java
index 0242d83..5fbe7ce 100644
--- a/src/java/org/apache/cassandra/gms/GossipDigestAckVerbHandler.java
+++ b/src/java/org/apache/cassandra/gms/GossipDigestAckVerbHandler.java
@@ -29,6 +29,7 @@ import org.apache.cassandra.net.Message;
 import org.apache.cassandra.net.MessagingService;
 
 import static org.apache.cassandra.net.Verb.GOSSIP_DIGEST_ACK2;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 public class GossipDigestAckVerbHandler extends GossipVerbHandler<GossipDigestAck>
 {
@@ -68,7 +69,7 @@ public class GossipDigestAckVerbHandler extends GossipVerbHandler<GossipDigestAc
            // Ignore any GossipDigestAck messages that we handle before a regular GossipDigestSyn has been sent.
            // This will prevent Acks from leaking over from the shadow round that are not actually part of
            // the regular gossip conversation.
-            if ((System.nanoTime() - Gossiper.instance.firstSynSendAt) < 0 || Gossiper.instance.firstSynSendAt == 0)
+            if ((nanoTime() - Gossiper.instance.firstSynSendAt) < 0 || Gossiper.instance.firstSynSendAt == 0)
             {
                 if (logger.isTraceEnabled())
                     logger.trace("Ignoring unrequested GossipDigestAck from {}", from);
diff --git a/src/java/org/apache/cassandra/gms/Gossiper.java b/src/java/org/apache/cassandra/gms/Gossiper.java
index 6eb674f..f0114c0 100644
--- a/src/java/org/apache/cassandra/gms/Gossiper.java
+++ b/src/java/org/apache/cassandra/gms/Gossiper.java
@@ -68,6 +68,8 @@ import static org.apache.cassandra.config.CassandraRelevantProperties.GOSSIPER_Q
 import static org.apache.cassandra.net.NoPayload.noPayload;
 import static org.apache.cassandra.net.Verb.ECHO_REQ;
 import static org.apache.cassandra.net.Verb.GOSSIP_DIGEST_SYN;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 /**
  * This module is responsible for Gossiping information for the local endpoint. This abstraction
@@ -158,7 +160,7 @@ public class Gossiper implements IFailureDetectionEventListener, GossiperMBean
     // endpoint states as gathered during shadow round
     private final Map<InetAddressAndPort, EndpointState> endpointShadowStateMap = new ConcurrentHashMap<>();
 
-    private volatile long lastProcessedMessageAt = System.currentTimeMillis();
+    private volatile long lastProcessedMessageAt = currentTimeMillis();
 
     /**
      * This property is initially set to {@code true} which means that we have no information about the other nodes.
@@ -448,7 +450,7 @@ public class Gossiper implements IFailureDetectionEventListener, GossiperMBean
     {
         Long downtime = unreachableEndpoints.get(ep);
         if (downtime != null)
-            return TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - downtime);
+            return TimeUnit.NANOSECONDS.toMillis(nanoTime() - downtime);
         else
             return 0L;
     }
@@ -620,7 +622,7 @@ public class Gossiper implements IFailureDetectionEventListener, GossiperMBean
      */
     private void quarantineEndpoint(InetAddressAndPort endpoint)
     {
-        quarantineEndpoint(endpoint, System.currentTimeMillis());
+        quarantineEndpoint(endpoint, currentTimeMillis());
     }
 
     /**
@@ -643,7 +645,7 @@ public class Gossiper implements IFailureDetectionEventListener, GossiperMBean
     {
         // remember, quarantineEndpoint will effectively already add QUARANTINE_DELAY, so this is 2x
         logger.debug("");
-        quarantineEndpoint(endpoint, System.currentTimeMillis() + QUARANTINE_DELAY);
+        quarantineEndpoint(endpoint, currentTimeMillis() + QUARANTINE_DELAY);
         GossiperDiagnostics.replacementQuarantine(this, endpoint);
     }
 
@@ -777,7 +779,7 @@ public class Gossiper implements IFailureDetectionEventListener, GossiperMBean
 
             if (epState == null)
             {
-                epState = new EndpointState(new HeartBeatState((int) ((System.currentTimeMillis() + 60000) / 1000), 9999));
+                epState = new EndpointState(new HeartBeatState((int) ((currentTimeMillis() + 60000) / 1000), 9999));
             }
             else
             {
@@ -851,7 +853,7 @@ public class Gossiper implements IFailureDetectionEventListener, GossiperMBean
         if (logger.isTraceEnabled())
             logger.trace("Sending a GossipDigestSyn to {} ...", to);
         if (firstSynSendAt == 0)
-            firstSynSendAt = System.nanoTime();
+            firstSynSendAt = nanoTime();
         MessagingService.instance().send(message, to);
 
         boolean isSeed = seeds.contains(to);
@@ -985,8 +987,8 @@ public class Gossiper implements IFailureDetectionEventListener, GossiperMBean
         if (logger.isTraceEnabled())
             logger.trace("Performing status check ...");
 
-        long now = System.currentTimeMillis();
-        long nowNano = System.nanoTime();
+        long now = currentTimeMillis();
+        long nowNano = nanoTime();
 
         long pending = ((JMXEnabledThreadPoolExecutor) Stage.GOSSIP.executor()).metrics.pendingTasks.getValue();
         if (pending > 0 && lastProcessedMessageAt < now - 1000)
@@ -1307,7 +1309,7 @@ public class Gossiper implements IFailureDetectionEventListener, GossiperMBean
     {
         localState.markDead();
         liveEndpoints.remove(addr);
-        unreachableEndpoints.put(addr, System.nanoTime());
+        unreachableEndpoints.put(addr, nanoTime());
     }
 
     /**
@@ -1448,7 +1450,7 @@ public class Gossiper implements IFailureDetectionEventListener, GossiperMBean
             {
                 int localGeneration = localEpStatePtr.getHeartBeatState().getGeneration();
                 int remoteGeneration = remoteState.getHeartBeatState().getGeneration();
-                long localTime = System.currentTimeMillis()/1000;
+                long localTime = currentTimeMillis() / 1000;
                 if (logger.isTraceEnabled())
                     logger.trace("{} local generation {}, remote generation {}", ep, localGeneration, remoteGeneration);
 
@@ -2125,7 +2127,7 @@ public class Gossiper implements IFailureDetectionEventListener, GossiperMBean
 
     public static long computeExpireTime()
     {
-        return System.currentTimeMillis() + Gossiper.aVeryLongTime;
+        return currentTimeMillis() + aVeryLongTime;
     }
 
     @Nullable
diff --git a/src/java/org/apache/cassandra/hadoop/cql3/CqlInputFormat.java b/src/java/org/apache/cassandra/hadoop/cql3/CqlInputFormat.java
index 1ea8eda..57fb238 100644
--- a/src/java/org/apache/cassandra/hadoop/cql3/CqlInputFormat.java
+++ b/src/java/org/apache/cassandra/hadoop/cql3/CqlInputFormat.java
@@ -54,7 +54,7 @@ import org.apache.cassandra.dht.*;
 import org.apache.cassandra.hadoop.*;
 import org.apache.cassandra.utils.*;
 
-import static java.util.stream.Collectors.toMap;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 /**
  * Hadoop InputFormat allowing map/reduce against Cassandra rows within one ColumnFamily.
@@ -242,7 +242,7 @@ public class CqlInputFormat extends org.apache.hadoop.mapreduce.InputFormat<Long
         }
 
         assert splits.size() > 0;
-        Collections.shuffle(splits, new Random(System.nanoTime()));
+        Collections.shuffle(splits, new Random(nanoTime()));
         return splits;
     }
 
diff --git a/src/java/org/apache/cassandra/hints/Hint.java b/src/java/org/apache/cassandra/hints/Hint.java
index b6459a1..11ac515 100644
--- a/src/java/org/apache/cassandra/hints/Hint.java
+++ b/src/java/org/apache/cassandra/hints/Hint.java
@@ -39,6 +39,7 @@ import org.assertj.core.util.VisibleForTesting;
 
 import static org.apache.cassandra.db.TypeSizes.sizeof;
 import static org.apache.cassandra.db.TypeSizes.sizeofUnsignedVInt;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
 
 /**
  * Encapsulates the hinted mutation, its creation time, and the gc grace seconds param for each table involved.
@@ -135,7 +136,7 @@ public final class Hint
      */
     public boolean isLive()
     {
-        return isLive(creationTime, System.currentTimeMillis(), ttl());
+        return isLive(creationTime, currentTimeMillis(), ttl());
     }
 
     static boolean isLive(long creationTime, long now, int hintTTL)
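
The Hint change above only routes the convenience isLive() through the mockable clock; the
pure overload that takes `now` as a parameter stays untouched and remains trivially
testable. A generic sketch of that split follows; the expiry arithmetic below is invented
for illustration and is not Hint's actual rule.

import java.util.concurrent.TimeUnit;
import java.util.function.LongSupplier;

// Sketch: instance convenience method delegates to a pure, clock-free helper.
final class LivenessSketch
{
    private final long creationTimeMillis;
    private final int ttlSeconds;
    private final LongSupplier millis;

    LivenessSketch(long creationTimeMillis, int ttlSeconds, LongSupplier millis)
    {
        this.creationTimeMillis = creationTimeMillis;
        this.ttlSeconds = ttlSeconds;
        this.millis = millis;
    }

    boolean isLive()
    {
        return isLive(creationTimeMillis, millis.getAsLong(), ttlSeconds);
    }

    // pure helper: unit-testable with any `now`, no clock mocking needed
    static boolean isLive(long creationTime, long now, int ttlSeconds)
    {
        return now < creationTime + TimeUnit.SECONDS.toMillis(ttlSeconds);
    }

    public static void main(String[] args)
    {
        LivenessSketch hint = new LivenessSketch(1_000L, 10, () -> 6_000L);
        System.out.println(hint.isLive());               // true: 5s into a 10s TTL
        System.out.println(isLive(1_000L, 21_000L, 10)); // false: 20s past creation
    }
}
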
diff --git a/src/java/org/apache/cassandra/hints/HintsCleanupTrigger.java b/src/java/org/apache/cassandra/hints/HintsCleanupTrigger.java
index 67fb3e5..4d10711 100644
--- a/src/java/org/apache/cassandra/hints/HintsCleanupTrigger.java
+++ b/src/java/org/apache/cassandra/hints/HintsCleanupTrigger.java
@@ -25,6 +25,8 @@ import org.apache.cassandra.concurrent.ScheduledExecutors;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.service.StorageService;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 /**
  * Delete the expired orphaned hints files.
  * An orphaned file is one whose host ID has no associated endpoint.
@@ -62,7 +64,7 @@ final class HintsCleanupTrigger implements Runnable
 
         // Interrupt the dispatch if any. At this step, it is certain that the hintsStore is orphaned.
         dispatchExecutor.interruptDispatch(hintsStore.hostId);
-        Runnable cleanup = () -> hintsStore.deleteExpiredHints(System.currentTimeMillis());
+        Runnable cleanup = () -> hintsStore.deleteExpiredHints(currentTimeMillis());
         ScheduledExecutors.optionalTasks.execute(cleanup);
     }
 }
diff --git a/src/java/org/apache/cassandra/hints/HintsReader.java b/src/java/org/apache/cassandra/hints/HintsReader.java
index 9a5f75a..7514fd4 100644
--- a/src/java/org/apache/cassandra/hints/HintsReader.java
+++ b/src/java/org/apache/cassandra/hints/HintsReader.java
@@ -36,6 +36,8 @@ import org.apache.cassandra.io.FSReadError;
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.utils.AbstractIterator;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 /**
  * A paged non-compressed hints reader that provides two iterators:
  * - a 'raw' ByteBuffer iterator that doesn't deserialize the hints, but returns the pre-encoded hints verbatim
@@ -164,7 +166,7 @@ class HintsReader implements AutoCloseable, Iterable<HintsReader.Page>
     final class HintsIterator extends AbstractIterator<Hint>
     {
         private final InputPosition offset;
-        private final long now = System.currentTimeMillis();
+        private final long now = currentTimeMillis();
 
         HintsIterator(InputPosition offset)
         {
@@ -270,7 +272,7 @@ class HintsReader implements AutoCloseable, Iterable<HintsReader.Page>
     final class BuffersIterator extends AbstractIterator<ByteBuffer>
     {
         private final InputPosition offset;
-        private final long now = System.currentTimeMillis();
+        private final long now = currentTimeMillis();
 
         BuffersIterator(InputPosition offset)
         {
diff --git a/src/java/org/apache/cassandra/hints/HintsStore.java b/src/java/org/apache/cassandra/hints/HintsStore.java
index 76c13d1..1e72f8b 100644
--- a/src/java/org/apache/cassandra/hints/HintsStore.java
+++ b/src/java/org/apache/cassandra/hints/HintsStore.java
@@ -38,6 +38,8 @@ import org.apache.cassandra.schema.Schema;
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.utils.SyncUtil;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 /**
  * Encapsulates the state of a peer's hints: the queue of hints files for dispatch, and the current writer (if any).
  *
@@ -247,7 +249,7 @@ final class HintsStore
 
     private HintsWriter openWriter()
     {
-        lastUsedTimestamp = Math.max(System.currentTimeMillis(), lastUsedTimestamp + 1);
+        lastUsedTimestamp = Math.max(currentTimeMillis(), lastUsedTimestamp + 1);
         HintsDescriptor descriptor = new HintsDescriptor(hostId, lastUsedTimestamp, writerParams);
 
         try
diff --git a/src/java/org/apache/cassandra/index/sasi/disk/PerSSTableIndexWriter.java b/src/java/org/apache/cassandra/index/sasi/disk/PerSSTableIndexWriter.java
index 0af4ba2..1f01a9c 100644
--- a/src/java/org/apache/cassandra/index/sasi/disk/PerSSTableIndexWriter.java
+++ b/src/java/org/apache/cassandra/index/sasi/disk/PerSSTableIndexWriter.java
@@ -52,6 +52,8 @@ import com.google.common.util.concurrent.Uninterruptibles;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 public class PerSSTableIndexWriter implements SSTableFlushObserver
 {
     private static final Logger logger = LoggerFactory.getLogger(PerSSTableIndexWriter.class);
@@ -251,7 +253,7 @@ public class PerSSTableIndexWriter implements SSTableFlushObserver
             final String segmentFile = filename(isFinal);
 
             return () -> {
-                long start = System.nanoTime();
+                long start = nanoTime();
 
                 try
                 {
@@ -266,7 +268,7 @@ public class PerSSTableIndexWriter implements SSTableFlushObserver
                 finally
                 {
                     if (!isFinal)
-                        logger.info("Flushed index segment {}, took {} ms.", segmentFile, TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start));
+                        logger.info("Flushed index segment {}, took {} ms.", segmentFile, TimeUnit.NANOSECONDS.toMillis(nanoTime() - start));
                 }
             };
         }
@@ -276,7 +278,7 @@ public class PerSSTableIndexWriter implements SSTableFlushObserver
             logger.info("Scheduling index flush to {}", outputFile);
 
             getExecutor().submit((Runnable) () -> {
-                long start1 = System.nanoTime();
+                long start1 = nanoTime();
 
                 OnDiskIndex[] parts = new OnDiskIndex[segments.size() + 1];
 
@@ -324,7 +326,7 @@ public class PerSSTableIndexWriter implements SSTableFlushObserver
                 }
                 finally
                 {
-                    logger.info("Index flush to {} took {} ms.", outputFile, TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start1));
+                    logger.info("Index flush to {} took {} ms.", outputFile, TimeUnit.NANOSECONDS.toMillis(nanoTime() - start1));
 
                     for (int segment = 0; segment < segmentNumber; segment++)
                     {
diff --git a/src/java/org/apache/cassandra/index/sasi/plan/QueryController.java b/src/java/org/apache/cassandra/index/sasi/plan/QueryController.java
index db16c52..60538e1 100644
--- a/src/java/org/apache/cassandra/index/sasi/plan/QueryController.java
+++ b/src/java/org/apache/cassandra/index/sasi/plan/QueryController.java
@@ -49,6 +49,8 @@ import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.utils.Pair;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 public class QueryController
 {
     private final long executionQuota;
@@ -65,7 +67,7 @@ public class QueryController
         this.command = command;
         this.range = command.dataRange();
         this.executionQuota = TimeUnit.MILLISECONDS.toNanos(timeQuotaMs);
-        this.executionStart = System.nanoTime();
+        this.executionStart = nanoTime();
     }
 
     public TableMetadata metadata()
@@ -154,7 +156,7 @@ public class QueryController
 
     public void checkpoint()
     {
-	long executionTime = (System.nanoTime() - executionStart);
+        long executionTime = (nanoTime() - executionStart);
 
         if (executionTime >= executionQuota)
             throw new TimeQuotaExceededException(
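
QueryController converts its time quota to nanoseconds once and then compares it against nanoTime() deltas in checkpoint(); differences of nanoTime() readings are the right primitive for elapsed time, and routing the reads through the global clock makes the quota testable without real waiting. A small self-contained sketch of the same checkpoint pattern, assuming a generic LongSupplier in place of the static clock import:

import java.util.concurrent.TimeUnit;
import java.util.function.LongSupplier;

// Throws once the elapsed time since construction exceeds the quota.
final class ExecutionQuota
{
    private final LongSupplier nanoClock;  // e.g. a mockable nanosecond clock
    private final long quotaNanos;
    private final long startNanos;

    ExecutionQuota(long quotaMillis, LongSupplier nanoClock)
    {
        this.nanoClock = nanoClock;
        this.quotaNanos = TimeUnit.MILLISECONDS.toNanos(quotaMillis);
        this.startNanos = nanoClock.getAsLong();
    }

    void checkpoint()
    {
        long elapsedNanos = nanoClock.getAsLong() - startNanos;
        if (elapsedNanos >= quotaNanos)
            throw new IllegalStateException("time quota exceeded after " + TimeUnit.NANOSECONDS.toMillis(elapsedNanos) + " ms");
    }
}
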
diff --git a/src/java/org/apache/cassandra/io/sstable/CQLSSTableWriter.java b/src/java/org/apache/cassandra/io/sstable/CQLSSTableWriter.java
index 0ac189c..fa8a60f 100644
--- a/src/java/org/apache/cassandra/io/sstable/CQLSSTableWriter.java
+++ b/src/java/org/apache/cassandra/io/sstable/CQLSSTableWriter.java
@@ -54,6 +54,8 @@ import org.apache.cassandra.service.ClientState;
 import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 /**
  * Utility to write SSTables.
  * <p>
@@ -239,7 +241,7 @@ public class CQLSSTableWriter implements Closeable
         List<ByteBuffer> keys = insert.buildPartitionKeyNames(options);
         SortedSet<Clustering<?>> clusterings = insert.createClustering(options);
 
-        long now = System.currentTimeMillis();
+        long now = currentTimeMillis();
         // Note that we ask indexes to not validate values (the last 'false' arg below) because that triggers a 'Keyspace.open'
         // and that forces a lot of initialization that we don't want.
         UpdateParameters params = new UpdateParameters(insert.metadata,
diff --git a/src/java/org/apache/cassandra/io/sstable/format/SSTableReaderBuilder.java b/src/java/org/apache/cassandra/io/sstable/format/SSTableReaderBuilder.java
index 8fe1def..e8266f8 100644
--- a/src/java/org/apache/cassandra/io/sstable/format/SSTableReaderBuilder.java
+++ b/src/java/org/apache/cassandra/io/sstable/format/SSTableReaderBuilder.java
@@ -47,6 +47,10 @@ import java.nio.file.Paths;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
 
+import static org.apache.cassandra.io.sstable.format.SSTableReader.OpenReason.NORMAL;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 public abstract class SSTableReaderBuilder
 {
     private static final Logger logger = LoggerFactory.getLogger(SSTableReaderBuilder.class);
@@ -270,7 +274,7 @@ public abstract class SSTableReaderBuilder
                         StatsMetadata statsMetadata,
                         SerializationHeader header)
         {
-            super(descriptor, metadataRef, System.currentTimeMillis(), components, statsMetadata, SSTableReader.OpenReason.NORMAL, header);
+            super(descriptor, metadataRef, currentTimeMillis(), components, statsMetadata, NORMAL, header);
         }
 
         @Override
@@ -338,7 +342,7 @@ public abstract class SSTableReaderBuilder
                        StatsMetadata statsMetadata,
                        SerializationHeader header)
         {
-            super(descriptor, metadataRef, System.currentTimeMillis(), components, statsMetadata, SSTableReader.OpenReason.NORMAL, header);
+            super(descriptor, metadataRef, currentTimeMillis(), components, statsMetadata, NORMAL, header);
             this.validationMetadata = validationMetadata;
             this.isOffline = isOffline;
         }
@@ -353,9 +357,9 @@ public abstract class SSTableReaderBuilder
             try
             {
                 // load index and filter
-                long start = System.nanoTime();
+                long start = nanoTime();
                 load(validationMetadata, isOffline, components, DatabaseDescriptor.getDiskOptimizationStrategy(), statsMetadata);
-                logger.trace("INDEX LOAD TIME for {}: {} ms.", descriptor, TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start));
+                logger.trace("INDEX LOAD TIME for {}: {} ms.", descriptor, TimeUnit.NANOSECONDS.toMillis(nanoTime() - start));
             }
             catch (IOException t)
             {
diff --git a/src/java/org/apache/cassandra/io/sstable/format/big/BigTableWriter.java b/src/java/org/apache/cassandra/io/sstable/format/big/BigTableWriter.java
index 4607d99..281a11d 100644
--- a/src/java/org/apache/cassandra/io/sstable/format/big/BigTableWriter.java
+++ b/src/java/org/apache/cassandra/io/sstable/format/big/BigTableWriter.java
@@ -50,6 +50,8 @@ import org.apache.cassandra.schema.TableMetadataRef;
 import org.apache.cassandra.utils.*;
 import org.apache.cassandra.utils.concurrent.Transactional;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 public class BigTableWriter extends SSTableWriter
 {
     private static final Logger logger = LoggerFactory.getLogger(BigTableWriter.class);
@@ -384,7 +386,7 @@ public class BigTableWriter extends SSTableWriter
     private SSTableReader openFinal(SSTableReader.OpenReason openReason)
     {
         if (maxDataAge < 0)
-            maxDataAge = System.currentTimeMillis();
+            maxDataAge = currentTimeMillis();
 
         StatsMetadata stats = statsMetadata();
         // finalize in-memory state for the reader
diff --git a/src/java/org/apache/cassandra/locator/TokenMetadata.java b/src/java/org/apache/cassandra/locator/TokenMetadata.java
index 390413a..f811a4f 100644
--- a/src/java/org/apache/cassandra/locator/TokenMetadata.java
+++ b/src/java/org/apache/cassandra/locator/TokenMetadata.java
@@ -48,6 +48,7 @@ import org.apache.cassandra.utils.Pair;
 import org.apache.cassandra.utils.SortedBiMultiValMap;
 
 import static org.apache.cassandra.config.CassandraRelevantProperties.LINE_SEPARATOR;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
 
 public class TokenMetadata
 {
@@ -855,7 +856,7 @@ public class TokenMetadata
     public void calculatePendingRanges(AbstractReplicationStrategy strategy, String keyspaceName)
     {
         // avoid race between both branches - do not use a lock here as this will block any other unrelated operations!
-        long startedAt = System.currentTimeMillis();
+        long startedAt = currentTimeMillis();
         synchronized (pendingRanges)
         {
             TokenMetadataDiagnostics.pendingRangeCalculationStarted(this, keyspaceName);
@@ -899,7 +900,7 @@ public class TokenMetadata
             if (logger.isDebugEnabled())
                 logger.debug("Starting pending range calculation for {}", keyspaceName);
 
-            long took = System.currentTimeMillis() - startedAt;
+            long took = currentTimeMillis() - startedAt;
 
             if (logger.isDebugEnabled())
                 logger.debug("Pending range calculation for {} completed (took: {}ms)", keyspaceName, took);
diff --git a/src/java/org/apache/cassandra/metrics/TableMetrics.java b/src/java/org/apache/cassandra/metrics/TableMetrics.java
index 6b7193c..426f34d 100644
--- a/src/java/org/apache/cassandra/metrics/TableMetrics.java
+++ b/src/java/org/apache/cassandra/metrics/TableMetrics.java
@@ -18,6 +18,7 @@
 package org.apache.cassandra.metrics;
 
 import static org.apache.cassandra.metrics.CassandraMetricsRegistry.Metrics;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 import java.nio.ByteBuffer;
 import java.util.*;
@@ -1291,12 +1292,12 @@ public class TableMetrics
             private Context(Timer [] all)
             {
                 this.all = all;
-                start = System.nanoTime();
+                start = nanoTime();
             }
 
             public void close()
             {
-                long duration = System.nanoTime() - start;
+                long duration = nanoTime() - start;
                 for (Timer t : all)
                     t.update(duration, TimeUnit.NANOSECONDS);
             }
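
The Timer Context above samples nanoTime() in its constructor and records the delta in close(), so any operation wrapped in try-with-resources gets timed with no bookkeeping at the call site. A stand-alone sketch of that shape, with the metric sink reduced to a LongConsumer rather than a set of codahale Timers (illustrative, not the TableMetrics API):

import java.util.function.LongConsumer;
import java.util.function.LongSupplier;

// Times a block of code via try-with-resources and reports the duration in nanoseconds.
final class TimingContext implements AutoCloseable
{
    private final LongSupplier nanoClock;       // e.g. a mockable nanosecond clock
    private final LongConsumer onDurationNanos; // metric sink
    private final long startNanos;

    TimingContext(LongSupplier nanoClock, LongConsumer onDurationNanos)
    {
        this.nanoClock = nanoClock;
        this.onDurationNanos = onDurationNanos;
        this.startNanos = nanoClock.getAsLong();
    }

    @Override
    public void close()
    {
        onDurationNanos.accept(nanoClock.getAsLong() - startNanos);
    }
}

// Usage: the duration is reported when the try block exits, normally or exceptionally.
// try (TimingContext t = new TimingContext(System::nanoTime, d -> System.out.println(d + " ns")))
// {
//     doWork();
// }
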
diff --git a/src/java/org/apache/cassandra/net/AsyncPromise.java b/src/java/org/apache/cassandra/net/AsyncPromise.java
index 36bc304..d8bb24f 100644
--- a/src/java/org/apache/cassandra/net/AsyncPromise.java
+++ b/src/java/org/apache/cassandra/net/AsyncPromise.java
@@ -36,6 +36,7 @@ import io.netty.util.internal.ThrowableUtil;
 import org.apache.cassandra.utils.concurrent.WaitQueue;
 
 import static java.util.concurrent.atomic.AtomicReferenceFieldUpdater.*;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 /**
  * Netty's DefaultPromise uses a mutex to coordinate notifiers AND waiters between the eventLoop and the other threads.
@@ -406,7 +407,7 @@ public class AsyncPromise<V> implements Promise<V>
     public boolean await(long timeout, TimeUnit unit) throws InterruptedException
     {
         return await(unit.toNanos(timeout),
-                     (signal, nanos) -> signal.awaitUntil(nanos + System.nanoTime()));
+                     (signal, nanos) -> signal.awaitUntil(nanos + nanoTime()));
     }
 
     public boolean await(long timeoutMillis) throws InterruptedException
@@ -417,7 +418,7 @@ public class AsyncPromise<V> implements Promise<V>
     public boolean awaitUninterruptibly(long timeout, TimeUnit unit)
     {
         return await(unit.toNanos(timeout),
-                     (signal, nanos) -> signal.awaitUntilUninterruptibly(nanos + System.nanoTime()));
+                     (signal, nanos) -> signal.awaitUntilUninterruptibly(nanos + nanoTime()));
     }
 
     public boolean awaitUninterruptibly(long timeoutMillis)
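
AsyncPromise turns the caller's relative timeout into an absolute deadline (nanos + nanoTime()) before waiting, so a wait that wakes early can resume against the same deadline instead of restarting the full timeout. A minimal sketch of that deadline idiom, assuming a plain Lock/Condition in place of Cassandra's WaitQueue signals:

import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

// Waits for completion against an absolute deadline, so early wake-ups never extend the timeout.
final class DeadlineWait
{
    private final Lock lock = new ReentrantLock();
    private final Condition signalled = lock.newCondition();
    private boolean done;

    boolean await(long timeout, TimeUnit unit) throws InterruptedException
    {
        long deadline = System.nanoTime() + unit.toNanos(timeout); // a mockable clock in Cassandra
        lock.lock();
        try
        {
            while (!done)
            {
                long remainingNanos = deadline - System.nanoTime();
                if (remainingNanos <= 0)
                    return false;
                signalled.awaitNanos(remainingNanos);
            }
            return true;
        }
        finally
        {
            lock.unlock();
        }
    }

    void signalDone()
    {
        lock.lock();
        try
        {
            done = true;
            signalled.signalAll();
        }
        finally
        {
            lock.unlock();
        }
    }
}
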
diff --git a/src/java/org/apache/cassandra/net/MessagingService.java b/src/java/org/apache/cassandra/net/MessagingService.java
index 4d712e8..747d740 100644
--- a/src/java/org/apache/cassandra/net/MessagingService.java
+++ b/src/java/org/apache/cassandra/net/MessagingService.java
@@ -44,6 +44,7 @@ import org.apache.cassandra.utils.FBUtilities;
 import static java.util.Collections.synchronizedList;
 import static java.util.concurrent.TimeUnit.MINUTES;
 import static org.apache.cassandra.concurrent.Stage.MUTATION;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.apache.cassandra.utils.Throwables.maybeFail;
 
 /**
@@ -449,7 +450,7 @@ public final class MessagingService extends MessagingServiceMBeanImpl
             for (OutboundConnections pool : channelManagers.values())
                 closing.add(pool.close(true));
 
-            long deadline = System.nanoTime() + units.toNanos(timeout);
+            long deadline = nanoTime() + units.toNanos(timeout);
             maybeFail(() -> new FutureCombiner(closing).get(timeout, units),
                       () -> {
                           List<ExecutorService> inboundExecutors = new ArrayList<>();
@@ -473,7 +474,7 @@ public final class MessagingService extends MessagingServiceMBeanImpl
             for (OutboundConnections pool : channelManagers.values())
                 closing.add(pool.close(false));
 
-            long deadline = System.nanoTime() + units.toNanos(timeout);
+            long deadline = nanoTime() + units.toNanos(timeout);
             maybeFail(() -> new FutureCombiner(closing).get(timeout, units),
                       () -> {
                           if (shutdownExecutors)
diff --git a/src/java/org/apache/cassandra/net/RequestCallbacks.java b/src/java/org/apache/cassandra/net/RequestCallbacks.java
index c102ee1..9adec9b 100644
--- a/src/java/org/apache/cassandra/net/RequestCallbacks.java
+++ b/src/java/org/apache/cassandra/net/RequestCallbacks.java
@@ -48,6 +48,7 @@ import static java.lang.String.format;
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static java.util.concurrent.TimeUnit.NANOSECONDS;
 import static org.apache.cassandra.concurrent.Stage.INTERNAL_RESPONSE;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.apache.cassandra.utils.MonotonicClock.preciseTime;
 
 /**
@@ -199,7 +200,7 @@ public class RequestCallbacks implements OutboundMessageCallbacks
     {
         if (!executor.isTerminated())
         {
-            long wait = deadlineNanos - System.nanoTime();
+            long wait = deadlineNanos - nanoTime();
             if (wait <= 0 || !executor.awaitTermination(wait, NANOSECONDS))
                 throw new TimeoutException();
         }
diff --git a/src/java/org/apache/cassandra/net/StartupClusterConnectivityChecker.java b/src/java/org/apache/cassandra/net/StartupClusterConnectivityChecker.java
index b901338..073db33 100644
--- a/src/java/org/apache/cassandra/net/StartupClusterConnectivityChecker.java
+++ b/src/java/org/apache/cassandra/net/StartupClusterConnectivityChecker.java
@@ -47,6 +47,7 @@ import org.apache.cassandra.utils.FBUtilities;
 import static org.apache.cassandra.net.Verb.PING_REQ;
 import static org.apache.cassandra.net.ConnectionType.LARGE_MESSAGES;
 import static org.apache.cassandra.net.ConnectionType.SMALL_MESSAGES;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 public class StartupClusterConnectivityChecker
 {
@@ -125,7 +126,7 @@ public class StartupClusterConnectivityChecker
                                    new CountDownLatch(Math.max(datacenterToPeers.get(datacenter).size() - 1, 0)));
         }
 
-        long startNanos = System.nanoTime();
+        long startNanos = nanoTime();
 
         // set up a listener to react to new nodes becoming alive (in gossip), and account for all the nodes that are already alive
         Set<InetAddressAndPort> alivePeers = Collections.newSetFromMap(new ConcurrentHashMap<>());
@@ -150,7 +151,7 @@ public class StartupClusterConnectivityChecker
         boolean succeeded = true;
         for (CountDownLatch countDownLatch : dcToRemainingPeers.values())
         {
-            long remainingNanos = Math.max(1, timeoutNanos - (System.nanoTime() - startNanos));
+            long remainingNanos = Math.max(1, timeoutNanos - (nanoTime() - startNanos));
             //noinspection UnstableApiUsage
             succeeded &= Uninterruptibles.awaitUninterruptibly(countDownLatch, remainingNanos, TimeUnit.NANOSECONDS);
         }
@@ -164,12 +165,12 @@ public class StartupClusterConnectivityChecker
         if (succeeded)
         {
             logger.info("Ensured sufficient healthy connections with {} after {} milliseconds",
-                        numDown.keySet(), TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos));
+                        numDown.keySet(), TimeUnit.NANOSECONDS.toMillis(nanoTime() - startNanos));
         }
         else
         {
             logger.warn("Timed out after {} milliseconds, was waiting for remaining peers to connect: {}",
-                        TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos), numDown);
+                        TimeUnit.NANOSECONDS.toMillis(nanoTime() - startNanos), numDown);
         }
 
         return succeeded;
diff --git a/src/java/org/apache/cassandra/repair/RepairJob.java b/src/java/org/apache/cassandra/repair/RepairJob.java
index fb66be7..9661064 100644
--- a/src/java/org/apache/cassandra/repair/RepairJob.java
+++ b/src/java/org/apache/cassandra/repair/RepairJob.java
@@ -46,6 +46,8 @@ import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.MerkleTrees;
 import org.apache.cassandra.utils.Pair;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 /**
  * RepairJob runs repair on given ColumnFamily.
  */
@@ -207,7 +209,7 @@ public class RepairJob extends AbstractFuture<RepairResult> implements Runnable
                                                   boolean pullRepair,
                                                   PreviewKind previewKind)
     {
-        long startedAt = System.currentTimeMillis();
+        long startedAt = currentTimeMillis();
         List<SyncTask> syncTasks = new ArrayList<>();
         // We need to difference all trees one against another
         for (int i = 0; i < trees.size() - 1; ++i)
@@ -262,7 +264,7 @@ public class RepairJob extends AbstractFuture<RepairResult> implements Runnable
         }
         trees.get(trees.size() - 1).trees.release();
         logger.info("Created {} sync tasks based on {} merkle tree responses for {} (took: {}ms)",
-                    syncTasks.size(), trees.size(), desc.parentSessionId, System.currentTimeMillis() - startedAt);
+                    syncTasks.size(), trees.size(), desc.parentSessionId, currentTimeMillis() - startedAt);
         return syncTasks;
     }
 
@@ -302,7 +304,7 @@ public class RepairJob extends AbstractFuture<RepairResult> implements Runnable
                                                           boolean isIncremental,
                                                           PreviewKind previewKind)
     {
-        long startedAt = System.currentTimeMillis();
+        long startedAt = currentTimeMillis();
         List<SyncTask> syncTasks = new ArrayList<>();
         // We need to difference all trees one against another
         DifferenceHolder diffHolder = new DifferenceHolder(trees);
@@ -353,7 +355,7 @@ public class RepairJob extends AbstractFuture<RepairResult> implements Runnable
             }
         }
         logger.info("Created {} optimised sync tasks based on {} merkle tree responses for {} (took: {}ms)",
-                    syncTasks.size(), trees.size(), desc.parentSessionId, System.currentTimeMillis() - startedAt);
+                    syncTasks.size(), trees.size(), desc.parentSessionId, currentTimeMillis() - startedAt);
         logger.trace("Optimised sync tasks for {}: {}", desc.parentSessionId, syncTasks);
         return syncTasks;
     }
diff --git a/src/java/org/apache/cassandra/repair/RepairRunnable.java b/src/java/org/apache/cassandra/repair/RepairRunnable.java
index 4f76a8d..f536da6 100644
--- a/src/java/org/apache/cassandra/repair/RepairRunnable.java
+++ b/src/java/org/apache/cassandra/repair/RepairRunnable.java
@@ -96,6 +96,10 @@ import org.apache.cassandra.utils.progress.ProgressEventNotifier;
 import org.apache.cassandra.utils.progress.ProgressEventType;
 import org.apache.cassandra.utils.progress.ProgressListener;
 
+import static org.apache.cassandra.service.QueryState.forInternalCalls;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 public class RepairRunnable implements Runnable, ProgressEventNotifier
 {
     private static final Logger logger = LoggerFactory.getLogger(RepairRunnable.class);
@@ -109,7 +113,7 @@ public class RepairRunnable implements Runnable, ProgressEventNotifier
     private final AtomicInteger progressCounter = new AtomicInteger();
     private final int totalProgress;
 
-    private final long creationTimeMillis = System.currentTimeMillis();
+    private final long creationTimeMillis = currentTimeMillis();
     private final UUID parentSession = UUIDGen.getTimeUUID();
 
     private final List<ProgressListener> listeners = new ArrayList<>();
@@ -217,7 +221,7 @@ public class RepairRunnable implements Runnable, ProgressEventNotifier
 
     private void complete(String msg)
     {
-        long durationMillis = System.currentTimeMillis() - creationTimeMillis;
+        long durationMillis = currentTimeMillis() - creationTimeMillis;
         if (msg == null)
         {
             String duration = DurationFormatUtils.formatDurationWords(durationMillis, true, true);
@@ -804,7 +808,7 @@ public class RepairRunnable implements Runnable, ProgressEventNotifier
                 int si = 0;
                 UUID uuid;
 
-                long tlast = System.currentTimeMillis(), tcur;
+                long tlast = currentTimeMillis(), tcur;
 
                 TraceState.Status status;
                 long minWaitMillis = 125;
@@ -825,11 +829,11 @@ public class RepairRunnable implements Runnable, ProgressEventNotifier
                         shouldDouble = false;
                     }
                     ByteBuffer tminBytes = ByteBufferUtil.bytes(UUIDGen.minTimeUUID(tlast - 1000));
-                    ByteBuffer tmaxBytes = ByteBufferUtil.bytes(UUIDGen.maxTimeUUID(tcur = System.currentTimeMillis()));
+                    ByteBuffer tmaxBytes = ByteBufferUtil.bytes(UUIDGen.maxTimeUUID(tcur = currentTimeMillis()));
                     QueryOptions options = QueryOptions.forInternalCalls(ConsistencyLevel.ONE, Lists.newArrayList(sessionIdBytes,
                                                                                                                   tminBytes,
                                                                                                                   tmaxBytes));
-                    ResultMessage.Rows rows = statement.execute(QueryState.forInternalCalls(), options, System.nanoTime());
+                    ResultMessage.Rows rows = statement.execute(forInternalCalls(), options, nanoTime());
                     UntypedResultSet result = UntypedResultSet.create(rows.result);
 
                     for (UntypedResultSet.Row r : result)
diff --git a/src/java/org/apache/cassandra/repair/SyncTask.java b/src/java/org/apache/cassandra/repair/SyncTask.java
index a63f037..5b23397 100644
--- a/src/java/org/apache/cassandra/repair/SyncTask.java
+++ b/src/java/org/apache/cassandra/repair/SyncTask.java
@@ -35,6 +35,8 @@ import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.streaming.PreviewKind;
 import org.apache.cassandra.tracing.Tracing;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 public abstract class SyncTask extends AbstractFuture<SyncStat> implements Runnable
 {
     private static Logger logger = LoggerFactory.getLogger(SyncTask.class);
@@ -70,7 +72,7 @@ public abstract class SyncTask extends AbstractFuture<SyncStat> implements Runna
      */
     public final void run()
     {
-        startTime = System.currentTimeMillis();
+        startTime = currentTimeMillis();
 
 
         // choose a repair method based on the significance of the difference
@@ -97,6 +99,6 @@ public abstract class SyncTask extends AbstractFuture<SyncStat> implements Runna
     protected void finished()
     {
         if (startTime != Long.MIN_VALUE)
-            Keyspace.open(desc.keyspace).getColumnFamilyStore(desc.columnFamily).metric.repairSyncTime.update(System.currentTimeMillis() - startTime, TimeUnit.MILLISECONDS);
+            Keyspace.open(desc.keyspace).getColumnFamilyStore(desc.columnFamily).metric.repairSyncTime.update(currentTimeMillis() - startTime, TimeUnit.MILLISECONDS);
     }
 }
diff --git a/src/java/org/apache/cassandra/repair/ValidationManager.java b/src/java/org/apache/cassandra/repair/ValidationManager.java
index 1b6ef56..7077dd3 100644
--- a/src/java/org/apache/cassandra/repair/ValidationManager.java
+++ b/src/java/org/apache/cassandra/repair/ValidationManager.java
@@ -39,6 +39,8 @@ import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.MerkleTree;
 import org.apache.cassandra.utils.MerkleTrees;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 public class ValidationManager
 {
     private static final Logger logger = LoggerFactory.getLogger(ValidationManager.class);
@@ -107,7 +109,7 @@ public class ValidationManager
 
         // Create Merkle trees suitable to hold estimated partitions for the given ranges.
         // We blindly assume that a partition is evenly distributed on all sstables for now.
-        long start = System.nanoTime();
+        long start = nanoTime();
         long partitionCount = 0;
         long estimatedTotalBytes = 0;
         try (ValidationPartitionIterator vi = getValidationIterator(cfs.getRepairManager(), validator))
@@ -140,7 +142,7 @@ public class ValidationManager
         }
         if (logger.isDebugEnabled())
         {
-            long duration = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
+            long duration = TimeUnit.NANOSECONDS.toMillis(nanoTime() - start);
             logger.debug("Validation of {} partitions (~{}) finished in {} msec, for {}",
                          partitionCount,
                          FBUtilities.prettyPrintMemory(estimatedTotalBytes),
diff --git a/src/java/org/apache/cassandra/repair/consistent/CoordinatorSession.java b/src/java/org/apache/cassandra/repair/consistent/CoordinatorSession.java
index 0c2c3b6..83d15b8 100644
--- a/src/java/org/apache/cassandra/repair/consistent/CoordinatorSession.java
+++ b/src/java/org/apache/cassandra/repair/consistent/CoordinatorSession.java
@@ -53,6 +53,8 @@ import org.apache.cassandra.repair.messages.PrepareConsistentRequest;
 import org.apache.cassandra.repair.messages.RepairMessage;
 import org.apache.cassandra.service.ActiveRepairService;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 /**
  * Coordinator side logic and state of a consistent repair session. Like {@link ActiveRepairService.ParentRepairSession},
  * there is only one {@code CoordinatorSession} per user repair command, regardless of the number of tables and token
@@ -290,7 +292,7 @@ public class CoordinatorSession extends ConsistentSession
     {
         logger.info("Beginning coordination of incremental repair session {}", sessionID);
 
-        sessionStart = System.currentTimeMillis();
+        sessionStart = currentTimeMillis();
         ListenableFuture<Boolean> prepareResult = prepare();
 
         // run repair sessions normally
@@ -300,7 +302,7 @@ public class CoordinatorSession extends ConsistentSession
             {
                 if (success)
                 {
-                    repairStart = System.currentTimeMillis();
+                    repairStart = currentTimeMillis();
                     if (logger.isDebugEnabled())
                     {
                         logger.debug("Incremental repair {} prepare phase completed in {}", sessionID, formatDuration(sessionStart, repairStart));
@@ -323,7 +325,7 @@ public class CoordinatorSession extends ConsistentSession
             {
                 if (results == null || results.isEmpty() || Iterables.any(results, r -> r == null))
                 {
-                    finalizeStart = System.currentTimeMillis();
+                    finalizeStart = currentTimeMillis();
                     if (logger.isDebugEnabled())
                     {
                         logger.debug("Incremental repair {} validation/stream phase completed in {}", sessionID, formatDuration(repairStart, finalizeStart));
@@ -352,12 +354,12 @@ public class CoordinatorSession extends ConsistentSession
                     {
                         if (logger.isDebugEnabled())
                         {
-                            logger.debug("Incremental repair {} finalization phase completed in {}", sessionID, formatDuration(finalizeStart, System.currentTimeMillis()));
+                            logger.debug("Incremental repair {} finalization phase completed in {}", sessionID, formatDuration(finalizeStart, currentTimeMillis()));
                         }
                         finalizeCommit();
                         if (logger.isDebugEnabled())
                         {
-                            logger.debug("Incremental repair {} phase completed in {}", sessionID, formatDuration(sessionStart, System.currentTimeMillis()));
+                            logger.debug("Incremental repair {} phase completed in {}", sessionID, formatDuration(sessionStart, currentTimeMillis()));
                         }
                     }
                     else
@@ -379,7 +381,7 @@ public class CoordinatorSession extends ConsistentSession
                 {
                     if (logger.isDebugEnabled())
                     {
-                        logger.debug("Incremental repair {} phase failed in {}", sessionID, formatDuration(sessionStart, System.currentTimeMillis()));
+                        logger.debug("Incremental repair {} phase failed in {}", sessionID, formatDuration(sessionStart, currentTimeMillis()));
                     }
                     hasFailure.set(true);
                     fail();
diff --git a/src/java/org/apache/cassandra/schema/MigrationCoordinator.java b/src/java/org/apache/cassandra/schema/MigrationCoordinator.java
index 824b83f..9f12fe0 100644
--- a/src/java/org/apache/cassandra/schema/MigrationCoordinator.java
+++ b/src/java/org/apache/cassandra/schema/MigrationCoordinator.java
@@ -61,6 +61,8 @@ import org.apache.cassandra.net.Verb;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.concurrent.WaitQueue;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 public class MigrationCoordinator
 {
     private static final Logger logger = LoggerFactory.getLogger(MigrationCoordinator.class);
@@ -581,7 +583,7 @@ public class MigrationCoordinator
                 signal = WaitQueue.all(signals);
             }
 
-            return signal.awaitUntil(System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(waitMillis));
+            return signal.awaitUntil(nanoTime() + TimeUnit.MILLISECONDS.toNanos(waitMillis));
         }
         catch (InterruptedException e)
         {
diff --git a/src/java/org/apache/cassandra/serializers/TimestampSerializer.java b/src/java/org/apache/cassandra/serializers/TimestampSerializer.java
index ab048d0..80091b9 100644
--- a/src/java/org/apache/cassandra/serializers/TimestampSerializer.java
+++ b/src/java/org/apache/cassandra/serializers/TimestampSerializer.java
@@ -35,6 +35,8 @@ import java.util.List;
 import java.util.TimeZone;
 import java.util.regex.Pattern;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 
 public class TimestampSerializer extends TypeSerializer<Date>
 {
@@ -139,7 +141,7 @@ public class TimestampSerializer extends TypeSerializer<Date>
     public static long dateStringToTimestamp(String source) throws MarshalException
     {
         if (source.equalsIgnoreCase("now"))
-            return System.currentTimeMillis();
+            return currentTimeMillis();
 
         // Milliseconds since epoch?
         if (timestampPattern.matcher(source).matches())
diff --git a/src/java/org/apache/cassandra/service/AbstractWriteResponseHandler.java b/src/java/org/apache/cassandra/service/AbstractWriteResponseHandler.java
index 4f384a4..cd43338 100644
--- a/src/java/org/apache/cassandra/service/AbstractWriteResponseHandler.java
+++ b/src/java/org/apache/cassandra/service/AbstractWriteResponseHandler.java
@@ -45,6 +45,7 @@ import org.apache.cassandra.schema.Schema;
 import org.apache.cassandra.utils.concurrent.SimpleCondition;
 
 import static java.util.concurrent.TimeUnit.NANOSECONDS;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 
 public abstract class AbstractWriteResponseHandler<T> implements RequestCallback<T>
@@ -130,7 +131,7 @@ public abstract class AbstractWriteResponseHandler<T> implements RequestCallback
         long requestTimeout = writeType == WriteType.COUNTER
                               ? DatabaseDescriptor.getCounterWriteRpcTimeout(NANOSECONDS)
                               : DatabaseDescriptor.getWriteRpcTimeout(NANOSECONDS);
-        return requestTimeout - (System.nanoTime() - queryStartNanoTime);
+        return requestTimeout - (nanoTime() - queryStartNanoTime);
     }
 
     /**
@@ -287,7 +288,7 @@ public abstract class AbstractWriteResponseHandler<T> implements RequestCallback
             }
             else
             {
-                replicaPlan.keyspace().metric.idealCLWriteLatency.addNano(System.nanoTime() - queryStartNanoTime);
+                replicaPlan.keyspace().metric.idealCLWriteLatency.addNano(nanoTime() - queryStartNanoTime);
             }
         }
     }
diff --git a/src/java/org/apache/cassandra/service/ActiveRepairService.java b/src/java/org/apache/cassandra/service/ActiveRepairService.java
index cc470fd..e172ed3 100644
--- a/src/java/org/apache/cassandra/service/ActiveRepairService.java
+++ b/src/java/org/apache/cassandra/service/ActiveRepairService.java
@@ -96,6 +96,7 @@ import org.apache.cassandra.utils.UUIDGen;
 import static com.google.common.collect.Iterables.concat;
 import static com.google.common.collect.Iterables.transform;
 import static org.apache.cassandra.net.Verb.PREPARE_MSG;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
 
 /**
  * ActiveRepairService is the starting point for manual "active" repairs.
@@ -511,7 +512,7 @@ public class ActiveRepairService implements IEndpointStateChangeSubscriber, IFai
         // end up skipping replicas
         if (options.isIncremental() && options.isGlobal() && ! force)
         {
-            return System.currentTimeMillis();
+            return currentTimeMillis();
         }
         else
         {
diff --git a/src/java/org/apache/cassandra/service/ClientState.java b/src/java/org/apache/cassandra/service/ClientState.java
index 0afff9c..24099c3 100644
--- a/src/java/org/apache/cassandra/service/ClientState.java
+++ b/src/java/org/apache/cassandra/service/ClientState.java
@@ -50,6 +50,8 @@ import org.apache.cassandra.schema.SchemaKeyspace;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.JVMStabilityInspector;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 /**
  * State related to a client connection.
  */
@@ -198,7 +200,7 @@ public class ClientState
     {
         while (true)
         {
-            long current = System.currentTimeMillis() * 1000;
+            long current = currentTimeMillis() * 1000;
             long last = lastTimestampMicros.get();
             long tstamp = last >= current ? last + 1 : current;
             if (lastTimestampMicros.compareAndSet(last, tstamp))
@@ -252,7 +254,7 @@ public class ClientState
     {
         while (true)
         {
-            long current = Math.max(System.currentTimeMillis() * 1000, minTimestampToUse);
+            long current = Math.max(currentTimeMillis() * 1000, minTimestampToUse);
             long last = lastTimestampMicros.get();
             long tstamp = last >= current ? last + 1 : current;
             // Note that if we ended up picking minTimestampMicrosToUse (it was "in the future"), we don't
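
getTimestamp() above derives client write timestamps by multiplying the millisecond clock by 1000 and CASing the result into lastTimestampMicros, handing out last + 1 whenever the clock has not moved past the previous value; that keeps timestamps strictly increasing across threads despite millisecond resolution. A self-contained sketch of the same loop, with the clock injected so a test can freeze it (illustrative names, not Cassandra's):

import java.util.concurrent.atomic.AtomicLong;
import java.util.function.LongSupplier;

// Thread-safe, strictly increasing microsecond timestamps derived from a millisecond clock.
final class MicrosTimestampSource
{
    private final LongSupplier clockMillis; // e.g. a mockable millisecond clock
    private final AtomicLong lastMicros = new AtomicLong(Long.MIN_VALUE);

    MicrosTimestampSource(LongSupplier clockMillis)
    {
        this.clockMillis = clockMillis;
    }

    long next()
    {
        while (true)
        {
            long current = clockMillis.getAsLong() * 1000;      // millis -> micros
            long last = lastMicros.get();
            long candidate = last >= current ? last + 1 : current;
            if (lastMicros.compareAndSet(last, candidate))      // retry if another thread raced us
                return candidate;
        }
    }
}

With the clock frozen, concurrent callers still receive distinct, monotonically increasing values, which is what the CAS loop is protecting.
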
diff --git a/src/java/org/apache/cassandra/service/GCInspector.java b/src/java/org/apache/cassandra/service/GCInspector.java
index 02fd720..55f1a2e 100644
--- a/src/java/org/apache/cassandra/service/GCInspector.java
+++ b/src/java/org/apache/cassandra/service/GCInspector.java
@@ -50,6 +50,8 @@ import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
 import org.apache.cassandra.utils.MBeanWrapper;
 import org.apache.cassandra.utils.StatusLogger;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 public class GCInspector implements NotificationListener, GCInspectorMXBean
 {
     public static final String MBEAN_NAME = "org.apache.cassandra.service:type=GCInspector";
@@ -113,7 +115,7 @@ public class GCInspector implements NotificationListener, GCInspectorMXBean
         State()
         {
             count = maxRealTimeElapsed = sumSquaresRealTimeElapsed = totalRealTimeElapsed = totalBytesReclaimed = 0;
-            startNanos = System.nanoTime();
+            startNanos = nanoTime();
         }
     }
 
@@ -318,7 +320,7 @@ public class GCInspector implements NotificationListener, GCInspectorMXBean
     {
         State state = getTotalSinceLastCheck();
         double[] r = new double[7];
-        r[0] = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - state.startNanos);
+        r[0] = TimeUnit.NANOSECONDS.toMillis(nanoTime() - state.startNanos);
         r[1] = state.maxRealTimeElapsed;
         r[2] = state.totalRealTimeElapsed;
         r[3] = state.sumSquaresRealTimeElapsed;
diff --git a/src/java/org/apache/cassandra/service/PendingRangeCalculatorService.java b/src/java/org/apache/cassandra/service/PendingRangeCalculatorService.java
index 1c6b183..549dc38 100644
--- a/src/java/org/apache/cassandra/service/PendingRangeCalculatorService.java
+++ b/src/java/org/apache/cassandra/service/PendingRangeCalculatorService.java
@@ -34,6 +34,8 @@ import java.util.concurrent.atomic.AtomicInteger;
 
 import com.google.common.annotations.VisibleForTesting;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 public class PendingRangeCalculatorService
 {
     public static final PendingRangeCalculatorService instance = new PendingRangeCalculatorService();
@@ -71,12 +73,12 @@ public class PendingRangeCalculatorService
             try
             {
                 PendingRangeCalculatorServiceDiagnostics.taskStarted(instance, updateJobs);
-                long start = System.currentTimeMillis();
+                long start = currentTimeMillis();
                 List<String> keyspaces = Schema.instance.getNonLocalStrategyKeyspaces();
                 for (String keyspaceName : keyspaces)
                     calculatePendingRanges(Keyspace.open(keyspaceName).getReplicationStrategy(), keyspaceName);
                 if (logger.isTraceEnabled())
-                    logger.trace("Finished PendingRangeTask for {} keyspaces in {}ms", keyspaces.size(), System.currentTimeMillis() - start);
+                    logger.trace("Finished PendingRangeTask for {} keyspaces in {}ms", keyspaces.size(), currentTimeMillis() - start);
                 PendingRangeCalculatorServiceDiagnostics.taskFinished(instance, updateJobs);
             }
             finally
diff --git a/src/java/org/apache/cassandra/service/StartupChecks.java b/src/java/org/apache/cassandra/service/StartupChecks.java
index dadb0c5..4f9b82f 100644
--- a/src/java/org/apache/cassandra/service/StartupChecks.java
+++ b/src/java/org/apache/cassandra/service/StartupChecks.java
@@ -60,6 +60,7 @@ import static java.lang.String.format;
 import static org.apache.cassandra.config.CassandraRelevantProperties.COM_SUN_MANAGEMENT_JMXREMOTE_PORT;
 import static org.apache.cassandra.config.CassandraRelevantProperties.JAVA_VERSION;
 import static org.apache.cassandra.config.CassandraRelevantProperties.JAVA_VM_NAME;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
 
 /**
  * Verifies that the system and environment are in a fit state to be started.
@@ -169,7 +170,7 @@ public class StartupChecks
         private static final long EARLIEST_LAUNCH_DATE = 1215820800000L;
         public void execute() throws StartupException
         {
-            long now = System.currentTimeMillis();
+            long now = currentTimeMillis();
             if (now < EARLIEST_LAUNCH_DATE)
                 throw new StartupException(StartupException.ERR_WRONG_MACHINE_STATE,
                                            String.format("current machine time is %s, but that is seemingly incorrect. exiting now.",
diff --git a/src/java/org/apache/cassandra/service/StorageProxy.java b/src/java/org/apache/cassandra/service/StorageProxy.java
index 3ad1050..0f29875 100644
--- a/src/java/org/apache/cassandra/service/StorageProxy.java
+++ b/src/java/org/apache/cassandra/service/StorageProxy.java
@@ -147,6 +147,8 @@ import static org.apache.cassandra.net.Verb.TRUNCATE_REQ;
 import static org.apache.cassandra.service.BatchlogResponseHandler.BatchlogCleanup;
 import static org.apache.cassandra.service.paxos.PrepareVerbHandler.doPrepare;
 import static org.apache.cassandra.service.paxos.ProposeVerbHandler.doPropose;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 public class StorageProxy implements StorageProxyMBean
 {
@@ -288,7 +290,7 @@ public class StorageProxy implements StorageProxyMBean
                                   long queryStartNanoTime)
     throws UnavailableException, IsBootstrappingException, RequestFailureException, RequestTimeoutException, InvalidRequestException, CasWriteUnknownResultException
     {
-        final long startTimeForMetrics = System.nanoTime();
+        final long startTimeForMetrics = nanoTime();
         try
         {
             TableMetadata metadata = Schema.instance.validateTable(keyspaceName, cfName);
@@ -380,7 +382,7 @@ public class StorageProxy implements StorageProxyMBean
         }
         finally
         {
-            final long latency = System.nanoTime() - startTimeForMetrics;
+            final long latency = nanoTime() - startTimeForMetrics;
             casWriteMetrics.addNano(latency);
             writeMetricsMap.get(consistencyForPaxos).addNano(latency);
         }
@@ -448,7 +450,7 @@ public class StorageProxy implements StorageProxyMBean
             consistencyForCommit.validateForCasCommit(latestRs);
 
             long timeoutNanos = DatabaseDescriptor.getCasContentionTimeout(NANOSECONDS);
-            while (System.nanoTime() - queryStartNanoTime < timeoutNanos)
+            while (nanoTime() - queryStartNanoTime < timeoutNanos)
             {
                 // for simplicity, we'll do a single liveness check at the start of each attempt
                 ReplicaPlan.ForPaxosWrite replicaPlan = ReplicaPlans.forPaxos(keyspace, key, consistencyForPaxos);
@@ -535,7 +537,7 @@ public class StorageProxy implements StorageProxyMBean
 
         PrepareCallback summary = null;
         int contentions = 0;
-        while (System.nanoTime() - queryStartNanoTime < timeoutNanos)
+        while (nanoTime() - queryStartNanoTime < timeoutNanos)
         {
             // We want a timestamp that is guaranteed to be unique for that node (so that the ballot is globally unique), but if we've got a prepare rejected
             // already we also want to make sure we pick a timestamp that has a chance to be promised, i.e. one that is greater than the most recently known
@@ -806,7 +808,7 @@ public class StorageProxy implements StorageProxyMBean
      *
      * @param mutations the mutations to be applied across the replicas
      * @param consistencyLevel the consistency level for the operation
-     * @param queryStartNanoTime the value of System.nanoTime() when the query started to be processed
+     * @param queryStartNanoTime the value of nanoTime() when the query started to be processed
      */
     public static void mutate(List<? extends IMutation> mutations, ConsistencyLevel consistencyLevel, long queryStartNanoTime)
     throws UnavailableException, OverloadedException, WriteTimeoutException, WriteFailureException
@@ -814,7 +816,7 @@ public class StorageProxy implements StorageProxyMBean
         Tracing.trace("Determining replicas for mutation");
         final String localDataCenter = DatabaseDescriptor.getEndpointSnitch().getLocalDatacenter();
 
-        long startTime = System.nanoTime();
+        long startTime = nanoTime();
 
         List<AbstractWriteResponseHandler<IMutation>> responseHandlers = new ArrayList<>(mutations.size());
         WriteType plainWriteType = mutations.size() <= 1 ? WriteType.SIMPLE : WriteType.UNLOGGED_BATCH;
@@ -882,7 +884,7 @@ public class StorageProxy implements StorageProxyMBean
         }
         finally
         {
-            long latency = System.nanoTime() - startTime;
+            long latency = nanoTime() - startTime;
             writeMetrics.addNano(latency);
             writeMetricsMap.get(consistencyLevel).addNano(latency);
             updateCoordinatorWriteLatencyTableMetric(mutations, latency);
@@ -938,7 +940,7 @@ public class StorageProxy implements StorageProxyMBean
      * @param mutations the mutations to be applied across the replicas
      * @param writeCommitLog if commitlog should be written
      * @param baseComplete time from epoch in ms that the local base mutation was(or will be) completed
-     * @param queryStartNanoTime the value of System.nanoTime() when the query started to be processed
+     * @param queryStartNanoTime the value of nanoTime() when the query started to be processed
      */
     public static void mutateMV(ByteBuffer dataKey, Collection<Mutation> mutations, boolean writeCommitLog, AtomicLong baseComplete, long queryStartNanoTime)
     throws UnavailableException, OverloadedException, WriteTimeoutException
@@ -946,7 +948,7 @@ public class StorageProxy implements StorageProxyMBean
         Tracing.trace("Determining replicas for mutation");
         final String localDataCenter = DatabaseDescriptor.getEndpointSnitch().getLocalDatacenter();
 
-        long startTime = System.nanoTime();
+        long startTime = nanoTime();
 
 
         try
@@ -1041,7 +1043,7 @@ public class StorageProxy implements StorageProxyMBean
         }
         finally
         {
-            viewWriteMetrics.addNano(System.nanoTime() - startTime);
+            viewWriteMetrics.addNano(nanoTime() - startTime);
         }
     }
 
@@ -1082,7 +1084,7 @@ public class StorageProxy implements StorageProxyMBean
      * @param mutations the Mutations to be applied across the replicas
      * @param consistency_level the consistency level for the operation
      * @param requireQuorumForRemove at least a quorum of nodes will see update before deleting batchlog
-     * @param queryStartNanoTime the value of System.nanoTime() when the query started to be processed
+     * @param queryStartNanoTime the value of nanoTime() when the query started to be processed
      */
     public static void mutateAtomically(Collection<Mutation> mutations,
                                         ConsistencyLevel consistency_level,
@@ -1091,7 +1093,7 @@ public class StorageProxy implements StorageProxyMBean
     throws UnavailableException, OverloadedException, WriteTimeoutException
     {
         Tracing.trace("Determining replicas for atomic batch");
-        long startTime = System.nanoTime();
+        long startTime = nanoTime();
 
         List<WriteResponseHandlerWrapper> wrappers = new ArrayList<>(mutations.size());
 
@@ -1162,7 +1164,7 @@ public class StorageProxy implements StorageProxyMBean
         }
         finally
         {
-            long latency = System.nanoTime() - startTime;
+            long latency = nanoTime() - startTime;
             writeMetrics.addNano(latency);
             writeMetricsMap.get(consistency_level).addNano(latency);
             updateCoordinatorWriteLatencyTableMetric(mutations, latency);
@@ -1273,7 +1275,7 @@ public class StorageProxy implements StorageProxyMBean
      * given the list of write endpoints (either standardWritePerformer for
      * standard writes or counterWritePerformer for counter writes).
      * @param callback an optional callback to be run if and when the write is
-     * @param queryStartNanoTime the value of System.nanoTime() when the query started to be processed
+     * @param queryStartNanoTime the value of nanoTime() when the query started to be processed
      */
     public static AbstractWriteResponseHandler<IMutation> performWrite(IMutation mutation,
                                                                        ConsistencyLevel consistencyLevel,
@@ -1330,7 +1332,7 @@ public class StorageProxy implements StorageProxyMBean
         ReplicaPlan.ForTokenWrite replicaPlan = ReplicaPlans.forWrite(keyspace, consistencyLevel, liveAndDown, ReplicaPlans.writeAll);
         AbstractReplicationStrategy replicationStrategy = replicaPlan.replicationStrategy();
         AbstractWriteResponseHandler<IMutation> writeHandler = replicationStrategy.getWriteResponseHandler(replicaPlan, () -> {
-            long delay = Math.max(0, System.currentTimeMillis() - baseComplete.get());
+            long delay = Math.max(0, currentTimeMillis() - baseComplete.get());
             viewWriteMetrics.viewWriteLatency.update(delay, MILLISECONDS);
         }, writeType, queryStartNanoTime);
         BatchlogResponseHandler<IMutation> batchHandler = new ViewWriteMetricsWrapped(writeHandler, batchConsistencyLevel.blockFor(replicationStrategy), cleanup, queryStartNanoTime);
@@ -1747,7 +1749,7 @@ public class StorageProxy implements StorageProxyMBean
         if (group.queries.size() > 1)
             throw new InvalidRequestException("SERIAL/LOCAL_SERIAL consistency may only be requested for one partition at a time");
 
-        long start = System.nanoTime();
+        long start = nanoTime();
         SinglePartitionReadCommand command = group.queries.get(0);
         TableMetadata metadata = command.metadata();
         DecoratedKey key = command.partitionKey();
@@ -1824,7 +1826,7 @@ public class StorageProxy implements StorageProxyMBean
         }
         finally
         {
-            long latency = System.nanoTime() - start;
+            long latency = nanoTime() - start;
             readMetrics.addNano(latency);
             casReadMetrics.addNano(latency);
             readMetricsMap.get(consistencyLevel).addNano(latency);
@@ -1838,7 +1840,7 @@ public class StorageProxy implements StorageProxyMBean
     private static PartitionIterator readRegular(SinglePartitionReadCommand.Group group, ConsistencyLevel consistencyLevel, long queryStartNanoTime)
     throws UnavailableException, ReadFailureException, ReadTimeoutException
     {
-        long start = System.nanoTime();
+        long start = nanoTime();
         try
         {
             PartitionIterator result = fetchRows(group.queries, consistencyLevel, queryStartNanoTime);
@@ -1876,7 +1878,7 @@ public class StorageProxy implements StorageProxyMBean
         }
         finally
         {
-            long latency = System.nanoTime() - start;
+            long latency = nanoTime() - start;
             readMetrics.addNano(latency);
             readMetricsMap.get(consistencyLevel).addNano(latency);
             // TODO avoid giving every command the same latency number.  Can fix this in CASSANDRA-5329
@@ -2480,7 +2482,7 @@ public class StorageProxy implements StorageProxyMBean
                         logger.debug("Discarding hint for endpoint not part of ring: {}", target);
                 }
                 logger.trace("Adding hints for {}", validTargets);
-                HintsService.instance.write(hostIds, Hint.create(mutation, System.currentTimeMillis()));
+                HintsService.instance.write(hostIds, Hint.create(mutation, currentTimeMillis()));
                 validTargets.forEach(HintsService.instance.metrics::incrCreatedHints);
                 // Notify the handler only for CL == ANY
                 if (responseHandler != null && responseHandler.replicaPlan.consistencyLevel() == ConsistencyLevel.ANY)
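
Throughout StorageProxy the coordinator records request latency in a finally block as nanoTime() minus the start time, so the metrics are updated on the success and failure paths alike. A compact sketch of that try/finally shape, with the metric reduced to a LongConsumer (illustrative, not Cassandra's metrics API):

import java.util.concurrent.Callable;
import java.util.function.LongConsumer;
import java.util.function.LongSupplier;

// Runs a task and always reports its latency in nanoseconds, even when the task throws.
final class LatencyRecorder
{
    static <T> T timed(Callable<T> task, LongSupplier nanoClock, LongConsumer latencySink) throws Exception
    {
        long start = nanoClock.getAsLong();
        try
        {
            return task.call();
        }
        finally
        {
            latencySink.accept(nanoClock.getAsLong() - start); // recorded whether call() returned or threw
        }
    }
}
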
diff --git a/src/java/org/apache/cassandra/service/StorageService.java b/src/java/org/apache/cassandra/service/StorageService.java
index 0e663c0..0299fac 100644
--- a/src/java/org/apache/cassandra/service/StorageService.java
+++ b/src/java/org/apache/cassandra/service/StorageService.java
@@ -134,6 +134,8 @@ import static org.apache.cassandra.index.SecondaryIndexManager.isIndexColumnFami
 import static org.apache.cassandra.net.NoPayload.noPayload;
 import static org.apache.cassandra.net.Verb.REPLICATION_DONE_REQ;
 import static org.apache.cassandra.schema.MigrationManager.evolveSystemKeyspace;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 /**
  * This abstraction contains the token/identifier of this node
@@ -392,7 +394,7 @@ public class StorageService extends NotificationBroadcasterSupport implements IE
                 setGossipTokens(tokens);
 
             Gossiper.instance.forceNewerGeneration();
-            Gossiper.instance.start((int) (System.currentTimeMillis() / 1000));
+            Gossiper.instance.start((int) (currentTimeMillis() / 1000));
             gossipActive = true;
         }
     }
@@ -725,7 +727,7 @@ public class StorageService extends NotificationBroadcasterSupport implements IE
         initialized = true;
         gossipActive = true;
         Gossiper.instance.register(this);
-        Gossiper.instance.start((int) (System.currentTimeMillis() / 1000)); // needed for node-ring gathering.
+        Gossiper.instance.start((int) (currentTimeMillis() / 1000)); // needed for node-ring gathering.
         Gossiper.instance.addLocalApplicationState(ApplicationState.NET_VERSION, valueFactory.networkVersion());
         MessagingService.instance().listen();
     }
@@ -1703,7 +1705,7 @@ public class StorageService extends NotificationBroadcasterSupport implements IE
                     if (existing != null)
                     {
                         long nanoDelay = schemaDelay * 1000000L;
-                        if (Gossiper.instance.getEndpointStateForEndpoint(existing).getUpdateTimestamp() > (System.nanoTime() - nanoDelay))
+                        if (Gossiper.instance.getEndpointStateForEndpoint(existing).getUpdateTimestamp() > (nanoTime() - nanoDelay))
                             throw new UnsupportedOperationException("Cannot replace a live node... ");
                         collisions.add(existing);
                     }
diff --git a/src/java/org/apache/cassandra/service/TruncateResponseHandler.java b/src/java/org/apache/cassandra/service/TruncateResponseHandler.java
index 60e8d0b..f6c1506 100644
--- a/src/java/org/apache/cassandra/service/TruncateResponseHandler.java
+++ b/src/java/org/apache/cassandra/service/TruncateResponseHandler.java
@@ -34,6 +34,8 @@ import org.apache.cassandra.net.Message;
 import org.apache.cassandra.utils.concurrent.SimpleCondition;
 
 import static java.util.concurrent.TimeUnit.NANOSECONDS;
+import static org.apache.cassandra.config.DatabaseDescriptor.getTruncateRpcTimeout;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 public class TruncateResponseHandler implements RequestCallback<TruncateResponse>
 {
@@ -51,12 +53,12 @@ public class TruncateResponseHandler implements RequestCallback<TruncateResponse
         assert 1 <= responseCount: "invalid response count " + responseCount;
 
         this.responseCount = responseCount;
-        start = System.nanoTime();
+        start = nanoTime();
     }
 
     public void get() throws TimeoutException
     {
-        long timeoutNanos = DatabaseDescriptor.getTruncateRpcTimeout(NANOSECONDS) - (System.nanoTime() - start);
+        long timeoutNanos = getTruncateRpcTimeout(NANOSECONDS) - (nanoTime() - start);
         boolean completedInTime;
         try
         {
diff --git a/src/java/org/apache/cassandra/service/pager/AggregationQueryPager.java b/src/java/org/apache/cassandra/service/pager/AggregationQueryPager.java
index dbc4fc0..bf29ff6 100644
--- a/src/java/org/apache/cassandra/service/pager/AggregationQueryPager.java
+++ b/src/java/org/apache/cassandra/service/pager/AggregationQueryPager.java
@@ -29,6 +29,8 @@ import org.apache.cassandra.db.rows.Row;
 import org.apache.cassandra.db.rows.RowIterator;
 import org.apache.cassandra.service.ClientState;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 /**
  * {@code QueryPager} that takes care of fetching the pages for aggregation queries.
  * <p>
@@ -71,9 +73,9 @@ public final class AggregationQueryPager implements QueryPager
     public PartitionIterator fetchPageInternal(int pageSize, ReadExecutionController executionController)
     {
         if (limits.isGroupByLimit())
-            return new GroupByPartitionIterator(pageSize, executionController, System.nanoTime());
+            return new GroupByPartitionIterator(pageSize, executionController, nanoTime());
 
-        return new AggregationPartitionIterator(pageSize, executionController, System.nanoTime());
+        return new AggregationPartitionIterator(pageSize, executionController, nanoTime());
     }
 
     @Override
diff --git a/src/java/org/apache/cassandra/service/pager/MultiPartitionPager.java b/src/java/org/apache/cassandra/service/pager/MultiPartitionPager.java
index ca16967..7e64e03 100644
--- a/src/java/org/apache/cassandra/service/pager/MultiPartitionPager.java
+++ b/src/java/org/apache/cassandra/service/pager/MultiPartitionPager.java
@@ -30,6 +30,8 @@ import org.apache.cassandra.exceptions.RequestValidationException;
 import org.apache.cassandra.exceptions.RequestExecutionException;
 import org.apache.cassandra.service.ClientState;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 /**
  * Pager over a list of SinglePartitionReadQuery.
  *
@@ -158,7 +160,7 @@ public class MultiPartitionPager<T extends SinglePartitionReadQuery> implements
     public PartitionIterator fetchPageInternal(int pageSize, ReadExecutionController executionController) throws RequestValidationException, RequestExecutionException
     {
         int toQuery = Math.min(remaining, pageSize);
-        return new PagersIterator(toQuery, null, null, executionController, System.nanoTime());
+        return new PagersIterator(toQuery, null, null, executionController, nanoTime());
     }
 
     private class PagersIterator extends AbstractIterator<RowIterator> implements PartitionIterator
diff --git a/src/java/org/apache/cassandra/service/paxos/AbstractPaxosCallback.java b/src/java/org/apache/cassandra/service/paxos/AbstractPaxosCallback.java
index ab24f50..5624df4 100644
--- a/src/java/org/apache/cassandra/service/paxos/AbstractPaxosCallback.java
+++ b/src/java/org/apache/cassandra/service/paxos/AbstractPaxosCallback.java
@@ -29,6 +29,7 @@ import org.apache.cassandra.exceptions.WriteTimeoutException;
 import org.apache.cassandra.net.RequestCallback;
 
 import static java.util.concurrent.TimeUnit.NANOSECONDS;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 public abstract class AbstractPaxosCallback<T> implements RequestCallback<T>
 {
@@ -54,7 +55,7 @@ public abstract class AbstractPaxosCallback<T> implements RequestCallback<T>
     {
         try
         {
-            long timeout = DatabaseDescriptor.getWriteRpcTimeout(NANOSECONDS) - (System.nanoTime() - queryStartNanoTime);
+            long timeout = DatabaseDescriptor.getWriteRpcTimeout(NANOSECONDS) - (nanoTime() - queryStartNanoTime);
             if (!latch.await(timeout, NANOSECONDS))
                 throw new WriteTimeoutException(WriteType.CAS, consistency, getResponseCount(), targets);
         }
diff --git a/src/java/org/apache/cassandra/service/paxos/PaxosState.java b/src/java/org/apache/cassandra/service/paxos/PaxosState.java
index 6e02435..391d55f 100644
--- a/src/java/org/apache/cassandra/service/paxos/PaxosState.java
+++ b/src/java/org/apache/cassandra/service/paxos/PaxosState.java
@@ -30,6 +30,8 @@ import org.apache.cassandra.db.*;
 import org.apache.cassandra.tracing.Tracing;
 import org.apache.cassandra.utils.UUIDGen;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 public class PaxosState
 {
     private static final Striped<Lock> LOCKS = Striped.lazyWeakLock(DatabaseDescriptor.getConcurrentWriters() * 1024);
@@ -55,7 +57,7 @@ public class PaxosState
 
     public static PrepareResponse prepare(Commit toPrepare)
     {
-        long start = System.nanoTime();
+        long start = nanoTime();
         try
         {
             Lock lock = LOCKS.get(toPrepare.update.partitionKey());
@@ -89,14 +91,14 @@ public class PaxosState
         }
         finally
         {
-            Keyspace.open(toPrepare.update.metadata().keyspace).getColumnFamilyStore(toPrepare.update.metadata().id).metric.casPrepare.addNano(System.nanoTime() - start);
+            Keyspace.open(toPrepare.update.metadata().keyspace).getColumnFamilyStore(toPrepare.update.metadata().id).metric.casPrepare.addNano(nanoTime() - start);
         }
 
     }
 
     public static Boolean propose(Commit proposal)
     {
-        long start = System.nanoTime();
+        long start = nanoTime();
         try
         {
             Lock lock = LOCKS.get(proposal.update.partitionKey());
@@ -124,13 +126,13 @@ public class PaxosState
         }
         finally
         {
-            Keyspace.open(proposal.update.metadata().keyspace).getColumnFamilyStore(proposal.update.metadata().id).metric.casPropose.addNano(System.nanoTime() - start);
+            Keyspace.open(proposal.update.metadata().keyspace).getColumnFamilyStore(proposal.update.metadata().id).metric.casPropose.addNano(nanoTime() - start);
         }
     }
 
     public static void commit(Commit proposal)
     {
-        long start = System.nanoTime();
+        long start = nanoTime();
         try
         {
             // There is no guarantee we will see commits in the right order, because messages
@@ -155,7 +157,7 @@ public class PaxosState
         }
         finally
         {
-            Keyspace.open(proposal.update.metadata().keyspace).getColumnFamilyStore(proposal.update.metadata().id).metric.casCommit.addNano(System.nanoTime() - start);
+            Keyspace.open(proposal.update.metadata().keyspace).getColumnFamilyStore(proposal.update.metadata().id).metric.casCommit.addNano(nanoTime() - start);
         }
     }
 }
diff --git a/src/java/org/apache/cassandra/service/reads/DigestResolver.java b/src/java/org/apache/cassandra/service/reads/DigestResolver.java
index 475c8c2..f66edea 100644
--- a/src/java/org/apache/cassandra/service/reads/DigestResolver.java
+++ b/src/java/org/apache/cassandra/service/reads/DigestResolver.java
@@ -38,6 +38,7 @@ import org.apache.cassandra.service.reads.repair.NoopReadRepair;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
 import static com.google.common.collect.Iterables.any;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 public class DigestResolver<E extends Endpoints<E>, P extends ReplicaPlan.ForRead<E>> extends ResponseResolver<E, P>
 {
@@ -102,7 +103,7 @@ public class DigestResolver<E extends Endpoints<E>, P extends ReplicaPlan.ForRea
 
     public boolean responsesMatch()
     {
-        long start = System.nanoTime();
+        long start = nanoTime();
 
         // validate digests against each other; return false immediately on mismatch.
         ByteBuffer digest = null;
@@ -126,7 +127,7 @@ public class DigestResolver<E extends Endpoints<E>, P extends ReplicaPlan.ForRea
         }
 
         if (logger.isTraceEnabled())
-            logger.trace("responsesMatch: {} ms.", TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start));
+            logger.trace("responsesMatch: {} ms.", TimeUnit.NANOSECONDS.toMillis(nanoTime() - start));
 
         return true;
     }
diff --git a/src/java/org/apache/cassandra/service/reads/ReadCallback.java b/src/java/org/apache/cassandra/service/reads/ReadCallback.java
index 15f1559..4a83677 100644
--- a/src/java/org/apache/cassandra/service/reads/ReadCallback.java
+++ b/src/java/org/apache/cassandra/service/reads/ReadCallback.java
@@ -48,6 +48,7 @@ import org.apache.cassandra.tracing.Tracing;
 import org.apache.cassandra.utils.concurrent.SimpleCondition;
 
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 public class ReadCallback<E extends Endpoints<E>, P extends ReplicaPlan.ForRead<E>> implements RequestCallback<ReadResponse>
 {
@@ -91,7 +92,7 @@ public class ReadCallback<E extends Endpoints<E>, P extends ReplicaPlan.ForRead<
 
     public boolean await(long timePastStart, TimeUnit unit)
     {
-        long time = unit.toNanos(timePastStart) - (System.nanoTime() - queryStartNanoTime);
+        long time = unit.toNanos(timePastStart) - (nanoTime() - queryStartNanoTime);
         try
         {
             return condition.await(time, TimeUnit.NANOSECONDS);
diff --git a/src/java/org/apache/cassandra/service/reads/range/RangeCommandIterator.java b/src/java/org/apache/cassandra/service/reads/range/RangeCommandIterator.java
index dd8b01d..60309bd 100644
--- a/src/java/org/apache/cassandra/service/reads/range/RangeCommandIterator.java
+++ b/src/java/org/apache/cassandra/service/reads/range/RangeCommandIterator.java
@@ -52,6 +52,8 @@ import org.apache.cassandra.tracing.Tracing;
 import org.apache.cassandra.utils.AbstractIterator;
 import org.apache.cassandra.utils.CloseableIterator;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 class RangeCommandIterator extends AbstractIterator<RowIterator> implements PartitionIterator
 {
     private static final Logger logger = LoggerFactory.getLogger(RangeCommandIterator.class);
@@ -90,7 +92,7 @@ class RangeCommandIterator extends AbstractIterator<RowIterator> implements Part
         this.totalRangeCount = totalRangeCount;
         this.queryStartNanoTime = queryStartNanoTime;
 
-        startTime = System.nanoTime();
+        startTime = nanoTime();
         enforceStrictLiveness = command.metadata().enforceStrictLiveness();
     }
 
@@ -262,7 +264,7 @@ class RangeCommandIterator extends AbstractIterator<RowIterator> implements Part
         }
         finally
         {
-            long latency = System.nanoTime() - startTime;
+            long latency = nanoTime() - startTime;
             rangeMetrics.addNano(latency);
             Keyspace.openAndGetStore(command.metadata()).metric.coordinatorScanLatency.update(latency, TimeUnit.NANOSECONDS);
         }
diff --git a/src/java/org/apache/cassandra/service/reads/repair/BlockingPartitionRepair.java b/src/java/org/apache/cassandra/service/reads/repair/BlockingPartitionRepair.java
index edcf14d..194ad5b 100644
--- a/src/java/org/apache/cassandra/service/reads/repair/BlockingPartitionRepair.java
+++ b/src/java/org/apache/cassandra/service/reads/repair/BlockingPartitionRepair.java
@@ -53,6 +53,7 @@ import org.apache.cassandra.schema.TableId;
 import org.apache.cassandra.tracing.Tracing;
 
 import static org.apache.cassandra.net.Verb.*;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 public class BlockingPartitionRepair
         extends AbstractFuture<Object> implements RequestCallback<Object>
@@ -146,7 +147,7 @@ public class BlockingPartitionRepair
 
     public void sendInitialRepairs()
     {
-        mutationsSentTime = System.nanoTime();
+        mutationsSentTime = nanoTime();
         Replicas.assertFull(pendingRepairs.keySet());
 
         for (Map.Entry<Replica, Mutation> entry: pendingRepairs.entrySet())
@@ -177,7 +178,7 @@ public class BlockingPartitionRepair
     public boolean awaitRepairsUntil(long timeoutAt, TimeUnit timeUnit)
     {
         long timeoutAtNanos = timeUnit.toNanos(timeoutAt);
-        long remaining = timeoutAtNanos - System.nanoTime();
+        long remaining = timeoutAtNanos - nanoTime();
         try
         {
             return latch.await(remaining, TimeUnit.NANOSECONDS);
diff --git a/src/java/org/apache/cassandra/streaming/StreamSession.java b/src/java/org/apache/cassandra/streaming/StreamSession.java
index 3a32834..bbbe79d 100644
--- a/src/java/org/apache/cassandra/streaming/StreamSession.java
+++ b/src/java/org/apache/cassandra/streaming/StreamSession.java
@@ -52,6 +52,7 @@ import org.apache.cassandra.utils.NoSpamLogger;
 
 import static com.google.common.collect.Iterables.all;
 import static org.apache.cassandra.net.MessagingService.current_version;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 /**
  * Handles the streaming of one or more streams to and from a specific remote node.
@@ -790,14 +791,14 @@ public class StreamSession implements IEndpointStateChangeSubscriber
         // send back file received message
         messageSender.sendMessage(new ReceivedMessage(message.header.tableId, message.header.sequenceNumber));
         StreamHook.instance.reportIncomingStream(message.header.tableId, message.stream, this, message.header.sequenceNumber);
-        long receivedStartNanos = System.nanoTime();
+        long receivedStartNanos = nanoTime();
         try
         {
             receivers.get(message.header.tableId).received(message.stream);
         }
         finally
         {
-            long latencyNanos = System.nanoTime() - receivedStartNanos;
+            long latencyNanos = nanoTime() - receivedStartNanos;
             metrics.incomingProcessTime.update(latencyNanos, TimeUnit.NANOSECONDS);
             long latencyMs = TimeUnit.NANOSECONDS.toMillis(latencyNanos);
             int timeout = DatabaseDescriptor.getInternodeStreamingTcpUserTimeoutInMS();
diff --git a/src/java/org/apache/cassandra/streaming/async/NettyStreamingMessageSender.java b/src/java/org/apache/cassandra/streaming/async/NettyStreamingMessageSender.java
index fba56f5..a63db22 100644
--- a/src/java/org/apache/cassandra/streaming/async/NettyStreamingMessageSender.java
+++ b/src/java/org/apache/cassandra/streaming/async/NettyStreamingMessageSender.java
@@ -63,6 +63,8 @@ import org.apache.cassandra.streaming.messages.StreamMessage;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.JVMStabilityInspector;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 /**
  * Responsible for sending {@link StreamMessage}s to a given peer. We manage an array of netty {@link Channel}s
  * for sending {@link OutgoingStreamMessage} instances; all other {@link StreamMessage} types are sent via
@@ -377,7 +379,7 @@ public class NettyStreamingMessageSender implements StreamingMessageSender
         boolean acquirePermit(int logInterval)
         {
             long logIntervalNanos = TimeUnit.MINUTES.toNanos(logInterval);
-            long timeOfLastLogging = System.nanoTime();
+            long timeOfLastLogging = nanoTime();
             while (true)
             {
                 if (closed)
@@ -388,7 +390,7 @@ public class NettyStreamingMessageSender implements StreamingMessageSender
                         return true;
 
                     // log a helpful message to operators in case they are wondering why a given session might not be making progress.
-                    long now = System.nanoTime();
+                    long now = nanoTime();
                     if (now - timeOfLastLogging > logIntervalNanos)
                     {
                         timeOfLastLogging = now;
diff --git a/src/java/org/apache/cassandra/streaming/management/StreamEventJMXNotifier.java b/src/java/org/apache/cassandra/streaming/management/StreamEventJMXNotifier.java
index 3e12c2a..e504dae 100644
--- a/src/java/org/apache/cassandra/streaming/management/StreamEventJMXNotifier.java
+++ b/src/java/org/apache/cassandra/streaming/management/StreamEventJMXNotifier.java
@@ -23,6 +23,8 @@ import javax.management.NotificationBroadcasterSupport;
 
 import org.apache.cassandra.streaming.*;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 /**
  */
 public class StreamEventJMXNotifier extends NotificationBroadcasterSupport implements StreamEventHandler
@@ -53,14 +55,14 @@ public class StreamEventJMXNotifier extends NotificationBroadcasterSupport imple
                 break;
             case FILE_PROGRESS:
                 ProgressInfo progress = ((StreamEvent.ProgressEvent) event).progress;
-                long current = System.currentTimeMillis();
+                long current = currentTimeMillis();
                 if (current - progressLastSent >= PROGRESS_NOTIFICATION_INTERVAL || progress.isCompleted())
                 {
                     notif = new Notification(StreamEvent.ProgressEvent.class.getCanonicalName(),
                                              StreamManagerMBean.OBJECT_NAME,
                                              seq.getAndIncrement());
                     notif.setUserData(ProgressInfoCompositeData.toCompositeData(event.planId, progress));
-                    progressLastSent = System.currentTimeMillis();
+                    progressLastSent = currentTimeMillis();
                 }
                 else
                 {
diff --git a/src/java/org/apache/cassandra/tools/BootstrapMonitor.java b/src/java/org/apache/cassandra/tools/BootstrapMonitor.java
index 9719192..67d2925 100644
--- a/src/java/org/apache/cassandra/tools/BootstrapMonitor.java
+++ b/src/java/org/apache/cassandra/tools/BootstrapMonitor.java
@@ -27,6 +27,8 @@ import org.apache.cassandra.utils.progress.ProgressEvent;
 import org.apache.cassandra.utils.progress.ProgressEventType;
 import org.apache.cassandra.utils.progress.jmx.JMXNotificationProgressListener;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 public class BootstrapMonitor extends JMXNotificationProgressListener
 {
     private final SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss,SSS");
@@ -74,7 +76,7 @@ public class BootstrapMonitor extends JMXNotificationProgressListener
     public void progress(String tag, ProgressEvent event)
     {
         ProgressEventType type = event.getType();
-        String message = String.format("[%s] %s", format.format(System.currentTimeMillis()), event.getMessage());
+        String message = String.format("[%s] %s", format.format(currentTimeMillis()), event.getMessage());
         if (type == ProgressEventType.PROGRESS)
         {
             message = message + " (progress: " + (int)event.getProgressPercentage() + "%)";
diff --git a/src/java/org/apache/cassandra/tools/BulkLoader.java b/src/java/org/apache/cassandra/tools/BulkLoader.java
index c1de1ff..bb29bbe 100644
--- a/src/java/org/apache/cassandra/tools/BulkLoader.java
+++ b/src/java/org/apache/cassandra/tools/BulkLoader.java
@@ -43,6 +43,8 @@ import org.apache.cassandra.utils.JVMStabilityInspector;
 import org.apache.cassandra.utils.NativeSSTableLoaderClient;
 import org.apache.cassandra.utils.OutputHandler;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 public class BulkLoader
 {
     public static void main(String args[]) throws BulkLoadException
@@ -131,7 +133,7 @@ public class BulkLoader
 
         public ProgressIndicator()
         {
-            start = lastTime = System.nanoTime();
+            start = lastTime = nanoTime();
         }
 
         public void onSuccess(StreamState finalState)
@@ -157,7 +159,7 @@ public class BulkLoader
                     progressInfo = ((StreamEvent.ProgressEvent) event).progress;
                 }
 
-                long time = System.nanoTime();
+                long time = nanoTime();
                 long deltaTime = time - lastTime;
 
                 StringBuilder sb = new StringBuilder();
@@ -230,7 +232,7 @@ public class BulkLoader
 
         private void printSummary(int connectionsPerHost)
         {
-            long end = System.nanoTime();
+            long end = nanoTime();
             long durationMS = ((end - start) / (1000000));
 
             StringBuilder sb = new StringBuilder();
diff --git a/src/java/org/apache/cassandra/tools/JsonTransformer.java b/src/java/org/apache/cassandra/tools/JsonTransformer.java
index 341512c..ed0830d 100644
--- a/src/java/org/apache/cassandra/tools/JsonTransformer.java
+++ b/src/java/org/apache/cassandra/tools/JsonTransformer.java
@@ -58,6 +58,8 @@ import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 public final class JsonTransformer
 {
 
@@ -282,7 +284,7 @@ public final class JsonTransformer
                     json.writeFieldName("expires_at");
                     json.writeString(dateString(TimeUnit.SECONDS, liveInfo.localExpirationTime()));
                     json.writeFieldName("expired");
-                    json.writeBoolean(liveInfo.localExpirationTime() < (System.currentTimeMillis() / 1000));
+                    json.writeBoolean(liveInfo.localExpirationTime() < (currentTimeMillis() / 1000));
                 }
                 json.writeEndObject();
                 objectIndenter.setCompact(false);
@@ -497,7 +499,7 @@ public final class JsonTransformer
                 json.writeFieldName("expires_at");
                 json.writeString(dateString(TimeUnit.SECONDS, cell.localDeletionTime()));
                 json.writeFieldName("expired");
-                json.writeBoolean(!cell.isLive((int) (System.currentTimeMillis() / 1000)));
+                json.writeBoolean(!cell.isLive((int) (currentTimeMillis() / 1000)));
             }
             json.writeEndObject();
             objectIndenter.setCompact(false);
diff --git a/src/java/org/apache/cassandra/tools/RepairRunner.java b/src/java/org/apache/cassandra/tools/RepairRunner.java
index 593bc26..e7ac831 100644
--- a/src/java/org/apache/cassandra/tools/RepairRunner.java
+++ b/src/java/org/apache/cassandra/tools/RepairRunner.java
@@ -35,6 +35,8 @@ import org.apache.cassandra.utils.progress.ProgressEvent;
 import org.apache.cassandra.utils.progress.ProgressEventType;
 import org.apache.cassandra.utils.progress.jmx.JMXNotificationProgressListener;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 public class RepairRunner extends JMXNotificationProgressListener
 {
     private final SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss,SSS");
@@ -189,6 +191,6 @@ public class RepairRunner extends JMXNotificationProgressListener
 
     private void printMessage(String message)
     {
-        out.println(String.format("[%s] %s", this.format.format(System.currentTimeMillis()), message));
+        out.println(String.format("[%s] %s", this.format.format(currentTimeMillis()), message));
     }
 }
diff --git a/src/java/org/apache/cassandra/tools/SSTableExpiredBlockers.java b/src/java/org/apache/cassandra/tools/SSTableExpiredBlockers.java
index 56c57d9..62a018c 100644
--- a/src/java/org/apache/cassandra/tools/SSTableExpiredBlockers.java
+++ b/src/java/org/apache/cassandra/tools/SSTableExpiredBlockers.java
@@ -35,6 +35,8 @@ import org.apache.cassandra.io.sstable.Component;
 import org.apache.cassandra.io.sstable.Descriptor;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 /**
  * During compaction we can drop entire sstables if they only contain expired tombstones and if it is guaranteed
  * to not cover anything in other sstables. An expired sstable can be blocked from getting dropped if its newest
@@ -87,7 +89,7 @@ public class SSTableExpiredBlockers
             System.exit(1);
         }
 
-        int gcBefore = (int)(System.currentTimeMillis()/1000) - metadata.params.gcGraceSeconds;
+        int gcBefore = (int)(currentTimeMillis() / 1000) - metadata.params.gcGraceSeconds;
         Multimap<SSTableReader, SSTableReader> blockers = checkForExpiredSSTableBlockers(sstables, gcBefore);
         for (SSTableReader blocker : blockers.keySet())
         {
diff --git a/src/java/org/apache/cassandra/tools/SSTableMetadataViewer.java b/src/java/org/apache/cassandra/tools/SSTableMetadataViewer.java
index a4da97c..8c1f5db 100755
--- a/src/java/org/apache/cassandra/tools/SSTableMetadataViewer.java
+++ b/src/java/org/apache/cassandra/tools/SSTableMetadataViewer.java
@@ -21,6 +21,7 @@ import static org.apache.cassandra.tools.Util.BLUE;
 import static org.apache.cassandra.tools.Util.CYAN;
 import static org.apache.cassandra.tools.Util.RESET;
 import static org.apache.cassandra.tools.Util.WHITE;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
 import static org.apache.commons.lang3.time.DurationFormatUtils.formatDurationWords;
 
 import java.io.DataInputStream;
@@ -232,9 +233,9 @@ public class SSTableMetadataViewer
                                     cellCount++;
                                     double percentComplete = Math.min(1.0, cellCount / totalCells);
                                     if (lastPercent != (int) (percentComplete * 100) &&
-                                        (System.currentTimeMillis() - lastPercentTime) > 1000)
+                                        (currentTimeMillis() - lastPercentTime) > 1000)
                                     {
-                                        lastPercentTime = System.currentTimeMillis();
+                                        lastPercentTime = currentTimeMillis();
                                         lastPercent = (int) (percentComplete * 100);
                                         if (color)
                                             out.printf("\r%sAnalyzing SSTable...  %s%s %s(%%%s)", BLUE, CYAN,
@@ -371,7 +372,7 @@ public class SSTableMetadataViewer
                 field("maxClusteringValues", Arrays.toString(maxValues));
             }
             field("Estimated droppable tombstones",
-                  stats.getEstimatedDroppableTombstoneRatio((int) (System.currentTimeMillis() / 1000) - this.gc));
+                  stats.getEstimatedDroppableTombstoneRatio((int) (currentTimeMillis() / 1000) - this.gc));
             field("SSTable Level", stats.sstableLevel);
             field("Repaired at", stats.repairedAt, toDateString(stats.repairedAt, TimeUnit.MILLISECONDS));
             field("Pending repair", stats.pendingRepair);
diff --git a/src/java/org/apache/cassandra/tools/StandaloneScrubber.java b/src/java/org/apache/cassandra/tools/StandaloneScrubber.java
index bd71c64..4dfa4ab 100644
--- a/src/java/org/apache/cassandra/tools/StandaloneScrubber.java
+++ b/src/java/org/apache/cassandra/tools/StandaloneScrubber.java
@@ -55,6 +55,8 @@ import org.apache.cassandra.utils.JVMStabilityInspector;
 import org.apache.cassandra.utils.OutputHandler;
 import org.apache.cassandra.utils.Pair;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 public class StandaloneScrubber
 {
     public static final String REINSERT_OVERFLOWED_TTL_OPTION_DESCRIPTION = "Rewrites rows with overflowed expiration date affected by CASSANDRA-14092 with " +
@@ -108,7 +110,7 @@ public class StandaloneScrubber
                                                                   options.keyspaceName,
                                                                   options.cfName));
 
-            String snapshotName = "pre-scrub-" + System.currentTimeMillis();
+            String snapshotName = "pre-scrub-" + currentTimeMillis();
 
             OutputHandler handler = new OutputHandler.SystemOutput(options.verbose, options.debug);
             Directories.SSTableLister lister = cfs.getDirectories().sstableLister(Directories.OnTxnErr.THROW).skipTemporary(true);
diff --git a/src/java/org/apache/cassandra/tools/StandaloneSplitter.java b/src/java/org/apache/cassandra/tools/StandaloneSplitter.java
index e15e5bc..e3c80f1 100644
--- a/src/java/org/apache/cassandra/tools/StandaloneSplitter.java
+++ b/src/java/org/apache/cassandra/tools/StandaloneSplitter.java
@@ -38,6 +38,7 @@ import org.apache.cassandra.io.sstable.*;
 import org.apache.cassandra.utils.JVMStabilityInspector;
 
 import static org.apache.cassandra.tools.BulkLoader.CmdLineOptions;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
 
 public class StandaloneSplitter
 {
@@ -115,7 +116,7 @@ public class StandaloneSplitter
             // Do not load sstables since they might be broken
             Keyspace keyspace = Keyspace.openWithoutSSTables(ksName);
             ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfName);
-            String snapshotName = "pre-split-" + System.currentTimeMillis();
+            String snapshotName = "pre-split-" + currentTimeMillis();
 
             List<SSTableReader> sstables = new ArrayList<>();
             for (Map.Entry<Descriptor, Set<Component>> fn : parsedFilenames.entrySet())
diff --git a/src/java/org/apache/cassandra/tools/nodetool/Snapshot.java b/src/java/org/apache/cassandra/tools/nodetool/Snapshot.java
index f57708a..ef308f7 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/Snapshot.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/Snapshot.java
@@ -18,6 +18,7 @@
 package org.apache.cassandra.tools.nodetool;
 
 import static com.google.common.collect.Iterables.toArray;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
 import static org.apache.commons.lang3.StringUtils.join;
 import io.airlift.airline.Arguments;
 import io.airlift.airline.Command;
@@ -44,7 +45,7 @@ public class Snapshot extends NodeToolCmd
     private String table = null;
 
     @Option(title = "tag", name = {"-t", "--tag"}, description = "The name of the snapshot")
-    private String snapshotName = Long.toString(System.currentTimeMillis());
+    private String snapshotName = Long.toString(currentTimeMillis());
 
     @Option(title = "ktlist", name = { "-kt", "--kt-list", "-kc", "--kc.list" }, description = "The list of Keyspace.table to take snapshot.(you must not specify only keyspace)")
     private String ktList = null;
diff --git a/src/java/org/apache/cassandra/tracing/TraceStateImpl.java b/src/java/org/apache/cassandra/tracing/TraceStateImpl.java
index 48f193c..d9077e2 100644
--- a/src/java/org/apache/cassandra/tracing/TraceStateImpl.java
+++ b/src/java/org/apache/cassandra/tracing/TraceStateImpl.java
@@ -39,6 +39,10 @@ import org.apache.cassandra.service.StorageProxy;
 import org.apache.cassandra.utils.JVMStabilityInspector;
 import org.apache.cassandra.utils.WrappedRunnable;
 
+import static java.util.Collections.singletonList;
+import static org.apache.cassandra.db.ConsistencyLevel.ANY;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 /**
  * ThreadLocal state for a tracing session. The presence of an instance of this class as a ThreadLocal denotes that an
  * operation is being traced.
@@ -118,7 +122,7 @@ public class TraceStateImpl extends TraceState
     {
         try
         {
-            StorageProxy.mutate(Collections.singletonList(mutation), ConsistencyLevel.ANY, System.nanoTime());
+            StorageProxy.mutate(singletonList(mutation), ANY, nanoTime());
         }
         catch (OverloadedException e)
         {
diff --git a/src/java/org/apache/cassandra/tracing/TracingImpl.java b/src/java/org/apache/cassandra/tracing/TracingImpl.java
index c786fa2..9192aca 100644
--- a/src/java/org/apache/cassandra/tracing/TracingImpl.java
+++ b/src/java/org/apache/cassandra/tracing/TracingImpl.java
@@ -28,6 +28,8 @@ import org.apache.cassandra.concurrent.Stage;
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.utils.WrappedRunnable;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 
 /**
  * A trace session context. Able to track and store trace sessions. A session is usually a user initiated query, and may
@@ -55,7 +57,7 @@ class TracingImpl extends Tracing
         final TraceStateImpl state = getStateImpl();
         assert state != null;
 
-        final long startedAt = System.currentTimeMillis();
+        final long startedAt = currentTimeMillis();
         final ByteBuffer sessionId = state.sessionIdBytes;
         final String command = state.traceType.toString();
         final int ttl = state.ttl;
diff --git a/src/java/org/apache/cassandra/transport/Dispatcher.java b/src/java/org/apache/cassandra/transport/Dispatcher.java
index 29d4748..1b141cd 100644
--- a/src/java/org/apache/cassandra/transport/Dispatcher.java
+++ b/src/java/org/apache/cassandra/transport/Dispatcher.java
@@ -44,6 +44,7 @@ import org.apache.cassandra.utils.JVMStabilityInspector;
 import org.apache.cassandra.utils.NoSpamLogger;
 
 import static org.apache.cassandra.concurrent.SharedExecutorPool.SHARED;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 public class Dispatcher
 {
@@ -85,7 +86,7 @@ public class Dispatcher
      */
     static Message.Response processRequest(ServerConnection connection, Message.Request request, Overload backpressure)
     {
-        long queryStartNanoTime = System.nanoTime();
+        long queryStartNanoTime = nanoTime();
         if (connection.getVersion().isGreaterOrEqualTo(ProtocolVersion.V4))
             ClientWarn.instance.captureWarnings();
 
diff --git a/src/java/org/apache/cassandra/transport/ProtocolVersionTracker.java b/src/java/org/apache/cassandra/transport/ProtocolVersionTracker.java
index f289377..3d6e900 100644
--- a/src/java/org/apache/cassandra/transport/ProtocolVersionTracker.java
+++ b/src/java/org/apache/cassandra/transport/ProtocolVersionTracker.java
@@ -26,6 +26,8 @@ import com.github.benmanes.caffeine.cache.Cache;
 import com.github.benmanes.caffeine.cache.Caffeine;
 import com.github.benmanes.caffeine.cache.LoadingCache;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 /**
  * This class tracks the last 100 connections per protocol version
  */
@@ -47,13 +49,13 @@ public class ProtocolVersionTracker
         for (ProtocolVersion version : ProtocolVersion.values())
         {
             clientsByProtocolVersion.put(version, Caffeine.newBuilder().maximumSize(capacity)
-                                                          .build(key -> System.currentTimeMillis()));
+                                                          .build(key -> currentTimeMillis()));
         }
     }
 
     void addConnection(InetAddress addr, ProtocolVersion version)
     {
-        clientsByProtocolVersion.get(version).put(addr, System.currentTimeMillis());
+        clientsByProtocolVersion.get(version).put(addr, currentTimeMillis());
     }
 
     List<ClientStat> getAll()
diff --git a/src/java/org/apache/cassandra/transport/SimpleClient.java b/src/java/org/apache/cassandra/transport/SimpleClient.java
index 87e54a1..7e8e3e2 100644
--- a/src/java/org/apache/cassandra/transport/SimpleClient.java
+++ b/src/java/org/apache/cassandra/transport/SimpleClient.java
@@ -54,6 +54,8 @@ import static org.apache.cassandra.transport.CQLMessageHandler.envelopeSize;
 import static org.apache.cassandra.transport.Flusher.MAX_FRAMED_PAYLOAD_SIZE;
 import static org.apache.cassandra.utils.concurrent.NonBlockingRateLimiter.NO_OP_LIMITER;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 public class SimpleClient implements Closeable
 {
 
@@ -316,10 +318,10 @@ public class SimpleClient implements Closeable
                 }
                 lastWriteFuture = channel.writeAndFlush(requests);
 
-                long deadline = System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(TIMEOUT_SECONDS);
+                long deadline = currentTimeMillis() + TimeUnit.SECONDS.toMillis(TIMEOUT_SECONDS);
                 for (int i = 0; i < requests.size(); i++)
                 {
-                    Message.Response msg = responseHandler.responses.poll(deadline - System.currentTimeMillis(), TimeUnit.MILLISECONDS);
+                    Message.Response msg = responseHandler.responses.poll(deadline - currentTimeMillis(), TimeUnit.MILLISECONDS);
                     if (msg == null)
                         throw new RuntimeException("timeout");
                     if (msg instanceof ErrorMessage)
diff --git a/src/java/org/apache/cassandra/transport/messages/BatchMessage.java b/src/java/org/apache/cassandra/transport/messages/BatchMessage.java
index 59b5b53..3a5cffa 100644
--- a/src/java/org/apache/cassandra/transport/messages/BatchMessage.java
+++ b/src/java/org/apache/cassandra/transport/messages/BatchMessage.java
@@ -46,6 +46,8 @@ import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.utils.JVMStabilityInspector;
 import org.apache.cassandra.utils.MD5Digest;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 public class BatchMessage extends Message.Request
 {
     public static final Message.Codec<BatchMessage> codec = new Message.Codec<BatchMessage>()
@@ -223,7 +225,7 @@ public class BatchMessage extends Message.Request
             // (and no value would be really correct, so we prefer passing a clearly wrong one).
             BatchStatement batch = new BatchStatement(batchType, VariableSpecifications.empty(), statements, Attributes.none());
 
-            long queryTime = System.currentTimeMillis();
+            long queryTime = currentTimeMillis();
             Message.Response response = handler.processBatch(batch, state, batchOptions, getCustomPayload(), queryStartNanoTime);
             if (queries != null)
                 QueryEvents.instance.notifyBatchSuccess(batchType, statements, queries, values, options, state, queryTime, response);
diff --git a/src/java/org/apache/cassandra/transport/messages/ExecuteMessage.java b/src/java/org/apache/cassandra/transport/messages/ExecuteMessage.java
index 05186fb..9c9fe2a 100644
--- a/src/java/org/apache/cassandra/transport/messages/ExecuteMessage.java
+++ b/src/java/org/apache/cassandra/transport/messages/ExecuteMessage.java
@@ -37,6 +37,8 @@ import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.utils.JVMStabilityInspector;
 import org.apache.cassandra.utils.MD5Digest;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 public class ExecuteMessage extends Message.Request
 {
     public static final Message.Codec<ExecuteMessage> codec = new Message.Codec<ExecuteMessage>()
@@ -139,7 +141,7 @@ public class ExecuteMessage extends Message.Request
             // by wrapping the QueryOptions.
             QueryOptions queryOptions = QueryOptions.addColumnSpecifications(options, prepared.statement.getBindVariables());
 
-            long requestStartTime = System.currentTimeMillis();
+            long requestStartTime = currentTimeMillis();
 
             Message.Response response = handler.processPrepared(statement, state, queryOptions, getCustomPayload(), queryStartNanoTime);
 
diff --git a/src/java/org/apache/cassandra/transport/messages/PrepareMessage.java b/src/java/org/apache/cassandra/transport/messages/PrepareMessage.java
index fa77c68..5bf058c 100644
--- a/src/java/org/apache/cassandra/transport/messages/PrepareMessage.java
+++ b/src/java/org/apache/cassandra/transport/messages/PrepareMessage.java
@@ -32,6 +32,8 @@ import org.apache.cassandra.transport.Message;
 import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.utils.JVMStabilityInspector;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 public class PrepareMessage extends Message.Request
 {
     public static final Message.Codec<PrepareMessage> codec = new Message.Codec<PrepareMessage>()
@@ -110,7 +112,7 @@ public class PrepareMessage extends Message.Request
 
             ClientState clientState = state.getClientState().cloneWithKeyspaceIfSet(keyspace);
             QueryHandler queryHandler = ClientState.getCQLQueryHandler();
-            long queryTime = System.currentTimeMillis();
+            long queryTime = currentTimeMillis();
             ResultMessage.Prepared response = queryHandler.prepare(query, clientState, getCustomPayload());
             QueryEvents.instance.notifyPrepareSuccess(() -> queryHandler.getPrepared(response.statementId), query, state, queryTime, response);
             return response;
diff --git a/src/java/org/apache/cassandra/transport/messages/QueryMessage.java b/src/java/org/apache/cassandra/transport/messages/QueryMessage.java
index 4d5d1e1..9a296e4 100644
--- a/src/java/org/apache/cassandra/transport/messages/QueryMessage.java
+++ b/src/java/org/apache/cassandra/transport/messages/QueryMessage.java
@@ -35,6 +35,8 @@ import org.apache.cassandra.transport.ProtocolException;
 import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.utils.JVMStabilityInspector;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 /**
  * A CQL query
  */
@@ -107,7 +109,7 @@ public class QueryMessage extends Message.Request
             if (traceRequest)
                 traceQuery(state);
 
-            long queryStartTime = System.currentTimeMillis();
+            long queryStartTime = currentTimeMillis();
 
             QueryHandler queryHandler = ClientState.getCQLQueryHandler();
             statement = queryHandler.parse(query, state, options);
diff --git a/src/java/org/apache/cassandra/utils/ApproximateTime.java b/src/java/org/apache/cassandra/utils/ApproximateTime.java
deleted file mode 100644
index 32b6e44..0000000
--- a/src/java/org/apache/cassandra/utils/ApproximateTime.java
+++ /dev/null
@@ -1,192 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.utils;
-
-import java.util.concurrent.CancellationException;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.cassandra.concurrent.ScheduledExecutors;
-import org.apache.cassandra.config.Config;
-
-import static java.util.concurrent.TimeUnit.MILLISECONDS;
-import static org.apache.cassandra.utils.ApproximateTime.Measurement.ALMOST_NOW;
-import static org.apache.cassandra.utils.ApproximateTime.Measurement.ALMOST_SAME_TIME;
-
-/**
- * This class provides approximate time utilities:
- *   - An imprecise nanoTime (monotonic) and currentTimeMillis (non-monotonic), that are faster than their regular counterparts
- *     They have a configured approximate precision (default of 10ms), which is the cadence they will be updated if the system is healthy
- *   - A mechanism for converting between nanoTime and currentTimeMillis measurements.
- *     These conversions may have drifted, and they offer no absolute guarantees on precision
- */
-public class ApproximateTime
-{
-    private static final Logger logger = LoggerFactory.getLogger(ApproximateTime.class);
-    private static final int ALMOST_NOW_UPDATE_INTERVAL_MS = Math.max(1, Integer.parseInt(System.getProperty(Config.PROPERTY_PREFIX + "approximate_time_precision_ms", "2")));
-    private static final String CONVERSION_UPDATE_INTERVAL_PROPERTY = Config.PROPERTY_PREFIX + "NANOTIMETOMILLIS_TIMESTAMP_UPDATE_INTERVAL";
-    private static final long ALMOST_SAME_TIME_UPDATE_INTERVAL_MS = Long.getLong(CONVERSION_UPDATE_INTERVAL_PROPERTY, 10000);
-
-    public static class AlmostSameTime
-    {
-        final long millis;
-        final long nanos;
-        final long error; // maximum error of millis measurement (in nanos)
-
-        private AlmostSameTime(long millis, long nanos, long error)
-        {
-            this.millis = millis;
-            this.nanos = nanos;
-            this.error = error;
-        }
-
-        public long toCurrentTimeMillis(long nanoTime)
-        {
-            return millis + TimeUnit.NANOSECONDS.toMillis(nanoTime - nanos);
-        }
-
-        public long toNanoTime(long currentTimeMillis)
-        {
-            return nanos + MILLISECONDS.toNanos(currentTimeMillis - millis);
-        }
-    }
-
-    public enum Measurement { ALMOST_NOW, ALMOST_SAME_TIME }
-
-    private static volatile Future<?> almostNowUpdater;
-    private static volatile Future<?> almostSameTimeUpdater;
-
-    private static volatile long almostNowMillis;
-    private static volatile long almostNowNanos;
-
-    private static volatile AlmostSameTime almostSameTime = new AlmostSameTime(0L, 0L, Long.MAX_VALUE);
-    private static double failedAlmostSameTimeUpdateModifier = 1.0;
-
-    private static final Runnable refreshAlmostNow = () -> {
-        almostNowMillis = System.currentTimeMillis();
-        almostNowNanos = System.nanoTime();
-    };
-
-    private static final Runnable refreshAlmostSameTime = () -> {
-        final int tries = 3;
-        long[] samples = new long[2 * tries + 1];
-        samples[0] = System.nanoTime();
-        for (int i = 1 ; i < samples.length ; i += 2)
-        {
-            samples[i] = System.currentTimeMillis();
-            samples[i + 1] = System.nanoTime();
-        }
-
-        int best = 1;
-        // take sample with minimum delta between calls
-        for (int i = 3 ; i < samples.length - 1 ; i += 2)
-        {
-            if ((samples[i+1] - samples[i-1]) < (samples[best+1]-samples[best-1]))
-                best = i;
-        }
-
-        long millis = samples[best];
-        long nanos = (samples[best+1] / 2) + (samples[best-1] / 2);
-        long error = (samples[best+1] / 2) - (samples[best-1] / 2);
-
-        AlmostSameTime prev = almostSameTime;
-        AlmostSameTime next = new AlmostSameTime(millis, nanos, error);
-
-        if (next.error > prev.error && next.error > prev.error * failedAlmostSameTimeUpdateModifier)
-        {
-            failedAlmostSameTimeUpdateModifier *= 1.1;
-            return;
-        }
-
-        failedAlmostSameTimeUpdateModifier = 1.0;
-        almostSameTime = next;
-    };
-
-    static
-    {
-        start(ALMOST_NOW);
-        start(ALMOST_SAME_TIME);
-    }
-
-    public static synchronized void stop(Measurement measurement)
-    {
-        switch (measurement)
-        {
-            case ALMOST_NOW:
-                almostNowUpdater.cancel(true);
-                try { almostNowUpdater.get(); } catch (Throwable t) { }
-                almostNowUpdater = null;
-                break;
-            case ALMOST_SAME_TIME:
-                almostSameTimeUpdater.cancel(true);
-                try { almostSameTimeUpdater.get(); } catch (Throwable t) { }
-                almostSameTimeUpdater = null;
-                break;
-        }
-    }
-
-    public static synchronized void start(Measurement measurement)
-    {
-        switch (measurement)
-        {
-            case ALMOST_NOW:
-                if (almostNowUpdater != null)
-                    throw new IllegalStateException("Already running");
-                refreshAlmostNow.run();
-                logger.info("Scheduling approximate time-check task with a precision of {} milliseconds", ALMOST_NOW_UPDATE_INTERVAL_MS);
-                almostNowUpdater = ScheduledExecutors.scheduledFastTasks.scheduleWithFixedDelay(refreshAlmostNow, ALMOST_NOW_UPDATE_INTERVAL_MS, ALMOST_NOW_UPDATE_INTERVAL_MS, MILLISECONDS);
-                break;
-            case ALMOST_SAME_TIME:
-                if (almostSameTimeUpdater != null)
-                    throw new IllegalStateException("Already running");
-                refreshAlmostSameTime.run();
-                logger.info("Scheduling approximate time conversion task with an interval of {} milliseconds", ALMOST_SAME_TIME_UPDATE_INTERVAL_MS);
-                almostSameTimeUpdater = ScheduledExecutors.scheduledFastTasks.scheduleWithFixedDelay(refreshAlmostSameTime, ALMOST_SAME_TIME_UPDATE_INTERVAL_MS, ALMOST_SAME_TIME_UPDATE_INTERVAL_MS, MILLISECONDS);
-                break;
-        }
-    }
-
-
-    /**
-     * Request an immediate refresh; this shouldn't generally be invoked, except perhaps by tests
-     */
-    @VisibleForTesting
-    public static synchronized void refresh(Measurement measurement)
-    {
-        stop(measurement);
-        start(measurement);
-    }
-
-    /** no guarantees about relationship to nanoTime; non-monotonic (tracks currentTimeMillis as closely as possible) */
-    public static long currentTimeMillis()
-    {
-        return almostNowMillis;
-    }
-
-    /** no guarantees about relationship to currentTimeMillis; monotonic */
-    public static long nanoTime()
-    {
-        return almostNowNanos;
-    }
-}
diff --git a/src/java/org/apache/cassandra/utils/Clock.java b/src/java/org/apache/cassandra/utils/Clock.java
new file mode 100644
index 0000000..9dd6dd4
--- /dev/null
+++ b/src/java/org/apache/cassandra/utils/Clock.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.utils;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Wrapper around time related functions that are either implemented by using the default JVM calls
+ * or by using a custom implementation for testing purposes.
+ *
+ * See {@link Global#instance} for how to use a custom implementation.
+ *
+ * Please note that {@link java.time.Clock} wasn't used, as it would not be possible to provide an
+ * implementation for {@link #nanoTime()} with the exact same properties of {@link System#nanoTime()}.
+ */
+public interface Clock
+{
+    public static class Global
+    {
+        private static final Logger logger = LoggerFactory.getLogger(Clock.class);
+
+        /**
+         * Static singleton object that will be instantiated by default with a system clock
+         * implementation. Set <code>cassandra.clock</code> system property to a FQCN to use a
+         * different implementation instead.
+         */
+        private static final Clock instance;
+
+        static
+        {
+            String classname = System.getProperty("cassandra.clock");
+            Clock clock = new Default();
+            if (classname != null)
+            {
+                try
+                {
+                    logger.debug("Using custom clock implementation: {}", classname);
+                    clock = (Clock) Class.forName(classname).newInstance();
+                }
+                catch (Exception e)
+                {
+                    logger.error("Failed to load clock implementation {}", classname, e);
+                }
+            }
+            instance = clock;
+        }
+
+        /**
+         * Semantically equivalent to {@link System#nanoTime()}
+         */
+        public static long nanoTime()
+        {
+            return instance.nanoTime();
+        }
+
+        /**
+         * Semantically equivalent to {@link System#currentTimeMillis()}
+         */
+        public static long currentTimeMillis()
+        {
+            return instance.currentTimeMillis();
+        }
+    }
+
+    public static class Default implements Clock
+    {
+        /**
+         * {@link System#nanoTime()}
+         */
+        public long nanoTime()
+        {
+            return System.nanoTime();
+        }
+
+        /**
+         * {@link System#currentTimeMillis()}
+         */
+        public long currentTimeMillis()
+        {
+            return System.currentTimeMillis();
+        }
+    }
+
+    /**
+     * Semantically equivalent to {@link System#nanoTime()}
+     */
+    public long nanoTime();
+
+    /**
+     * Semantically equivalent to {@link System#currentTimeMillis()}
+     */
+    public long currentTimeMillis();
+
+}
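
The Clock interface above is the hook that makes time mockable: Clock.Global reads the cassandra.clock system property, reflectively instantiates the named class, and otherwise falls back to Clock.Default. A minimal sketch of a manually-advanced test clock that could be installed this way (the package and class names are hypothetical, not part of the patch):

    package org.example;

    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicLong;

    import org.apache.cassandra.utils.Clock;

    public class ManualClock implements Clock
    {
        private final long baseMillis = System.currentTimeMillis();
        private final long baseNanos = System.nanoTime();
        private final AtomicLong elapsedNanos = new AtomicLong();

        // Clock.Global loads the configured class via Class.forName(...).newInstance(),
        // so a public no-arg constructor is required.
        public ManualClock() {}

        @Override
        public long nanoTime()
        {
            return baseNanos + elapsedNanos.get();
        }

        @Override
        public long currentTimeMillis()
        {
            return baseMillis + TimeUnit.NANOSECONDS.toMillis(elapsedNanos.get());
        }

        // Test hook: advance both time views deterministically.
        public void advance(long duration, TimeUnit unit)
        {
            elapsedNanos.addAndGet(unit.toNanos(duration));
        }
    }

Such a clock would be selected with -Dcassandra.clock=org.example.ManualClock on the JVM under test.
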
diff --git a/src/java/org/apache/cassandra/utils/DiagnosticSnapshotService.java b/src/java/org/apache/cassandra/utils/DiagnosticSnapshotService.java
index d1f33ed..0c80d06 100644
--- a/src/java/org/apache/cassandra/utils/DiagnosticSnapshotService.java
+++ b/src/java/org/apache/cassandra/utils/DiagnosticSnapshotService.java
@@ -37,7 +37,8 @@ import org.apache.cassandra.net.MessagingService;
 import org.apache.cassandra.net.Verb;
 import org.apache.cassandra.schema.TableId;
 import org.apache.cassandra.schema.TableMetadata;
-import org.hsqldb.Table;
+
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 /**
  * Provides a means to take snapshots when triggered by anomalous events or when the breaking of invariants is
@@ -119,7 +120,7 @@ public class DiagnosticSnapshotService
 
     private void maybeTriggerSnapshot(TableMetadata metadata, String prefix, Iterable<InetAddressAndPort> endpoints)
     {
-        long now = System.nanoTime();
+        long now = nanoTime();
         AtomicLong cached = lastSnapshotTimes.computeIfAbsent(metadata.id, u -> new AtomicLong(0));
         long last = cached.get();
         long interval = Long.getLong("cassandra.diagnostic_snapshot_interval_nanos", SNAPSHOT_INTERVAL_NANOS);
diff --git a/src/java/org/apache/cassandra/utils/ExecutorUtils.java b/src/java/org/apache/cassandra/utils/ExecutorUtils.java
index 21933a3..9ef1df8 100644
--- a/src/java/org/apache/cassandra/utils/ExecutorUtils.java
+++ b/src/java/org/apache/cassandra/utils/ExecutorUtils.java
@@ -27,6 +27,7 @@ import java.util.concurrent.TimeoutException;
 import org.apache.cassandra.concurrent.InfiniteLoopExecutor;
 
 import static java.util.concurrent.TimeUnit.NANOSECONDS;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 public class ExecutorUtils
 {
@@ -92,7 +93,7 @@ public class ExecutorUtils
 
     public static void awaitTermination(long timeout, TimeUnit unit, Collection<?> executors) throws InterruptedException, TimeoutException
     {
-        long deadline = System.nanoTime() + unit.toNanos(timeout);
+        long deadline = nanoTime() + unit.toNanos(timeout);
         awaitTerminationUntil(deadline, executors);
     }
 
@@ -100,7 +101,7 @@ public class ExecutorUtils
     {
         for (Object executor : executors)
         {
-            long wait = deadline - System.nanoTime();
+            long wait = deadline - nanoTime();
             if (executor instanceof ExecutorService)
             {
                 if (wait <= 0 || !((ExecutorService)executor).awaitTermination(wait, NANOSECONDS))
diff --git a/src/java/org/apache/cassandra/utils/ExpiringMemoizingSupplier.java b/src/java/org/apache/cassandra/utils/ExpiringMemoizingSupplier.java
index 1736ae2..2cba64d 100644
--- a/src/java/org/apache/cassandra/utils/ExpiringMemoizingSupplier.java
+++ b/src/java/org/apache/cassandra/utils/ExpiringMemoizingSupplier.java
@@ -25,6 +25,8 @@ import java.util.function.Supplier;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 /**
  * An implementation similar to Guava's Suppliers.memoizeWithExpiration(Supplier)
  * but allowing for memoization to be skipped.
@@ -59,7 +61,7 @@ public class ExpiringMemoizingSupplier<T> implements Supplier<T>
         // the extra memory consumption and indirection are more
         // expensive than the extra volatile reads.
         long nanos = this.expirationNanos;
-        long now = System.nanoTime();
+        long now = nanoTime();
         if (nanos == 0L || now - nanos >= 0L) {
             synchronized(this) {
                 if (nanos == this.expirationNanos) {
diff --git a/src/java/org/apache/cassandra/utils/FBUtilities.java b/src/java/org/apache/cassandra/utils/FBUtilities.java
index c2a3c83..bb417a5 100644
--- a/src/java/org/apache/cassandra/utils/FBUtilities.java
+++ b/src/java/org/apache/cassandra/utils/FBUtilities.java
@@ -69,6 +69,8 @@ import org.apache.cassandra.locator.InetAddressAndPort;
 
 import static org.apache.cassandra.config.CassandraRelevantProperties.LINE_SEPARATOR;
 import static org.apache.cassandra.config.CassandraRelevantProperties.USER_HOME;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 
 public class FBUtilities
@@ -426,12 +428,12 @@ public class FBUtilities
     {
         // we use microsecond resolution for compatibility with other client libraries, even though
         // we can't actually get microsecond precision.
-        return System.currentTimeMillis() * 1000;
+        return currentTimeMillis() * 1000;
     }
 
     public static int nowInSeconds()
     {
-        return (int) (System.currentTimeMillis() / 1000);
+        return (int) (currentTimeMillis() / 1000);
     }
 
     public static <T> List<T> waitOnFutures(Iterable<? extends Future<? extends T>> futures)
@@ -451,7 +453,7 @@ public class FBUtilities
     {
         long endNanos = 0;
         if (timeout > 0)
-            endNanos = System.nanoTime() + units.toNanos(timeout);
+            endNanos = nanoTime() + units.toNanos(timeout);
         List<T> results = new ArrayList<>();
         Throwable fail = null;
         for (Future<? extends T> f : futures)
@@ -464,7 +466,7 @@ public class FBUtilities
                 }
                 else
                 {
-                    long waitFor = Math.max(1, endNanos - System.nanoTime());
+                    long waitFor = Math.max(1, endNanos - nanoTime());
                     results.add(f.get(waitFor, TimeUnit.NANOSECONDS));
                 }
             }
@@ -556,10 +558,10 @@ public class FBUtilities
             public List get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException
             {
                 List result = new ArrayList<>(futures.size());
-                long deadline = System.nanoTime() + TimeUnit.NANOSECONDS.convert(timeout, unit);
+                long deadline = nanoTime() + TimeUnit.NANOSECONDS.convert(timeout, unit);
                 for (Future current : futures)
                 {
-                    long remaining = deadline - System.nanoTime();
+                    long remaining = deadline - nanoTime();
                     if (remaining <= 0)
                         throw new TimeoutException();
 
diff --git a/src/java/org/apache/cassandra/utils/GuidGenerator.java b/src/java/org/apache/cassandra/utils/GuidGenerator.java
index aa3ee5b..46843b4 100644
--- a/src/java/org/apache/cassandra/utils/GuidGenerator.java
+++ b/src/java/org/apache/cassandra/utils/GuidGenerator.java
@@ -22,6 +22,7 @@ import java.security.SecureRandom;
 import java.util.Random;
 
 import static org.apache.cassandra.config.CassandraRelevantProperties.JAVA_SECURITY_EGD;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
 
 public class GuidGenerator
 {
@@ -90,7 +91,7 @@ public class GuidGenerator
 
     public static ByteBuffer guidAsBytes()
     {
-        return guidAsBytes(myRand, s_id, System.currentTimeMillis());
+        return guidAsBytes(myRand, s_id, currentTimeMillis());
     }
 
     /*
diff --git a/src/java/org/apache/cassandra/utils/MonotonicClock.java b/src/java/org/apache/cassandra/utils/MonotonicClock.java
index 5a1aa3c..93d5ac5 100644
--- a/src/java/org/apache/cassandra/utils/MonotonicClock.java
+++ b/src/java/org/apache/cassandra/utils/MonotonicClock.java
@@ -29,6 +29,7 @@ import org.apache.cassandra.concurrent.ScheduledExecutors;
 import org.apache.cassandra.config.Config;
 
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 /**
  * Wrapper around time related functions that are either implemented by using the default JVM calls
@@ -38,6 +39,8 @@ import static java.util.concurrent.TimeUnit.MILLISECONDS;
  *
  * Please note that {@link java.time.Clock} wasn't used, as it would not be possible to provide an
  * implementation for {@link #now()} with the exact same properties of {@link System#nanoTime()}.
+ *
+ * TODO better rationalise MonotonicClock/Clock
  */
 public interface MonotonicClock
 {
@@ -75,9 +78,7 @@ public interface MonotonicClock
 
         private static MonotonicClock precise()
         {
-            String sclock = System.getProperty("cassandra.clock");
-            if (sclock == null)
-                sclock = System.getProperty("cassandra.monotonic_clock.precise");
+            String sclock = System.getProperty("cassandra.monotonic_clock.precise");
 
             if (sclock != null)
             {
@@ -204,7 +205,7 @@ public interface MonotonicClock
         {
             final int tries = 3;
             long[] samples = new long[2 * tries + 1];
-            samples[0] = System.nanoTime();
+            samples[0] = nanoTime();
             for (int i = 1 ; i < samples.length ; i += 2)
             {
                 samples[i] = millisSinceEpoch.getAsLong();
@@ -241,13 +242,13 @@ public interface MonotonicClock
     {
         private SystemClock()
         {
-            super(System::currentTimeMillis);
+            super(Clock.Global::currentTimeMillis);
         }
 
         @Override
         public long now()
         {
-            return System.nanoTime();
+            return nanoTime();
         }
 
         @Override
diff --git a/src/java/org/apache/cassandra/utils/NoSpamLogger.java b/src/java/org/apache/cassandra/utils/NoSpamLogger.java
index ac9168a..5a05a47 100644
--- a/src/java/org/apache/cassandra/utils/NoSpamLogger.java
+++ b/src/java/org/apache/cassandra/utils/NoSpamLogger.java
@@ -25,6 +25,8 @@ import org.slf4j.Logger;
 
 import com.google.common.annotations.VisibleForTesting;
 
+import static org.apache.cassandra.utils.Clock.Global;
+
 /**
  * Logging that limits each log statement to firing based on time since the statement last fired.
  *
@@ -58,7 +60,7 @@ public class NoSpamLogger
     {
         public long nanoTime()
         {
-            return System.nanoTime();
+            return Global.nanoTime();
         }
     };
 
diff --git a/src/java/org/apache/cassandra/utils/SlidingTimeRate.java b/src/java/org/apache/cassandra/utils/SlidingTimeRate.java
deleted file mode 100644
index 0e00054..0000000
--- a/src/java/org/apache/cassandra/utils/SlidingTimeRate.java
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.cassandra.utils;
-
-import java.util.concurrent.ConcurrentNavigableMap;
-import java.util.concurrent.ConcurrentSkipListMap;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-
-/**
- * Concurrent rate computation over a sliding time window.
- *
- * Currently not used in the Cassandra 4.0 code base. If you decide to use it, please check CASSANDRA-16713.
- * There still might be a bug, flaky test to be fixed before using it again.
- */
-public class SlidingTimeRate
-{
-    private final ConcurrentSkipListMap<Long, AtomicInteger> counters = new ConcurrentSkipListMap<>();
-    private final AtomicLong lastCounterTimestamp = new AtomicLong(0);
-    private final ReadWriteLock pruneLock = new ReentrantReadWriteLock();
-    private final long sizeInMillis;
-    private final long precisionInMillis;
-    private final TimeSource timeSource;
-
-    /**
-     * Creates a sliding rate whose time window is of the given size, with the given precision and time unit.
-     * <p>
-     * The precision defines how accurate the rate computation is, as it will be computed over window size +/-
-     * precision.
-     * </p>
-     */
-    public SlidingTimeRate(TimeSource timeSource, long size, long precision, TimeUnit unit)
-    {
-        Preconditions.checkArgument(size > precision, "Size should be greater than precision.");
-        Preconditions.checkArgument(TimeUnit.MILLISECONDS.convert(precision, unit) >= 1, "Precision must be greater than or equal to 1 millisecond.");
-        this.sizeInMillis = TimeUnit.MILLISECONDS.convert(size, unit);
-        this.precisionInMillis = TimeUnit.MILLISECONDS.convert(precision, unit);
-        this.timeSource = timeSource;
-    }
-
-    /**
-     * Updates the rate.
-     */
-    public void update(int delta)
-    {
-        pruneLock.readLock().lock();
-        try
-        {
-            while (true)
-            {
-                long now = timeSource.currentTimeMillis();
-                long lastTimestamp = lastCounterTimestamp.get();
-                boolean isWithinPrecisionRange = (now - lastTimestamp) < precisionInMillis;
-                AtomicInteger lastCounter = counters.get(lastTimestamp);
-                // If there's a valid counter for the current last timestamp, and we're in the precision range,
-                // update such counter:
-                if (lastCounter != null && isWithinPrecisionRange)
-                {
-                    lastCounter.addAndGet(delta);
-
-                    break;
-                }
-                // Else if there's no counter or we're past the precision range, try to create a new counter,
-                // but only the thread updating the last timestamp will create a new counter:
-                else if (lastCounterTimestamp.compareAndSet(lastTimestamp, now))
-                {
-                    AtomicInteger existing = counters.putIfAbsent(now, new AtomicInteger(delta));
-                    if (existing != null)
-                    {
-                        existing.addAndGet(delta);
-                    }
-
-                    break;
-                }
-            }
-        }
-        finally
-        {
-            pruneLock.readLock().unlock();
-        }
-    }
-
-    /**
-     * Gets the current rate in the given time unit from the beginning of the time window to the
-     * provided point in time ago.
-     */
-    public double get(long toAgo, TimeUnit unit)
-    {
-        pruneLock.readLock().lock();
-        try
-        {
-            long toAgoInMillis = TimeUnit.MILLISECONDS.convert(toAgo, unit);
-            Preconditions.checkArgument(toAgoInMillis < sizeInMillis, "Cannot get rate in the past!");
-
-            long now = timeSource.currentTimeMillis();
-            long sum = 0;
-            ConcurrentNavigableMap<Long, AtomicInteger> tailCounters = counters
-                    .tailMap(now - sizeInMillis, true)
-                    .headMap(now - toAgoInMillis, true);
-            for (AtomicInteger i : tailCounters.values())
-            {
-                sum += i.get();
-            }
-
-            double rateInMillis = sum == 0
-                                  ? sum
-                                  : sum / (double) Math.max(1000, (now - toAgoInMillis) - tailCounters.firstKey());
-            double multiplier = TimeUnit.MILLISECONDS.convert(1, unit);
-            return rateInMillis * multiplier;
-        }
-        finally
-        {
-            pruneLock.readLock().unlock();
-        }
-    }
-
-    /**
-     * Gets the current rate in the given time unit.
-     */
-    public double get(TimeUnit unit)
-    {
-        return get(0, unit);
-    }
-
-    /**
-     * Prunes the time window of old unused updates.
-     */
-    public void prune()
-    {
-        pruneLock.writeLock().lock();
-        try
-        {
-            long now = timeSource.currentTimeMillis();
-            counters.headMap(now - sizeInMillis, false).clear();
-        }
-        finally
-        {
-            pruneLock.writeLock().unlock();
-        }
-    }
-
-    @VisibleForTesting
-    public int size()
-    {
-        return counters.values().stream().reduce(new AtomicInteger(), (v1, v2) -> {
-            v1.addAndGet(v2.get());
-            return v1;
-        }).get();
-    }
-}
diff --git a/src/java/org/apache/cassandra/utils/SystemTimeSource.java b/src/java/org/apache/cassandra/utils/SystemTimeSource.java
deleted file mode 100644
index fef525e..0000000
--- a/src/java/org/apache/cassandra/utils/SystemTimeSource.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.cassandra.utils;
-
-import java.util.concurrent.TimeUnit;
-
-import com.google.common.util.concurrent.Uninterruptibles;
-
-/**
- * Time source backed by JVM clock.
- */
-public class SystemTimeSource implements TimeSource
-{
-    @Override
-    public long currentTimeMillis()
-    {
-        return System.currentTimeMillis();
-    }
-
-    @Override
-    public long nanoTime()
-    {
-        return System.nanoTime();
-    }
-
-    @Override
-    public TimeSource sleepUninterruptibly(long sleepFor, TimeUnit unit)
-    {
-        Uninterruptibles.sleepUninterruptibly(sleepFor, unit);
-        return this;
-    }
-
-    @Override
-    public TimeSource sleep(long sleepFor, TimeUnit unit) throws InterruptedException
-    {
-        TimeUnit.NANOSECONDS.sleep(TimeUnit.NANOSECONDS.convert(sleepFor, unit));
-        return this;
-    }
-}
diff --git a/src/java/org/apache/cassandra/utils/TimeSource.java b/src/java/org/apache/cassandra/utils/TimeSource.java
deleted file mode 100644
index 5d8acec..0000000
--- a/src/java/org/apache/cassandra/utils/TimeSource.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.cassandra.utils;
-
-import java.util.concurrent.TimeUnit;
-
-public interface TimeSource
-{
-    /**
-     *
-     * @return the current time in milliseconds
-     */
-    long currentTimeMillis();
-
-    /**
-     *
-     * @return Returns the current time value in nanoseconds.
-     *
-     * <p>This method can only be used to measure elapsed time and is
-     * not related to any other notion of system or wall-clock time.
-     */
-    long nanoTime();
-
-    /**
-     * Sleep for the given amount of time uninterruptibly.
-     *
-     * @param  sleepFor given amount.
-     * @param  unit time unit
-     * @return The time source itself after the given sleep period.
-     */
-    TimeSource sleepUninterruptibly(long sleepFor, TimeUnit unit);
-
-    /**
-     * Sleep for the given amount of time. This operation can be interrupted.
-     * Hence, after returning from this method, it is not guaranteed
-     * that the requested amount of time has passed.
-     *
-     * @param  sleepFor given amount.
-     * @param  unit time unit
-     * @return The time source itself after the given sleep period.
-     */
-    TimeSource sleep(long sleepFor, TimeUnit unit) throws InterruptedException;
-}
diff --git a/src/java/org/apache/cassandra/utils/UUIDGen.java b/src/java/org/apache/cassandra/utils/UUIDGen.java
index c83e292..7cb8459 100644
--- a/src/java/org/apache/cassandra/utils/UUIDGen.java
+++ b/src/java/org/apache/cassandra/utils/UUIDGen.java
@@ -43,6 +43,8 @@ import com.google.common.primitives.Ints;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.locator.InetAddressAndPort;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 /**
  * The goods are here: www.ietf.org/rfc/rfc4122.txt.
  */
@@ -323,7 +325,7 @@ public class UUIDGen
         while (true)
         {
             //Generate a candidate value for new lastNanos
-            newLastNanos = (System.currentTimeMillis() - START_EPOCH) * 10000;
+            newLastNanos = (currentTimeMillis() - START_EPOCH) * 10000;
             long originalLastNanos = lastNanos.get();
             if (newLastNanos > originalLastNanos)
             {
@@ -402,7 +404,7 @@ public class UUIDGen
         // Identify the process on the load: we use both the PID and class loader hash.
         long pid = NativeLibrary.getProcessID();
         if (pid < 0)
-            pid = new Random(System.currentTimeMillis()).nextLong();
+            pid = new Random(currentTimeMillis()).nextLong();
         updateWithLong(hasher, pid);
 
         ClassLoader loader = UUIDGen.class.getClassLoader();
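For context on the factor of 10000 in the UUIDGen hunk above: RFC 4122 time-based UUIDs count 100-nanosecond intervals since the UUID epoch (1582-10-15), so the millisecond timestamp offset by START_EPOCH is scaled by 10,000 (1 ms = 1,000,000 ns = 10,000 x 100 ns). Purely as an illustration:

    // Illustration only: converting a millisecond offset from the UUID epoch
    // into RFC 4122 v1 timestamp units of 100 nanoseconds.
    static long toUuidV1Timestamp(long millisSinceUuidEpoch)
    {
        return millisSinceUuidEpoch * 10000L;
    }
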
diff --git a/src/java/org/apache/cassandra/utils/binlog/ExternalArchiver.java b/src/java/org/apache/cassandra/utils/binlog/ExternalArchiver.java
index e53c5b0..0e7cc04 100644
--- a/src/java/org/apache/cassandra/utils/binlog/ExternalArchiver.java
+++ b/src/java/org/apache/cassandra/utils/binlog/ExternalArchiver.java
@@ -40,6 +40,9 @@ import net.openhft.chronicle.queue.impl.single.SingleChronicleQueue;
 import org.apache.cassandra.concurrent.NamedThreadFactory;
 import org.apache.cassandra.utils.FBUtilities;
 
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 /**
  * Archives binary log files immediately when they are rolled using a configured archive command.
  *
@@ -184,12 +187,12 @@ public class ExternalArchiver implements BinLogArchiver
         public DelayFile(File file, long delay, TimeUnit delayUnit, int retries)
         {
             this.file = file;
-            this.delayTime = System.currentTimeMillis() + TimeUnit.MILLISECONDS.convert(delay, delayUnit);
+            this.delayTime = currentTimeMillis() + MILLISECONDS.convert(delay, delayUnit);
             this.retries = retries;
         }
         public long getDelay(TimeUnit unit)
         {
-            return unit.convert(delayTime - System.currentTimeMillis(), TimeUnit.MILLISECONDS);
+            return unit.convert(delayTime - currentTimeMillis(), TimeUnit.MILLISECONDS);
         }
 
         public int compareTo(Delayed o)
diff --git a/src/java/org/apache/cassandra/utils/concurrent/IntervalLock.java b/src/java/org/apache/cassandra/utils/concurrent/IntervalLock.java
deleted file mode 100644
index 382a2dc..0000000
--- a/src/java/org/apache/cassandra/utils/concurrent/IntervalLock.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.cassandra.utils.concurrent;
-
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-
-import com.google.common.annotations.VisibleForTesting;
-
-import org.apache.cassandra.utils.TimeSource;
-
-/**
- * This class extends ReentrantReadWriteLock to provide a write lock that can only be acquired at provided intervals.
- */
-public class IntervalLock extends ReentrantReadWriteLock
-{
-    private final AtomicLong lastAcquire = new AtomicLong();
-    private final TimeSource timeSource;
-
-    public IntervalLock(TimeSource timeSource)
-    {
-        this.timeSource = timeSource;
-    }
-
-    /**
-     * Try acquiring a write lock if the given interval has passed since the last call to this method.
-     *
-     * @param interval In millis.
-     * @return True if acquired and locked, false otherwise.
-     */
-    public boolean tryIntervalLock(long interval)
-    {
-        long now = timeSource.currentTimeMillis();
-        boolean acquired = (now - lastAcquire.get() >= interval) && writeLock().tryLock();
-        if (acquired)
-            lastAcquire.set(now);
-
-        return acquired;
-    }
-
-    /**
-     * Release the last acquired interval lock.
-     */
-    public void releaseIntervalLock()
-    {
-        writeLock().unlock();
-    }
-
-    @VisibleForTesting
-    public long getLastIntervalAcquire()
-    {
-        return lastAcquire.get();
-    }
-}
diff --git a/src/java/org/apache/cassandra/utils/concurrent/SimpleCondition.java b/src/java/org/apache/cassandra/utils/concurrent/SimpleCondition.java
index 61ec640..844cfda 100644
--- a/src/java/org/apache/cassandra/utils/concurrent/SimpleCondition.java
+++ b/src/java/org/apache/cassandra/utils/concurrent/SimpleCondition.java
@@ -22,6 +22,8 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
 import java.util.concurrent.locks.Condition;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 // fulfils the Condition interface without spurious wakeup problems
 // (or lost notify problems either: that is, even if you call await()
 // _after_ signal(), it will work as desired.)
@@ -49,7 +51,7 @@ public class SimpleCondition implements Condition
 
     public boolean await(long time, TimeUnit unit) throws InterruptedException
     {
-        long start = System.nanoTime();
+        long start = nanoTime();
         long until = start + unit.toNanos(time);
         return awaitUntil(until);
     }
diff --git a/src/java/org/apache/cassandra/utils/concurrent/WaitQueue.java b/src/java/org/apache/cassandra/utils/concurrent/WaitQueue.java
index 3647623..295af1b 100644
--- a/src/java/org/apache/cassandra/utils/concurrent/WaitQueue.java
+++ b/src/java/org/apache/cassandra/utils/concurrent/WaitQueue.java
@@ -26,6 +26,8 @@ import java.util.function.BooleanSupplier;
 
 import com.codahale.metrics.Timer;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 /**
  * <p>A relatively easy to use utility for general purpose thread signalling.</p>
  * <p>Usage on a thread awaiting a state change using a WaitQueue q is:</p>
@@ -258,7 +260,7 @@ public final class WaitQueue
          * isSignalled() will be true on exit, and the method will return true; if timed out, the method will return
          * false and isCancelled() will be true; if interrupted, an InterruptedException will be thrown and isCancelled()
          * will be true.
-         * @param nanos System.nanoTime() to wait until
+         * @param nanos nanoTime() to wait until
          * @return true if signalled, false if timed out
          * @throws InterruptedException
          */
@@ -268,7 +270,7 @@ public final class WaitQueue
          * Wait until signalled, or the provided time is reached, or the thread is interrupted. If signalled,
          * isSignalled() will be true on exit, and the method will return true; if timed out, the method will return
          * false and isCancelled() will be true
-         * @param nanos System.nanoTime() to wait until
+         * @param nanos nanoTime() to wait until
          * @return true if signalled, false if timed out
          */
         public boolean awaitUntilUninterruptibly(long nanos);
@@ -306,7 +308,7 @@ public final class WaitQueue
         public boolean awaitUntil(long until) throws InterruptedException
         {
             long now;
-            while (until > (now = System.nanoTime()) && !isSignalled())
+            while (until > (now = nanoTime()) && !isSignalled())
             {
                 checkInterrupted();
                 long delta = until - now;
@@ -318,7 +320,7 @@ public final class WaitQueue
         public boolean awaitUntilUninterruptibly(long until)
         {
             long now;
-            while (until > (now = System.nanoTime()) && !isSignalled())
+            while (until > (now = nanoTime()) && !isSignalled())
             {
                 long delta = until - now;
                 LockSupport.parkNanos(delta);
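The WaitQueue javadoc edits above are wording only; awaitUntil still takes an absolute nanoTime()-based deadline rather than a relative timeout. A hypothetical caller, with the condition and timeout chosen purely for illustration, might look like:

    // Hypothetical usage: register a Signal, re-check the condition, then wait
    // until an absolute deadline derived from nanoTime().
    static boolean awaitReady(WaitQueue queue, java.util.function.BooleanSupplier ready) throws InterruptedException
    {
        WaitQueue.Signal signal = queue.register();
        if (ready.getAsBoolean())
        {
            signal.cancel();                    // condition already holds; release the signal
            return true;
        }
        long deadline = nanoTime() + java.util.concurrent.TimeUnit.SECONDS.toNanos(5);
        return signal.awaitUntil(deadline);     // false if the deadline passed before a signal
    }
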
diff --git a/src/java/org/apache/cassandra/utils/progress/jmx/JMXProgressSupport.java b/src/java/org/apache/cassandra/utils/progress/jmx/JMXProgressSupport.java
index 12efd0d..1d5023d 100644
--- a/src/java/org/apache/cassandra/utils/progress/jmx/JMXProgressSupport.java
+++ b/src/java/org/apache/cassandra/utils/progress/jmx/JMXProgressSupport.java
@@ -26,6 +26,8 @@ import javax.management.NotificationBroadcasterSupport;
 import org.apache.cassandra.utils.progress.ProgressEvent;
 import org.apache.cassandra.utils.progress.ProgressListener;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 /**
  * ProgressListener that translates ProgressEvent to JMX Notification message.
  */
@@ -46,7 +48,7 @@ public class JMXProgressSupport implements ProgressListener
         Notification notification = new Notification("progress",
                                                      tag,
                                                      notificationSerialNumber.getAndIncrement(),
-                                                     System.currentTimeMillis(),
+                                                     currentTimeMillis(),
                                                      event.getMessage());
         Map<String, Integer> userData = new HashMap<>();
         userData.put("type", event.getType().ordinal());
diff --git a/test/burn/org/apache/cassandra/concurrent/LongSharedExecutorPoolTest.java b/test/burn/org/apache/cassandra/concurrent/LongSharedExecutorPoolTest.java
index fd7920a..b5f5718 100644
--- a/test/burn/org/apache/cassandra/concurrent/LongSharedExecutorPoolTest.java
+++ b/test/burn/org/apache/cassandra/concurrent/LongSharedExecutorPoolTest.java
@@ -31,6 +31,8 @@ import org.apache.commons.math3.distribution.WeibullDistribution;
 import org.junit.Ignore;
 import org.junit.Test;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 public class LongSharedExecutorPoolTest
 {
 
@@ -134,11 +136,11 @@ public class LongSharedExecutorPoolTest
         // (beyond max queued size), and longer operations
         for (float multiplier = 0f ; multiplier < 2.01f ; )
         {
-            if (System.nanoTime() > until)
+            if (nanoTime() > until)
             {
                 System.out.println(String.format("Completed %.0fK batches with %.1fM events", runs * 0.001f, events * 0.000001f));
                 events = 0;
-                until = System.nanoTime() + intervalNanos;
+                until = nanoTime() + intervalNanos;
                 multiplier += loadIncrement;
                 System.out.println(String.format("Running for %ds with load multiplier %.1f", TimeUnit.NANOSECONDS.toSeconds(intervalNanos), multiplier));
             }
@@ -150,20 +152,20 @@ public class LongSharedExecutorPoolTest
             else if (pending.size() == executorCount) timeout = pending.first().timeout;
             else timeout = (long) (Math.random() * pending.last().timeout);
 
-            while (!pending.isEmpty() && timeout > System.nanoTime())
+            while (!pending.isEmpty() && timeout > nanoTime())
             {
                 Batch first = pending.first();
                 boolean complete = false;
                 try
                 {
                     for (Result result : first.results.descendingSet())
-                        result.future.get(timeout - System.nanoTime(), TimeUnit.NANOSECONDS);
+                        result.future.get(timeout - nanoTime(), TimeUnit.NANOSECONDS);
                     complete = true;
                 }
                 catch (TimeoutException e)
                 {
                 }
-                if (!complete && System.nanoTime() > first.timeout)
+                if (!complete && nanoTime() > first.timeout)
                 {
                     for (Result result : first.results)
                         if (!result.future.isDone())
@@ -190,7 +192,7 @@ public class LongSharedExecutorPoolTest
             TreeSet<Result> results = new TreeSet<>();
             int count = (int) (workCount[executorIndex].sample() * multiplier);
             long targetTotalElapsed = 0;
-            long start = System.nanoTime();
+            long start = nanoTime();
             long baseTime;
             if (Math.random() > 0.5) baseTime = 2 * (long) (workTime.sample() * multiplier);
             else  baseTime = 0;
@@ -205,11 +207,11 @@ public class LongSharedExecutorPoolTest
                     time = maxWorkTime;
                 targetTotalElapsed += time;
                 Future<?> future = executor.submit(new WaitTask(time));
-                results.add(new Result(future, System.nanoTime() + time));
+                results.add(new Result(future, nanoTime() + time));
             }
             long end = start + (long) Math.ceil(targetTotalElapsed / (double) threadCounts[executorIndex])
                        + TimeUnit.MILLISECONDS.toNanos(100L);
-            long now = System.nanoTime();
+            long now = nanoTime();
             if (runs++ > executorCount && now > end)
                 throw new AssertionError();
             events += results.size();
diff --git a/test/burn/org/apache/cassandra/net/ConnectionBurnTest.java b/test/burn/org/apache/cassandra/net/ConnectionBurnTest.java
index eba8b65..23601c3 100644
--- a/test/burn/org/apache/cassandra/net/ConnectionBurnTest.java
+++ b/test/burn/org/apache/cassandra/net/ConnectionBurnTest.java
@@ -64,6 +64,7 @@ import org.apache.cassandra.utils.memory.BufferPools;
 import static java.lang.Math.min;
 import static org.apache.cassandra.net.MessagingService.current_version;
 import static org.apache.cassandra.net.ConnectionType.LARGE_MESSAGES;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.apache.cassandra.utils.MonotonicClock.approxTime;
 import static org.apache.cassandra.utils.MonotonicClock.preciseTime;
 
@@ -259,7 +260,7 @@ public class ConnectionBurnTest
             Reporters reporters = new Reporters(endpoints, connections);
             try
             {
-                long deadline = System.nanoTime() + runForNanos;
+                long deadline = nanoTime() + runForNanos;
                 Verb._TEST_2.unsafeSetHandler(() -> message -> {});
                 Verb._TEST_2.unsafeSetSerializer(() -> serializer);
                 inbound.sockets.open().get();
@@ -345,7 +346,7 @@ public class ConnectionBurnTest
                 executor.execute(() -> {
                     Thread.currentThread().setName("Test-Reconnect");
                     ThreadLocalRandom random = ThreadLocalRandom.current();
-                    while (deadline > System.nanoTime())
+                    while (deadline > nanoTime())
                     {
                         try
                         {
@@ -411,7 +412,7 @@ public class ConnectionBurnTest
                     };
 
                     int count = 0;
-                    while (deadline > System.nanoTime())
+                    while (deadline > nanoTime())
                     {
 
                         try
@@ -465,7 +466,7 @@ public class ConnectionBurnTest
                     }
                 });
 
-                while (deadline > System.nanoTime() && failed.getCount() > 0)
+                while (deadline > nanoTime() && failed.getCount() > 0)
                 {
                     reporters.update();
                     reporters.print();
diff --git a/test/burn/org/apache/cassandra/net/Reporters.java b/test/burn/org/apache/cassandra/net/Reporters.java
index 9ab4643..1f4f823 100644
--- a/test/burn/org/apache/cassandra/net/Reporters.java
+++ b/test/burn/org/apache/cassandra/net/Reporters.java
@@ -30,12 +30,14 @@ import com.google.common.collect.ImmutableList;
 
 import org.apache.cassandra.locator.InetAddressAndPort;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 class Reporters
 {
     final Collection<InetAddressAndPort> endpoints;
     final Connection[] connections;
     final List<Reporter> reporters;
-    final long start = System.nanoTime();
+    final long start = nanoTime();
 
     Reporters(Collection<InetAddressAndPort> endpoints, Connection[] connections)
     {
@@ -66,7 +68,7 @@ class Reporters
 
     void print()
     {
-        System.out.println("==" + prettyPrintElapsed(System.nanoTime() - start) + "==\n");
+        System.out.println("==" + prettyPrintElapsed(nanoTime() - start) + "==\n");
 
         for (Reporter reporter : reporters)
         {
diff --git a/test/burn/org/apache/cassandra/net/Verifier.java b/test/burn/org/apache/cassandra/net/Verifier.java
index 219e613..60b014a 100644
--- a/test/burn/org/apache/cassandra/net/Verifier.java
+++ b/test/burn/org/apache/cassandra/net/Verifier.java
@@ -65,6 +65,7 @@ import static org.apache.cassandra.net.Verifier.EventType.SEND_FRAME;
 import static org.apache.cassandra.net.Verifier.EventType.SENT_FRAME;
 import static org.apache.cassandra.net.Verifier.EventType.SERIALIZE;
 import static org.apache.cassandra.net.Verifier.ExpiredMessageEvent.ExpirationType.ON_SENT;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.apache.cassandra.utils.MonotonicClock.approxTime;
 
 /**
@@ -1110,7 +1111,7 @@ public class Verifier
                                 throw new IllegalStateException();
                         }
 
-                        now = System.nanoTime();
+                        now = nanoTime();
                         if (m.expiresAtNanos > now)
                         {
                             // we fix the conversion AlmostSameTime for an entire run, which should suffice to guarantee these comparisons
@@ -1356,7 +1357,7 @@ public class Verifier
 
         public Event await(long id, long timeout, TimeUnit unit) throws InterruptedException
         {
-            return await(id, System.nanoTime() + unit.toNanos(timeout));
+            return await(id, nanoTime() + unit.toNanos(timeout));
         }
 
         public Event await(long id, long deadlineNanos) throws InterruptedException
@@ -1370,7 +1371,7 @@ public class Verifier
             readerWaiting = Thread.currentThread();
             while (null == (result = chunk.get(id)))
             {
-                long waitNanos = deadlineNanos - System.nanoTime();
+                long waitNanos = deadlineNanos - nanoTime();
                 if (waitNanos <= 0)
                     return null;
                 LockSupport.parkNanos(waitNanos);
diff --git a/test/burn/org/apache/cassandra/transport/DriverBurnTest.java b/test/burn/org/apache/cassandra/transport/DriverBurnTest.java
index 8aaf87e..5655867 100644
--- a/test/burn/org/apache/cassandra/transport/DriverBurnTest.java
+++ b/test/burn/org/apache/cassandra/transport/DriverBurnTest.java
@@ -45,6 +45,7 @@ import static org.apache.cassandra.transport.BurnTestUtil.SizeCaps;
 import static org.apache.cassandra.transport.BurnTestUtil.generateQueryMessage;
 import static org.apache.cassandra.transport.BurnTestUtil.generateQueryStatement;
 import static org.apache.cassandra.transport.BurnTestUtil.generateRows;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.assertj.core.api.Assertions.assertThat;
 
 public class DriverBurnTest extends CQLTester
@@ -385,10 +386,10 @@ public class DriverBurnTest extends CQLTester
 
                         for (int j = 0; j < perThread; j++)
                         {
-                            long startNanos = System.nanoTime();
+                            long startNanos = nanoTime();
                             ResultSetFuture future = session.executeAsync(request);
                             future.addListener(() -> {
-                                long diff = System.nanoTime() - startNanos;
+                                long diff = nanoTime() - startNanos;
                                 if (measure.get())
                                 {
                                     lock.lock();
diff --git a/test/burn/org/apache/cassandra/transport/SimpleClientPerfTest.java b/test/burn/org/apache/cassandra/transport/SimpleClientPerfTest.java
index 0427ad9..a050245 100644
--- a/test/burn/org/apache/cassandra/transport/SimpleClientPerfTest.java
+++ b/test/burn/org/apache/cassandra/transport/SimpleClientPerfTest.java
@@ -54,6 +54,7 @@ import org.apache.cassandra.utils.Throwables;
 import static org.apache.cassandra.transport.BurnTestUtil.SizeCaps;
 import static org.apache.cassandra.transport.BurnTestUtil.generateQueryMessage;
 import static org.apache.cassandra.transport.BurnTestUtil.generateRows;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 @RunWith(Parameterized.class)
 public class SimpleClientPerfTest
@@ -219,9 +220,9 @@ public class SimpleClientPerfTest
                                 try
                                 {
                                     limiter.acquire();
-                                    long nanoStart = System.nanoTime();
+                                    long nanoStart = nanoTime();
                                     client.execute(messages);
-                                    long elapsed = System.nanoTime() - nanoStart;
+                                    long elapsed = nanoTime() - nanoStart;
 
                                     lock.lock();
                                     try
diff --git a/test/burn/org/apache/cassandra/utils/memory/LongBufferPoolTest.java b/test/burn/org/apache/cassandra/utils/memory/LongBufferPoolTest.java
index fc603c9..d9e8372 100644
--- a/test/burn/org/apache/cassandra/utils/memory/LongBufferPoolTest.java
+++ b/test/burn/org/apache/cassandra/utils/memory/LongBufferPoolTest.java
@@ -39,6 +39,7 @@ import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.io.compress.BufferType;
 import org.apache.cassandra.utils.DynamicList;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.*;
 
 /**
@@ -186,7 +187,7 @@ public class LongBufferPoolTest
             this.threadCount = threadCount;
             this.duration = duration;
             this.poolSize = Math.toIntExact(poolSize);
-            until = System.nanoTime() + duration;
+            until = nanoTime() + duration;
             latch = new CountDownLatch(threadCount);
             sharedRecycle = new SPSCQueue[threadCount];
             makingProgress = new AtomicBoolean[threadCount];
@@ -348,7 +349,7 @@ public class LongBufferPoolTest
                         }
                         else if (!recycleFromNeighbour())
                         {
-                            if (++spinCount > 1000 && System.nanoTime() > until)
+                            if (++spinCount > 1000 && nanoTime() > until)
                                 return;
                             // otherwise, free one of our other neighbour's buffers if can; and otherwise yield
                             Thread.yield();
@@ -568,7 +569,7 @@ public class LongBufferPoolTest
         {
             try
             {
-                while (System.nanoTime() < until)
+                while (nanoTime() < until)
                 {
                     checkpoint();
                     for (int i = 0 ; i < 100 ; i++)
diff --git a/test/distributed/org/apache/cassandra/distributed/impl/Coordinator.java b/test/distributed/org/apache/cassandra/distributed/impl/Coordinator.java
index 02f01c4..852b3e8 100644
--- a/test/distributed/org/apache/cassandra/distributed/impl/Coordinator.java
+++ b/test/distributed/org/apache/cassandra/distributed/impl/Coordinator.java
@@ -49,6 +49,11 @@ import org.apache.cassandra.transport.messages.ResultMessage;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FBUtilities;
 
+import static java.lang.Integer.MAX_VALUE;
+import static org.apache.cassandra.cql3.QueryOptions.create;
+import static org.apache.cassandra.service.QueryState.forInternalCalls;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 public class Coordinator implements ICoordinator
 {
     final Instance instance;
@@ -156,7 +161,7 @@ public class Coordinator implements ICoordinator
             prepared.validate(clientState);
             assert prepared instanceof SelectStatement : "Only SELECT statements can be executed with paging";
 
-            long nanoTime = System.nanoTime();
+            long nanoTime = nanoTime();
             SelectStatement selectStatement = (SelectStatement) prepared;
 
             QueryState queryState = new QueryState(clientState);
diff --git a/test/distributed/org/apache/cassandra/distributed/impl/Instance.java b/test/distributed/org/apache/cassandra/distributed/impl/Instance.java
index 2a6b638..4dc5ffd 100644
--- a/test/distributed/org/apache/cassandra/distributed/impl/Instance.java
+++ b/test/distributed/org/apache/cassandra/distributed/impl/Instance.java
@@ -144,12 +144,13 @@ import static org.apache.cassandra.distributed.api.Feature.NETWORK;
 import static org.apache.cassandra.distributed.impl.DistributedTestSnitch.fromCassandraInetAddressAndPort;
 import static org.apache.cassandra.distributed.impl.DistributedTestSnitch.toCassandraInetAddressAndPort;
 import static org.apache.cassandra.net.Verb.BATCH_STORE_REQ;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 public class Instance extends IsolatedExecutor implements IInvokableInstance
 {
     public final IInstanceConfig config;
     private volatile boolean initialized = false;
-    private final long startedAt = System.nanoTime();
+    private final long startedAt;
 
     // should never be invoked directly, so that it is instantiated on other class loader;
     // only visible for inheritance
@@ -160,6 +161,11 @@ public class Instance extends IsolatedExecutor implements IInvokableInstance
         Object clusterId = Objects.requireNonNull(config.get(Constants.KEY_DTEST_API_CLUSTER_ID), "cluster_id is not defined");
         ClusterIDDefiner.setId("cluster-" + clusterId);
         InstanceIDDefiner.setInstanceId(config.num());
+        // Defer initialisation of Clock.Global until cluster/instance identifiers are set.
+        // Otherwise, the instance classloader's logging classes are set up ahead of time and
+        // the patterns/file paths are not set correctly. This will be addressed in a subsequent
+        // commit to extend the functionality of the @Shared annotation to app classes.
+        startedAt = nanoTime();
         FBUtilities.setBroadcastInetAddressAndPort(InetAddressAndPort.getByAddressOverrideDefaults(config.broadcastAddress().getAddress(),
                                                                                                    config.broadcastAddress().getPort()));
 
@@ -566,7 +572,7 @@ public class Instance extends IsolatedExecutor implements IInvokableInstance
                 StorageService.instance.registerDaemon(CassandraDaemon.getInstanceForTesting());
                 if (config.has(GOSSIP))
                 {
-                    MigrationManager.setUptimeFn(() -> TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startedAt));
+                    MigrationManager.setUptimeFn(() -> TimeUnit.NANOSECONDS.toMillis(nanoTime() - startedAt));
                     try
                     {
                         StorageService.instance.initServer();
diff --git a/test/distributed/org/apache/cassandra/distributed/test/hostreplacement/HostReplacementAbruptDownedInstanceTest.java b/test/distributed/org/apache/cassandra/distributed/test/hostreplacement/HostReplacementAbruptDownedInstanceTest.java
index 11a30e5..d7b8f99 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/hostreplacement/HostReplacementAbruptDownedInstanceTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/hostreplacement/HostReplacementAbruptDownedInstanceTest.java
@@ -44,6 +44,7 @@ import static org.apache.cassandra.distributed.shared.ClusterUtils.replaceHostAn
 import static org.apache.cassandra.distributed.shared.ClusterUtils.stopAbrupt;
 import static org.apache.cassandra.distributed.test.hostreplacement.HostReplacementTest.setupCluster;
 import static org.apache.cassandra.distributed.test.hostreplacement.HostReplacementTest.validateRows;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 public class HostReplacementAbruptDownedInstanceTest extends TestBaseImpl
 {
@@ -82,14 +83,14 @@ public class HostReplacementAbruptDownedInstanceTest extends TestBaseImpl
 //            peers.forEach(p -> validateRows(p.coordinator(), expectedState));
 
             // now create a new node to replace the other node
-            long startNanos = System.nanoTime();
+            long startNanos = nanoTime();
             IInvokableInstance replacingNode = replaceHostAndStart(cluster, nodeToRemove, properties -> {
                // since node2 was killed abruptly it's possible that node2's gossip state has an old schema version
                 // if this happens then bootstrap will fail waiting for a schema version it will never see; to avoid
                 // this, setting this property to log the warning rather than fail bootstrap
                 properties.set(BOOTSTRAP_SKIP_SCHEMA_CHECK, true);
             });
-            logger.info("Host replacement of {} with {} took {}", nodeToRemove, replacingNode, Duration.ofNanos(System.nanoTime() - startNanos));
+            logger.info("Host replacement of {} with {} took {}", nodeToRemove, replacingNode, Duration.ofNanos(nanoTime() - startNanos));
             peers.forEach(p -> awaitRingJoin(p, replacingNode));
 
             // make sure all nodes are healthy
diff --git a/test/long/org/apache/cassandra/cql3/CachingBench.java b/test/long/org/apache/cassandra/cql3/CachingBench.java
index f5f9ada..92b63b6 100644
--- a/test/long/org/apache/cassandra/cql3/CachingBench.java
+++ b/test/long/org/apache/cassandra/cql3/CachingBench.java
@@ -48,6 +48,7 @@ import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.utils.FBUtilities;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.assertj.core.api.Assertions.assertThat;
 
 public class CachingBench extends CQLTester
@@ -183,9 +184,9 @@ public class CachingBench extends CQLTester
             if (ii % (FLUSH_FREQ * 10) == 0)
             {
                 System.out.println("C");
-                long startTime = System.nanoTime();
+                long startTime = nanoTime();
                 getCurrentColumnFamilyStore().enableAutoCompaction(!CONCURRENT_COMPACTIONS);
-                long endTime = System.nanoTime();
+                long endTime = nanoTime();
                 compactionTimeNanos += endTime - startTime;
                 getCurrentColumnFamilyStore().disableAutoCompaction();
             }
diff --git a/test/long/org/apache/cassandra/cql3/GcCompactionBench.java b/test/long/org/apache/cassandra/cql3/GcCompactionBench.java
index 009c9e2..155335e 100644
--- a/test/long/org/apache/cassandra/cql3/GcCompactionBench.java
+++ b/test/long/org/apache/cassandra/cql3/GcCompactionBench.java
@@ -46,6 +46,8 @@ import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.schema.CompactionParams.TombstoneOption;
 import org.apache.cassandra.utils.FBUtilities;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 public class GcCompactionBench extends CQLTester
 {
     private static final String SIZE_TIERED_STRATEGY = "SizeTieredCompactionStrategy', 'min_sstable_size' : '0";
@@ -183,9 +185,9 @@ public class GcCompactionBench extends CQLTester
             if (ii % (FLUSH_FREQ * 10) == 0)
             {
                 System.out.println("C");
-                long startTime = System.nanoTime();
+                long startTime = nanoTime();
                 getCurrentColumnFamilyStore().enableAutoCompaction(true);
-                long endTime = System.nanoTime();
+                long endTime = nanoTime();
                 compactionTimeNanos += endTime - startTime;
                 getCurrentColumnFamilyStore().disableAutoCompaction();
             }
diff --git a/test/long/org/apache/cassandra/db/compaction/LongCompactionsTest.java b/test/long/org/apache/cassandra/db/compaction/LongCompactionsTest.java
index fe8cdc2..9b4fe73 100644
--- a/test/long/org/apache/cassandra/db/compaction/LongCompactionsTest.java
+++ b/test/long/org/apache/cassandra/db/compaction/LongCompactionsTest.java
@@ -40,6 +40,8 @@ import org.apache.cassandra.schema.CompactionParams;
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FBUtilities;
+
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.assertEquals;
 
 public class LongCompactionsTest
@@ -122,7 +124,7 @@ public class LongCompactionsTest
         // give garbage collection a bit of time to catch up
         Thread.sleep(1000);
 
-        long start = System.nanoTime();
+        long start = nanoTime();
         final int gcBefore = (int) (System.currentTimeMillis() / 1000) - Schema.instance.getTableMetadata(KEYSPACE1, "Standard1").params.gcGraceSeconds;
         try (LifecycleTransaction txn = store.getTracker().tryModify(sstables, OperationType.COMPACTION))
         {
@@ -134,7 +136,7 @@ public class LongCompactionsTest
                                          sstableCount,
                                          partitionsPerSSTable,
                                          rowsPerPartition,
-                                         TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start)));
+                                         TimeUnit.NANOSECONDS.toMillis(nanoTime() - start)));
     }
 
     @Test
diff --git a/test/long/org/apache/cassandra/io/compress/CompressorPerformance.java b/test/long/org/apache/cassandra/io/compress/CompressorPerformance.java
index b3cdaa1..19a8ec3 100644
--- a/test/long/org/apache/cassandra/io/compress/CompressorPerformance.java
+++ b/test/long/org/apache/cassandra/io/compress/CompressorPerformance.java
@@ -26,6 +26,8 @@ import java.nio.ByteBuffer;
 import java.util.Collections;
 import java.util.concurrent.ThreadLocalRandom;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 public class CompressorPerformance
 {
 
@@ -71,7 +73,7 @@ public class CompressorPerformance
         int checksum = 0;
         int count = 100;
 
-        long time = System.nanoTime();
+        long time = nanoTime();
         long uncompressedBytes = 0;
         long compressedBytes = 0;
         for (int i=0; i<count; ++i)
@@ -85,12 +87,12 @@ public class CompressorPerformance
             checksum += output.get(ThreadLocalRandom.current().nextInt(output.position()));
             dataSource.rewind();
         }
-        long timec = System.nanoTime() - time;
+        long timec = nanoTime() - time;
         output.flip();
         input.put(output);
         input.flip();
 
-        time = System.nanoTime();
+        time = nanoTime();
         for (int i=0; i<count; ++i)
         {
             output.clear();
@@ -99,7 +101,7 @@ public class CompressorPerformance
             checksum += output.get(ThreadLocalRandom.current().nextInt(output.position()));
             input.rewind();
         }
-        long timed = System.nanoTime() - time;
+        long timed = nanoTime() - time;
         System.out.format("Compressor %s %s->%s compress %.3f ns/b %.3f mb/s uncompress %.3f ns/b %.3f mb/s ratio %.2f:1.%s\n",
                           compressor.getClass().getSimpleName(),
                           in,
diff --git a/test/long/org/apache/cassandra/streaming/LongStreamingTest.java b/test/long/org/apache/cassandra/streaming/LongStreamingTest.java
index e37045a..1864760 100644
--- a/test/long/org/apache/cassandra/streaming/LongStreamingTest.java
+++ b/test/long/org/apache/cassandra/streaming/LongStreamingTest.java
@@ -44,6 +44,7 @@ import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.OutputHandler;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.assertEquals;
 
 public class LongStreamingTest
@@ -99,13 +100,13 @@ public class LongStreamingTest
         Assert.assertEquals(useSstableCompression, compressionParams.isEnabled());
 
 
-        long start = System.nanoTime();
+        long start = nanoTime();
 
         for (int i = 0; i < 10_000_000; i++)
             writer.addRow(i, "test1", 24);
 
         writer.close();
-        System.err.println(String.format("Writer finished after %d seconds....", TimeUnit.NANOSECONDS.toSeconds(System.nanoTime() - start)));
+        System.err.println(String.format("Writer finished after %d seconds....", TimeUnit.NANOSECONDS.toSeconds(nanoTime() - start)));
 
         File[] dataFiles = dataDir.listFiles((dir, name) -> name.endsWith("-Data.db"));
         long dataSize = 0l;
@@ -132,10 +133,10 @@ public class LongStreamingTest
             }
         }, new OutputHandler.SystemOutput(false, false));
 
-        start = System.nanoTime();
+        start = nanoTime();
         loader.stream().get();
 
-        long millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
+        long millis = TimeUnit.NANOSECONDS.toMillis(nanoTime() - start);
         System.err.println(String.format("Finished Streaming in %.2f seconds: %.2f Mb/sec",
                                          millis/1000d,
                                          (dataSize / (1 << 20) / (millis / 1000d)) * 8));
@@ -159,19 +160,19 @@ public class LongStreamingTest
             }
         }, new OutputHandler.SystemOutput(false, false));
 
-        start = System.nanoTime();
+        start = nanoTime();
         loader.stream().get();
 
-        millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
+        millis = TimeUnit.NANOSECONDS.toMillis(nanoTime() - start);
         System.err.println(String.format("Finished Streaming in %.2f seconds: %.2f Mb/sec",
                                          millis/1000d,
                                          (dataSize / (1 << 20) / (millis / 1000d)) * 8));
 
 
         //Compact them both
-        start = System.nanoTime();
+        start = nanoTime();
         Keyspace.open(KS).getColumnFamilyStore(TABLE).forceMajorCompaction();
-        millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
+        millis = TimeUnit.NANOSECONDS.toMillis(nanoTime() - start);
 
         System.err.println(String.format("Finished Compacting in %.2f seconds: %.2f Mb/sec",
                                          millis / 1000d,
diff --git a/test/microbench/org/apache/cassandra/test/microbench/BatchStatementBench.java b/test/microbench/org/apache/cassandra/test/microbench/BatchStatementBench.java
index b79f154..1fff65c 100644
--- a/test/microbench/org/apache/cassandra/test/microbench/BatchStatementBench.java
+++ b/test/microbench/org/apache/cassandra/test/microbench/BatchStatementBench.java
@@ -63,6 +63,7 @@ import org.openjdk.jmh.runner.options.Options;
 import org.openjdk.jmh.runner.options.OptionsBuilder;
 
 import static org.apache.cassandra.utils.ByteBufferUtil.bytes;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 
 @BenchmarkMode(Mode.Throughput)
@@ -87,7 +88,7 @@ public class BatchStatementBench
     String table = "tbl";
 
     int nowInSec = FBUtilities.nowInSeconds();
-    long queryStartTime = System.nanoTime();
+    long queryStartTime = nanoTime();
     BatchStatement bs;
     BatchQueryOptions bqo;
 
diff --git a/test/microbench/org/apache/cassandra/test/microbench/MessageOutBench.java b/test/microbench/org/apache/cassandra/test/microbench/MessageOutBench.java
index a3446aa..5addfa8 100644
--- a/test/microbench/org/apache/cassandra/test/microbench/MessageOutBench.java
+++ b/test/microbench/org/apache/cassandra/test/microbench/MessageOutBench.java
@@ -50,6 +50,7 @@ import org.openjdk.jmh.annotations.State;
 import org.openjdk.jmh.annotations.Warmup;
 
 import static org.apache.cassandra.net.Verb.ECHO_REQ;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 @State(Scope.Thread)
 @Warmup(iterations = 4, time = 1, timeUnit = TimeUnit.SECONDS)
@@ -96,7 +97,7 @@ public class MessageOutBench
     {
         try (DataOutputBuffer out = new DataOutputBuffer())
         {
-            Message.serializer.serialize(Message.builder(msgOut).withCreatedAt(System.nanoTime()).withId(42).build(),
+            Message.serializer.serialize(Message.builder(msgOut).withCreatedAt(nanoTime()).withId(42).build(),
                                          out, messagingVersion);
             DataInputBuffer in = new DataInputBuffer(out.buffer(), false);
             Message.serializer.deserialize(in, addr, messagingVersion);
diff --git a/test/unit/org/apache/cassandra/concurrent/DebuggableThreadPoolExecutorTest.java b/test/unit/org/apache/cassandra/concurrent/DebuggableThreadPoolExecutorTest.java
index 58200c9..b3c29f4 100644
--- a/test/unit/org/apache/cassandra/concurrent/DebuggableThreadPoolExecutorTest.java
+++ b/test/unit/org/apache/cassandra/concurrent/DebuggableThreadPoolExecutorTest.java
@@ -42,6 +42,8 @@ import org.apache.cassandra.tracing.TraceStateImpl;
 import org.apache.cassandra.tracing.Tracing;
 import org.apache.cassandra.utils.WrappedRunnable;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 public class DebuggableThreadPoolExecutorTest
 {
     @BeforeClass
@@ -66,7 +68,7 @@ public class DebuggableThreadPoolExecutorTest
                 Thread.sleep(50);
             }
         };
-        long start = System.nanoTime();
+        long start = nanoTime();
         for (int i = 0; i < 10; i++)
         {
             executor.execute(runnable);
@@ -74,7 +76,7 @@ public class DebuggableThreadPoolExecutorTest
         assert q.size() > 0 : q.size();
         while (executor.getCompletedTaskCount() < 10)
             continue;
-        long delta = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
+        long delta = TimeUnit.NANOSECONDS.toMillis(nanoTime() - start);
         assert delta >= 9 * 50 : delta;
     }
 
diff --git a/test/unit/org/apache/cassandra/cql3/CustomNowInSecondsTest.java b/test/unit/org/apache/cassandra/cql3/CustomNowInSecondsTest.java
index 983acfa..8768b77 100644
--- a/test/unit/org/apache/cassandra/cql3/CustomNowInSecondsTest.java
+++ b/test/unit/org/apache/cassandra/cql3/CustomNowInSecondsTest.java
@@ -34,6 +34,8 @@ import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.transport.messages.ResultMessage;
 
 import static java.lang.String.format;
+import static java.util.Collections.emptyMap;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.assertEquals;
 
 public class CustomNowInSecondsTest extends CQLTester
@@ -154,7 +156,7 @@ public class CustomNowInSecondsTest extends CQLTester
             new BatchStatement(BatchStatement.Type.UNLOGGED, VariableSpecifications.empty(), statements, Attributes.none());
 
         // execute an BATCH message with now set to [now + 1 day], with ttl = 1, making its effective ttl = 1 day + 1.
-        QueryProcessor.instance.processBatch(batch, qs, batchQueryOptions(now + day), Collections.emptyMap(), System.nanoTime());
+        QueryProcessor.instance.processBatch(batch, qs, batchQueryOptions(now + day), emptyMap(), nanoTime());
 
         // verify that despite TTL having passed at now + 1 the rows are still there.
         assertEquals(2, executeSelect(format("SELECT * FROM %s.%s", ks, tbl), now + 1, false).size());
@@ -183,12 +185,12 @@ public class CustomNowInSecondsTest extends CQLTester
         if (prepared)
         {
             CQLStatement statement = QueryProcessor.parseStatement(query, cs);
-            return QueryProcessor.instance.processPrepared(statement, qs, queryOptions(nowInSeconds), Collections.emptyMap(), System.nanoTime());
+            return QueryProcessor.instance.processPrepared(statement, qs, queryOptions(nowInSeconds), emptyMap(), nanoTime());
         }
         else
         {
             CQLStatement statement = QueryProcessor.instance.parse(query, qs, queryOptions(nowInSeconds));
-            return QueryProcessor.instance.process(statement, qs, queryOptions(nowInSeconds), Collections.emptyMap(), System.nanoTime());
+            return QueryProcessor.instance.process(statement, qs, queryOptions(nowInSeconds), emptyMap(), nanoTime());
         }
     }
 
diff --git a/test/unit/org/apache/cassandra/cql3/PstmtPersistenceTest.java b/test/unit/org/apache/cassandra/cql3/PstmtPersistenceTest.java
index eca6c20..da9fc2a 100644
--- a/test/unit/org/apache/cassandra/cql3/PstmtPersistenceTest.java
+++ b/test/unit/org/apache/cassandra/cql3/PstmtPersistenceTest.java
@@ -36,6 +36,9 @@ import org.apache.cassandra.service.QueryState;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.MD5Digest;
 
+import static java.util.Collections.emptyMap;
+import static org.apache.cassandra.service.QueryState.forInternalCalls;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.*;
 
 public class PstmtPersistenceTest extends CQLTester
@@ -129,7 +132,7 @@ public class PstmtPersistenceTest extends CQLTester
     {
         QueryProcessor.Prepared prepared = handler.getPrepared(stmtId);
         Assert.assertNotNull(prepared);
-        handler.processPrepared(prepared.statement, QueryState.forInternalCalls(), options, Collections.emptyMap(), System.nanoTime());
+        handler.processPrepared(prepared.statement, forInternalCalls(), options, emptyMap(), nanoTime());
     }
 
     @Test
diff --git a/test/unit/org/apache/cassandra/cql3/validation/entities/CollectionsTest.java b/test/unit/org/apache/cassandra/cql3/validation/entities/CollectionsTest.java
index 54dd0c4..e37a294 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/entities/CollectionsTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/entities/CollectionsTest.java
@@ -31,6 +31,7 @@ import org.apache.cassandra.cql3.UntypedResultSet;
 import org.apache.cassandra.db.marshal.UTF8Type;
 import org.apache.cassandra.utils.FBUtilities;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.assertEquals;
 
 public class CollectionsTest extends CQLTester
@@ -695,7 +696,7 @@ public class CollectionsTest extends CQLTester
     public void testMapWithLargePartition() throws Throwable
     {
         Random r = new Random();
-        long seed = System.nanoTime();
+        long seed = nanoTime();
         System.out.println("Seed " + seed);
         r.setSeed(seed);
 
diff --git a/test/unit/org/apache/cassandra/cql3/validation/entities/JsonTest.java b/test/unit/org/apache/cassandra/cql3/validation/entities/JsonTest.java
index e28e0cb..f550fdf 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/entities/JsonTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/entities/JsonTest.java
@@ -40,6 +40,7 @@ import java.text.SimpleDateFormat;
 import java.util.*;
 import java.util.concurrent.*;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;
 
@@ -1329,7 +1330,7 @@ public class JsonTest extends CQLTester
         for (int i = 0; i < numRows; i++)
             execute("INSERT INTO %s (k, v) VALUES (?, ?)", "" + i, "" + i);
 
-        long seed = System.nanoTime();
+        long seed = nanoTime();
         System.out.println("Seed " + seed);
         final Random rand = new Random(seed);
 
diff --git a/test/unit/org/apache/cassandra/cql3/validation/entities/SecondaryIndexTest.java b/test/unit/org/apache/cassandra/cql3/validation/entities/SecondaryIndexTest.java
index 9f22857..bfe4815 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/entities/SecondaryIndexTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/entities/SecondaryIndexTest.java
@@ -59,6 +59,7 @@ import static java.lang.String.format;
 import static org.apache.cassandra.Util.throwAssert;
 import static org.apache.cassandra.utils.ByteBufferUtil.EMPTY_BYTE_BUFFER;
 import static org.apache.cassandra.utils.ByteBufferUtil.bytes;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
@@ -114,7 +115,7 @@ public class SecondaryIndexTest extends CQLTester
                              "CREATE INDEX " + indexName + " ON %s(b)");
 
         // IF NOT EXISTS should apply in cases where the new index differs from an existing one in name only
-        String otherIndexName = "index_" + System.nanoTime();
+        String otherIndexName = "index_" + nanoTime();
         assertEquals(1, getCurrentColumnFamilyStore().metadata().indexes.size());
         createIndex("CREATE INDEX IF NOT EXISTS " + otherIndexName + " ON %s(b)");
         assertEquals(1, getCurrentColumnFamilyStore().metadata().indexes.size());
diff --git a/test/unit/org/apache/cassandra/db/CleanupTest.java b/test/unit/org/apache/cassandra/db/CleanupTest.java
index 9965361..b028a4e 100644
--- a/test/unit/org/apache/cassandra/db/CleanupTest.java
+++ b/test/unit/org/apache/cassandra/db/CleanupTest.java
@@ -56,6 +56,7 @@ import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
@@ -155,8 +156,8 @@ public class CleanupTest
 
         ColumnMetadata cdef = cfs.metadata().getColumn(COLUMN);
         String indexName = "birthdate_key_index";
-        long start = System.nanoTime();
-        while (!cfs.getBuiltIndexes().contains(indexName) && System.nanoTime() - start < TimeUnit.SECONDS.toNanos(10))
+        long start = nanoTime();
+        while (!cfs.getBuiltIndexes().contains(indexName) && nanoTime() - start < TimeUnit.SECONDS.toNanos(10))
             Thread.sleep(10);
 
         RowFilter cf = RowFilter.create();
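
[Editor's note, not part of the patch] The CleanupTest hunk above also illustrates a bounded spin-wait built on the same clock call: poll a condition, giving up once a fixed window of elapsed nanoseconds has passed. A small sketch of that idiom, again assuming only the static import from the patch; the condition and class name are hypothetical.

    import java.util.concurrent.TimeUnit;

    import static org.apache.cassandra.utils.Clock.Global.nanoTime;

    public class BoundedWaitSketch
    {
        public static void main(String[] args) throws InterruptedException
        {
            long start = nanoTime();
            // Poll a placeholder condition, giving up after ten seconds of elapsed clock time.
            while (!conditionMet() && nanoTime() - start < TimeUnit.SECONDS.toNanos(10))
                Thread.sleep(10);
        }

        private static boolean conditionMet() { return false; } // hypothetical condition
    }
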
diff --git a/test/unit/org/apache/cassandra/db/DirectoriesTest.java b/test/unit/org/apache/cassandra/db/DirectoriesTest.java
index 4c282a6..ba499ce 100644
--- a/test/unit/org/apache/cassandra/db/DirectoriesTest.java
+++ b/test/unit/org/apache/cassandra/db/DirectoriesTest.java
@@ -62,6 +62,7 @@ import org.apache.cassandra.service.snapshot.TableSnapshot;
 import org.apache.cassandra.utils.JVMStabilityInspector;
 
 import static org.assertj.core.api.Assertions.assertThat;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertSame;
@@ -511,7 +512,7 @@ public class DirectoriesTest
         {
             final Directories directories = new Directories(cfm, toDataDirectories(tempDataDir));
             assertEquals(cfDir(cfm), directories.getDirectoryForNewSSTables());
-            final String n = Long.toString(System.nanoTime());
+            final String n = Long.toString(nanoTime());
             Callable<File> directoryGetter = new Callable<File>() {
                 public File call() throws Exception {
                     Descriptor desc = new Descriptor(cfDir(cfm), KS, cfm.name, 1, SSTableFormat.Type.BIG);
diff --git a/test/unit/org/apache/cassandra/db/RangeTombstoneListTest.java b/test/unit/org/apache/cassandra/db/RangeTombstoneListTest.java
index 8f61a05..bbe06a8 100644
--- a/test/unit/org/apache/cassandra/db/RangeTombstoneListTest.java
+++ b/test/unit/org/apache/cassandra/db/RangeTombstoneListTest.java
@@ -35,6 +35,7 @@ import org.apache.cassandra.distributed.impl.IsolatedExecutor;
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertSame;
@@ -334,7 +335,7 @@ public class RangeTombstoneListTest
         int MAX_IT_DISTANCE = 10;
         int MAX_MARKEDAT = 10;
 
-        long seed = System.nanoTime();
+        long seed = nanoTime();
         Random rand = new Random(seed);
 
         for (int i = 0; i < TEST_COUNT; i++)
diff --git a/test/unit/org/apache/cassandra/db/commitlog/BatchCommitLogTest.java b/test/unit/org/apache/cassandra/db/commitlog/BatchCommitLogTest.java
index 7336e03..e9ec640 100644
--- a/test/unit/org/apache/cassandra/db/commitlog/BatchCommitLogTest.java
+++ b/test/unit/org/apache/cassandra/db/commitlog/BatchCommitLogTest.java
@@ -35,6 +35,7 @@ import org.apache.cassandra.db.RowUpdateBuilder;
 import org.apache.cassandra.security.EncryptionContext;
 
 import static org.junit.Assert.assertEquals;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 public class BatchCommitLogTest extends CommitLogTest
 {
@@ -62,18 +63,18 @@ public class BatchCommitLogTest extends CommitLogTest
                      .add("val", ByteBuffer.allocate(10 * 1024))
                      .build();
 
-        long startNano = System.nanoTime();
+        long startNano = nanoTime();
         CommitLog.instance.add(m);
-        long delta = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNano);
+        long delta = TimeUnit.NANOSECONDS.toMillis(nanoTime() - startNano);
         Assert.assertTrue("Expect batch commitlog sync immediately, but took " + delta, delta < CL_BATCH_SYNC_WINDOW);
     }
 
     @Test
     public void testBatchCLShutDownImmediately() throws InterruptedException
     {
-        long startNano = System.nanoTime();
+        long startNano = nanoTime();
         CommitLog.instance.shutdownBlocking();
-        long delta = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNano);
+        long delta = TimeUnit.NANOSECONDS.toMillis(nanoTime() - startNano);
         Assert.assertTrue("Expect batch commitlog shutdown immediately, but took " + delta, delta < CL_BATCH_SYNC_WINDOW);
         CommitLog.instance.start();
     }
diff --git a/test/unit/org/apache/cassandra/db/compaction/CorruptedSSTablesCompactionsTest.java b/test/unit/org/apache/cassandra/db/compaction/CorruptedSSTablesCompactionsTest.java
index 95542a1..d56c48b 100644
--- a/test/unit/org/apache/cassandra/db/compaction/CorruptedSSTablesCompactionsTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/CorruptedSSTablesCompactionsTest.java
@@ -32,6 +32,7 @@ import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 
@@ -71,7 +72,7 @@ public class CorruptedSSTablesCompactionsTest
     @BeforeClass
     public static void defineSchema() throws ConfigurationException
     {
-        long seed = System.nanoTime();
+        long seed = nanoTime();
 
         //long seed = 754271160974509L; // CASSANDRA-9530: use this seed to reproduce compaction failures if reading empty rows
         //long seed = 2080431860597L; // CASSANDRA-12359: use this seed to reproduce undetected corruptions
diff --git a/test/unit/org/apache/cassandra/db/lifecycle/RealTransactionsTest.java b/test/unit/org/apache/cassandra/db/lifecycle/RealTransactionsTest.java
index b7b7d4a..01bfaae 100644
--- a/test/unit/org/apache/cassandra/db/lifecycle/RealTransactionsTest.java
+++ b/test/unit/org/apache/cassandra/db/lifecycle/RealTransactionsTest.java
@@ -47,6 +47,7 @@ import org.apache.cassandra.io.sstable.format.SSTableWriter;
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.utils.FBUtilities;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
@@ -155,7 +156,7 @@ public class RealTransactionsTest extends SchemaLoader
                  CompactionIterator ci = new CompactionIterator(txn.opType(), scanners.scanners, controller, nowInSec, txn.opId())
             )
             {
-                long lastCheckObsoletion = System.nanoTime();
+                long lastCheckObsoletion = nanoTime();
                 File directory = txn.originals().iterator().next().descriptor.directory;
                 Descriptor desc = cfs.newSSTableDescriptor(directory);
                 TableMetadataRef metadata = Schema.instance.getTableMetadataRef(desc);
@@ -173,10 +174,10 @@ public class RealTransactionsTest extends SchemaLoader
                 {
                     rewriter.append(ci.next());
 
-                    if (System.nanoTime() - lastCheckObsoletion > TimeUnit.MINUTES.toNanos(1L))
+                    if (nanoTime() - lastCheckObsoletion > TimeUnit.MINUTES.toNanos(1L))
                     {
                         controller.maybeRefreshOverlaps();
-                        lastCheckObsoletion = System.nanoTime();
+                        lastCheckObsoletion = nanoTime();
                     }
                 }
 
diff --git a/test/unit/org/apache/cassandra/db/monitoring/MonitoringTaskTest.java b/test/unit/org/apache/cassandra/db/monitoring/MonitoringTaskTest.java
index dc8c317..dc1f3e9 100644
--- a/test/unit/org/apache/cassandra/db/monitoring/MonitoringTaskTest.java
+++ b/test/unit/org/apache/cassandra/db/monitoring/MonitoringTaskTest.java
@@ -22,7 +22,6 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
-import java.util.UUID;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
@@ -32,10 +31,11 @@ import org.junit.After;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-import org.apache.cassandra.utils.ApproximateTime;
-
+import static java.lang.Thread.currentThread;
+import static java.util.UUID.randomUUID;
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static java.util.concurrent.TimeUnit.NANOSECONDS;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.apache.cassandra.utils.MonotonicClock.approxTime;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
@@ -98,8 +98,8 @@ public class MonitoringTaskTest
         long timeout = operations.stream().map(Monitorable::timeoutNanos).reduce(0L, Long::max);
         Thread.sleep(NANOSECONDS.toMillis(timeout * 2 + approxTime.error()));
 
-        long start = System.nanoTime();
-        while(System.nanoTime() - start <= MAX_SPIN_TIME_NANOS)
+        long start = nanoTime();
+        while(nanoTime() - start <= MAX_SPIN_TIME_NANOS)
         {
             long numInProgress = operations.stream().filter(Monitorable::isInProgress).count();
             if (numInProgress == 0)
@@ -117,8 +117,8 @@ public class MonitoringTaskTest
         long timeout = operations.stream().map(Monitorable::slowTimeoutNanos).reduce(0L, Long::max);
         Thread.sleep(NANOSECONDS.toMillis(timeout * 2 + approxTime.error()));
 
-        long start = System.nanoTime();
-        while(System.nanoTime() - start <= MAX_SPIN_TIME_NANOS)
+        long start = nanoTime();
+        while(nanoTime() - start <= MAX_SPIN_TIME_NANOS)
         {
             long numSlow = operations.stream().filter(Monitorable::isSlow).count();
             if (numSlow == operations.size())
@@ -129,7 +129,7 @@ public class MonitoringTaskTest
     @Test
     public void testAbort() throws InterruptedException
     {
-        Monitorable operation = new TestMonitor("Test abort", System.nanoTime(), false, timeout, slowTimeout);
+        Monitorable operation = new TestMonitor("Test abort", nanoTime(), false, timeout, slowTimeout);
         waitForOperationsToComplete(operation);
 
         assertTrue(operation.isAborted());
@@ -140,7 +140,7 @@ public class MonitoringTaskTest
     @Test
     public void testAbortIdemPotent() throws InterruptedException
     {
-        Monitorable operation = new TestMonitor("Test abort", System.nanoTime(), false, timeout, slowTimeout);
+        Monitorable operation = new TestMonitor("Test abort", nanoTime(), false, timeout, slowTimeout);
         waitForOperationsToComplete(operation);
 
         assertTrue(operation.abort());
@@ -153,7 +153,7 @@ public class MonitoringTaskTest
     @Test
     public void testAbortCrossNode() throws InterruptedException
     {
-        Monitorable operation = new TestMonitor("Test for cross node", System.nanoTime(), true, timeout, slowTimeout);
+        Monitorable operation = new TestMonitor("Test for cross node", nanoTime(), true, timeout, slowTimeout);
         waitForOperationsToComplete(operation);
 
         assertTrue(operation.isAborted());
@@ -164,7 +164,7 @@ public class MonitoringTaskTest
     @Test
     public void testComplete() throws InterruptedException
     {
-        Monitorable operation = new TestMonitor("Test complete", System.nanoTime(), false, timeout, slowTimeout);
+        Monitorable operation = new TestMonitor("Test complete", nanoTime(), false, timeout, slowTimeout);
         operation.complete();
         waitForOperationsToComplete(operation);
 
@@ -176,7 +176,7 @@ public class MonitoringTaskTest
     @Test
     public void testCompleteIdemPotent() throws InterruptedException
     {
-        Monitorable operation = new TestMonitor("Test complete", System.nanoTime(), false, timeout, slowTimeout);
+        Monitorable operation = new TestMonitor("Test complete", nanoTime(), false, timeout, slowTimeout);
         operation.complete();
         waitForOperationsToComplete(operation);
 
@@ -190,7 +190,7 @@ public class MonitoringTaskTest
     @Test
     public void testReportSlow() throws InterruptedException
     {
-        Monitorable operation = new TestMonitor("Test report slow", System.nanoTime(), false, timeout, slowTimeout);
+        Monitorable operation = new TestMonitor("Test report slow", nanoTime(), false, timeout, slowTimeout);
         waitForOperationsToBeReportedAsSlow(operation);
 
         assertTrue(operation.isSlow());
@@ -204,7 +204,7 @@ public class MonitoringTaskTest
     public void testNoReportSlowIfZeroSlowTimeout() throws InterruptedException
     {
         // when the slow timeout is set to zero then operation won't be reported as slow
-        Monitorable operation = new TestMonitor("Test report slow disabled", System.nanoTime(), false, timeout, 0);
+        Monitorable operation = new TestMonitor("Test report slow disabled", nanoTime(), false, timeout, 0);
         waitForOperationsToBeReportedAsSlow(operation);
 
         assertTrue(operation.isSlow());
@@ -217,7 +217,7 @@ public class MonitoringTaskTest
     @Test
     public void testReport() throws InterruptedException
     {
-        Monitorable operation = new TestMonitor("Test report", System.nanoTime(), false, timeout, slowTimeout);
+        Monitorable operation = new TestMonitor("Test report", nanoTime(), false, timeout, slowTimeout);
         waitForOperationsToComplete(operation);
 
         assertTrue(operation.isSlow());
@@ -238,13 +238,13 @@ public class MonitoringTaskTest
         MonitoringTask.instance = MonitoringTask.make(10, -1);
         try
         {
-            Monitorable operation1 = new TestMonitor("Test report 1", System.nanoTime(), false, timeout, slowTimeout);
+            Monitorable operation1 = new TestMonitor("Test report 1", nanoTime(), false, timeout, slowTimeout);
             waitForOperationsToComplete(operation1);
 
             assertTrue(operation1.isAborted());
             assertFalse(operation1.isCompleted());
 
-            Monitorable operation2 = new TestMonitor("Test report 2", System.nanoTime(), false, timeout, slowTimeout);
+            Monitorable operation2 = new TestMonitor("Test report 2", nanoTime(), false, timeout, slowTimeout);
             waitForOperationsToBeReportedAsSlow(operation2);
 
             operation2.complete();
@@ -271,7 +271,7 @@ public class MonitoringTaskTest
         for (int i = 0; i < opCount; i++)
         {
             executorService.submit(() ->
-                operations.add(new TestMonitor(UUID.randomUUID().toString(), System.nanoTime(), false, timeout, slowTimeout))
+                operations.add(new TestMonitor(randomUUID().toString(), nanoTime(), false, timeout, slowTimeout))
             );
         }
 
@@ -316,14 +316,14 @@ public class MonitoringTaskTest
                         for (int j = 0; j < numTimes; j++)
                         {
                             Monitorable operation1 = new TestMonitor(operationName,
-                                                                     System.nanoTime(),
+                                                                     nanoTime(),
                                                                      false,
                                                                      timeout,
                                                                      slowTimeout);
                             waitForOperationsToComplete(operation1);
 
                             Monitorable operation2 = new TestMonitor(operationName,
-                                                                     System.nanoTime(),
+                                                                     nanoTime(),
                                                                      false,
                                                                      timeout,
                                                                      slowTimeout);
@@ -371,7 +371,7 @@ public class MonitoringTaskTest
                 try
                 {
                     Monitorable operation = new TestMonitor("Test testMultipleThreadsSameName failed",
-                                                            System.nanoTime(),
+                                                            nanoTime(),
                                                             false,
                                                             timeout,
                                                             slowTimeout);
@@ -405,7 +405,7 @@ public class MonitoringTaskTest
                 try
                 {
                     Monitorable operation = new TestMonitor("Test testMultipleThreadsSameName slow",
-                                                            System.nanoTime(),
+                                                            nanoTime(),
                                                             false,
                                                             timeout,
                                                             slowTimeout);
@@ -440,8 +440,8 @@ public class MonitoringTaskTest
             executorService.submit(() -> {
                 try
                 {
-                    Monitorable operation = new TestMonitor("Test thread " + Thread.currentThread().getName(),
-                                                            System.nanoTime(),
+                    Monitorable operation = new TestMonitor("Test thread " + currentThread().getName(),
+                                                            nanoTime(),
                                                             false,
                                                             timeout,
                                                             slowTimeout);
diff --git a/test/unit/org/apache/cassandra/io/sstable/IndexSummaryTest.java b/test/unit/org/apache/cassandra/io/sstable/IndexSummaryTest.java
index 01cd0dd..1318227 100644
--- a/test/unit/org/apache/cassandra/io/sstable/IndexSummaryTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/IndexSummaryTest.java
@@ -42,6 +42,7 @@ import org.apache.cassandra.utils.Pair;
 import static org.apache.cassandra.io.sstable.IndexSummaryBuilder.downsample;
 import static org.apache.cassandra.io.sstable.IndexSummaryBuilder.entriesAtSamplingLevel;
 import static org.apache.cassandra.io.sstable.Downsampling.BASE_SAMPLING_LEVEL;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.*;
 
 public class IndexSummaryTest
@@ -53,7 +54,7 @@ public class IndexSummaryTest
     {
         DatabaseDescriptor.daemonInitialization();
 
-        final long seed = System.nanoTime();
+        final long seed = nanoTime();
         System.out.println("Using seed: " + seed);
         random.setSeed(seed);
     }
@@ -63,7 +64,7 @@ public class IndexSummaryTest
     @BeforeClass
     public static void setup()
     {
-        final long seed = System.nanoTime();
+        final long seed = nanoTime();
         System.out.println("Using seed: " + seed);
         random.setSeed(seed);
     }
diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableCorruptionDetectionTest.java b/test/unit/org/apache/cassandra/io/sstable/SSTableCorruptionDetectionTest.java
index 2510c5e..c4e5207 100644
--- a/test/unit/org/apache/cassandra/io/sstable/SSTableCorruptionDetectionTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/SSTableCorruptionDetectionTest.java
@@ -47,6 +47,7 @@ import org.apache.cassandra.io.sstable.format.SSTableWriter;
 import org.apache.cassandra.io.util.*;
 import org.apache.cassandra.schema.*;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
@@ -95,7 +96,7 @@ public class SSTableCorruptionDetectionTest extends SSTableWriterTestBase
         maxValueSize = DatabaseDescriptor.getMaxValueSize();
         DatabaseDescriptor.setMaxValueSize(1024 * 1024);
 
-        long seed = System.nanoTime();
+        long seed = nanoTime();
         logger.info("Seed {}", seed);
         random = new Random(seed);
 
diff --git a/test/unit/org/apache/cassandra/io/util/BufferedDataOutputStreamTest.java b/test/unit/org/apache/cassandra/io/util/BufferedDataOutputStreamTest.java
index c5c3b60..66f506d 100644
--- a/test/unit/org/apache/cassandra/io/util/BufferedDataOutputStreamTest.java
+++ b/test/unit/org/apache/cassandra/io/util/BufferedDataOutputStreamTest.java
@@ -44,6 +44,7 @@ import com.google.common.primitives.UnsignedBytes;
 import com.google.common.primitives.UnsignedInteger;
 import com.google.common.primitives.UnsignedLong;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.apache.cassandra.utils.FBUtilities.preventIllegalAccessWarnings;
 import static org.junit.Assert.*;
 
@@ -171,7 +172,7 @@ public class BufferedDataOutputStreamTest
 
     static Field baos_bytes;
     static {
-        long seed = System.nanoTime();
+        long seed = nanoTime();
         //seed = 210187780999648L;
         System.out.println("Seed " + seed);
         r = new Random(seed);
diff --git a/test/unit/org/apache/cassandra/io/util/FileSegmentInputStreamTest.java b/test/unit/org/apache/cassandra/io/util/FileSegmentInputStreamTest.java
index b040d27..0f72e33 100644
--- a/test/unit/org/apache/cassandra/io/util/FileSegmentInputStreamTest.java
+++ b/test/unit/org/apache/cassandra/io/util/FileSegmentInputStreamTest.java
@@ -29,6 +29,7 @@ import org.junit.Test;
 
 import org.apache.cassandra.utils.ByteBufferUtil;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -38,7 +39,7 @@ public class FileSegmentInputStreamTest
     private ByteBuffer allocateBuffer(int size)
     {
         ByteBuffer ret = ByteBuffer.allocate(Ints.checkedCast(size));
-        long seed = System.nanoTime();
+        long seed = nanoTime();
         //seed = 365238103404423L;
         System.out.println("Seed " + seed);
 
diff --git a/test/unit/org/apache/cassandra/io/util/MmappedRegionsTest.java b/test/unit/org/apache/cassandra/io/util/MmappedRegionsTest.java
index 2814bab..400bea9 100644
--- a/test/unit/org/apache/cassandra/io/util/MmappedRegionsTest.java
+++ b/test/unit/org/apache/cassandra/io/util/MmappedRegionsTest.java
@@ -39,6 +39,7 @@ import org.apache.cassandra.io.sstable.metadata.MetadataCollector;
 import org.apache.cassandra.schema.CompressionParams;
 
 import static junit.framework.Assert.assertNull;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
@@ -57,7 +58,7 @@ public class MmappedRegionsTest
     private static ByteBuffer allocateBuffer(int size)
     {
         ByteBuffer ret = ByteBuffer.allocate(Ints.checkedCast(size));
-        long seed = System.nanoTime();
+        long seed = nanoTime();
         //seed = 365238103404423L;
         logger.info("Seed {}", seed);
 
diff --git a/test/unit/org/apache/cassandra/io/util/NIODataInputStreamTest.java b/test/unit/org/apache/cassandra/io/util/NIODataInputStreamTest.java
index 829cfcf..eba6d8d 100644
--- a/test/unit/org/apache/cassandra/io/util/NIODataInputStreamTest.java
+++ b/test/unit/org/apache/cassandra/io/util/NIODataInputStreamTest.java
@@ -41,6 +41,7 @@ import com.google.common.primitives.UnsignedBytes;
 import com.google.common.primitives.UnsignedInteger;
 import com.google.common.primitives.UnsignedLong;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.*;
 
 public class NIODataInputStreamTest
@@ -51,7 +52,7 @@ public class NIODataInputStreamTest
 
     void init()
     {
-        long seed = System.nanoTime();
+        long seed = nanoTime();
         //seed = 365238103404423L;
         System.out.println("Seed " + seed);
         r = new Random(seed);
diff --git a/test/unit/org/apache/cassandra/io/util/RandomAccessReaderTest.java b/test/unit/org/apache/cassandra/io/util/RandomAccessReaderTest.java
index 8904daa..0d86f0b 100644
--- a/test/unit/org/apache/cassandra/io/util/RandomAccessReaderTest.java
+++ b/test/unit/org/apache/cassandra/io/util/RandomAccessReaderTest.java
@@ -43,6 +43,7 @@ import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.*;
 
 import org.apache.cassandra.config.DatabaseDescriptor;
@@ -428,7 +429,7 @@ public class RandomAccessReaderTest
         final File f = FileUtils.createTempFile("testMark", "1");
         final byte[] expected = new byte[1 << 16];
 
-        long seed = System.nanoTime();
+        long seed = nanoTime();
         //seed = 365238103404423L;
         logger.info("Seed {}", seed);
         Random r = new Random(seed);
diff --git a/test/unit/org/apache/cassandra/locator/TokenMetadataTest.java b/test/unit/org/apache/cassandra/locator/TokenMetadataTest.java
index 06ce83c..c1b76b3 100644
--- a/test/unit/org/apache/cassandra/locator/TokenMetadataTest.java
+++ b/test/unit/org/apache/cassandra/locator/TokenMetadataTest.java
@@ -64,7 +64,7 @@ public class TokenMetadataTest
     @Before
     public void before() throws Throwable
     {
-        tmd = StorageService.instance.getTokenMetadata();
+        tmd = new TokenMetadata();
         tmd.updateNormalToken(token(ONE), InetAddressAndPort.getByName("127.0.0.1"));
         tmd.updateNormalToken(token(SIX), InetAddressAndPort.getByName("127.0.0.6"));
     }
diff --git a/test/unit/org/apache/cassandra/metrics/DecayingEstimatedHistogramReservoirTest.java b/test/unit/org/apache/cassandra/metrics/DecayingEstimatedHistogramReservoirTest.java
index b62078c..8959046 100644
--- a/test/unit/org/apache/cassandra/metrics/DecayingEstimatedHistogramReservoirTest.java
+++ b/test/unit/org/apache/cassandra/metrics/DecayingEstimatedHistogramReservoirTest.java
@@ -37,6 +37,7 @@ import org.apache.cassandra.utils.EstimatedHistogram;
 import org.apache.cassandra.utils.Pair;
 import org.quicktheories.core.Gen;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -143,7 +144,7 @@ public class DecayingEstimatedHistogramReservoirTest
                                                                                            nStripes,
                                                                                            clock);
 
-        long seed = System.nanoTime();
+        long seed = nanoTime();
         System.out.println("DecayingEstimatedHistogramReservoirTest#testStriping.seed = " + seed);
         Random valGen = new Random(seed);
         ExecutorService executors = Executors.newFixedThreadPool(nStripes * 2);
diff --git a/test/unit/org/apache/cassandra/net/AsyncOneResponseTest.java b/test/unit/org/apache/cassandra/net/AsyncOneResponseTest.java
index 3d0508c..1ffcdef 100644
--- a/test/unit/org/apache/cassandra/net/AsyncOneResponseTest.java
+++ b/test/unit/org/apache/cassandra/net/AsyncOneResponseTest.java
@@ -23,6 +23,7 @@ import java.util.concurrent.TimeUnit;
 import org.junit.Assert;
 import org.junit.Test;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.assertTrue;
 
 public class AsyncOneResponseTest
@@ -43,9 +44,9 @@ public class AsyncOneResponseTest
         final long expectedTimeoutMillis = 1000; // Should time out after roughly this time
         final long schedulingError = 10; // Scheduling is imperfect
 
-        long startTime = System.nanoTime();
+        long startTime = nanoTime();
         boolean timeout = !response.await(expectedTimeoutMillis, TimeUnit.MILLISECONDS);
-        long endTime = System.nanoTime();
+        long endTime = nanoTime();
 
         assertTrue(timeout);
         assertTrue(TimeUnit.NANOSECONDS.toMillis(endTime - startTime) > (expectedTimeoutMillis - schedulingError));
diff --git a/test/unit/org/apache/cassandra/net/AsyncStreamingInputPlusTest.java b/test/unit/org/apache/cassandra/net/AsyncStreamingInputPlusTest.java
index b575747..bc41992 100644
--- a/test/unit/org/apache/cassandra/net/AsyncStreamingInputPlusTest.java
+++ b/test/unit/org/apache/cassandra/net/AsyncStreamingInputPlusTest.java
@@ -36,6 +36,7 @@ import org.apache.cassandra.io.util.BufferedDataOutputStreamPlus;
 import org.apache.cassandra.net.AsyncStreamingInputPlus;
 import org.apache.cassandra.net.AsyncStreamingInputPlus.InputTimeoutException;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.assertFalse;
 
 public class AsyncStreamingInputPlusTest
@@ -263,7 +264,7 @@ public class AsyncStreamingInputPlusTest
         long timeoutMillis = 1000;
         inputPlus = new AsyncStreamingInputPlus(channel, timeoutMillis, TimeUnit.MILLISECONDS);
 
-        long startNanos = System.nanoTime();
+        long startNanos = nanoTime();
         try
         {
             inputPlus.readInt();
@@ -274,7 +275,7 @@ public class AsyncStreamingInputPlusTest
             // this is the success case, and is expected. any other exception is a failure.
         }
 
-        long durationNanos = System.nanoTime() - startNanos;
+        long durationNanos = nanoTime() - startNanos;
         Assert.assertTrue(TimeUnit.MILLISECONDS.toNanos(timeoutMillis) <= durationNanos);
     }
 }
diff --git a/test/unit/org/apache/cassandra/net/ConnectionTest.java b/test/unit/org/apache/cassandra/net/ConnectionTest.java
index 5c637ac..7abdcec 100644
--- a/test/unit/org/apache/cassandra/net/ConnectionTest.java
+++ b/test/unit/org/apache/cassandra/net/ConnectionTest.java
@@ -68,6 +68,7 @@ import org.apache.cassandra.io.util.DataOutputPlus;
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.utils.FBUtilities;
 
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static java.util.concurrent.TimeUnit.MINUTES;
 import static java.util.concurrent.TimeUnit.SECONDS;
 import static org.apache.cassandra.net.MessagingService.VERSION_30;
@@ -80,6 +81,7 @@ import static org.apache.cassandra.net.ConnectionType.LARGE_MESSAGES;
 import static org.apache.cassandra.net.ConnectionType.SMALL_MESSAGES;
 import static org.apache.cassandra.net.OutboundConnectionSettings.Framing.LZ4;
 import static org.apache.cassandra.net.OutboundConnections.LARGE_MESSAGE_THRESHOLD;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.apache.cassandra.utils.MonotonicClock.approxTime;
 
 public class ConnectionTest
@@ -336,7 +338,7 @@ public class ConnectionTest
             });
             unsafeSetHandler(Verb._TEST_1, () -> msg -> receiveDone.countDown());
             Message<?> message = Message.builder(Verb._TEST_1, new Object())
-                                        .withExpiresAt(System.nanoTime() + SECONDS.toNanos(30L))
+                                        .withExpiresAt(nanoTime() + SECONDS.toNanos(30L))
                                         .build();
             for (int i = 0 ; i < count ; ++i)
                 outbound.enqueue(message);
@@ -444,7 +446,7 @@ public class ConnectionTest
 
             AtomicInteger serialized = new AtomicInteger();
             Message<?> message = Message.builder(Verb._TEST_1, new Object())
-                                        .withExpiresAt(System.nanoTime() + SECONDS.toNanos(30L))
+                                        .withExpiresAt(nanoTime() + SECONDS.toNanos(30L))
                                         .build();
             unsafeSetSerializer(Verb._TEST_1, () -> new IVersionedSerializer<Object>()
             {
@@ -663,7 +665,7 @@ public class ConnectionTest
     {
         testManual((settings, inbound, outbound, endpoint) -> {
             Message<?> message = Message.builder(Verb._TEST_1, noPayload)
-                                        .withExpiresAt(System.nanoTime() + SECONDS.toNanos(30L))
+                                        .withExpiresAt(nanoTime() + SECONDS.toNanos(30L))
                                         .build();
 
             for (int i = 0 ; i < 1000 ; ++i)
@@ -683,12 +685,12 @@ public class ConnectionTest
                     for (int i = 0; i < 5; i++)
                     {
                         Message<?> message = Message.builder(Verb._TEST_1, noPayload)
-                                                    .withExpiresAt(System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(50L))
+                                                    .withExpiresAt(nanoTime() + MILLISECONDS.toNanos(50L))
                                                     .build();
                         OutboundMessageQueue queue = outbound.queue;
                         while (true)
                         {
-                            try (OutboundMessageQueue.WithLock withLock = queue.lockOrCallback(System.nanoTime(), null))
+                            try (OutboundMessageQueue.WithLock withLock = queue.lockOrCallback(nanoTime(), null))
                             {
                                 if (withLock != null)
                                 {
diff --git a/test/unit/org/apache/cassandra/service/PaxosStateTest.java b/test/unit/org/apache/cassandra/service/PaxosStateTest.java
index 7d69223..0329ccf 100644
--- a/test/unit/org/apache/cassandra/service/PaxosStateTest.java
+++ b/test/unit/org/apache/cassandra/service/PaxosStateTest.java
@@ -39,6 +39,7 @@ import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.UUIDGen;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.*;
 
 public class PaxosStateTest
@@ -60,7 +61,7 @@ public class PaxosStateTest
     public void testCommittingAfterTruncation() throws Exception
     {
         ColumnFamilyStore cfs = Keyspace.open("PaxosStateTestKeyspace1").getColumnFamilyStore("Standard1");
-        String key = "key" + System.nanoTime();
+        String key = "key" + nanoTime();
         ByteBuffer value = ByteBufferUtil.bytes(0);
         RowUpdateBuilder builder = new RowUpdateBuilder(cfs.metadata(), FBUtilities.timestampMicros(), key);
         builder.clustering("a").add("val", value);
@@ -108,7 +109,7 @@ public class PaxosStateTest
     public void testPrepareProposePaxos() throws Throwable
     {
         ColumnFamilyStore cfs = Keyspace.open("PaxosStateTestKeyspace1").getColumnFamilyStore("Standard1");
-        String key = "key" + System.nanoTime();
+        String key = "key" + nanoTime();
         ByteBuffer value = ByteBufferUtil.bytes(0);
         RowUpdateBuilder builder = new RowUpdateBuilder(cfs.metadata(), FBUtilities.timestampMicros(), key);
         builder.clustering("a").add("val", value);
diff --git a/test/unit/org/apache/cassandra/service/WriteResponseHandlerTest.java b/test/unit/org/apache/cassandra/service/WriteResponseHandlerTest.java
index 5d8d191..837cf1b 100644
--- a/test/unit/org/apache/cassandra/service/WriteResponseHandlerTest.java
+++ b/test/unit/org/apache/cassandra/service/WriteResponseHandlerTest.java
@@ -49,7 +49,9 @@ import org.apache.cassandra.net.Verb;
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
+import static java.util.concurrent.TimeUnit.DAYS;
 import static org.apache.cassandra.net.NoPayload.noPayload;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
@@ -145,7 +147,7 @@ public class WriteResponseHandlerTest
     {
         long startingCount = ks.metric.idealCLWriteLatency.latency.getCount();
         //Specify query start time in past to ensure minimum latency measurement
-        AbstractWriteResponseHandler awr = createWriteResponseHandler(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.EACH_QUORUM, System.nanoTime() - TimeUnit.DAYS.toNanos(1));
+        AbstractWriteResponseHandler awr = createWriteResponseHandler(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.EACH_QUORUM, nanoTime() - DAYS.toNanos(1));
 
         //dc1
         awr.onResponse(createDummyMessage(0));
@@ -260,7 +262,7 @@ public class WriteResponseHandlerTest
 
     private static AbstractWriteResponseHandler createWriteResponseHandler(ConsistencyLevel cl, ConsistencyLevel ideal)
     {
-        return createWriteResponseHandler(cl, ideal, System.nanoTime());
+        return createWriteResponseHandler(cl, ideal, nanoTime());
     }
 
     private static AbstractWriteResponseHandler createWriteResponseHandler(ConsistencyLevel cl, ConsistencyLevel ideal, long queryStartTime)
diff --git a/test/unit/org/apache/cassandra/service/reads/DataResolverTest.java b/test/unit/org/apache/cassandra/service/reads/DataResolverTest.java
index 900a40f..593e3e4 100644
--- a/test/unit/org/apache/cassandra/service/reads/DataResolverTest.java
+++ b/test/unit/org/apache/cassandra/service/reads/DataResolverTest.java
@@ -76,6 +76,8 @@ import static org.apache.cassandra.Util.assertClustering;
 import static org.apache.cassandra.Util.assertColumn;
 import static org.apache.cassandra.Util.assertColumns;
 import static org.apache.cassandra.db.ClusteringBound.Kind;
+import static org.apache.cassandra.db.ConsistencyLevel.ALL;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
@@ -131,7 +133,7 @@ public class DataResolverTest extends AbstractReadResponseTest
     public void testResolveNewerSingleRow()
     {
         EndpointsForRange replicas = makeReplicas(2);
-        DataResolver resolver = new DataResolver(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime());
+        DataResolver resolver = new DataResolver(command, plan(replicas, ALL), readRepair, nanoTime());
         InetAddressAndPort peer1 = replicas.get(0).endpoint();
         resolver.preprocess(response(command, peer1, iter(new RowUpdateBuilder(cfm, nowInSec, 0L, dk).clustering("1")
                                                                                                      .add("c1", "v1")
@@ -163,7 +165,7 @@ public class DataResolverTest extends AbstractReadResponseTest
     public void testResolveDisjointSingleRow()
     {
         EndpointsForRange replicas = makeReplicas(2);
-        DataResolver resolver = new DataResolver(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime());
+        DataResolver resolver = new DataResolver(command, plan(replicas, ALL), readRepair, nanoTime());
         InetAddressAndPort peer1 = replicas.get(0).endpoint();
         resolver.preprocess(response(command, peer1, iter(new RowUpdateBuilder(cfm, nowInSec, 0L, dk).clustering("1")
                                                                                                      .add("c1", "v1")
@@ -200,7 +202,7 @@ public class DataResolverTest extends AbstractReadResponseTest
     public void testResolveDisjointMultipleRows() throws UnknownHostException
     {
         EndpointsForRange replicas = makeReplicas(2);
-        DataResolver resolver = new DataResolver(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime());
+        DataResolver resolver = new DataResolver(command, plan(replicas, ALL), readRepair, nanoTime());
         InetAddressAndPort peer1 = replicas.get(0).endpoint();
         resolver.preprocess(response(command, peer1, iter(new RowUpdateBuilder(cfm, nowInSec, 0L, dk).clustering("1")
                                                                                                      .add("c1", "v1")
@@ -247,7 +249,7 @@ public class DataResolverTest extends AbstractReadResponseTest
     public void testResolveDisjointMultipleRowsWithRangeTombstones()
     {
         EndpointsForRange replicas = makeReplicas(4);
-        DataResolver resolver = new DataResolver(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime());
+        DataResolver resolver = new DataResolver(command, plan(replicas, ALL), readRepair, nanoTime());
 
         RangeTombstone tombstone1 = tombstone("1", "11", 1, nowInSec);
         RangeTombstone tombstone2 = tombstone("3", "31", 1, nowInSec);
@@ -328,7 +330,7 @@ public class DataResolverTest extends AbstractReadResponseTest
     public void testResolveWithOneEmpty()
     {
         EndpointsForRange replicas = makeReplicas(2);
-        DataResolver resolver = new DataResolver(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime());
+        DataResolver resolver = new DataResolver(command, plan(replicas, ALL), readRepair, nanoTime());
         InetAddressAndPort peer1 = replicas.get(0).endpoint();
         resolver.preprocess(response(command, peer1, iter(new RowUpdateBuilder(cfm, nowInSec, 1L, dk).clustering("1")
                                                                                                      .add("c2", "v2")
@@ -359,7 +361,7 @@ public class DataResolverTest extends AbstractReadResponseTest
     {
         EndpointsForRange replicas = makeReplicas(2);
         TestableReadRepair readRepair = new TestableReadRepair(command);
-        DataResolver resolver = new DataResolver(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime());
+        DataResolver resolver = new DataResolver(command, plan(replicas, ALL), readRepair, nanoTime());
         resolver.preprocess(response(command, replicas.get(0).endpoint(), EmptyIterators.unfilteredPartition(cfm)));
         resolver.preprocess(response(command, replicas.get(1).endpoint(), EmptyIterators.unfilteredPartition(cfm)));
 
@@ -375,7 +377,7 @@ public class DataResolverTest extends AbstractReadResponseTest
     public void testResolveDeleted()
     {
         EndpointsForRange replicas = makeReplicas(2);
-        DataResolver resolver = new DataResolver(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime());
+        DataResolver resolver = new DataResolver(command, plan(replicas, ALL), readRepair, nanoTime());
         // one response with columns timestamped before a delete in another response
         InetAddressAndPort peer1 = replicas.get(0).endpoint();
         resolver.preprocess(response(command, peer1, iter(new RowUpdateBuilder(cfm, nowInSec, 0L, dk).clustering("1")
@@ -401,7 +403,7 @@ public class DataResolverTest extends AbstractReadResponseTest
     public void testResolveMultipleDeleted()
     {
         EndpointsForRange replicas = makeReplicas(4);
-        DataResolver resolver = new DataResolver(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime());
+        DataResolver resolver = new DataResolver(command, plan(replicas, ALL), readRepair, nanoTime());
         // deletes and columns with interleaved timestamp, with out of order return sequence
         InetAddressAndPort peer1 = replicas.get(0).endpoint();
         resolver.preprocess(response(command, peer1, fullPartitionDelete(cfm, dk, 0, nowInSec)));
@@ -486,7 +488,7 @@ public class DataResolverTest extends AbstractReadResponseTest
     private void resolveRangeTombstonesOnBoundary(long timestamp1, long timestamp2)
     {
         EndpointsForRange replicas = makeReplicas(2);
-        DataResolver resolver = new DataResolver(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime());
+        DataResolver resolver = new DataResolver(command, plan(replicas, ALL), readRepair, nanoTime());
         InetAddressAndPort peer1 = replicas.get(0).endpoint();
         InetAddressAndPort peer2 = replicas.get(1).endpoint();
 
@@ -560,7 +562,7 @@ public class DataResolverTest extends AbstractReadResponseTest
     private void testRepairRangeTombstoneBoundary(int timestamp1, int timestamp2, int timestamp3) throws UnknownHostException
     {
         EndpointsForRange replicas = makeReplicas(2);
-        DataResolver resolver = new DataResolver(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime());
+        DataResolver resolver = new DataResolver(command, plan(replicas, ALL), readRepair, nanoTime());
         InetAddressAndPort peer1 = replicas.get(0).endpoint();
         InetAddressAndPort peer2 = replicas.get(1).endpoint();
 
@@ -613,7 +615,7 @@ public class DataResolverTest extends AbstractReadResponseTest
     {
         EndpointsForRange replicas = makeReplicas(2);
 
-        DataResolver resolver = new DataResolver(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime());
+        DataResolver resolver = new DataResolver(command, plan(replicas, ALL), readRepair, nanoTime());
         InetAddressAndPort peer1 = replicas.get(0).endpoint();
         InetAddressAndPort peer2 = replicas.get(1).endpoint();
 
@@ -652,7 +654,7 @@ public class DataResolverTest extends AbstractReadResponseTest
     public void testRepairRangeTombstoneWithPartitionDeletion2()
     {
         EndpointsForRange replicas = makeReplicas(2);
-        DataResolver resolver = new DataResolver(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime());
+        DataResolver resolver = new DataResolver(command, plan(replicas, ALL), readRepair, nanoTime());
         InetAddressAndPort peer1 = replicas.get(0).endpoint();
         InetAddressAndPort peer2 = replicas.get(1).endpoint();
 
@@ -736,7 +738,7 @@ public class DataResolverTest extends AbstractReadResponseTest
         EndpointsForRange replicas = makeReplicas(2);
         ReadCommand cmd = Util.cmd(cfs2, dk).withNowInSeconds(nowInSec).build();
         TestableReadRepair readRepair = new TestableReadRepair(cmd);
-        DataResolver resolver = new DataResolver(cmd, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime());
+        DataResolver resolver = new DataResolver(cmd, plan(replicas, ALL), readRepair, nanoTime());
 
         long[] ts = {100, 200};
 
@@ -788,7 +790,7 @@ public class DataResolverTest extends AbstractReadResponseTest
         EndpointsForRange replicas = makeReplicas(2);
         ReadCommand cmd = Util.cmd(cfs2, dk).withNowInSeconds(nowInSec).build();
         TestableReadRepair readRepair = new TestableReadRepair(cmd);
-        DataResolver resolver = new DataResolver(cmd, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime());
+        DataResolver resolver = new DataResolver(cmd, plan(replicas, ALL), readRepair, nanoTime());
 
         long[] ts = {100, 200};
 
@@ -832,7 +834,7 @@ public class DataResolverTest extends AbstractReadResponseTest
         EndpointsForRange replicas = makeReplicas(2);
         ReadCommand cmd = Util.cmd(cfs2, dk).withNowInSeconds(nowInSec).build();
         TestableReadRepair readRepair = new TestableReadRepair(cmd);
-        DataResolver resolver = new DataResolver(cmd, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime());
+        DataResolver resolver = new DataResolver(cmd, plan(replicas, ALL), readRepair, nanoTime());
 
         long[] ts = {100, 200};
 
@@ -882,7 +884,7 @@ public class DataResolverTest extends AbstractReadResponseTest
         EndpointsForRange replicas = makeReplicas(2);
         ReadCommand cmd = Util.cmd(cfs2, dk).withNowInSeconds(nowInSec).build();
         TestableReadRepair readRepair = new TestableReadRepair(cmd);
-        DataResolver resolver = new DataResolver(cmd, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime());
+        DataResolver resolver = new DataResolver(cmd, plan(replicas, ALL), readRepair, nanoTime());
 
         long[] ts = {100, 200};
 
@@ -938,7 +940,7 @@ public class DataResolverTest extends AbstractReadResponseTest
         verifier.expectDigest(peer1, digest1, true);
         verifier.expectDigest(peer2, digest1, true);
 
-        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime(),  verifier);
+        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ALL), readRepair, nanoTime(), verifier);
 
         resolver.preprocess(response(peer1, iter(PartitionUpdate.emptyUpdate(cfm, dk)), digest1, true, command));
         resolver.preprocess(response(peer2, iter(PartitionUpdate.emptyUpdate(cfm, dk)), digest1, true, command));
@@ -958,7 +960,7 @@ public class DataResolverTest extends AbstractReadResponseTest
         verifier.expectDigest(peer1, digest1, false);
         verifier.expectDigest(peer2, digest1, true);
 
-        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime(),  verifier);
+        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ALL), readRepair, nanoTime(), verifier);
 
         resolver.preprocess(response(peer1, iter(PartitionUpdate.emptyUpdate(cfm, dk)), digest1, false, command));
         resolver.preprocess(response(peer2, iter(PartitionUpdate.emptyUpdate(cfm, dk)), digest1, true, command));
@@ -978,7 +980,7 @@ public class DataResolverTest extends AbstractReadResponseTest
         verifier.expectDigest(peer1, digest1, false);
         verifier.expectDigest(peer2, digest1, false);
 
-        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime(), verifier);
+        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ALL), readRepair, nanoTime(), verifier);
 
         resolver.preprocess(response(peer1, iter(PartitionUpdate.emptyUpdate(cfm, dk)), digest1, false, command));
         resolver.preprocess(response(peer2, iter(PartitionUpdate.emptyUpdate(cfm, dk)), digest1, false, command));
@@ -998,7 +1000,7 @@ public class DataResolverTest extends AbstractReadResponseTest
         verifier.expectDigest(peer1, digest1, true);
         verifier.expectDigest(peer2, digest1, true);
 
-        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime(), verifier);
+        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ALL), readRepair, nanoTime(), verifier);
 
         resolver.preprocess(response(peer1, iter(PartitionUpdate.emptyUpdate(cfm,dk)), digest1, true, command));
         resolver.preprocess(response(peer2, iter(PartitionUpdate.emptyUpdate(cfm, dk)), digest1, true, command));
@@ -1018,7 +1020,7 @@ public class DataResolverTest extends AbstractReadResponseTest
         verifier.expectDigest(peer1, digest1, true);
         verifier.expectDigest(peer2, digest1, false);
 
-        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime(), verifier);
+        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ALL), readRepair, nanoTime(), verifier);
 
         resolver.preprocess(response(peer1, iter(PartitionUpdate.emptyUpdate(cfm,dk)), digest1, true, command));
         resolver.preprocess(response(peer2, iter(PartitionUpdate.emptyUpdate(cfm, dk)), digest1, false, command));
@@ -1038,7 +1040,7 @@ public class DataResolverTest extends AbstractReadResponseTest
         verifier.expectDigest(peer1, digest1, false);
         verifier.expectDigest(peer2, digest1, false);
 
-        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime(),  verifier);
+        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ALL), readRepair, nanoTime(), verifier);
 
         resolver.preprocess(response(peer1, iter(PartitionUpdate.emptyUpdate(cfm, dk)), digest1, false, command));
         resolver.preprocess(response(peer2, iter(PartitionUpdate.emptyUpdate(cfm, dk)), digest1, false, command));
@@ -1059,7 +1061,7 @@ public class DataResolverTest extends AbstractReadResponseTest
         verifier.expectDigest(peer1, digest1, true);
         verifier.expectDigest(peer2, digest1, true);
 
-        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime(),  verifier);
+        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ALL), readRepair, nanoTime(), verifier);
 
         resolver.preprocess(response(peer1, iter(new RowUpdateBuilder(cfm, nowInSec, 1L, dk).clustering("1") .buildUpdate()), digest1, true, command));
         resolver.preprocess(response(peer2, iter(PartitionUpdate.emptyUpdate(cfm, dk)), digest1, true, command));
@@ -1080,7 +1082,7 @@ public class DataResolverTest extends AbstractReadResponseTest
         verifier.expectDigest(peer1, digest1, true);
         verifier.expectDigest(peer2, digest2, true);
 
-        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime(),  verifier);
+        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ALL), readRepair, nanoTime(), verifier);
 
         resolver.preprocess(response(peer1, iter(PartitionUpdate.emptyUpdate(cfm, dk)), digest1, true, command));
         resolver.preprocess(response(peer2, iter(PartitionUpdate.emptyUpdate(cfm, dk)), digest2, true, command));
@@ -1101,7 +1103,7 @@ public class DataResolverTest extends AbstractReadResponseTest
         verifier.expectDigest(peer1, digest1, false);
         verifier.expectDigest(peer2, digest2, true);
 
-        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime(),  verifier);
+        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ALL), readRepair, nanoTime(), verifier);
 
         resolver.preprocess(response(peer1, iter(PartitionUpdate.emptyUpdate(cfm, dk)), digest1, false, command));
         resolver.preprocess(response(peer2, iter(PartitionUpdate.emptyUpdate(cfm, dk)), digest2, true, command));
@@ -1122,7 +1124,7 @@ public class DataResolverTest extends AbstractReadResponseTest
         verifier.expectDigest(peer1, digest1, false);
         verifier.expectDigest(peer2, digest2, false);
 
-        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime(),  verifier);
+        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ALL), readRepair, nanoTime(), verifier);
 
         resolver.preprocess(response(peer1, iter(PartitionUpdate.emptyUpdate(cfm, dk)), digest1, false, command));
         resolver.preprocess(response(peer2, iter(PartitionUpdate.emptyUpdate(cfm, dk)), digest2, false, command));
@@ -1143,7 +1145,7 @@ public class DataResolverTest extends AbstractReadResponseTest
         verifier.expectDigest(peer1, digest1, true);
         verifier.expectDigest(peer2, digest2, true);
 
-        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime(),  verifier);
+        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ALL), readRepair, nanoTime(), verifier);
 
         resolver.preprocess(response(peer1, iter(new RowUpdateBuilder(cfm, nowInSec, 1L, dk).clustering("1") .buildUpdate()), digest1, true, command));
         resolver.preprocess(response(peer2, iter(PartitionUpdate.emptyUpdate(cfm, dk)), digest2, true, command));
@@ -1163,7 +1165,7 @@ public class DataResolverTest extends AbstractReadResponseTest
         TestRepairedDataVerifier verifier = new TestRepairedDataVerifier();
         verifier.expectDigest(peer1, digest1, true);
 
-        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime(),  verifier);
+        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ALL), readRepair, nanoTime(), verifier);
 
         resolver.preprocess(response(peer1, iter(PartitionUpdate.emptyUpdate(cfm,dk)), digest1, true, command));
 
@@ -1185,7 +1187,7 @@ public class DataResolverTest extends AbstractReadResponseTest
         InetAddressAndPort peer2 = replicas.get(1).endpoint();
         verifier.expectDigest(peer1, digest1, true);
 
-        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime(),  verifier);
+        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ALL), readRepair, nanoTime(), verifier);
 
         resolver.preprocess(response(peer1, iter(PartitionUpdate.emptyUpdate(cfm,dk)), digest1, true, command));
         // peer2 is advertising an older version, so when we deserialize its response there are two things to note:
@@ -1217,7 +1219,7 @@ public class DataResolverTest extends AbstractReadResponseTest
         InetAddressAndPort peer2 = replicas.get(1).endpoint();
         verifier.expectDigest(peer1, digest1, true);
 
-        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ConsistencyLevel.ALL), readRepair, System.nanoTime(),  verifier);
+        DataResolver resolver = resolverWithVerifier(command, plan(replicas, ALL), readRepair, nanoTime(), verifier);
 
         resolver.preprocess(response(peer1, iter(PartitionUpdate.emptyUpdate(cfm,dk)), digest1, true, command));
         resolver.preprocess(response(peer2, iter(PartitionUpdate.emptyUpdate(cfm,dk)), digest2, true, command));
diff --git a/test/unit/org/apache/cassandra/service/reads/ReadExecutorTest.java b/test/unit/org/apache/cassandra/service/reads/ReadExecutorTest.java
index ac68205..962801f 100644
--- a/test/unit/org/apache/cassandra/service/reads/ReadExecutorTest.java
+++ b/test/unit/org/apache/cassandra/service/reads/ReadExecutorTest.java
@@ -45,8 +45,11 @@ import org.apache.cassandra.net.NoPayload;
 import org.apache.cassandra.net.Verb;
 import org.apache.cassandra.schema.KeyspaceParams;
 
+import static java.util.concurrent.TimeUnit.DAYS;
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static org.apache.cassandra.db.ConsistencyLevel.LOCAL_QUORUM;
 import static org.apache.cassandra.locator.ReplicaUtils.full;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertSame;
 import static org.junit.Assert.assertTrue;
@@ -92,7 +95,7 @@ public class ReadExecutorTest
     {
         assertEquals(0, cfs.metric.speculativeInsufficientReplicas.getCount());
         assertEquals(0, ks.metric.speculativeInsufficientReplicas.getCount());
-        AbstractReadExecutor executor = new AbstractReadExecutor.NeverSpeculatingReadExecutor(cfs, new MockSinglePartitionReadCommand(), plan(targets, ConsistencyLevel.LOCAL_QUORUM), System.nanoTime(), true);
+        AbstractReadExecutor executor = new AbstractReadExecutor.NeverSpeculatingReadExecutor(cfs, new MockSinglePartitionReadCommand(), plan(targets, LOCAL_QUORUM), nanoTime(), true);
         executor.maybeTryAdditionalReplicas();
         try
         {
@@ -107,7 +110,7 @@ public class ReadExecutorTest
         assertEquals(1, ks.metric.speculativeInsufficientReplicas.getCount());
 
         //Shouldn't increment
-        executor = new AbstractReadExecutor.NeverSpeculatingReadExecutor(cfs, new MockSinglePartitionReadCommand(), plan(targets, ConsistencyLevel.LOCAL_QUORUM), System.nanoTime(), false);
+        executor = new AbstractReadExecutor.NeverSpeculatingReadExecutor(cfs, new MockSinglePartitionReadCommand(), plan(targets, LOCAL_QUORUM), nanoTime(), false);
         executor.maybeTryAdditionalReplicas();
         try
         {
@@ -133,7 +136,7 @@ public class ReadExecutorTest
         assertEquals(0, cfs.metric.speculativeFailedRetries.getCount());
         assertEquals(0, ks.metric.speculativeRetries.getCount());
         assertEquals(0, ks.metric.speculativeFailedRetries.getCount());
-        AbstractReadExecutor executor = new AbstractReadExecutor.SpeculatingReadExecutor(cfs, new MockSinglePartitionReadCommand(TimeUnit.DAYS.toMillis(365)), plan(ConsistencyLevel.LOCAL_QUORUM, targets, targets.subList(0, 2)), System.nanoTime());
+        AbstractReadExecutor executor = new AbstractReadExecutor.SpeculatingReadExecutor(cfs, new MockSinglePartitionReadCommand(DAYS.toMillis(365)), plan(LOCAL_QUORUM, targets, targets.subList(0, 2)), nanoTime());
         executor.maybeTryAdditionalReplicas();
         new Thread()
         {
@@ -174,7 +177,7 @@ public class ReadExecutorTest
         assertEquals(0, cfs.metric.speculativeFailedRetries.getCount());
         assertEquals(0, ks.metric.speculativeRetries.getCount());
         assertEquals(0, ks.metric.speculativeFailedRetries.getCount());
-        AbstractReadExecutor executor = new AbstractReadExecutor.SpeculatingReadExecutor(cfs, new MockSinglePartitionReadCommand(), plan(ConsistencyLevel.LOCAL_QUORUM, targets, targets.subList(0, 2)), System.nanoTime());
+        AbstractReadExecutor executor = new AbstractReadExecutor.SpeculatingReadExecutor(cfs, new MockSinglePartitionReadCommand(), plan(LOCAL_QUORUM, targets, targets.subList(0, 2)), nanoTime());
         executor.maybeTryAdditionalReplicas();
         try
         {
@@ -200,7 +203,7 @@ public class ReadExecutorTest
     {
         MockSinglePartitionReadCommand command = new MockSinglePartitionReadCommand(TimeUnit.DAYS.toMillis(365));
         ReplicaPlan.ForTokenRead plan = plan(ConsistencyLevel.LOCAL_ONE, targets, targets.subList(0, 1));
-        AbstractReadExecutor executor = new AbstractReadExecutor.SpeculatingReadExecutor(cfs, command, plan, System.nanoTime());
+        AbstractReadExecutor executor = new AbstractReadExecutor.SpeculatingReadExecutor(cfs, command, plan, nanoTime());
 
         // Issue an initial request against the first endpoint...
         executor.executeAsync();
diff --git a/test/unit/org/apache/cassandra/service/reads/range/RangeCommandIteratorTest.java b/test/unit/org/apache/cassandra/service/reads/range/RangeCommandIteratorTest.java
index d82a503..ea56464 100644
--- a/test/unit/org/apache/cassandra/service/reads/range/RangeCommandIteratorTest.java
+++ b/test/unit/org/apache/cassandra/service/reads/range/RangeCommandIteratorTest.java
@@ -42,6 +42,7 @@ import org.apache.cassandra.locator.ReplicaPlans;
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.utils.CloseableIterator;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.assertEquals;
 
 public class RangeCommandIteratorTest
@@ -104,27 +105,27 @@ public class RangeCommandIteratorTest
 
         // without range merger, there will be 2 batches requested: 1st batch with 1 range and 2nd batch with remaining ranges
         CloseableIterator<ReplicaPlan.ForRangeRead> replicaPlans = replicaPlanIterator(keyRange, keyspace, false);
-        RangeCommandIterator data = new RangeCommandIterator(replicaPlans, command, 1, 1000, vnodeCount, System.nanoTime());
+        RangeCommandIterator data = new RangeCommandIterator(replicaPlans, command, 1, 1000, vnodeCount, nanoTime());
         verifyRangeCommandIterator(data, rows, 2, vnodeCount);
 
         // without range merger and initial cf=5, there will be 1 batches requested: 5 vnode ranges for 1st batch
         replicaPlans = replicaPlanIterator(keyRange, keyspace, false);
-        data = new RangeCommandIterator(replicaPlans, command, vnodeCount, 1000, vnodeCount, System.nanoTime());
+        data = new RangeCommandIterator(replicaPlans, command, vnodeCount, 1000, vnodeCount, nanoTime());
         verifyRangeCommandIterator(data, rows, 1, vnodeCount);
 
         // without range merger and max cf=1, there will be 5 batches requested: 1 vnode range per batch
         replicaPlans = replicaPlanIterator(keyRange, keyspace, false);
-        data = new RangeCommandIterator(replicaPlans, command, 1, 1, vnodeCount, System.nanoTime());
+        data = new RangeCommandIterator(replicaPlans, command, 1, 1, vnodeCount, nanoTime());
         verifyRangeCommandIterator(data, rows, vnodeCount, vnodeCount);
 
         // with range merger, there will be only 1 batch requested, as all ranges share the same replica - localhost
         replicaPlans = replicaPlanIterator(keyRange, keyspace, true);
-        data = new RangeCommandIterator(replicaPlans, command, 1, 1000, vnodeCount, System.nanoTime());
+        data = new RangeCommandIterator(replicaPlans, command, 1, 1000, vnodeCount, nanoTime());
         verifyRangeCommandIterator(data, rows, 1, vnodeCount);
 
         // with range merger and max cf=1, there will be only 1 batch requested, as all ranges share the same replica - localhost
         replicaPlans = replicaPlanIterator(keyRange, keyspace, true);
-        data = new RangeCommandIterator(replicaPlans, command, 1, 1, vnodeCount, System.nanoTime());
+        data = new RangeCommandIterator(replicaPlans, command, 1, 1, vnodeCount, nanoTime());
         verifyRangeCommandIterator(data, rows, 1, vnodeCount);
     }
 
diff --git a/test/unit/org/apache/cassandra/service/reads/range/RangeCommandsTest.java b/test/unit/org/apache/cassandra/service/reads/range/RangeCommandsTest.java
index 294be2a..259a65f 100644
--- a/test/unit/org/apache/cassandra/service/reads/range/RangeCommandsTest.java
+++ b/test/unit/org/apache/cassandra/service/reads/range/RangeCommandsTest.java
@@ -39,6 +39,7 @@ import org.apache.cassandra.index.StubIndex;
 import org.apache.cassandra.schema.IndexMetadata;
 
 import static org.apache.cassandra.db.ConsistencyLevel.ONE;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.assertEquals;
 
 /**
@@ -73,7 +74,7 @@ public class RangeCommandsTest extends CQLTester
 
         // verify that a low concurrency factor is not capped by the max concurrency factor
         PartitionRangeReadCommand command = command(cfs, 50, 50);
-        try (RangeCommandIterator partitions = RangeCommands.rangeCommandIterator(command, ONE, System.nanoTime());
+        try (RangeCommandIterator partitions = RangeCommands.rangeCommandIterator(command, ONE, nanoTime());
              ReplicaPlanIterator ranges = new ReplicaPlanIterator(command.dataRange().keyRange(), keyspace, ONE))
         {
             assertEquals(2, partitions.concurrencyFactor());
@@ -83,7 +84,7 @@ public class RangeCommandsTest extends CQLTester
 
         // verify that a high concurrency factor is capped by the max concurrency factor
         command = command(cfs, 1000, 50);
-        try (RangeCommandIterator partitions = RangeCommands.rangeCommandIterator(command, ONE, System.nanoTime());
+        try (RangeCommandIterator partitions = RangeCommands.rangeCommandIterator(command, ONE, nanoTime());
              ReplicaPlanIterator ranges = new ReplicaPlanIterator(command.dataRange().keyRange(), keyspace, ONE))
         {
             assertEquals(MAX_CONCURRENCY_FACTOR, partitions.concurrencyFactor());
@@ -93,7 +94,7 @@ public class RangeCommandsTest extends CQLTester
 
         // with 0 estimated results per range the concurrency factor should be 1
         command = command(cfs, 1000, 0);
-        try (RangeCommandIterator partitions = RangeCommands.rangeCommandIterator(command, ONE, System.nanoTime());
+        try (RangeCommandIterator partitions = RangeCommands.rangeCommandIterator(command, ONE, nanoTime());
              ReplicaPlanIterator ranges = new ReplicaPlanIterator(command.dataRange().keyRange(), keyspace, ONE))
         {
             assertEquals(1, partitions.concurrencyFactor());
diff --git a/test/unit/org/apache/cassandra/service/reads/repair/AbstractReadRepairTest.java b/test/unit/org/apache/cassandra/service/reads/repair/AbstractReadRepairTest.java
index d36808f..7587993 100644
--- a/test/unit/org/apache/cassandra/service/reads/repair/AbstractReadRepairTest.java
+++ b/test/unit/org/apache/cassandra/service/reads/repair/AbstractReadRepairTest.java
@@ -80,6 +80,7 @@ import org.apache.cassandra.utils.ByteBufferUtil;
 import static org.apache.cassandra.locator.Replica.fullReplica;
 import static org.apache.cassandra.locator.ReplicaUtils.FULL_RANGE;
 import static org.apache.cassandra.net.Verb.INTERNAL_RSP;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 @Ignore
 public abstract  class AbstractReadRepairTest
@@ -98,7 +99,7 @@ public abstract  class AbstractReadRepairTest
     static EndpointsForRange replicas;
     static ReplicaPlan.ForRead<?> replicaPlan;
 
-    static long now = TimeUnit.NANOSECONDS.toMicros(System.nanoTime());
+    static long now = TimeUnit.NANOSECONDS.toMicros(nanoTime());
     static DecoratedKey key;
     static Cell<?> cell1;
     static Cell<?> cell2;
@@ -320,7 +321,7 @@ public abstract  class AbstractReadRepairTest
 
     public InstrumentedReadRepair createInstrumentedReadRepair(ReplicaPlan.Shared<?, ?> replicaPlan)
     {
-        return createInstrumentedReadRepair(command, replicaPlan, System.nanoTime());
+        return createInstrumentedReadRepair(command, replicaPlan, nanoTime());
 
     }
 
diff --git a/test/unit/org/apache/cassandra/service/reads/repair/BlockingReadRepairTest.java b/test/unit/org/apache/cassandra/service/reads/repair/BlockingReadRepairTest.java
index 43a1275..0666eb1 100644
--- a/test/unit/org/apache/cassandra/service/reads/repair/BlockingReadRepairTest.java
+++ b/test/unit/org/apache/cassandra/service/reads/repair/BlockingReadRepairTest.java
@@ -43,6 +43,9 @@ import org.apache.cassandra.locator.ReplicaUtils;
 import org.apache.cassandra.net.Message;
 import org.apache.cassandra.service.reads.ReadCallback;
 
+import static java.util.concurrent.TimeUnit.NANOSECONDS;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 public class BlockingReadRepairTest extends AbstractReadRepairTest
 {
     private static class InstrumentedReadRepairHandler
@@ -287,6 +290,6 @@ public class BlockingReadRepairTest extends AbstractReadRepairTest
 
     private boolean getCurrentRepairStatus(BlockingPartitionRepair handler)
     {
-        return handler.awaitRepairsUntil(System.nanoTime(), TimeUnit.NANOSECONDS);
+        return handler.awaitRepairsUntil(nanoTime(), NANOSECONDS);
     }
 }
diff --git a/test/unit/org/apache/cassandra/service/reads/repair/DiagEventsBlockingReadRepairTest.java b/test/unit/org/apache/cassandra/service/reads/repair/DiagEventsBlockingReadRepairTest.java
index ae83efb..713cef6 100644
--- a/test/unit/org/apache/cassandra/service/reads/repair/DiagEventsBlockingReadRepairTest.java
+++ b/test/unit/org/apache/cassandra/service/reads/repair/DiagEventsBlockingReadRepairTest.java
@@ -49,6 +49,9 @@ import org.apache.cassandra.net.Message;
 import org.apache.cassandra.service.reads.ReadCallback;
 import org.apache.cassandra.service.reads.repair.ReadRepairEvent.ReadRepairEventType;
 
+import static java.util.concurrent.TimeUnit.NANOSECONDS;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 /**
  * Variation of {@link BlockingReadRepair} using diagnostic events instead of instrumentation for test validation.
  */
@@ -110,7 +113,7 @@ public class DiagEventsBlockingReadRepairTest extends AbstractReadRepairTest
 
     private boolean getCurrentRepairStatus(BlockingPartitionRepair handler)
     {
-        return handler.awaitRepairsUntil(System.nanoTime(), TimeUnit.NANOSECONDS);
+        return handler.awaitRepairsUntil(nanoTime(), NANOSECONDS);
     }
 
     public InstrumentedReadRepair createInstrumentedReadRepair(ReadCommand command, ReplicaPlan.Shared<?,?> replicaPlan, long queryStartNanoTime)
diff --git a/test/unit/org/apache/cassandra/service/reads/repair/ReadRepairTest.java b/test/unit/org/apache/cassandra/service/reads/repair/ReadRepairTest.java
index dad9aa4..82de4f3 100644
--- a/test/unit/org/apache/cassandra/service/reads/repair/ReadRepairTest.java
+++ b/test/unit/org/apache/cassandra/service/reads/repair/ReadRepairTest.java
@@ -58,7 +58,9 @@ import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.schema.Tables;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
+import static java.util.concurrent.TimeUnit.NANOSECONDS;
 import static org.apache.cassandra.locator.ReplicaUtils.full;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 public class ReadRepairTest
 {
@@ -86,7 +88,7 @@ public class ReadRepairTest
         }
     }
 
-    static long now = TimeUnit.NANOSECONDS.toMicros(System.nanoTime());
+    static long now = TimeUnit.NANOSECONDS.toMicros(nanoTime());
     static DecoratedKey key;
     static Cell<?> cell1;
     static Cell<?> cell2;
@@ -348,6 +350,6 @@ public class ReadRepairTest
 
     private boolean getCurrentRepairStatus(BlockingPartitionRepair handler)
     {
-        return handler.awaitRepairsUntil(System.nanoTime(), TimeUnit.NANOSECONDS);
+        return handler.awaitRepairsUntil(nanoTime(), NANOSECONDS);
     }
 }
diff --git a/test/unit/org/apache/cassandra/tools/OfflineToolUtils.java b/test/unit/org/apache/cassandra/tools/OfflineToolUtils.java
index c1678d7..0fea448 100644
--- a/test/unit/org/apache/cassandra/tools/OfflineToolUtils.java
+++ b/test/unit/org/apache/cassandra/tools/OfflineToolUtils.java
@@ -66,7 +66,7 @@ public abstract class OfflineToolUtils
     "ScheduledTasks:[1-9]",
     "OptionalTasks:[1-9]",
     "Reference-Reaper:[1-9]",
-    "LocalPool-Cleaner:[1-9]",
+    "LocalPool-Cleaner(-networking|-chunk-cache)?:[1-9]",
     "CacheCleanupExecutor:[1-9]",
     "CompactionExecutor:[1-9]",
     "ValidationExecutor:[1-9]",
@@ -76,6 +76,9 @@ public abstract class OfflineToolUtils
     "Strong-Reference-Leak-Detector:[1-9]",
     "Background_Reporter:[1-9]",
     "EXPIRING-MAP-REAPER:[1-9]",
+    "ObjectCleanerThread",
+    "process reaper"   // spawned by the jvm when executing external processes
+                       // and may still be active when we check
     };
 
     public void assertNoUnexpectedThreadsStarted(String[] expectedThreadNames, String[] optionalThreadNames)
diff --git a/test/unit/org/apache/cassandra/triggers/TriggersSchemaTest.java b/test/unit/org/apache/cassandra/triggers/TriggersSchemaTest.java
index 31111bd..cd59aa1 100644
--- a/test/unit/org/apache/cassandra/triggers/TriggersSchemaTest.java
+++ b/test/unit/org/apache/cassandra/triggers/TriggersSchemaTest.java
@@ -32,13 +32,14 @@ import org.apache.cassandra.schema.TriggerMetadata;
 import org.apache.cassandra.schema.Triggers;
 import org.apache.cassandra.schema.MigrationManager;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.*;
 
 public class TriggersSchemaTest
 {
-    String ksName = "ks" + System.nanoTime();
-    String cfName = "cf" + System.nanoTime();
-    String triggerName = "trigger_" + System.nanoTime();
+    String ksName = "ks" + nanoTime();
+    String cfName = "cf" + nanoTime();
+    String triggerName = "trigger_" + nanoTime();
     String triggerClass = "org.apache.cassandra.triggers.NoSuchTrigger.class";
 
     @BeforeClass
diff --git a/test/unit/org/apache/cassandra/triggers/TriggersTest.java b/test/unit/org/apache/cassandra/triggers/TriggersTest.java
index 2cf0e84..1d32f53 100644
--- a/test/unit/org/apache/cassandra/triggers/TriggersTest.java
+++ b/test/unit/org/apache/cassandra/triggers/TriggersTest.java
@@ -38,6 +38,7 @@ import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.utils.FBUtilities;
 
 import static org.apache.cassandra.utils.ByteBufferUtil.toInt;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
@@ -123,7 +124,7 @@ public class TriggersTest
     @Test(expected=org.apache.cassandra.exceptions.InvalidRequestException.class)
     public void onCqlUpdateWithConditionsRejectGeneratedUpdatesForDifferentPartition() throws Exception
     {
-        String cf = "cf" + System.nanoTime();
+        String cf = "cf" + nanoTime();
         try
         {
             setupTableWithTrigger(cf, CrossPartitionTrigger.class);
@@ -139,7 +140,7 @@ public class TriggersTest
     @Test(expected=org.apache.cassandra.exceptions.InvalidRequestException.class)
     public void onCqlUpdateWithConditionsRejectGeneratedUpdatesForDifferentTable() throws Exception
     {
-        String cf = "cf" + System.nanoTime();
+        String cf = "cf" + nanoTime();
         try
         {
             setupTableWithTrigger(cf, CrossTableTrigger.class);
@@ -155,7 +156,7 @@ public class TriggersTest
     @Test(expected=org.apache.cassandra.exceptions.InvalidRequestException.class)
     public void ifTriggerThrowsErrorNoMutationsAreApplied() throws Exception
     {
-        String cf = "cf" + System.nanoTime();
+        String cf = "cf" + nanoTime();
         try
         {
             setupTableWithTrigger(cf, ErrorTrigger.class);
diff --git a/test/unit/org/apache/cassandra/utils/MonotonicClockTest.java b/test/unit/org/apache/cassandra/utils/MonotonicClockTest.java
index b2891a9..06b4418 100644
--- a/test/unit/org/apache/cassandra/utils/MonotonicClockTest.java
+++ b/test/unit/org/apache/cassandra/utils/MonotonicClockTest.java
@@ -17,6 +17,7 @@
  */
 package org.apache.cassandra.utils;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.apache.cassandra.utils.MonotonicClock.approxTime;
 import static org.junit.Assert.*;
 
@@ -27,7 +28,7 @@ public class MonotonicClockTest
     @Test
     public void testTimestampOrdering() throws Exception
     {
-        long nowNanos = System.nanoTime();
+        long nowNanos = nanoTime();
         long now = System.currentTimeMillis();
         long lastConverted = 0;
         for (long ii = 0; ii < 10000000; ii++)
@@ -39,7 +40,7 @@ public class MonotonicClockTest
                 Thread.sleep(1);
             }
 
-            nowNanos = Math.max(nowNanos, System.nanoTime());
+            nowNanos = Math.max(nowNanos, nanoTime());
             long convertedNow = approxTime.translate().toMillisSinceEpoch(nowNanos);
 
             int maxDiff = FBUtilities.isWindows ? 15 : 1;
diff --git a/test/unit/org/apache/cassandra/utils/SlidingTimeRateTest.java b/test/unit/org/apache/cassandra/utils/SlidingTimeRateTest.java
deleted file mode 100644
index 8dc4a14..0000000
--- a/test/unit/org/apache/cassandra/utils/SlidingTimeRateTest.java
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.cassandra.utils;
-
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-
-import org.junit.Assert;
-import org.junit.Ignore;
-import org.junit.Test;
-
-/**
- * No objects are created currently from SlidingTimeRate in Cassandra 4.0.
- * If you decide to use it, please check CASSANDRA-16713.
- * There still might be a bug, flaky test to be fixed before using it again.
- *
- * Skipping all tests for running now to clean he noise before 4.0 GA release.
- */
-public class SlidingTimeRateTest
-{
-    @Ignore
-    @Test
-    public void testUpdateAndGet()
-    {
-        SlidingTimeRate rate = new SlidingTimeRate(new TestTimeSource(), 10, 1, TimeUnit.SECONDS);
-        int updates = 100;
-        for (int i = 0; i < updates; i++)
-        {
-            rate.update(1);
-        }
-        Assert.assertEquals(updates, rate.get(TimeUnit.SECONDS), 0.0);
-    }
-
-    @Ignore
-    @Test
-    public void testUpdateAndGetBetweenWindows()
-    {
-        TestTimeSource time = new TestTimeSource();
-        SlidingTimeRate rate = new SlidingTimeRate(time, 5, 1, TimeUnit.SECONDS);
-        int updates = 100;
-        for (int i = 0; i < updates; i++)
-        {
-            rate.update(1);
-            time.sleep(100, TimeUnit.MILLISECONDS);
-        }
-        Assert.assertEquals(10, rate.get(TimeUnit.SECONDS), 0.0);
-    }
-
-    @Ignore
-    @Test
-    public void testUpdateAndGetPastWindowSize()
-    {
-        TestTimeSource time = new TestTimeSource();
-        SlidingTimeRate rate = new SlidingTimeRate(time, 5, 1, TimeUnit.SECONDS);
-        int updates = 100;
-        for (int i = 0; i < updates; i++)
-        {
-            rate.update(1);
-        }
-
-        time.sleep(6, TimeUnit.SECONDS);
-
-        Assert.assertEquals(0, rate.get(TimeUnit.SECONDS), 0.0);
-    }
-
-    @Ignore
-    @Test
-    public void testUpdateAndGetToPointInTime()
-    {
-        TestTimeSource time = new TestTimeSource();
-        SlidingTimeRate rate = new SlidingTimeRate(time, 5, 1, TimeUnit.SECONDS);
-        int updates = 10;
-        for (int i = 0; i < updates; i++)
-        {
-            rate.update(1);
-            time.sleep(100, TimeUnit.MILLISECONDS);
-        }
-
-        time.sleep(1, TimeUnit.SECONDS);
-
-        Assert.assertEquals(5, rate.get(TimeUnit.SECONDS), 0.0);
-        Assert.assertEquals(10, rate.get(1, TimeUnit.SECONDS), 0.0);
-    }
-
-    @Ignore
-    @Test
-    public void testDecay() throws InterruptedException
-    {
-        TestTimeSource time = new TestTimeSource();
-        SlidingTimeRate rate = new SlidingTimeRate(time, 5, 1, TimeUnit.SECONDS);
-        int updates = 10;
-        for (int i = 0; i < updates; i++)
-        {
-            rate.update(1);
-            time.sleep(100, TimeUnit.MILLISECONDS);
-        }
-        Assert.assertEquals(10, rate.get(TimeUnit.SECONDS), 0.0);
-
-        time.sleep(1, TimeUnit.SECONDS);
-
-        Assert.assertEquals(5, rate.get(TimeUnit.SECONDS), 0.0);
-
-        time.sleep(2, TimeUnit.SECONDS);
-
-        Assert.assertEquals(2.5, rate.get(TimeUnit.SECONDS), 0.0);
-    }
-
-    @Ignore
-    @Test
-    public void testPruning()
-    {
-        TestTimeSource time = new TestTimeSource();
-        SlidingTimeRate rate = new SlidingTimeRate(time, 5, 1, TimeUnit.SECONDS);
-
-        rate.update(1);
-        Assert.assertEquals(1, rate.size());
-
-        time.sleep(6, TimeUnit.SECONDS);
-
-        rate.prune();
-        Assert.assertEquals(0, rate.size());
-    }
-
-    @Ignore
-    @Test
-    public void testConcurrentUpdateAndGet() throws InterruptedException
-    {
-        final ExecutorService executor = Executors.newFixedThreadPool(FBUtilities.getAvailableProcessors());
-        final TestTimeSource time = new TestTimeSource();
-        final SlidingTimeRate rate = new SlidingTimeRate(time, 5, 1, TimeUnit.SECONDS);
-        int updates = 100000;
-        for (int i = 0; i < updates; i++)
-        {
-            executor.submit(() -> {
-                time.sleep(1, TimeUnit.MILLISECONDS);
-                rate.update(1);
-            });
-        }
-
-        executor.shutdown();
-
-        Assert.assertTrue(executor.awaitTermination(1, TimeUnit.MINUTES));
-        Assert.assertEquals(1000, rate.get(TimeUnit.SECONDS), 100.0);
-    }
-}
diff --git a/test/unit/org/apache/cassandra/utils/TestTimeSource.java b/test/unit/org/apache/cassandra/utils/TestTimeSource.java
deleted file mode 100644
index 4ecd086..0000000
--- a/test/unit/org/apache/cassandra/utils/TestTimeSource.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.cassandra.utils;
-
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicLong;
-
-public class TestTimeSource implements TimeSource
-{
-    private final AtomicLong timeInMillis = new AtomicLong(System.currentTimeMillis());
-
-    @Override
-    public long currentTimeMillis()
-    {
-        return timeInMillis.get();
-    }
-
-    @Override
-    public long nanoTime()
-    {
-        return timeInMillis.get() * 1_000_000;
-    }
-
-    @Override
-    public TimeSource sleep(long sleepFor, TimeUnit unit)
-    {
-        long current = timeInMillis.get();
-        long sleepInMillis = TimeUnit.MILLISECONDS.convert(sleepFor, unit);
-        boolean elapsed;
-        do
-        {
-            long newTime = current + sleepInMillis;
-            elapsed = timeInMillis.compareAndSet(current, newTime);
-            if (!elapsed)
-            {
-                long updated = timeInMillis.get();
-                if (updated - current >= sleepInMillis)
-                {
-                    elapsed = true;
-                }
-                else
-                {
-                    sleepInMillis -= updated - current;
-                    current = updated;
-                }
-            }
-        }
-        while (!elapsed);
-        return this;
-    }
-
-    @Override
-    public TimeSource sleepUninterruptibly(long sleepFor, TimeUnit unit)
-    {
-        return sleep(sleepFor, unit);
-    }
-}
diff --git a/test/unit/org/apache/cassandra/utils/binlog/BinLogTest.java b/test/unit/org/apache/cassandra/utils/binlog/BinLogTest.java
index 311b924..3a21ed1 100644
--- a/test/unit/org/apache/cassandra/utils/binlog/BinLogTest.java
+++ b/test/unit/org/apache/cassandra/utils/binlog/BinLogTest.java
@@ -40,6 +40,7 @@ import net.openhft.chronicle.queue.RollCycles;
 import net.openhft.chronicle.wire.WireOut;
 import org.apache.cassandra.Util;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotEquals;
@@ -50,7 +51,7 @@ public class BinLogTest
 {
     public static Path tempDir() throws Exception
     {
-        return Files.createTempDirectory("binlogtest" + System.nanoTime());
+        return Files.createTempDirectory("binlogtest" + nanoTime());
     }
 
     private static final String testString = "ry@nlikestheyankees";
diff --git a/tools/stress/src/org/apache/cassandra/io/sstable/StressCQLSSTableWriter.java b/tools/stress/src/org/apache/cassandra/io/sstable/StressCQLSSTableWriter.java
index bc6756b..c087cab 100644
--- a/tools/stress/src/org/apache/cassandra/io/sstable/StressCQLSSTableWriter.java
+++ b/tools/stress/src/org/apache/cassandra/io/sstable/StressCQLSSTableWriter.java
@@ -58,6 +58,8 @@ import org.apache.cassandra.service.ClientState;
 import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+
 /**
  * Utility to write SSTables.
  * <p>
@@ -245,7 +247,7 @@ public class StressCQLSSTableWriter implements Closeable
         List<ByteBuffer> keys = insert.buildPartitionKeyNames(options);
         SortedSet<Clustering<?>> clusterings = insert.createClustering(options);
 
-        long now = System.currentTimeMillis();
+        long now = currentTimeMillis();
         // Note that we asks indexes to not validate values (the last 'false' arg below) because that triggers a 'Keyspace.open'
         // and that forces a lot of initialization that we don't want.
         UpdateParameters params = new UpdateParameters(insert.metadata(),
diff --git a/tools/stress/src/org/apache/cassandra/stress/StressAction.java b/tools/stress/src/org/apache/cassandra/stress/StressAction.java
index 3268182..f9912ac 100644
--- a/tools/stress/src/org/apache/cassandra/stress/StressAction.java
+++ b/tools/stress/src/org/apache/cassandra/stress/StressAction.java
@@ -39,6 +39,8 @@ import org.jctools.queues.SpscUnboundedArrayQueue;
 
 import com.google.common.util.concurrent.Uninterruptibles;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 public class StressAction implements Runnable
 {
 
@@ -315,7 +317,7 @@ public class StressAction implements Runnable
 
         void start()
         {
-            start = System.nanoTime();
+            start = nanoTime();
         }
 
         /**
@@ -361,7 +363,7 @@ public class StressAction implements Runnable
                 long intendedTime = rateLimiter.acquire(partitionCount);
                 op.intendedStartNs(intendedTime);
                 long now;
-                while ((now = System.nanoTime()) < intendedTime)
+                while ((now = nanoTime()) < intendedTime)
                 {
                     LockSupport.parkNanos(intendedTime - now);
                 }
diff --git a/tools/stress/src/org/apache/cassandra/stress/report/StressMetrics.java b/tools/stress/src/org/apache/cassandra/stress/report/StressMetrics.java
index b2afd1b..b50dfd2 100644
--- a/tools/stress/src/org/apache/cassandra/stress/report/StressMetrics.java
+++ b/tools/stress/src/org/apache/cassandra/stress/report/StressMetrics.java
@@ -54,7 +54,10 @@ import org.apache.cassandra.stress.util.ResultLogger;
 import org.apache.cassandra.stress.util.Uncertainty;
 import org.apache.cassandra.utils.FBUtilities;
 
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static java.util.concurrent.TimeUnit.NANOSECONDS;
+import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 public class StressMetrics implements MeasurementSink
 {
@@ -65,8 +68,8 @@ public class StressMetrics implements MeasurementSink
     private final CountDownLatch stopped = new CountDownLatch(1);
     private final Callable<JmxCollector.GcStats> gcStatsCollector;
     private final HistogramLogWriter histogramWriter;
-    private final long epochNs = System.nanoTime();
-    private final long epochMs = System.currentTimeMillis();
+    private final long epochNs = nanoTime();
+    private final long epochMs = currentTimeMillis();
 
     private volatile JmxCollector.GcStats totalGcStats = new GcStats(0);
 
@@ -159,10 +162,10 @@ public class StressMetrics implements MeasurementSink
     private void reportingLoop(final long logIntervalMillis)
     {
         // align report timing to the nearest second
-        final long currentTimeMs = System.currentTimeMillis();
+        final long currentTimeMs = currentTimeMillis();
         final long startTimeMs = currentTimeMs - (currentTimeMs % 1000);
         // reporting interval starts rounded to the second
-        long reportingStartNs = (System.nanoTime() - TimeUnit.MILLISECONDS.toNanos(currentTimeMs - startTimeMs));
+        long reportingStartNs = (nanoTime() - MILLISECONDS.toNanos(currentTimeMs - startTimeMs));
         final long parkIntervalNs = TimeUnit.MILLISECONDS.toNanos(logIntervalMillis);
         try
         {
@@ -178,7 +181,7 @@ public class StressMetrics implements MeasurementSink
                 reportingStartNs += parkIntervalNs;
             }
 
-            final long end = System.nanoTime();
+            final long end = nanoTime();
             recordInterval(end, end - reportingStartNs);
         }
         catch (Exception e)
@@ -198,7 +201,7 @@ public class StressMetrics implements MeasurementSink
     {
         long parkFor;
         while (!stop &&
-               (parkFor = until - System.nanoTime()) > 0)
+               (parkFor = until - nanoTime()) > 0)
         {
             LockSupport.parkNanos(parkFor);
         }
diff --git a/tools/stress/src/org/apache/cassandra/stress/report/Timer.java b/tools/stress/src/org/apache/cassandra/stress/report/Timer.java
index b3df52f..0937721 100644
--- a/tools/stress/src/org/apache/cassandra/stress/report/Timer.java
+++ b/tools/stress/src/org/apache/cassandra/stress/report/Timer.java
@@ -23,6 +23,8 @@ package org.apache.cassandra.stress.report;
 
 import org.apache.cassandra.stress.StressAction.MeasurementSink;
 
+import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+
 // a timer - this timer must be used by a single thread, and co-ordinates with other timers by
 public final class Timer
 {
@@ -42,7 +44,7 @@ public final class Timer
 
     public void stop(long partitionCount, long rowCount, boolean error)
     {
-        sink.record(opType, intendedTimeNs, startTimeNs, System.nanoTime(), rowCount, partitionCount, error);
+        sink.record(opType, intendedTimeNs, startTimeNs, nanoTime(), rowCount, partitionCount, error);
         resetTimes();
     }
 
@@ -58,6 +60,6 @@ public final class Timer
 
     public void start()
     {
-        startTimeNs = System.nanoTime();
+        startTimeNs = nanoTime();
     }
 }
\ No newline at end of file

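The substitution throughout this patch replaces direct System.nanoTime()/System.currentTimeMillis() calls with the static helpers imported from org.apache.cassandra.utils.Clock.Global, so tests can install a controllable time source instead of the JDK clock. A minimal, self-contained sketch of that pattern follows; it is not the project's implementation, and apart from nanoTime() and currentTimeMillis() (visible in the static imports above) every name in it (SystemClock, ManualClock, unsafeSet) is a hypothetical placeholder.

    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicReference;

    // Illustrative only: a mockable clock in the spirit of Clock.Global.
    interface Clock
    {
        long nanoTime();
        long currentTimeMillis();

        // Static holder so production code calls Clock.Global.nanoTime()
        // while tests swap in a controllable implementation.
        final class Global
        {
            private static final AtomicReference<Clock> INSTANCE =
                    new AtomicReference<>(new SystemClock());

            private Global() {}

            public static long nanoTime()          { return INSTANCE.get().nanoTime(); }
            public static long currentTimeMillis() { return INSTANCE.get().currentTimeMillis(); }

            // Test-only hook (hypothetical); the real project may wire the
            // implementation differently, e.g. at startup.
            public static void unsafeSet(Clock clock) { INSTANCE.set(clock); }
        }
    }

    // Default implementation delegates straight to the JDK.
    final class SystemClock implements Clock
    {
        public long nanoTime()          { return System.nanoTime(); }
        public long currentTimeMillis() { return System.currentTimeMillis(); }
    }

    // A manually advanced clock a test might install; not thread-safe, sketch only.
    final class ManualClock implements Clock
    {
        private long nanos = 0;

        public long nanoTime()          { return nanos; }
        public long currentTimeMillis() { return TimeUnit.NANOSECONDS.toMillis(nanos); }

        public void advance(long amount, TimeUnit unit) { nanos += unit.toNanos(amount); }
    }

Under those assumptions, a test would install a ManualClock through the hypothetical unsafeSet hook and advance it deterministically rather than sleeping, which is what makes time-dependent code such as the resolvers and read executors above unit-testable.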