You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@cassandra.apache.org by dc...@apache.org on 2020/08/19 22:27:18 UTC

[cassandra] branch cassandra-3.0 updated: Handle difference in timestamp precision between java8 and java11 in LogFile.java

This is an automated email from the ASF dual-hosted git repository.

dcapwell pushed a commit to branch cassandra-3.0
in repository https://gitbox.apache.org/repos/asf/cassandra.git


The following commit(s) were added to refs/heads/cassandra-3.0 by this push:
     new c9ba33c  Handle difference in timestamp precision between java8 and java11 in LogFIle.java
c9ba33c is described below

commit c9ba33c334c813ece786dff57a853589852a1b65
Author: Marcus Eriksson <ma...@apache.org>
AuthorDate: Wed Aug 19 11:29:11 2020 -0700

    Handle difference in timestamp precision between java8 and java11 in LogFIle.java
    
    patch by Marcus Eriksson; reviewed by David Capwell for CASSANDRA-16050
---
 CHANGES.txt                                        |  1 +
 .../org/apache/cassandra/db/lifecycle/LogFile.java | 19 ++++++--
 .../cassandra/db/lifecycle/LogTransactionTest.java | 53 ++++++++++++++++++++++
 3 files changed, 70 insertions(+), 3 deletions(-)

diff --git a/CHANGES.txt b/CHANGES.txt
index 9b4f8c3..2d66ee2 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -6,6 +6,7 @@
  * 3.x fails to start if commit log has range tombstones from a column which is also deleted (CASSANDRA-15970)
  * Forbid altering UDTs used in partition keys (CASSANDRA-15933)
  * Fix empty/null json string representation (CASSANDRA-15896)
+ * Handle difference in timestamp precision between java8 and java11 in LogFIle.java (CASSANDRA-16050)
 Merged from 2.2:
  * Fix CQL parsing of collections when the column type is reversed (CASSANDRA-15814)
 
diff --git a/src/java/org/apache/cassandra/db/lifecycle/LogFile.java b/src/java/org/apache/cassandra/db/lifecycle/LogFile.java
index 6e820df..ac64f13 100644
--- a/src/java/org/apache/cassandra/db/lifecycle/LogFile.java
+++ b/src/java/org/apache/cassandra/db/lifecycle/LogFile.java
@@ -231,18 +231,31 @@ final class LogFile implements AutoCloseable
         // it matches. Because we delete files from oldest to newest, the latest update time should
         // always match.
         record.status.onDiskRecord = record.withExistingFiles(existingFiles);
-        if (record.updateTime != record.status.onDiskRecord.updateTime && record.status.onDiskRecord.updateTime > 0)
+        // we can have transaction files with mismatching updateTime resolutions due to switching between jdk8 and jdk11, truncate both to be consistent:
+        if (truncateMillis(record.updateTime) != truncateMillis(record.status.onDiskRecord.updateTime) && record.status.onDiskRecord.updateTime > 0)
         {
-            record.setError(String.format("Unexpected files detected for sstable [%s], " +
-                                          "record [%s]: last update time [%tT] should have been [%tT]",
+            record.setError(String.format("Unexpected files detected for sstable [%s]: " +
+                                          "record [%s]: last update time [%tc] (%d) should have been [%tc] (%d)",
                                           record.fileName(),
                                           record,
                                           record.status.onDiskRecord.updateTime,
+                                          record.status.onDiskRecord.updateTime,
+                                          record.updateTime,
                                           record.updateTime));
 
         }
     }
 
+    /**
+     * due to difference in timestamp resolution between jdk8 and 11 we need to return second resolution here (number
+     * should end in 000): https://bugs.openjdk.java.net/browse/JDK-8177809
+     */
+    static long truncateMillis(long lastModified)
+    {
+        return lastModified - (lastModified % 1000);
+    }
+
+
     static void verifyRecordWithCorruptedLastRecord(LogRecord record)
     {
         if (record.type == Type.REMOVE && record.status.onDiskRecord.numFiles < record.numFiles)
diff --git a/test/unit/org/apache/cassandra/db/lifecycle/LogTransactionTest.java b/test/unit/org/apache/cassandra/db/lifecycle/LogTransactionTest.java
index 2544a0d..7ba1c39 100644
--- a/test/unit/org/apache/cassandra/db/lifecycle/LogTransactionTest.java
+++ b/test/unit/org/apache/cassandra/db/lifecycle/LogTransactionTest.java
@@ -1106,6 +1106,59 @@ public class LogTransactionTest extends AbstractTransactionalTest
     }
 
     @Test
+    public void testTruncateFileUpdateTime() throws IOException
+    {
+        // Idea is that we truncate the actual modification time on disk after creating the log file.
+        // On java11 this would fail since we would have millisecond resolution in the log file, but
+        // then the file gives second resolution.
+        testTruncatedModificationTimesHelper(sstable ->
+                                  {
+                                      // increase the modification time of the Data file
+                                      for (String filePath : sstable.getAllFilePaths())
+                                      {
+                                          File f = new File(filePath);
+                                          long lastModified = f.lastModified();
+                                          f.setLastModified(lastModified - (lastModified % 1000));
+                                      }
+                                  });
+    }
+
+    private static void testTruncatedModificationTimesHelper(Consumer<SSTableReader> modifier) throws IOException
+    {
+        ColumnFamilyStore cfs = MockSchema.newCFS(KEYSPACE);
+        File dataFolder = new Directories(cfs.metadata).getDirectoryForNewSSTables();
+        SSTableReader sstableOld = sstable(dataFolder, cfs, 0, 128);
+        SSTableReader sstableNew = sstable(dataFolder, cfs, 1, 128);
+
+        // simulate tracking sstables with a committed transaction except the checksum will be wrong
+        LogTransaction log = new LogTransaction(OperationType.COMPACTION);
+        assertNotNull(log);
+
+        log.trackNew(sstableNew);
+        LogTransaction.SSTableTidier tidier = log.obsoleted(sstableOld);
+
+        //modify the old sstable files
+        modifier.accept(sstableOld);
+
+        //Fake a commit
+        log.txnFile().commit();
+
+        LogTransaction.removeUnfinishedLeftovers(cfs.metadata);
+
+        // only the new files should be there
+        assertFiles(dataFolder.getPath(), Sets.newHashSet(sstableNew.getAllFilePaths()));
+        sstableNew.selfRef().release();
+
+        // complete the transaction to avoid LEAK errors
+        assertNull(log.complete(null));
+
+        assertFiles(dataFolder.getPath(), Sets.newHashSet(sstableNew.getAllFilePaths()));
+
+        // make sure to run the tidier to avoid any leaks in the logs
+        tidier.run();
+    }
+
+    @Test
     public void testGetTemporaryFilesSafeAfterObsoletion() throws Throwable
     {
         ColumnFamilyStore cfs = MockSchema.newCFS(KEYSPACE);


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@cassandra.apache.org
For additional commands, e-mail: commits-help@cassandra.apache.org