You are viewing a plain text version of this content; the canonical (linked) version is available in the mailing list archive.
Posted to commits@cassandra.apache.org by br...@apache.org on 2020/04/27 16:20:50 UTC

[cassandra] branch cassandra-3.11 updated (377ceb6 -> c190ab9)

This is an automated email from the ASF dual-hosted git repository.

brandonwilliams pushed a change to branch cassandra-3.11
in repository https://gitbox.apache.org/repos/asf/cassandra.git.


    from 377ceb6  Merge branch 'cassandra-3.0' into cassandra-3.11
     new 04be736  Fix chunk index overflow due to large sstable with small chunk length
     new c190ab9  Merge branch 'cassandra-3.0' into cassandra-3.11

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 CHANGES.txt                                        |  1 +
 .../cassandra/io/compress/CompressionMetadata.java |  6 +-
 .../compress/CompressedRandomAccessReaderTest.java | 86 +++++++++++++++++-----
 3 files changed, 73 insertions(+), 20 deletions(-)


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@cassandra.apache.org
For additional commands, e-mail: commits-help@cassandra.apache.org


[cassandra] 02/02: Merge branch 'cassandra-3.0' into cassandra-3.11

Posted by br...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

brandonwilliams pushed a commit to branch cassandra-3.11
in repository https://gitbox.apache.org/repos/asf/cassandra.git

commit c190ab9fcbc8a993e570e6cdafc2b90219f80109
Merge: 377ceb6 04be736
Author: Brandon Williams <br...@apache.org>
AuthorDate: Mon Apr 27 11:15:30 2020 -0500

    Merge branch 'cassandra-3.0' into cassandra-3.11

 CHANGES.txt                                        |  1 +
 .../cassandra/io/compress/CompressionMetadata.java |  6 +-
 .../compress/CompressedRandomAccessReaderTest.java | 86 +++++++++++++++++-----
 3 files changed, 73 insertions(+), 20 deletions(-)

diff --cc CHANGES.txt
index fd7ad6a,99f540c..b3c593b
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@@ -1,7 -1,5 +1,8 @@@
 -3.0.21
 +3.11.7
 + * Allow sstableloader to use SSL on the native port (CASSANDRA-14904)
 +Merged from 3.0:
+  * Fix chunk index overflow due to large sstable with small chunk length (CASSANDRA-15595)
 + * Allow selecting static column only when querying static index (CASSANDRA-14242)
   * cqlsh return non-zero status when STDIN CQL fails (CASSANDRA-15623)
   * Don't skip sstables in slice queries based only on local min/max/deletion timestamp (CASSANDRA-15690)
   * Memtable memory allocations may deadlock (CASSANDRA-15367)
diff --cc test/unit/org/apache/cassandra/io/compress/CompressedRandomAccessReaderTest.java
index 0d2a9fb,34ff94f..c718147
--- a/test/unit/org/apache/cassandra/io/compress/CompressedRandomAccessReaderTest.java
+++ b/test/unit/org/apache/cassandra/io/compress/CompressedRandomAccessReaderTest.java
@@@ -123,42 -118,77 +125,61 @@@ public class CompressedRandomAccessRead
          }
      }
  
-     private static void testResetAndTruncate(File f, boolean compressed, boolean usemmap, int junkSize) throws IOException
+     /**
+      * JIRA: CASSANDRA-15595 verify large position with small chunk length won't overflow chunk index
+      */
+     @Test
+     public void testChunkIndexOverflow() throws IOException
      {
-         final String filename = f.getAbsolutePath();
-         MetadataCollector sstableMetadataCollector = new MetadataCollector(new ClusteringComparator(BytesType.instance));
-         try(SequentialWriter writer = compressed
-                 ? new CompressedSequentialWriter(f, filename + ".metadata",
-                 null, SequentialWriterOption.DEFAULT,
-                 CompressionParams.snappy(), sstableMetadataCollector)
-                 : new SequentialWriter(f))
+         File file = File.createTempFile("chunk_idx_overflow", "1");
+         String filename = file.getAbsolutePath();
+         int chunkLength = 4096; // 4k
+ 
+         try
          {
-             writer.write("The quick ".getBytes());
-             DataPosition mark = writer.mark();
-             writer.write("blue fox jumps over the lazy dog".getBytes());
+             writeSSTable(file, CompressionParams.snappy(chunkLength), 10);
+             CompressionMetadata metadata = new CompressionMetadata(filename + ".metadata", file.length(), ChecksumType.CRC32);
  
-             // write enough to be sure to change chunk
-             for (int i = 0; i < junkSize; ++i)
+             long chunks = 2761628520L;
+             long midPosition = (chunks / 2L) * chunkLength;
+             int idx = 8 * (int) (midPosition / chunkLength); // before patch
+             assertTrue("Expect integer overflow", idx < 0);
+ 
+             try
              {
-                 writer.write((byte) 1);
+                 metadata.chunkFor(midPosition);
+                 fail("Expected to throw EOF exception with chunk idx larger than total number of chunks in the sstable");
+             }
+             catch (CorruptSSTableException e)
+             {
+                 assertTrue("Expect EOF, but got " + e.getCause(), e.getCause() instanceof EOFException);
              }
- 
-             writer.resetAndTruncate(mark);
-             writer.write("brown fox jumps over the lazy dog".getBytes());
-             writer.finish();
          }
-         assert f.exists();
+         finally
+         {
+             if (file.exists())
+                 assertTrue(file.delete());
+             File metadata = new File(filename + ".metadata");
+             if (metadata.exists())
+                 metadata.delete();
+         }
+     }
+ 
+     private static void testResetAndTruncate(File f, boolean compressed, boolean usemmap, int junkSize) throws IOException
+     {
+         final String filename = f.getAbsolutePath();
 -        try(ChannelProxy channel = new ChannelProxy(f))
 -        {
 -            writeSSTable(f, compressed ? CompressionParams.snappy() : null, junkSize);
 -
 -            CompressionMetadata compressionMetadata = compressed ? new CompressionMetadata(filename + ".metadata", f.length(), ChecksumType.CRC32) : null;
 -            RandomAccessReader.Builder builder = compressed
 -                                                 ? new CompressedRandomAccessReader.Builder(channel, compressionMetadata)
 -                                                 : new RandomAccessReader.Builder(channel);
 -
 -            if (usemmap)
 -            {
 -                if (compressed)
 -                    builder.regions(MmappedRegions.map(channel, compressionMetadata));
 -                else
 -                    builder.regions(MmappedRegions.map(channel, f.length()));
 -            }
++        writeSSTable(f, compressed ? CompressionParams.snappy() : null, junkSize);
  
 -            try(RandomAccessReader reader = builder.build())
 -            {
 -                String expected = "The quick brown fox jumps over the lazy dog";
 -                assertEquals(expected.length(), reader.length());
 -                byte[] b = new byte[expected.length()];
 -                reader.readFully(b);
 -                assert new String(b).equals(expected) : "Expecting '" + expected + "', got '" + new String(b) + '\'';
 -            }
 -
 -            if (usemmap)
 -                builder.regions.close();
 +        CompressionMetadata compressionMetadata = compressed ? new CompressionMetadata(filename + ".metadata", f.length(), ChecksumType.CRC32) : null;
 +        try (FileHandle.Builder builder = new FileHandle.Builder(filename).mmapped(usemmap).withCompressionMetadata(compressionMetadata);
 +             FileHandle fh = builder.complete();
 +             RandomAccessReader reader = fh.createReader())
 +        {
 +            String expected = "The quick brown fox jumps over the lazy dog";
 +            assertEquals(expected.length(), reader.length());
 +            byte[] b = new byte[expected.length()];
 +            reader.readFully(b);
 +            assert new String(b).equals(expected) : "Expecting '" + expected + "', got '" + new String(b) + '\'';
          }
          finally
          {
@@@ -170,6 -200,31 +191,33 @@@
          }
      }
  
+     private static void writeSSTable(File f, CompressionParams params, int junkSize) throws IOException
+     {
+         final String filename = f.getAbsolutePath();
+         MetadataCollector sstableMetadataCollector = new MetadataCollector(new ClusteringComparator(BytesType.instance));
+         try(SequentialWriter writer = params != null
 -                                      ? new CompressedSequentialWriter(f, filename + ".metadata", params, sstableMetadataCollector)
 -                                      : SequentialWriter.open(f))
++                ? new CompressedSequentialWriter(f, filename + ".metadata",
++                                                 null, SequentialWriterOption.DEFAULT,
++                                                 params, sstableMetadataCollector)
++                : new SequentialWriter(f))
+         {
+             writer.write("The quick ".getBytes());
+             DataPosition mark = writer.mark();
+             writer.write("blue fox jumps over the lazy dog".getBytes());
+ 
+             // write enough to be sure to change chunk
+             for (int i = 0; i < junkSize; ++i)
+             {
+                 writer.write((byte) 1);
+             }
+ 
+             writer.resetAndTruncate(mark);
+             writer.write("brown fox jumps over the lazy dog".getBytes());
+             writer.finish();
+         }
+         assert f.exists();
+     }
+ 
      /**
       * If the data read out doesn't match the checksum, an exception should be thrown
       */


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@cassandra.apache.org
For additional commands, e-mail: commits-help@cassandra.apache.org


[cassandra] 01/02: Fix chunk index overflow due to large sstable with small chunk length

Posted by br...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

brandonwilliams pushed a commit to branch cassandra-3.11
in repository https://gitbox.apache.org/repos/asf/cassandra.git

commit 04be7366fcf1b8edeb3495adcf37a171aa188d5c
Author: Zhao Yang <zh...@gmail.com>
AuthorDate: Mon Apr 27 01:09:27 2020 +0800

    Fix chunk index overflow due to large sstable with small chunk length
    
    patch by ZhaoYang; reviewed by brandonwilliams for CASSANDRA-15595
---
 CHANGES.txt                                        |   1 +
 .../cassandra/io/compress/CompressionMetadata.java |   6 +-
 .../compress/CompressedRandomAccessReaderTest.java | 100 +++++++++++++++------
 3 files changed, 80 insertions(+), 27 deletions(-)

diff --git a/CHANGES.txt b/CHANGES.txt
index 8d1c8ef..99f540c 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -1,4 +1,5 @@
 3.0.21
+ * Fix chunk index overflow due to large sstable with small chunk length (CASSANDRA-15595)
  * cqlsh return non-zero status when STDIN CQL fails (CASSANDRA-15623)
  * Don't skip sstables in slice queries based only on local min/max/deletion timestamp (CASSANDRA-15690)
  * Memtable memory allocations may deadlock (CASSANDRA-15367)
diff --git a/src/java/org/apache/cassandra/io/compress/CompressionMetadata.java b/src/java/org/apache/cassandra/io/compress/CompressionMetadata.java
index 101f722..10d1ae9 100644
--- a/src/java/org/apache/cassandra/io/compress/CompressionMetadata.java
+++ b/src/java/org/apache/cassandra/io/compress/CompressionMetadata.java
@@ -227,11 +227,15 @@ public class CompressionMetadata
     public Chunk chunkFor(long position)
     {
         // position of the chunk
-        int idx = 8 * (int) (position / parameters.chunkLength());
+        long idx = 8 * (position / parameters.chunkLength());
 
         if (idx >= chunkOffsetsSize)
             throw new CorruptSSTableException(new EOFException(), indexFilePath);
 
+        if (idx < 0)
+            throw new CorruptSSTableException(new IllegalArgumentException(String.format("Invalid negative chunk index %d with position %d", idx, position)),
+                                              indexFilePath);
+
         long chunkOffset = chunkOffsets.getLong(idx);
         long nextChunkOffset = (idx + 8 == chunkOffsetsSize)
                                 ? compressedFileLength
diff --git a/test/unit/org/apache/cassandra/io/compress/CompressedRandomAccessReaderTest.java b/test/unit/org/apache/cassandra/io/compress/CompressedRandomAccessReaderTest.java
index 0c96327..34ff94f 100644
--- a/test/unit/org/apache/cassandra/io/compress/CompressedRandomAccessReaderTest.java
+++ b/test/unit/org/apache/cassandra/io/compress/CompressedRandomAccessReaderTest.java
@@ -18,6 +18,7 @@
  */
 package org.apache.cassandra.io.compress;
 
+import java.io.EOFException;
 import java.io.File;
 import java.io.IOException;
 import java.io.RandomAccessFile;
@@ -43,6 +44,7 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertSame;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 public class CompressedRandomAccessReaderTest
 {
@@ -75,11 +77,11 @@ public class CompressedRandomAccessReaderTest
     {
         File f = File.createTempFile("compressed6791_", "3");
         String filename = f.getAbsolutePath();
-        try(ChannelProxy channel = new ChannelProxy(f))
+        try (ChannelProxy channel = new ChannelProxy(f))
         {
 
             MetadataCollector sstableMetadataCollector = new MetadataCollector(new ClusteringComparator(BytesType.instance));
-            try(CompressedSequentialWriter writer = new CompressedSequentialWriter(f, filename + ".metadata", CompressionParams.snappy(32), sstableMetadataCollector))
+            try (CompressedSequentialWriter writer = new CompressedSequentialWriter(f, filename + ".metadata", CompressionParams.snappy(32), sstableMetadataCollector))
             {
 
                 for (int i = 0; i < 20; i++)
@@ -97,9 +99,9 @@ public class CompressedRandomAccessReaderTest
                 writer.finish();
             }
 
-            try(RandomAccessReader reader = new CompressedRandomAccessReader.Builder(channel,
-                                                                                     new CompressionMetadata(filename + ".metadata", f.length(), ChecksumType.CRC32))
-                                            .build())
+            try (RandomAccessReader reader = new CompressedRandomAccessReader.Builder(channel,
+                                                                                      new CompressionMetadata(filename + ".metadata", f.length(), ChecksumType.CRC32))
+                    .build())
             {
                 String res = reader.readLine();
                 assertEquals(res, "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx");
@@ -110,37 +112,58 @@ public class CompressedRandomAccessReaderTest
         {
             if (f.exists())
                 assertTrue(f.delete());
-            File metadata = new File(filename+ ".metadata");
+            File metadata = new File(filename + ".metadata");
             if (metadata.exists())
                 metadata.delete();
         }
     }
 
-    private static void testResetAndTruncate(File f, boolean compressed, boolean usemmap, int junkSize) throws IOException
+    /**
+     * JIRA: CASSANDRA-15595 verify large position with small chunk length won't overflow chunk index
+     */
+    @Test
+    public void testChunkIndexOverflow() throws IOException
     {
-        final String filename = f.getAbsolutePath();
-        try(ChannelProxy channel = new ChannelProxy(f))
+        File file = File.createTempFile("chunk_idx_overflow", "1");
+        String filename = file.getAbsolutePath();
+        int chunkLength = 4096; // 4k
+
+        try
         {
-            MetadataCollector sstableMetadataCollector = new MetadataCollector(new ClusteringComparator(BytesType.instance));
-            try(SequentialWriter writer = compressed
-                ? new CompressedSequentialWriter(f, filename + ".metadata", CompressionParams.snappy(), sstableMetadataCollector)
-                : SequentialWriter.open(f))
-            {
-                writer.write("The quick ".getBytes());
-                DataPosition mark = writer.mark();
-                writer.write("blue fox jumps over the lazy dog".getBytes());
+            writeSSTable(file, CompressionParams.snappy(chunkLength), 10);
+            CompressionMetadata metadata = new CompressionMetadata(filename + ".metadata", file.length(), ChecksumType.CRC32);
 
-                // write enough to be sure to change chunk
-                for (int i = 0; i < junkSize; ++i)
-                {
-                    writer.write((byte) 1);
-                }
+            long chunks = 2761628520L;
+            long midPosition = (chunks / 2L) * chunkLength;
+            int idx = 8 * (int) (midPosition / chunkLength); // before patch
+            assertTrue("Expect integer overflow", idx < 0);
 
-                writer.resetAndTruncate(mark);
-                writer.write("brown fox jumps over the lazy dog".getBytes());
-                writer.finish();
+            try
+            {
+                metadata.chunkFor(midPosition);
+                fail("Expected to throw EOF exception with chunk idx larger than total number of chunks in the sstable");
             }
-            assert f.exists();
+            catch (CorruptSSTableException e)
+            {
+                assertTrue("Expect EOF, but got " + e.getCause(), e.getCause() instanceof EOFException);
+            }
+        }
+        finally
+        {
+            if (file.exists())
+                assertTrue(file.delete());
+            File metadata = new File(filename + ".metadata");
+            if (metadata.exists())
+                metadata.delete();
+        }
+    }
+
+    private static void testResetAndTruncate(File f, boolean compressed, boolean usemmap, int junkSize) throws IOException
+    {
+        final String filename = f.getAbsolutePath();
+        try(ChannelProxy channel = new ChannelProxy(f))
+        {
+            writeSSTable(f, compressed ? CompressionParams.snappy() : null, junkSize);
 
             CompressionMetadata compressionMetadata = compressed ? new CompressionMetadata(filename + ".metadata", f.length(), ChecksumType.CRC32) : null;
             RandomAccessReader.Builder builder = compressed
@@ -177,6 +200,31 @@ public class CompressedRandomAccessReaderTest
         }
     }
 
+    private static void writeSSTable(File f, CompressionParams params, int junkSize) throws IOException
+    {
+        final String filename = f.getAbsolutePath();
+        MetadataCollector sstableMetadataCollector = new MetadataCollector(new ClusteringComparator(BytesType.instance));
+        try(SequentialWriter writer = params != null
+                                      ? new CompressedSequentialWriter(f, filename + ".metadata", params, sstableMetadataCollector)
+                                      : SequentialWriter.open(f))
+        {
+            writer.write("The quick ".getBytes());
+            DataPosition mark = writer.mark();
+            writer.write("blue fox jumps over the lazy dog".getBytes());
+
+            // write enough to be sure to change chunk
+            for (int i = 0; i < junkSize; ++i)
+            {
+                writer.write((byte) 1);
+            }
+
+            writer.resetAndTruncate(mark);
+            writer.write("brown fox jumps over the lazy dog".getBytes());
+            writer.finish();
+        }
+        assert f.exists();
+    }
+
     /**
      * If the data read out doesn't match the checksum, an exception should be thrown
      */


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@cassandra.apache.org
For additional commands, e-mail: commits-help@cassandra.apache.org