Posted to commits@cassandra.apache.org by jz...@apache.org on 2018/05/01 22:30:06 UTC

[09/10] cassandra git commit: Merge branch 'cassandra-3.0' into cassandra-3.11

Merge branch 'cassandra-3.0' into cassandra-3.11


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/783bbb3c
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/783bbb3c
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/783bbb3c

Branch: refs/heads/trunk
Commit: 783bbb3c817e7dbfee8181d210487edc13414ac1
Parents: b67d6fb 733f6b0
Author: Jay Zhuang <ja...@yahoo.com>
Authored: Tue May 1 15:11:22 2018 -0700
Committer: Jay Zhuang <ja...@yahoo.com>
Committed: Tue May 1 15:12:14 2018 -0700

----------------------------------------------------------------------
 CHANGES.txt                                     |   1 +
 .../io/compress/CompressedSequentialWriter.java |  17 ++-
 .../CompressedSequentialWriterReopenTest.java   | 148 +++++++++++++++++++
 .../CompressedSequentialWriterTest.java         |   6 +
 4 files changed, 170 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
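
For context: per the CHANGES.txt entry below, this merge carries the CASSANDRA-12743 fix for a compaction failure caused by CompressedSequentialWriter reading back un-flushed data when rewinding to a mark. The following is a minimal sketch of the write/mark/rewind pattern the updated test exercises, built against the 3.11 constructor visible in the test diff further down; the temp file, buffer sizes, and the choice of CompressionParams.snappy() are illustrative assumptions, not part of this commit.

    // Sketch only: mirrors the pattern in CompressedSequentialWriterTest.testWrite().
    import java.io.File;
    import java.util.Collections;

    import org.apache.cassandra.db.ClusteringComparator;
    import org.apache.cassandra.db.marshal.BytesType;
    import org.apache.cassandra.io.compress.CompressedSequentialWriter;
    import org.apache.cassandra.io.sstable.metadata.MetadataCollector;
    import org.apache.cassandra.io.util.DataPosition;
    import org.apache.cassandra.io.util.SequentialWriterOption;
    import org.apache.cassandra.schema.CompressionParams;

    public class ResetAndTruncateSketch
    {
        public static void main(String[] args) throws Exception
        {
            File f = File.createTempFile("sketch", ".db");
            MetadataCollector collector = new MetadataCollector(
                new ClusteringComparator(Collections.singletonList(BytesType.instance)));

            byte[] head = new byte[1024];        // data that must survive the rewind
            byte[] filler = new byte[64 * 1024]; // enough to cross a 64 KiB chunk boundary
            byte[] tail = new byte[1024];        // data rewritten from the mark

            try (CompressedSequentialWriter writer = new CompressedSequentialWriter(
                     f, f.getPath() + ".metadata", null, SequentialWriterOption.DEFAULT,
                     CompressionParams.snappy(), collector))
            {
                writer.write(head);
                DataPosition mark = writer.mark();   // remember the rewind point
                writer.write(filler);                // forces at least one chunk flush
                writer.resetAndTruncate(mark);       // rewind; must not see un-flushed bytes
                writer.write(tail);
                writer.finish();
            }
        }
    }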


http://git-wip-us.apache.org/repos/asf/cassandra/blob/783bbb3c/CHANGES.txt
----------------------------------------------------------------------
diff --cc CHANGES.txt
index c392059,9992802..443c298
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@@ -31,8 -20,10 +31,9 @@@ Merged from 3.0
   * Fully utilise specified compaction threads (CASSANDRA-14210)
   * Pre-create deletion log records to finish compactions quicker (CASSANDRA-12763)
  Merged from 2.2:
+  * Fix compaction failure caused by reading un-flushed data (CASSANDRA-12743)
   * Use Bounds instead of Range for sstables in anticompaction (CASSANDRA-14411)
   * Fix JSON queries with IN restrictions and ORDER BY clause (CASSANDRA-14286)
 - * CQL fromJson(null) throws NullPointerException (CASSANDRA-13891)
   * Backport circleci yaml (CASSANDRA-14240)
  Merged from 2.1:
   * Check checksum before decompressing data (CASSANDRA-14284)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/783bbb3c/src/java/org/apache/cassandra/io/compress/CompressedSequentialWriter.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cassandra/blob/783bbb3c/test/unit/org/apache/cassandra/io/compress/CompressedSequentialWriterTest.java
----------------------------------------------------------------------
diff --cc test/unit/org/apache/cassandra/io/compress/CompressedSequentialWriterTest.java
index a088e20,f04439a..52b18a9
--- a/test/unit/org/apache/cassandra/io/compress/CompressedSequentialWriterTest.java
+++ b/test/unit/org/apache/cassandra/io/compress/CompressedSequentialWriterTest.java
@@@ -26,10 -27,9 +26,11 @@@ import java.util.*
  
  import static org.apache.commons.io.FileUtils.readFileToByteArray;
  import static org.junit.Assert.assertEquals;
+ import static org.junit.Assert.assertTrue;
  
 +import com.google.common.io.Files;
  import org.junit.After;
 +import org.junit.BeforeClass;
  import org.junit.Test;
  
  import junit.framework.Assert;
@@@ -89,42 -88,46 +90,47 @@@ public class CompressedSequentialWriter
      private void testWrite(File f, int bytesToTest) throws IOException
      {
          final String filename = f.getAbsolutePath();
 -        final ChannelProxy channel = new ChannelProxy(f);
 -
 -        try
 +        MetadataCollector sstableMetadataCollector = new MetadataCollector(new ClusteringComparator(Collections.singletonList(BytesType.instance)));
 +
 +        byte[] dataPre = new byte[bytesToTest];
 +        byte[] rawPost = new byte[bytesToTest];
 +        try (CompressedSequentialWriter writer = new CompressedSequentialWriter(f, filename + ".metadata",
 +                null, SequentialWriterOption.DEFAULT,
 +                compressionParameters,
 +                sstableMetadataCollector))
          {
 -            MetadataCollector sstableMetadataCollector = new MetadataCollector(new ClusteringComparator(Arrays.<AbstractType<?>>asList(BytesType.instance)));
 +            Random r = new Random(42);
 +
 +            // Test both write with byte[] and ByteBuffer
 +            r.nextBytes(dataPre);
 +            r.nextBytes(rawPost);
 +            ByteBuffer dataPost = makeBB(bytesToTest);
 +            dataPost.put(rawPost);
 +            dataPost.flip();
 +
 +            writer.write(dataPre);
 +            DataPosition mark = writer.mark();
  
 -            byte[] dataPre = new byte[bytesToTest];
 -            byte[] rawPost = new byte[bytesToTest];
 -            try (CompressedSequentialWriter writer = new CompressedSequentialWriter(f, filename + ".metadata", compressionParameters, sstableMetadataCollector);)
 +            // Write enough garbage to transition chunk
 +            for (int i = 0; i < CompressionParams.DEFAULT_CHUNK_LENGTH; i++)
              {
 -                Random r = new Random(42);
 -
 -                // Test both write with byte[] and ByteBuffer
 -                r.nextBytes(dataPre);
 -                r.nextBytes(rawPost);
 -                ByteBuffer dataPost = makeBB(bytesToTest);
 -                dataPost.put(rawPost);
 -                dataPost.flip();
 -
 -                writer.write(dataPre);
 -                DataPosition mark = writer.mark();
 -
 -                // Write enough garbage to transition chunk
 -                for (int i = 0; i < CompressionParams.DEFAULT_CHUNK_LENGTH; i++)
 -                {
 -                    writer.write((byte)i);
 -                }
 -
 -                if (bytesToTest <= CompressionParams.DEFAULT_CHUNK_LENGTH)
 -                    assertEquals(writer.getLastFlushOffset(), CompressionParams.DEFAULT_CHUNK_LENGTH);
 -                else
 -                    assertTrue(writer.getLastFlushOffset() % CompressionParams.DEFAULT_CHUNK_LENGTH == 0);
 -
 -                writer.resetAndTruncate(mark);
 -                writer.write(dataPost);
 -                writer.finish();
 +                writer.write((byte)i);
              }
++            if (bytesToTest <= CompressionParams.DEFAULT_CHUNK_LENGTH)
++                assertEquals(writer.getLastFlushOffset(), CompressionParams.DEFAULT_CHUNK_LENGTH);
++            else
++                assertTrue(writer.getLastFlushOffset() % CompressionParams.DEFAULT_CHUNK_LENGTH == 0);
+ 
 -            assert f.exists();
 -            RandomAccessReader reader = new CompressedRandomAccessReader.Builder(channel, new CompressionMetadata(filename + ".metadata", f.length(), ChecksumType.CRC32)).build();
 +            writer.resetAndTruncate(mark);
 +            writer.write(dataPost);
 +            writer.finish();
 +        }
 +
 +        assert f.exists();
 +        try (FileHandle.Builder builder = new FileHandle.Builder(filename).withCompressionMetadata(new CompressionMetadata(filename + ".metadata", f.length(), ChecksumType.CRC32));
 +             FileHandle fh = builder.complete();
 +             RandomAccessReader reader = fh.createReader())
 +        {
              assertEquals(dataPre.length + rawPost.length, reader.length());
              byte[] result = new byte[(int)reader.length()];
  

