You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@cassandra.apache.org by sl...@apache.org on 2016/05/12 13:23:20 UTC

[15/20] cassandra git commit: Merge commit '849a438690aa97a361227781108cc90355dcbcd9' into cassandra-3.0

http://git-wip-us.apache.org/repos/asf/cassandra/blob/78a3d2bb/test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple/mb-1-big-CompressionInfo.db
----------------------------------------------------------------------
diff --cc test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple/mb-1-big-CompressionInfo.db
index 0000000,0000000..288663f
new file mode 100644
Binary files differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/78a3d2bb/test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple/mb-1-big-Data.db
----------------------------------------------------------------------
diff --cc test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple/mb-1-big-Data.db
index 0000000,0000000..6a2f28e
new file mode 100644
Binary files differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/78a3d2bb/test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple/mb-1-big-Digest.crc32
----------------------------------------------------------------------
diff --cc test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple/mb-1-big-Digest.crc32
index 0000000,0000000..fb255bd
new file mode 100644
--- /dev/null
+++ b/test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple/mb-1-big-Digest.crc32
@@@ -1,0 -1,0 +1,1 @@@
++655951031

http://git-wip-us.apache.org/repos/asf/cassandra/blob/78a3d2bb/test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple/mb-1-big-Filter.db
----------------------------------------------------------------------
diff --cc test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple/mb-1-big-Filter.db
index 0000000,0000000..2e1d5d2
new file mode 100644
Binary files differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/78a3d2bb/test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple/mb-1-big-Index.db
----------------------------------------------------------------------
diff --cc test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple/mb-1-big-Index.db
index 0000000,0000000..b3094bf
new file mode 100644
Binary files differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/78a3d2bb/test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple/mb-1-big-Statistics.db
----------------------------------------------------------------------
diff --cc test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple/mb-1-big-Statistics.db
index 0000000,0000000..55dcdce
new file mode 100644
Binary files differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/78a3d2bb/test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple/mb-1-big-Summary.db
----------------------------------------------------------------------
diff --cc test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple/mb-1-big-Summary.db
index 0000000,0000000..9b24e04
new file mode 100644
Binary files differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/78a3d2bb/test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple/mb-1-big-TOC.txt
----------------------------------------------------------------------
diff --cc test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple/mb-1-big-TOC.txt
index 0000000,0000000..fe9581f
new file mode 100644
--- /dev/null
+++ b/test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple/mb-1-big-TOC.txt
@@@ -1,0 -1,0 +1,8 @@@
++CompressionInfo.db
++Summary.db
++TOC.txt
++Digest.crc32
++Filter.db
++Statistics.db
++Data.db
++Index.db

http://git-wip-us.apache.org/repos/asf/cassandra/blob/78a3d2bb/test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_compact/mb-1-big-CompressionInfo.db
----------------------------------------------------------------------
diff --cc test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_compact/mb-1-big-CompressionInfo.db
index 0000000,0000000..adb7fc4
new file mode 100644
Binary files differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/78a3d2bb/test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_compact/mb-1-big-Data.db
----------------------------------------------------------------------
diff --cc test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_compact/mb-1-big-Data.db
index 0000000,0000000..5cd0481
new file mode 100644
Binary files differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/78a3d2bb/test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_compact/mb-1-big-Digest.crc32
----------------------------------------------------------------------
diff --cc test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_compact/mb-1-big-Digest.crc32
index 0000000,0000000..7d2bdbe
new file mode 100644
--- /dev/null
+++ b/test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_compact/mb-1-big-Digest.crc32
@@@ -1,0 -1,0 +1,1 @@@
++3883019031

http://git-wip-us.apache.org/repos/asf/cassandra/blob/78a3d2bb/test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_compact/mb-1-big-Filter.db
----------------------------------------------------------------------
diff --cc test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_compact/mb-1-big-Filter.db
index 0000000,0000000..2e1d5d2
new file mode 100644
Binary files differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/78a3d2bb/test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_compact/mb-1-big-Index.db
----------------------------------------------------------------------
diff --cc test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_compact/mb-1-big-Index.db
index 0000000,0000000..56f29df
new file mode 100644
Binary files differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/78a3d2bb/test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_compact/mb-1-big-Statistics.db
----------------------------------------------------------------------
diff --cc test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_compact/mb-1-big-Statistics.db
index 0000000,0000000..dff24fe
new file mode 100644
Binary files differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/78a3d2bb/test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_compact/mb-1-big-Summary.db
----------------------------------------------------------------------
diff --cc test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_compact/mb-1-big-Summary.db
index 0000000,0000000..9b24e04
new file mode 100644
Binary files differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/78a3d2bb/test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_compact/mb-1-big-TOC.txt
----------------------------------------------------------------------
diff --cc test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_compact/mb-1-big-TOC.txt
index 0000000,0000000..fe9581f
new file mode 100644
--- /dev/null
+++ b/test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_compact/mb-1-big-TOC.txt
@@@ -1,0 -1,0 +1,8 @@@
++CompressionInfo.db
++Summary.db
++TOC.txt
++Digest.crc32
++Filter.db
++Statistics.db
++Data.db
++Index.db

http://git-wip-us.apache.org/repos/asf/cassandra/blob/78a3d2bb/test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_counter/mb-1-big-CompressionInfo.db
----------------------------------------------------------------------
diff --cc test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_counter/mb-1-big-CompressionInfo.db
index 0000000,0000000..19d25c9
new file mode 100644
Binary files differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/78a3d2bb/test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_counter/mb-1-big-Data.db
----------------------------------------------------------------------
diff --cc test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_counter/mb-1-big-Data.db
index 0000000,0000000..77a1394
new file mode 100644
Binary files differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/78a3d2bb/test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_counter/mb-1-big-Digest.crc32
----------------------------------------------------------------------
diff --cc test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_counter/mb-1-big-Digest.crc32
index 0000000,0000000..172b695
new file mode 100644
--- /dev/null
+++ b/test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_counter/mb-1-big-Digest.crc32
@@@ -1,0 -1,0 +1,1 @@@
++2876949266

http://git-wip-us.apache.org/repos/asf/cassandra/blob/78a3d2bb/test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_counter/mb-1-big-Filter.db
----------------------------------------------------------------------
diff --cc test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_counter/mb-1-big-Filter.db
index 0000000,0000000..2e1d5d2
new file mode 100644
Binary files differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/78a3d2bb/test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_counter/mb-1-big-Index.db
----------------------------------------------------------------------
diff --cc test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_counter/mb-1-big-Index.db
index 0000000,0000000..59e65ca
new file mode 100644
Binary files differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/78a3d2bb/test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_counter/mb-1-big-Statistics.db
----------------------------------------------------------------------
diff --cc test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_counter/mb-1-big-Statistics.db
index 0000000,0000000..4c67715
new file mode 100644
Binary files differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/78a3d2bb/test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_counter/mb-1-big-Summary.db
----------------------------------------------------------------------
diff --cc test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_counter/mb-1-big-Summary.db
index 0000000,0000000..9b24e04
new file mode 100644
Binary files differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/78a3d2bb/test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_counter/mb-1-big-TOC.txt
----------------------------------------------------------------------
diff --cc test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_counter/mb-1-big-TOC.txt
index 0000000,0000000..fe9581f
new file mode 100644
--- /dev/null
+++ b/test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_counter/mb-1-big-TOC.txt
@@@ -1,0 -1,0 +1,8 @@@
++CompressionInfo.db
++Summary.db
++TOC.txt
++Digest.crc32
++Filter.db
++Statistics.db
++Data.db
++Index.db

http://git-wip-us.apache.org/repos/asf/cassandra/blob/78a3d2bb/test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_counter_compact/mb-1-big-CompressionInfo.db
----------------------------------------------------------------------
diff --cc test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_counter_compact/mb-1-big-CompressionInfo.db
index 0000000,0000000..56c95a8
new file mode 100644
Binary files differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/78a3d2bb/test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_counter_compact/mb-1-big-Data.db
----------------------------------------------------------------------
diff --cc test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_counter_compact/mb-1-big-Data.db
index 0000000,0000000..00a7a39
new file mode 100644
Binary files differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/78a3d2bb/test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_counter_compact/mb-1-big-Digest.crc32
----------------------------------------------------------------------
diff --cc test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_counter_compact/mb-1-big-Digest.crc32
index 0000000,0000000..f9f2fff
new file mode 100644
--- /dev/null
+++ b/test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_counter_compact/mb-1-big-Digest.crc32
@@@ -1,0 -1,0 +1,1 @@@
++1214766167

http://git-wip-us.apache.org/repos/asf/cassandra/blob/78a3d2bb/test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_counter_compact/mb-1-big-Filter.db
----------------------------------------------------------------------
diff --cc test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_counter_compact/mb-1-big-Filter.db
index 0000000,0000000..2e1d5d2
new file mode 100644
Binary files differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/78a3d2bb/test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_counter_compact/mb-1-big-Index.db
----------------------------------------------------------------------
diff --cc test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_counter_compact/mb-1-big-Index.db
index 0000000,0000000..d094f73
new file mode 100644
Binary files differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/78a3d2bb/test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_counter_compact/mb-1-big-Statistics.db
----------------------------------------------------------------------
diff --cc test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_counter_compact/mb-1-big-Statistics.db
index 0000000,0000000..33c33f2
new file mode 100644
Binary files differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/78a3d2bb/test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_counter_compact/mb-1-big-Summary.db
----------------------------------------------------------------------
diff --cc test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_counter_compact/mb-1-big-Summary.db
index 0000000,0000000..9b24e04
new file mode 100644
Binary files differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/78a3d2bb/test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_counter_compact/mb-1-big-TOC.txt
----------------------------------------------------------------------
diff --cc test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_counter_compact/mb-1-big-TOC.txt
index 0000000,0000000..fe9581f
new file mode 100644
--- /dev/null
+++ b/test/data/legacy-sstables/mb/legacy_tables/legacy_mb_simple_counter_compact/mb-1-big-TOC.txt
@@@ -1,0 -1,0 +1,8 @@@
++CompressionInfo.db
++Summary.db
++TOC.txt
++Digest.crc32
++Filter.db
++Statistics.db
++Data.db
++Index.db

http://git-wip-us.apache.org/repos/asf/cassandra/blob/78a3d2bb/test/long/org/apache/cassandra/db/commitlog/CommitLogStressTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cassandra/blob/78a3d2bb/test/unit/org/apache/cassandra/db/commitlog/CommitLogTestReplayer.java
----------------------------------------------------------------------
diff --cc test/unit/org/apache/cassandra/db/commitlog/CommitLogTestReplayer.java
index fea83c1,0c46061..e690785
--- a/test/unit/org/apache/cassandra/db/commitlog/CommitLogTestReplayer.java
+++ b/test/unit/org/apache/cassandra/db/commitlog/CommitLogTestReplayer.java
@@@ -59,9 -59,9 +59,9 @@@ public class CommitLogTestReplayer exte
      }
  
      @Override
-     void replayMutation(byte[] inputBuffer, int size, final long entryLocation, final CommitLogDescriptor desc)
+     void replayMutation(byte[] inputBuffer, int size, final int entryLocation, final CommitLogDescriptor desc)
      {
 -        FastByteArrayInputStream bufIn = new FastByteArrayInputStream(inputBuffer, 0, size);
 +        RebufferingInputStream bufIn = new DataInputBuffer(inputBuffer, 0, size);
          Mutation mutation;
          try
          {

http://git-wip-us.apache.org/repos/asf/cassandra/blob/78a3d2bb/test/unit/org/apache/cassandra/db/lifecycle/TrackerTest.java
----------------------------------------------------------------------
diff --cc test/unit/org/apache/cassandra/db/lifecycle/TrackerTest.java
index 7b9b19c,adeb778..b8de711
--- a/test/unit/org/apache/cassandra/db/lifecycle/TrackerTest.java
+++ b/test/unit/org/apache/cassandra/db/lifecycle/TrackerTest.java
@@@ -38,8 -38,10 +38,9 @@@ import org.apache.cassandra.MockSchema
  import org.apache.cassandra.config.DatabaseDescriptor;
  import org.apache.cassandra.db.ColumnFamilyStore;
  import org.apache.cassandra.db.Memtable;
+ import org.apache.cassandra.db.commitlog.CommitLog;
  import org.apache.cassandra.db.commitlog.ReplayPosition;
  import org.apache.cassandra.db.compaction.OperationType;
 -import org.apache.cassandra.io.sstable.SSTableDeletingTask;
  import org.apache.cassandra.io.sstable.format.SSTableReader;
  import org.apache.cassandra.notifications.*;
  import org.apache.cassandra.utils.concurrent.OpOrder;
@@@ -292,17 -291,19 +293,20 @@@ public class TrackerTes
          Assert.assertTrue(tracker.getView().flushingMemtables.contains(prev1));
          Assert.assertEquals(2, tracker.getView().flushingMemtables.size());
  
--        tracker.replaceFlushed(prev1, null);
++        tracker.replaceFlushed(prev1, Collections.emptyList());
          Assert.assertEquals(1, tracker.getView().flushingMemtables.size());
          Assert.assertTrue(tracker.getView().flushingMemtables.contains(prev2));
  
          SSTableReader reader = MockSchema.sstable(0, 10, false, cfs);
 -        tracker.replaceFlushed(prev2, reader);
 +        tracker.replaceFlushed(prev2, Collections.singleton(reader));
          Assert.assertEquals(1, tracker.getView().sstables.size());
+         Assert.assertEquals(1, tracker.getView().premature.size());
 -        tracker.permitCompactionOfFlushed(reader);
++        tracker.permitCompactionOfFlushed(singleton(reader));
+         Assert.assertEquals(0, tracker.getView().premature.size());
          Assert.assertEquals(1, listener.received.size());
 -        Assert.assertEquals(reader, ((SSTableAddedNotification) listener.received.get(0)).added);
 +        Assert.assertEquals(singleton(reader), ((SSTableAddedNotification) listener.received.get(0)).added);
          listener.received.clear();
 +        Assert.assertTrue(reader.isKeyCacheSetup());
          Assert.assertEquals(10, cfs.metric.liveDiskSpaceUsed.getCount());
  
          // test invalidated CFS
@@@ -314,14 -315,13 +318,13 @@@
          tracker.markFlushing(prev1);
          reader = MockSchema.sstable(0, 10, true, cfs);
          cfs.invalidate(false);
 -        tracker.replaceFlushed(prev1, reader);
 -        tracker.permitCompactionOfFlushed(reader);
 +        tracker.replaceFlushed(prev1, Collections.singleton(reader));
++        tracker.permitCompactionOfFlushed(Collections.singleton(reader));
          Assert.assertEquals(0, tracker.getView().sstables.size());
          Assert.assertEquals(0, tracker.getView().flushingMemtables.size());
          Assert.assertEquals(0, cfs.metric.liveDiskSpaceUsed.getCount());
-         Assert.assertEquals(3, listener.received.size());
-         Assert.assertEquals(singleton(reader), ((SSTableAddedNotification) listener.received.get(0)).added);
-         Assert.assertTrue(listener.received.get(1) instanceof  SSTableDeletingNotification);
-         Assert.assertEquals(1, ((SSTableListChangedNotification) listener.received.get(2)).removed.size());
 -        Assert.assertEquals(1, ((SSTableListChangedNotification) listener.received.get(0)).removed.size());
 -        Assert.assertEquals(reader, (((SSTableDeletingNotification) listener.received.get(1)).deleting));
++        Assert.assertEquals(reader, (((SSTableDeletingNotification) listener.received.get(0)).deleting));
++        Assert.assertEquals(1, ((SSTableListChangedNotification) listener.received.get(1)).removed.size());
          DatabaseDescriptor.setIncrementalBackupsEnabled(backups);
      }
  

http://git-wip-us.apache.org/repos/asf/cassandra/blob/78a3d2bb/test/unit/org/apache/cassandra/db/lifecycle/ViewTest.java
----------------------------------------------------------------------
diff --cc test/unit/org/apache/cassandra/db/lifecycle/ViewTest.java
index 523c203,5706598..8a5e00e
--- a/test/unit/org/apache/cassandra/db/lifecycle/ViewTest.java
+++ b/test/unit/org/apache/cassandra/db/lifecycle/ViewTest.java
@@@ -212,6 -208,6 +212,6 @@@ public class ViewTes
          for (int i = 0 ; i < sstableCount ; i++)
              sstables.add(MockSchema.sstable(i, cfs));
          return new View(ImmutableList.copyOf(memtables), Collections.<Memtable>emptyList(), Helpers.identityMap(sstables),
-                         Collections.<SSTableReader, SSTableReader>emptyMap(), SSTableIntervalTree.build(sstables));
 -                        Collections.<SSTableReader>emptySet(), Collections.<SSTableReader>emptySet(), SSTableIntervalTree.build(sstables));
++                        Collections.<SSTableReader, SSTableReader>emptyMap(), Collections.<SSTableReader>emptySet(), SSTableIntervalTree.build(sstables));
      }
  }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/78a3d2bb/test/unit/org/apache/cassandra/io/compress/CompressedRandomAccessReaderTest.java
----------------------------------------------------------------------
diff --cc test/unit/org/apache/cassandra/io/compress/CompressedRandomAccessReaderTest.java
index 74b5c74,3bef89e..802d9c8
--- a/test/unit/org/apache/cassandra/io/compress/CompressedRandomAccessReaderTest.java
+++ b/test/unit/org/apache/cassandra/io/compress/CompressedRandomAccessReaderTest.java
@@@ -115,56 -107,40 +115,56 @@@ public class CompressedRandomAccessRead
          }
      }
  
 -    private void testResetAndTruncate(File f, boolean compressed, int junkSize) throws IOException
 +    private static void testResetAndTruncate(File f, boolean compressed, boolean usemmap, int junkSize) throws IOException
      {
          final String filename = f.getAbsolutePath();
 -        ChannelProxy channel = new ChannelProxy(f);
 -        try
 +        try(ChannelProxy channel = new ChannelProxy(f))
          {
-             MetadataCollector sstableMetadataCollector = new MetadataCollector(new ClusteringComparator(BytesType.instance)).replayPosition(null);
 -            MetadataCollector sstableMetadataCollector = new MetadataCollector(new SimpleDenseCellNameType(BytesType.instance));
 -            SequentialWriter writer = compressed
 -                ? new CompressedSequentialWriter(f, filename + ".metadata", new CompressionParameters(SnappyCompressor.instance), sstableMetadataCollector)
 -                : SequentialWriter.open(f);
++            MetadataCollector sstableMetadataCollector = new MetadataCollector(new ClusteringComparator(BytesType.instance));
 +            try(SequentialWriter writer = compressed
 +                ? new CompressedSequentialWriter(f, filename + ".metadata", CompressionParams.snappy(), sstableMetadataCollector)
 +                : SequentialWriter.open(f))
 +            {
 +                writer.write("The quick ".getBytes());
 +                DataPosition mark = writer.mark();
 +                writer.write("blue fox jumps over the lazy dog".getBytes());
 +
 +                // write enough to be sure to change chunk
 +                for (int i = 0; i < junkSize; ++i)
 +                {
 +                    writer.write((byte) 1);
 +                }
 +
 +                writer.resetAndTruncate(mark);
 +                writer.write("brown fox jumps over the lazy dog".getBytes());
 +                writer.finish();
 +            }
 +            assert f.exists();
  
 -            writer.write("The quick ".getBytes());
 -            FileMark mark = writer.mark();
 -            writer.write("blue fox jumps over the lazy dog".getBytes());
 +            CompressionMetadata compressionMetadata = compressed ? new CompressionMetadata(filename + ".metadata", f.length(), ChecksumType.CRC32) : null;
 +            RandomAccessReader.Builder builder = compressed
 +                                                 ? new CompressedRandomAccessReader.Builder(channel, compressionMetadata)
 +                                                 : new RandomAccessReader.Builder(channel);
  
 -            // write enough to be sure to change chunk
 -            for (int i = 0; i < junkSize; ++i)
 +            if (usemmap)
              {
 -                writer.write((byte)1);
 +                if (compressed)
 +                    builder.regions(MmappedRegions.map(channel, compressionMetadata));
 +                else
 +                    builder.regions(MmappedRegions.map(channel, f.length()));
              }
  
 -            writer.resetAndTruncate(mark);
 -            writer.write("brown fox jumps over the lazy dog".getBytes());
 -            writer.finish();
 +            try(RandomAccessReader reader = builder.build())
 +            {
 +                String expected = "The quick brown fox jumps over the lazy dog";
 +                assertEquals(expected.length(), reader.length());
 +                byte[] b = new byte[expected.length()];
 +                reader.readFully(b);
 +                assert new String(b).equals(expected) : "Expecting '" + expected + "', got '" + new String(b) + '\'';
 +            }
  
 -            assert f.exists();
 -            RandomAccessReader reader = compressed
 -                                      ? CompressedRandomAccessReader.open(channel, new CompressionMetadata(filename + ".metadata", f.length()))
 -                                      : RandomAccessReader.open(f);
 -            String expected = "The quick brown fox jumps over the lazy dog";
 -            assertEquals(expected.length(), reader.length());
 -            byte[] b = new byte[expected.length()];
 -            reader.readFully(b);
 -            assert new String(b).equals(expected) : "Expecting '" + expected + "', got '" + new String(b) + "'";
 +            if (usemmap)
 +                builder.regions.close();
          }
          finally
          {
@@@ -187,11 -166,8 +187,11 @@@
          File metadata = new File(file.getPath() + ".meta");
          metadata.deleteOnExit();
  
 -        MetadataCollector sstableMetadataCollector = new MetadataCollector(new SimpleDenseCellNameType(BytesType.instance));
 -        try (SequentialWriter writer = new CompressedSequentialWriter(file, metadata.getPath(), new CompressionParameters(SnappyCompressor.instance), sstableMetadataCollector))
 +        assertTrue(file.createNewFile());
 +        assertTrue(metadata.createNewFile());
 +
-         MetadataCollector sstableMetadataCollector = new MetadataCollector(new ClusteringComparator(BytesType.instance)).replayPosition(null);
++        MetadataCollector sstableMetadataCollector = new MetadataCollector(new ClusteringComparator(BytesType.instance));
 +        try (SequentialWriter writer = new CompressedSequentialWriter(file, metadata.getPath(), CompressionParams.snappy(), sstableMetadataCollector))
          {
              writer.write(CONTENT.getBytes());
              writer.finish();

http://git-wip-us.apache.org/repos/asf/cassandra/blob/78a3d2bb/test/unit/org/apache/cassandra/io/compress/CompressedSequentialWriterTest.java
----------------------------------------------------------------------
diff --cc test/unit/org/apache/cassandra/io/compress/CompressedSequentialWriterTest.java
index 9b09f0b,43c44fd..e045aad
--- a/test/unit/org/apache/cassandra/io/compress/CompressedSequentialWriterTest.java
+++ b/test/unit/org/apache/cassandra/io/compress/CompressedSequentialWriterTest.java
@@@ -88,13 -85,12 +88,13 @@@ public class CompressedSequentialWriter
  
          try
          {
-             MetadataCollector sstableMetadataCollector = new MetadataCollector(new ClusteringComparator(Arrays.<AbstractType<?>>asList(BytesType.instance))).replayPosition(null);
 -            MetadataCollector sstableMetadataCollector = new MetadataCollector(new SimpleDenseCellNameType(BytesType.instance));
++            MetadataCollector sstableMetadataCollector = new MetadataCollector(new ClusteringComparator(Arrays.<AbstractType<?>>asList(BytesType.instance)));
 +
              byte[] dataPre = new byte[bytesToTest];
              byte[] rawPost = new byte[bytesToTest];
 -            try (CompressedSequentialWriter writer = new CompressedSequentialWriter(f, filename + ".metadata", new CompressionParameters(compressor), sstableMetadataCollector);)
 +            try (CompressedSequentialWriter writer = new CompressedSequentialWriter(f, filename + ".metadata", compressionParameters, sstableMetadataCollector);)
              {
 -                Random r = new Random();
 +                Random r = new Random(42);
  
                  // Test both write with byte[] and ByteBuffer
                  r.nextBytes(dataPre);

http://git-wip-us.apache.org/repos/asf/cassandra/blob/78a3d2bb/test/unit/org/apache/cassandra/io/sstable/LegacySSTableTest.java
----------------------------------------------------------------------
diff --cc test/unit/org/apache/cassandra/io/sstable/LegacySSTableTest.java
index 4b9a769,f4b9617..62228e3
--- a/test/unit/org/apache/cassandra/io/sstable/LegacySSTableTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/LegacySSTableTest.java
@@@ -60,33 -58,13 +60,33 @@@ import org.apache.cassandra.utils.FBUti
   */
  public class LegacySSTableTest
  {
 +    private static final Logger logger = LoggerFactory.getLogger(LegacySSTableTest.class);
 +
      public static final String LEGACY_SSTABLE_PROP = "legacy-sstable-root";
 -    public static final String KSNAME = "Keyspace1";
 -    public static final String CFNAME = "Standard1";
  
 -    public static Set<String> TEST_DATA;
      public static File LEGACY_SSTABLE_ROOT;
  
 +    /**
 +     * When adding a new sstable version, add that one here.
 +     * See {@link #testGenerateSstables()} to generate sstables.
 +     * Take care on commit as you need to add the sstable files using {@code git add -f}
 +     */
-     public static final String[] legacyVersions = {"ma", "la", "ka", "jb"};
++    public static final String[] legacyVersions = {"mb", "ma", "la", "ka", "jb"};
 +
 +    // 1200 chars
 +    static final String longString = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" +
 +                                     "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" +
 +                                     "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" +
 +                                     "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" +
 +                                     "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" +
 +                                     "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" +
 +                                     "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" +
 +                                     "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" +
 +                                     "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" +
 +                                     "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" +
 +                                     "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" +
 +                                     "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789";
 +
      @BeforeClass
      public static void defineSchema() throws ConfigurationException
      {

http://git-wip-us.apache.org/repos/asf/cassandra/blob/78a3d2bb/test/unit/org/apache/cassandra/io/sstable/metadata/MetadataSerializerTest.java
----------------------------------------------------------------------
diff --cc test/unit/org/apache/cassandra/io/sstable/metadata/MetadataSerializerTest.java
index 4c10882,19fa7c4..93365ef
--- a/test/unit/org/apache/cassandra/io/sstable/metadata/MetadataSerializerTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/metadata/MetadataSerializerTest.java
@@@ -19,25 -18,30 +19,28 @@@
  package org.apache.cassandra.io.sstable.metadata;
  
  import java.io.File;
+ import java.io.FileNotFoundException;
  import java.io.FileOutputStream;
  import java.io.IOException;
 +import java.util.Collections;
  import java.util.EnumSet;
  import java.util.Map;
 -import java.util.Set;
 -
 -import com.google.common.collect.Sets;
  
  import org.junit.Test;
  
 +import org.apache.cassandra.SchemaLoader;
 +import org.apache.cassandra.config.CFMetaData;
 +import org.apache.cassandra.db.SerializationHeader;
+ import org.apache.cassandra.config.DatabaseDescriptor;
  import org.apache.cassandra.db.commitlog.ReplayPosition;
 -import org.apache.cassandra.db.composites.SimpleDenseCellNameType;
 -import org.apache.cassandra.db.marshal.BytesType;
  import org.apache.cassandra.dht.RandomPartitioner;
  import org.apache.cassandra.io.sstable.Component;
  import org.apache.cassandra.io.sstable.Descriptor;
+ import org.apache.cassandra.io.sstable.format.Version;
  import org.apache.cassandra.io.sstable.format.big.BigFormat;
 -import org.apache.cassandra.io.util.DataOutputStreamPlus;
  import org.apache.cassandra.io.util.BufferedDataOutputStreamPlus;
 +import org.apache.cassandra.io.util.DataOutputStreamPlus;
  import org.apache.cassandra.io.util.RandomAccessReader;
 -import org.apache.cassandra.utils.EstimatedHistogram;
  
  import static org.junit.Assert.assertEquals;
  
@@@ -46,29 -50,81 +49,78 @@@ public class MetadataSerializerTes
      @Test
      public void testSerialization() throws IOException
      {
+         Map<MetadataType, MetadataComponent> originalMetadata = constructMetadata();
  
-         CFMetaData cfm = SchemaLoader.standardCFMD("ks1", "cf1");
- 
-         ReplayPosition rp = new ReplayPosition(11L, 12);
+         MetadataSerializer serializer = new MetadataSerializer();
+         File statsFile = serialize(originalMetadata, serializer, BigFormat.latestVersion);
  
-         MetadataCollector collector = new MetadataCollector(cfm.comparator).replayPosition(rp);
 -        Descriptor desc = new Descriptor( statsFile.getParentFile(), "", "", 0, Descriptor.Type.FINAL);
++        Descriptor desc = new Descriptor( statsFile.getParentFile(), "", "", 0);
+         try (RandomAccessReader in = RandomAccessReader.open(statsFile))
+         {
+             Map<MetadataType, MetadataComponent> deserialized = serializer.deserialize(desc, in, EnumSet.allOf(MetadataType.class));
  
-         String partitioner = RandomPartitioner.class.getCanonicalName();
-         double bfFpChance = 0.1;
-         Map<MetadataType, MetadataComponent> originalMetadata = collector.finalizeMetadata(partitioner, bfFpChance, 0, SerializationHeader.make(cfm, Collections.EMPTY_LIST));
+             for (MetadataType type : MetadataType.values())
+             {
+                 assertEquals(originalMetadata.get(type), deserialized.get(type));
+             }
+         }
+     }
  
-         MetadataSerializer serializer = new MetadataSerializer();
+     public File serialize(Map<MetadataType, MetadataComponent> metadata, MetadataSerializer serializer, Version version)
+             throws IOException, FileNotFoundException
+     {
          // Serialize to tmp file
          File statsFile = File.createTempFile(Component.STATS.name, null);
          try (DataOutputStreamPlus out = new BufferedDataOutputStreamPlus(new FileOutputStream(statsFile)))
          {
-             serializer.serialize(originalMetadata, out, BigFormat.latestVersion);
 -            serializer.serialize(metadata, version, out);
++            serializer.serialize(metadata, out, version);
          }
+         return statsFile;
+     }
  
-         Descriptor desc = new Descriptor( statsFile.getParentFile(), "", "", 0);
-         try (RandomAccessReader in = RandomAccessReader.open(statsFile))
+     public Map<MetadataType, MetadataComponent> constructMetadata()
+     {
 -        EstimatedHistogram rowSizes = new EstimatedHistogram(new long[] { 1L, 2L },
 -                                                             new long[] { 3L, 4L, 5L });
 -        EstimatedHistogram columnCounts = new EstimatedHistogram(new long[] { 6L, 7L },
 -                                                                 new long[] { 8L, 9L, 10L });
 -        ReplayPosition start = new ReplayPosition(11L, 12);
 -        ReplayPosition end = new ReplayPosition(15L, 9);
 -        long minTimestamp = 2162517136L;
 -        long maxTimestamp = 4162517136L;
 -
 -        MetadataCollector collector = new MetadataCollector(new SimpleDenseCellNameType(BytesType.instance))
 -                                                      .estimatedRowSize(rowSizes)
 -                                                      .estimatedColumnCount(columnCounts)
 -                                                      .commitLogLowerBound(start)
 -                                                      .commitLogUpperBound(end);
 -        collector.updateMinTimestamp(minTimestamp);
 -        collector.updateMaxTimestamp(maxTimestamp);
 -
 -        Set<Integer> ancestors = Sets.newHashSet(1, 2, 3, 4);
 -        for (int i : ancestors)
 -            collector.addAncestor(i);
++        ReplayPosition club = new ReplayPosition(11L, 12);
++        ReplayPosition cllb = new ReplayPosition(9L, 12);
++
++        CFMetaData cfm = SchemaLoader.standardCFMD("ks1", "cf1");
++        MetadataCollector collector = new MetadataCollector(cfm.comparator)
++                                          .commitLogLowerBound(cllb)
++                                          .commitLogUpperBound(club);
+ 
+         String partitioner = RandomPartitioner.class.getCanonicalName();
+         double bfFpChance = 0.1;
 -        Map<MetadataType, MetadataComponent> originalMetadata = collector.finalizeMetadata(partitioner, bfFpChance, 0);
++        Map<MetadataType, MetadataComponent> originalMetadata = collector.finalizeMetadata(partitioner, bfFpChance, 0, SerializationHeader.make(cfm, Collections.emptyList()));
+         return originalMetadata;
+     }
+ 
+     @Test
 -    public void testLaReadsLb() throws IOException
++    public void testLaReadLb() throws IOException
++    {
++        testOldReadsNew("la", "lb");
++    }
++
++    @Test
++    public void testMaReadMb() throws IOException
++    {
++        testOldReadsNew("ma", "mb");
++    }
++
++    public void testOldReadsNew(String oldV, String newV) throws IOException
+     {
+         Map<MetadataType, MetadataComponent> originalMetadata = constructMetadata();
+ 
+         MetadataSerializer serializer = new MetadataSerializer();
+         // Write metadata in two minor formats.
 -        File statsFileLb = serialize(originalMetadata, serializer, BigFormat.instance.getVersion("lb"));
 -        File statsFileLa = serialize(originalMetadata, serializer, BigFormat.instance.getVersion("la"));
 -
++        File statsFileLb = serialize(originalMetadata, serializer, BigFormat.instance.getVersion(newV));
++        File statsFileLa = serialize(originalMetadata, serializer, BigFormat.instance.getVersion(oldV));
+         // Reading both as earlier version should yield identical results.
 -        Descriptor desc = new Descriptor("la", statsFileLb.getParentFile(), "", "", 0, Descriptor.Type.FINAL, DatabaseDescriptor.getSSTableFormat());
++        Descriptor desc = new Descriptor(oldV, statsFileLb.getParentFile(), "", "", 0, DatabaseDescriptor.getSSTableFormat());
+         try (RandomAccessReader inLb = RandomAccessReader.open(statsFileLb);
+              RandomAccessReader inLa = RandomAccessReader.open(statsFileLa))
          {
-             Map<MetadataType, MetadataComponent> deserialized = serializer.deserialize(desc, in, EnumSet.allOf(MetadataType.class));
+             Map<MetadataType, MetadataComponent> deserializedLb = serializer.deserialize(desc, inLb, EnumSet.allOf(MetadataType.class));
+             Map<MetadataType, MetadataComponent> deserializedLa = serializer.deserialize(desc, inLa, EnumSet.allOf(MetadataType.class));
  
              for (MetadataType type : MetadataType.values())
              {

http://git-wip-us.apache.org/repos/asf/cassandra/blob/78a3d2bb/test/unit/org/apache/cassandra/io/util/MmappedRegionsTest.java
----------------------------------------------------------------------
diff --cc test/unit/org/apache/cassandra/io/util/MmappedRegionsTest.java
index 9df3fed,0000000..7cf7bd3
mode 100644,000000..100644
--- a/test/unit/org/apache/cassandra/io/util/MmappedRegionsTest.java
+++ b/test/unit/org/apache/cassandra/io/util/MmappedRegionsTest.java
@@@ -1,375 -1,0 +1,374 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +
 +package org.apache.cassandra.io.util;
 +
 +import java.io.File;
 +import java.io.IOException;
 +import java.nio.ByteBuffer;
 +import java.util.Random;
 +
 +import com.google.common.primitives.Ints;
 +import org.junit.Test;
 +
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
 +
 +import org.apache.cassandra.db.ClusteringComparator;
 +import org.apache.cassandra.db.marshal.BytesType;
 +import org.apache.cassandra.io.compress.CompressedSequentialWriter;
 +import org.apache.cassandra.io.compress.CompressionMetadata;
 +import org.apache.cassandra.io.sstable.metadata.MetadataCollector;
 +import org.apache.cassandra.schema.CompressionParams;
 +import org.apache.cassandra.utils.ChecksumType;
 +
 +import static junit.framework.Assert.assertNull;
 +import static org.junit.Assert.assertEquals;
 +import static org.junit.Assert.assertFalse;
 +import static org.junit.Assert.assertNotNull;
 +import static org.junit.Assert.assertTrue;
 +
 +public class MmappedRegionsTest
 +{
 +    private static final Logger logger = LoggerFactory.getLogger(MmappedRegionsTest.class);
 +
 +    private static ByteBuffer allocateBuffer(int size)
 +    {
 +        ByteBuffer ret = ByteBuffer.allocate(Ints.checkedCast(size));
 +        long seed = System.nanoTime();
 +        //seed = 365238103404423L;
 +        logger.info("Seed {}", seed);
 +
 +        new Random(seed).nextBytes(ret.array());
 +        return ret;
 +    }
 +
 +    private static File writeFile(String fileName, ByteBuffer buffer) throws IOException
 +    {
 +        File ret = File.createTempFile(fileName, "1");
 +        ret.deleteOnExit();
 +
 +        try (SequentialWriter writer = SequentialWriter.open(ret))
 +        {
 +            writer.write(buffer);
 +            writer.finish();
 +        }
 +
 +        assert ret.exists();
 +        assert ret.length() >= buffer.capacity();
 +        return ret;
 +
 +    }
 +
 +    @Test
 +    public void testEmpty() throws Exception
 +    {
 +        ByteBuffer buffer = allocateBuffer(1024);
 +        try(ChannelProxy channel = new ChannelProxy(writeFile("testEmpty", buffer));
 +            MmappedRegions regions = MmappedRegions.empty(channel))
 +        {
 +            assertTrue(regions.isEmpty());
 +            assertTrue(regions.isValid(channel));
 +        }
 +    }
 +
 +    @Test
 +    public void testTwoSegments() throws Exception
 +    {
 +        ByteBuffer buffer = allocateBuffer(2048);
 +        try(ChannelProxy channel = new ChannelProxy(writeFile("testTwoSegments", buffer));
 +            MmappedRegions regions = MmappedRegions.empty(channel))
 +        {
 +            regions.extend(1024);
 +            for (int i = 0; i < 1024; i++)
 +            {
 +                MmappedRegions.Region region = regions.floor(i);
 +                assertNotNull(region);
 +                assertEquals(0, region.bottom());
 +                assertEquals(1024, region.top());
 +            }
 +
 +            regions.extend(2048);
 +            for (int i = 0; i < 2048; i++)
 +            {
 +                MmappedRegions.Region region = regions.floor(i);
 +                assertNotNull(region);
 +                if (i < 1024)
 +                {
 +                    assertEquals(0, region.bottom());
 +                    assertEquals(1024, region.top());
 +                }
 +                else
 +                {
 +                    assertEquals(1024, region.bottom());
 +                    assertEquals(2048, region.top());
 +                }
 +            }
 +        }
 +    }
 +
 +    @Test
 +    public void testSmallSegmentSize() throws Exception
 +    {
 +        int OLD_MAX_SEGMENT_SIZE = MmappedRegions.MAX_SEGMENT_SIZE;
 +        MmappedRegions.MAX_SEGMENT_SIZE = 1024;
 +
 +        ByteBuffer buffer = allocateBuffer(4096);
 +        try(ChannelProxy channel = new ChannelProxy(writeFile("testSmallSegmentSize", buffer));
 +            MmappedRegions regions = MmappedRegions.empty(channel))
 +        {
 +            regions.extend(1024);
 +            regions.extend(2048);
 +            regions.extend(4096);
 +
 +            final int SIZE = MmappedRegions.MAX_SEGMENT_SIZE;
 +            for (int i = 0; i < buffer.capacity(); i++)
 +            {
 +                MmappedRegions.Region region = regions.floor(i);
 +                assertNotNull(region);
 +                assertEquals(SIZE * (i / SIZE), region.bottom());
 +                assertEquals(SIZE + (SIZE * (i / SIZE)), region.top());
 +            }
 +        }
 +        finally
 +        {
 +            MmappedRegions.MAX_SEGMENT_SIZE = OLD_MAX_SEGMENT_SIZE;
 +        }
 +    }
 +
 +    @Test
 +    public void testAllocRegions() throws Exception
 +    {
 +        int OLD_MAX_SEGMENT_SIZE = MmappedRegions.MAX_SEGMENT_SIZE;
 +        MmappedRegions.MAX_SEGMENT_SIZE = 1024;
 +
 +        ByteBuffer buffer = allocateBuffer(MmappedRegions.MAX_SEGMENT_SIZE * MmappedRegions.REGION_ALLOC_SIZE * 3);
 +
 +        try(ChannelProxy channel = new ChannelProxy(writeFile("testAllocRegions", buffer));
 +            MmappedRegions regions = MmappedRegions.empty(channel))
 +        {
 +            regions.extend(buffer.capacity());
 +
 +            final int SIZE = MmappedRegions.MAX_SEGMENT_SIZE;
 +            for (int i = 0; i < buffer.capacity(); i++)
 +            {
 +                MmappedRegions.Region region = regions.floor(i);
 +                assertNotNull(region);
 +                assertEquals(SIZE * (i / SIZE), region.bottom());
 +                assertEquals(SIZE + (SIZE * (i / SIZE)), region.top());
 +            }
 +        }
 +        finally
 +        {
 +            MmappedRegions.MAX_SEGMENT_SIZE = OLD_MAX_SEGMENT_SIZE;
 +        }
 +    }
 +
 +    @Test
 +    public void testCopy() throws Exception
 +    {
 +        ByteBuffer buffer = allocateBuffer(128 * 1024);
 +
 +        MmappedRegions snapshot;
 +        ChannelProxy channelCopy;
 +
 +        try(ChannelProxy channel = new ChannelProxy(writeFile("testSnapshot", buffer));
 +            MmappedRegions regions = MmappedRegions.map(channel, buffer.capacity() / 4))
 +        {
 +            // create 3 more segments, one per quarter capacity
 +            regions.extend(buffer.capacity() / 2);
 +            regions.extend(3 * buffer.capacity() / 4);
 +            regions.extend(buffer.capacity());
 +
 +            // make a snapshot
 +            snapshot = regions.sharedCopy();
 +
 +            // keep the channel open
 +            channelCopy = channel.sharedCopy();
 +        }
 +
 +        assertFalse(snapshot.isCleanedUp());
 +
 +        final int SIZE = buffer.capacity() / 4;
 +        for (int i = 0; i < buffer.capacity(); i++)
 +        {
 +            MmappedRegions.Region region = snapshot.floor(i);
 +            assertNotNull(region);
 +            assertEquals(SIZE * (i / SIZE), region.bottom());
 +            assertEquals(SIZE + (SIZE * (i / SIZE)), region.top());
 +
 +            // check we can access the buffer
 +            assertNotNull(region.buffer.duplicate().getInt());
 +        }
 +
 +        assertNull(snapshot.close(null));
 +        assertNull(channelCopy.close(null));
 +        assertTrue(snapshot.isCleanedUp());
 +    }
 +
 +    @Test(expected = AssertionError.class)
 +    public void testCopyCannotExtend() throws Exception
 +    {
 +        ByteBuffer buffer = allocateBuffer(128 * 1024);
 +
 +        MmappedRegions snapshot;
 +        ChannelProxy channelCopy;
 +
 +        try(ChannelProxy channel = new ChannelProxy(writeFile("testSnapshotCannotExtend", buffer));
 +            MmappedRegions regions = MmappedRegions.empty(channel))
 +        {
 +            regions.extend(buffer.capacity() / 2);
 +
 +            // make a snapshot
 +            snapshot = regions.sharedCopy();
 +
 +            // keep the channel open
 +            channelCopy = channel.sharedCopy();
 +        }
 +
 +        try
 +        {
 +            snapshot.extend(buffer.capacity());
 +        }
 +        finally
 +        {
 +            assertNull(snapshot.close(null));
 +            assertNull(channelCopy.close(null));
 +        }
 +    }
 +
 +    @Test
 +    public void testExtendOutOfOrder() throws Exception
 +    {
 +        ByteBuffer buffer = allocateBuffer(4096);
 +        try(ChannelProxy channel = new ChannelProxy(writeFile("testExtendOutOfOrder", buffer));
 +            MmappedRegions regions = MmappedRegions.empty(channel))
 +        {
 +            regions.extend(4096);
 +            regions.extend(1024);
 +            regions.extend(2048);
 +
 +            for (int i = 0; i < buffer.capacity(); i++)
 +            {
 +                MmappedRegions.Region region = regions.floor(i);
 +                assertNotNull(region);
 +                assertEquals(0, region.bottom());
 +                assertEquals(4096, region.top());
 +            }
 +        }
 +    }
 +
 +    @Test(expected = IllegalArgumentException.class)
 +    public void testNegativeExtend() throws Exception
 +    {
 +        ByteBuffer buffer = allocateBuffer(1024);
 +        try(ChannelProxy channel = new ChannelProxy(writeFile("testNegativeExtend", buffer));
 +            MmappedRegions regions = MmappedRegions.empty(channel))
 +        {
 +            regions.extend(-1);
 +        }
 +    }
 +
 +    @Test
 +    public void testMapForCompressionMetadata() throws Exception
 +    {
 +        int OLD_MAX_SEGMENT_SIZE = MmappedRegions.MAX_SEGMENT_SIZE;
 +        MmappedRegions.MAX_SEGMENT_SIZE = 1024;
 +
 +        ByteBuffer buffer = allocateBuffer(128 * 1024);
 +        File f = File.createTempFile("testMapForCompressionMetadata", "1");
 +        f.deleteOnExit();
 +
 +        File cf = File.createTempFile(f.getName() + ".metadata", "1");
 +        cf.deleteOnExit();
 +
-         MetadataCollector sstableMetadataCollector = new MetadataCollector(new ClusteringComparator(BytesType.instance))
-                                                      .replayPosition(null);
++        MetadataCollector sstableMetadataCollector = new MetadataCollector(new ClusteringComparator(BytesType.instance));
 +        try(SequentialWriter writer = new CompressedSequentialWriter(f,
 +                                                                     cf.getAbsolutePath(),
 +                                                                     CompressionParams.snappy(),
 +                                                                     sstableMetadataCollector))
 +        {
 +            writer.write(buffer);
 +            writer.finish();
 +        }
 +
 +        CompressionMetadata metadata = new CompressionMetadata(cf.getAbsolutePath(), f.length(), ChecksumType.CRC32);
 +        try(ChannelProxy channel = new ChannelProxy(f);
 +            MmappedRegions regions = MmappedRegions.map(channel, metadata))
 +        {
 +
 +            assertFalse(regions.isEmpty());
 +            int i = 0;
 +            while(i < buffer.capacity())
 +            {
 +                CompressionMetadata.Chunk chunk = metadata.chunkFor(i);
 +
 +                MmappedRegions.Region region = regions.floor(chunk.offset);
 +                assertNotNull(region);
 +
 +                ByteBuffer compressedChunk = region.buffer.duplicate();
 +                assertNotNull(compressedChunk);
 +                assertEquals(chunk.length + 4, compressedChunk.capacity());
 +
 +                assertEquals(chunk.offset, region.bottom());
 +                assertEquals(chunk.offset + chunk.length + 4, region.top());
 +
 +                i += metadata.chunkLength();
 +            }
 +        }
 +        finally
 +        {
 +            MmappedRegions.MAX_SEGMENT_SIZE = OLD_MAX_SEGMENT_SIZE;
 +            metadata.close();
 +        }
 +    }
 +
 +    @Test(expected = IllegalArgumentException.class)
 +    public void testIllegalArgForMap1() throws Exception
 +    {
 +        ByteBuffer buffer = allocateBuffer(1024);
 +        try(ChannelProxy channel = new ChannelProxy(writeFile("testIllegalArgForMap1", buffer));
 +            MmappedRegions regions = MmappedRegions.map(channel, 0))
 +        {
 +            assertTrue(regions.isEmpty());
 +        }
 +    }
 +
 +    @Test(expected = IllegalArgumentException.class)
 +    public void testIllegalArgForMap2() throws Exception
 +    {
 +        ByteBuffer buffer = allocateBuffer(1024);
 +        try(ChannelProxy channel = new ChannelProxy(writeFile("testIllegalArgForMap2", buffer));
 +            MmappedRegions regions = MmappedRegions.map(channel, -1L))
 +        {
 +            assertTrue(regions.isEmpty());
 +        }
 +    }
 +
 +    @Test(expected = IllegalArgumentException.class)
 +    public void testIllegalArgForMap3() throws Exception
 +    {
 +        ByteBuffer buffer = allocateBuffer(1024);
 +        try(ChannelProxy channel = new ChannelProxy(writeFile("testIllegalArgForMap3", buffer));
 +            MmappedRegions regions = MmappedRegions.map(channel, null))
 +        {
 +            assertTrue(regions.isEmpty());
 +        }
 +    }
 +
 +}