Posted to commits@cassandra.apache.org by jb...@apache.org on 2012/11/10 03:49:12 UTC

[3/4] git commit: Move CompressionMetadata off-heap patch by jbellis and xedin for CASSANDRA-4937

Move CompressionMetadata off-heap
patch by jbellis and xedin for CASSANDRA-4937


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/4549a98b
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/4549a98b
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/4549a98b

Branch: refs/heads/cassandra-1.2.0
Commit: 4549a98b4ef6c293c200cbd8e226aee2e574c2c0
Parents: 5c81f4b
Author: Jonathan Ellis <jb...@apache.org>
Authored: Fri Nov 9 20:47:27 2012 -0600
Committer: Jonathan Ellis <jb...@apache.org>
Committed: Fri Nov 9 20:47:27 2012 -0600

----------------------------------------------------------------------
 CHANGES.txt                                        |    1 +
 .../cassandra/io/compress/CompressionMetadata.java |   35 ++++---
 .../cassandra/io/util/CompressedSegmentedFile.java |    2 +-
 src/java/org/apache/cassandra/io/util/Memory.java  |   12 +++
 .../org/apache/cassandra/utils/BigLongArray.java   |   77 ---------------
 5 files changed, 35 insertions(+), 92 deletions(-)
----------------------------------------------------------------------
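
For context before the per-file diffs: the heart of this change is moving the
chunk-offset table out of the Java heap. Instead of a paged long[]
(BigLongArray), the offsets now live in a single native allocation addressed
through sun.misc.Unsafe, with entry i at byte offset i * 8. The following is a
minimal standalone sketch of that idea (not the committed Memory class, just
an illustration), and it assumes a JDK where sun.misc.Unsafe is reflectively
reachable via the usual theUnsafe field:

    import java.lang.reflect.Field;
    import sun.misc.Unsafe;

    // Sketch only: an off-heap long table; entry i lives at byte offset i * 8.
    public class OffHeapLongs
    {
        private static final Unsafe unsafe = loadUnsafe();

        private final long peer; // base address of the native allocation
        private final long size; // allocation size in bytes

        public OffHeapLongs(int count)
        {
            size = count * 8L; // 8 bytes per long; widen before multiplying
            peer = unsafe.allocateMemory(size);
        }

        public void set(int i, long value)
        {
            unsafe.putLong(peer + i * 8L, value);
        }

        public long get(int i)
        {
            return unsafe.getLong(peer + i * 8L);
        }

        public void free()
        {
            unsafe.freeMemory(peer); // never reclaimed by GC; must be freed explicitly
        }

        private static Unsafe loadUnsafe()
        {
            try
            {
                Field f = Unsafe.class.getDeclaredField("theUnsafe");
                f.setAccessible(true);
                return (Unsafe) f.get(null);
            }
            catch (Exception e)
            {
                throw new AssertionError(e);
            }
        }
    }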


http://git-wip-us.apache.org/repos/asf/cassandra/blob/4549a98b/CHANGES.txt
----------------------------------------------------------------------
diff --git a/CHANGES.txt b/CHANGES.txt
index b73b14d..43fc53a 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -1,4 +1,5 @@
 1.2-rc1
+ * Move CompressionMetadata off-heap (CASSANDRA-4937)
  * allow CLI to GET cql3 columnfamily data (CASSANDRA-4924)
  * Fix rare race condition in getExpireTimeForEndpoint (CASSANDRA-4402)
  * acquire references to overlapping sstables during compaction so bloom filter

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4549a98b/src/java/org/apache/cassandra/io/compress/CompressionMetadata.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/compress/CompressionMetadata.java b/src/java/org/apache/cassandra/io/compress/CompressionMetadata.java
index 5587efc..81f99aa 100644
--- a/src/java/org/apache/cassandra/io/compress/CompressionMetadata.java
+++ b/src/java/org/apache/cassandra/io/compress/CompressionMetadata.java
@@ -20,6 +20,7 @@ package org.apache.cassandra.io.compress;
 import java.io.*;
 import java.util.*;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.primitives.Longs;
 
 import org.apache.cassandra.exceptions.ConfigurationException;
@@ -31,7 +32,7 @@ import org.apache.cassandra.io.sstable.Component;
 import org.apache.cassandra.io.sstable.CorruptSSTableException;
 import org.apache.cassandra.io.sstable.Descriptor;
 import org.apache.cassandra.io.util.FileUtils;
-import org.apache.cassandra.utils.BigLongArray;
+import org.apache.cassandra.io.util.Memory;
 import org.apache.cassandra.utils.Pair;
 
 /**
@@ -41,7 +42,7 @@ public class CompressionMetadata
 {
     public final long dataLength;
     public final long compressedFileLength;
-    private final BigLongArray chunkOffsets;
+    private final Memory chunkOffsets;
     public final String indexFilePath;
     public final CompressionParameters parameters;
 
@@ -62,7 +63,7 @@ public class CompressionMetadata
         return new CompressionMetadata(desc.filenameFor(Component.COMPRESSION_INFO), new File(dataFilePath).length());
     }
 
-    // This is package protected because of the tests.
+    @VisibleForTesting
     CompressionMetadata(String indexFilePath, long compressedLength)
     {
         this.indexFilePath = indexFilePath;
@@ -129,18 +130,18 @@ public class CompressionMetadata
      *
      * @return collection of the chunk offsets.
      */
-    private BigLongArray readChunkOffsets(DataInput input)
+    private Memory readChunkOffsets(DataInput input)
     {
         try
         {
             int chunkCount = input.readInt();
-            BigLongArray offsets = new BigLongArray(chunkCount);
+            Memory offsets = Memory.allocate(chunkCount * 8L);
 
             for (int i = 0; i < chunkCount; i++)
             {
                 try
                 {
-                    offsets.set(i, input.readLong());
+                    offsets.setLong(i * 8L, input.readLong());
                 }
                 catch (EOFException e)
                 {
@@ -167,15 +168,15 @@ public class CompressionMetadata
     public Chunk chunkFor(long position)
     {
         // position of the chunk
-        int idx = (int) (position / parameters.chunkLength());
+        int idx = 8 * (int) (position / parameters.chunkLength());
 
-        if (idx >= chunkOffsets.size)
+        if (idx >= chunkOffsets.size())
             throw new CorruptSSTableException(new EOFException(), indexFilePath);
 
-        long chunkOffset = chunkOffsets.get(idx);
-        long nextChunkOffset = (idx + 1 == chunkOffsets.size)
+        long chunkOffset = chunkOffsets.getLong(idx);
+        long nextChunkOffset = (idx + 8 == chunkOffsets.size())
                                 ? compressedFileLength
-                                : chunkOffsets.get(idx + 1);
+                                : chunkOffsets.getLong(idx + 8);
 
         return new Chunk(chunkOffset, (int) (nextChunkOffset - chunkOffset - 4)); // "4" bytes reserved for checksum
     }
@@ -200,16 +201,22 @@ public class CompressionMetadata
             int endIndex = (int) (section.right / parameters.chunkLength());
             for (int i = startIndex; i <= endIndex; i++)
             {
-                long chunkOffset = chunkOffsets.get(i);
-                long nextChunkOffset = (i + 1 == chunkOffsets.size)
+                long offset = i * 8L;
+                long chunkOffset = chunkOffsets.getLong(offset);
+                long nextChunkOffset = (offset + 8 == chunkOffsets.size())
                                                ? compressedFileLength
-                                               : chunkOffsets.get(i + 1);
+                                               : chunkOffsets.getLong(offset + 8);
                 offsets.add(new Chunk(chunkOffset, (int) (nextChunkOffset - chunkOffset - 4))); // "4" bytes reserved for checksum
             }
         }
         return offsets.toArray(new Chunk[offsets.size()]);
     }
 
+    public void close()
+    {
+        chunkOffsets.free();
+    }
+
     public static class Writer extends RandomAccessFile
     {
         // place for uncompressed data length in the index file
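
To make the index arithmetic in chunkFor() above concrete: the requested
position is divided by the chunk length to get a chunk number, the chunk
number is scaled by 8 to address the offset table, and the chunk's compressed
size is the gap to the next stored offset minus the 4 trailing checksum bytes.
A runnable trace with made-up values (chunk length and offsets are
hypothetical, chosen purely to show the arithmetic):

    public class ChunkForTrace
    {
        public static void main(String[] args)
        {
            long position = 200000;                       // uncompressed position requested
            int chunkLength = 65536;                      // parameters.chunkLength()
            int idx = 8 * (int) (position / chunkLength); // 8 * 3 = 24: byte offset of entry 3
            // Suppose the offset table holds [0, 21000, 43500, 64800, 86200, ...]:
            long chunkOffset = 64800;                     // chunkOffsets.getLong(24)
            long nextChunkOffset = 86200;                 // chunkOffsets.getLong(32)
            int length = (int) (nextChunkOffset - chunkOffset - 4); // 4 bytes reserved for checksum
            System.out.println("entry at byte " + idx + ": chunk offset " + chunkOffset
                               + ", length " + length);   // length 21396
        }
    }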

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4549a98b/src/java/org/apache/cassandra/io/util/CompressedSegmentedFile.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/util/CompressedSegmentedFile.java b/src/java/org/apache/cassandra/io/util/CompressedSegmentedFile.java
index d82fbae..7280dcd 100644
--- a/src/java/org/apache/cassandra/io/util/CompressedSegmentedFile.java
+++ b/src/java/org/apache/cassandra/io/util/CompressedSegmentedFile.java
@@ -61,6 +61,6 @@ public class CompressedSegmentedFile extends SegmentedFile
 
     public void cleanup()
     {
-        // nothing to do
+        metadata.close();
     }
 }
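
One consequence worth spelling out: now that the offsets live off-heap,
skipping cleanup() leaks native memory instead of merely pressuring the
garbage collector. A sketch of the lifecycle this imposes (dataFilePath and
position are placeholders; create() is the factory whose body appears in the
CompressionMetadata diff above):

    CompressionMetadata metadata = CompressionMetadata.create(dataFilePath);
    try
    {
        CompressionMetadata.Chunk chunk = metadata.chunkFor(position);
        // ... seek to the chunk in the compressed file and decompress it ...
    }
    finally
    {
        metadata.close(); // frees the native chunk-offset table
    }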

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4549a98b/src/java/org/apache/cassandra/io/util/Memory.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/util/Memory.java b/src/java/org/apache/cassandra/io/util/Memory.java
index 25f5caf..76c65e0 100644
--- a/src/java/org/apache/cassandra/io/util/Memory.java
+++ b/src/java/org/apache/cassandra/io/util/Memory.java
@@ -73,6 +73,12 @@ public class Memory
         unsafe.setMemory(peer + offset, bytes, b);
     }
 
+    public void setLong(long offset, long l)
+    {
+        checkPosition(offset);
+        unsafe.putLong(peer + offset, l);
+    }
+
     /**
      * Transfers count bytes from buffer to Memory
      *
@@ -105,6 +111,12 @@ public class Memory
         return unsafe.getByte(peer + offset);
     }
 
+    public long getLong(long offset)
+    {
+        checkPosition(offset);
+        return unsafe.getLong(peer + offset);
+    }
+
     /**
      * Transfers count bytes from Memory starting at memoryOffset to buffer starting at bufferOffset
      *
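
The new accessors mirror the existing byte-level ones, with checkPosition()
guarding each raw Unsafe access. A quick round trip through them (a sketch;
allocate(), setLong(), getLong(), and free() are the Memory methods used
elsewhere in this commit):

    Memory m = Memory.allocate(2 * 8L); // room for two 8-byte entries
    m.setLong(0, 42L);
    m.setLong(8, 43L);
    assert m.getLong(0) == 42L;
    assert m.getLong(8) == 43L;
    m.free(); // release the native allocation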

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4549a98b/src/java/org/apache/cassandra/utils/BigLongArray.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/utils/BigLongArray.java b/src/java/org/apache/cassandra/utils/BigLongArray.java
deleted file mode 100644
index 64481d5..0000000
--- a/src/java/org/apache/cassandra/utils/BigLongArray.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.cassandra.utils;
-
-/**
- * A GC friendly long[].
- * Allocating large arrays (that are not short-lived) generate fragmentation
- * in old-gen space. This breaks such large long array into fixed size pages
- * to avoid that problem.
- */
-public class BigLongArray
-{
-    private static final int DEFAULT_PAGE_SIZE = 4096;
-
-    private final long[][] pages;
-    public final int size;
-
-    private final int pageSize;
-    private final int pageCount;
-
-    public BigLongArray(int size)
-    {
-        this(size, DEFAULT_PAGE_SIZE);
-    }
-
-    public BigLongArray(int size, int pageSize)
-    {
-        this.size = size;
-        this.pageSize = pageSize;
-
-        int lastPageSize = size % pageSize;
-        int fullPageCount = size / pageSize;
-        pageCount = fullPageCount + (lastPageSize == 0 ? 0 : 1);
-        pages = new long[pageCount][];
-
-        for (int i = 0; i < fullPageCount; ++i)
-            pages[i] = new long[pageSize];
-
-        if (lastPageSize != 0)
-            pages[pages.length - 1] = new long[lastPageSize];
-    }
-
-    public void set(int idx, long value)
-    {
-        if (idx < 0 || idx > size)
-            throw new IndexOutOfBoundsException(String.format("%d is not whithin [0, %d)", idx, size));
-
-        int page = idx / pageSize;
-        int pageIdx = idx % pageSize;
-        pages[page][pageIdx] = value;
-    }
-
-    public long get(int idx)
-    {
-        if (idx < 0 || idx > size)
-            throw new IndexOutOfBoundsException(String.format("%d is not whithin [0, %d)", idx, size));
-
-        int page = idx / pageSize;
-        int pageIdx = idx % pageSize;
-        return pages[page][pageIdx];
-    }
-}
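
For comparison: the class deleted above kept the garbage collector happy by
splitting one large long[] into 4096-entry pages, so no single old-generation
allocation grew too big. The Memory-based replacement sidesteps the heap
entirely, which is both why the paging workaround can go and why
CompressionMetadata now needs an explicit close().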