Posted to commits@cassandra.apache.org by jb...@apache.org on 2013/09/18 19:45:12 UTC

[01/10] git commit: revert #5966

Updated Branches:
  refs/heads/cassandra-1.2 d28cf3eab -> 7161aec42
  refs/heads/cassandra-2.0 e93578b3b -> 2648047a4
  refs/heads/trunk 1c9858306 -> 0eca1652e


revert #5966


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/8d6271dd
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/8d6271dd
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/8d6271dd

Branch: refs/heads/cassandra-2.0
Commit: 8d6271ddf22115da4032247f1fabbcea1b9a9d07
Parents: fc9b3a7
Author: Jonathan Ellis <jb...@apache.org>
Authored: Wed Sep 18 09:39:35 2013 -0500
Committer: Jonathan Ellis <jb...@apache.org>
Committed: Wed Sep 18 09:45:37 2013 -0500

----------------------------------------------------------------------
 CHANGES.txt                                     |  1 -
 .../db/columniterator/SSTableNamesIterator.java | 37 ++++----------------
 2 files changed, 7 insertions(+), 31 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/8d6271dd/CHANGES.txt
----------------------------------------------------------------------
diff --git a/CHANGES.txt b/CHANGES.txt
index fb4f3f4..47ff752 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -3,7 +3,6 @@
  * (Hadoop) Fix CQLRW for thrift tables (CASSANDRA-6002)
  * Fix possible divide-by-zero in HHOM (CASSANDRA-5990)
  * Allow local batchlog writes for CL.ANY (CASSANDRA-5967)
- * Optimize name query performance in wide rows (CASSANDRA-5966)
  * Upgrade metrics-core to version 2.2.0 (CASSANDRA-5947)
  * Add snitch, schema version, cluster, partitioner to JMX (CASSANDRA-5881)
  * Fix CqlRecordWriter with composite keys (CASSANDRA-5949)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/8d6271dd/src/java/org/apache/cassandra/db/columniterator/SSTableNamesIterator.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/columniterator/SSTableNamesIterator.java b/src/java/org/apache/cassandra/db/columniterator/SSTableNamesIterator.java
index 40934d4..df28c46 100644
--- a/src/java/org/apache/cassandra/db/columniterator/SSTableNamesIterator.java
+++ b/src/java/org/apache/cassandra/db/columniterator/SSTableNamesIterator.java
@@ -153,7 +153,7 @@ public class SSTableNamesIterator extends SimpleAbstractColumnIterator implement
         List<OnDiskAtom> result = new ArrayList<OnDiskAtom>();
         if (indexList.isEmpty())
         {
-            readSimpleColumns(sstable.metadata, file, columns, result);
+            readSimpleColumns(file, columns, result);
         }
         else
         {
@@ -175,37 +175,27 @@ public class SSTableNamesIterator extends SimpleAbstractColumnIterator implement
         iter = result.iterator();
     }
 
-    private void readSimpleColumns(CFMetaData metadata,
-                                   FileDataInput file,
-                                   SortedSet<ByteBuffer> columnNames,
-                                   List<OnDiskAtom> result)
-    throws IOException
+    private void readSimpleColumns(FileDataInput file, SortedSet<ByteBuffer> columnNames, List<OnDiskAtom> result) throws IOException
     {
-        AbstractType<?> comparator = metadata.comparator;
         OnDiskAtom.Serializer atomSerializer = cf.getOnDiskSerializer();
-        ByteBuffer maximalColumnName = columnNames.last();
         int count = file.readInt();
-
+        int n = 0;
         for (int i = 0; i < count; i++)
         {
             OnDiskAtom column = atomSerializer.deserializeFromSSTable(file, sstable.descriptor.version);
-            ByteBuffer columnName = column.name();
-
             if (column instanceof IColumn)
             {
-                if (columnNames.contains(columnName))
+                if (columnNames.contains(column.name()))
                 {
                     result.add(column);
+                    if (++n >= columns.size())
+                        break;
                 }
             }
             else
             {
                 result.add(column);
             }
-
-            // Already consumed all of this block that's going to have columns that apply to this query.
-            if (comparator.compare(columnName, maximalColumnName) >= 0)
-                break;
         }
     }
 
@@ -241,12 +231,6 @@ public class SSTableNamesIterator extends SimpleAbstractColumnIterator implement
         {
             long positionToSeek = basePosition + indexInfo.offset;
 
-            // SortedSet.subSet() is end-exclusive, so we special-case that
-            // if it's one of the columns we're looking for
-            ByteBuffer maximalColumnName = columnNames.contains(indexInfo.lastName)
-                                         ? indexInfo.lastName
-                                         : columnNames.subSet(indexInfo.firstName, indexInfo.lastName).last();
-
             // With new promoted indexes, our first seek in the data file will happen at that point.
             if (file == null)
                 file = createFileDataInput(positionToSeek);
@@ -254,20 +238,13 @@ public class SSTableNamesIterator extends SimpleAbstractColumnIterator implement
             OnDiskAtom.Serializer atomSerializer = cf.getOnDiskSerializer();
             file.seek(positionToSeek);
             FileMark mark = file.mark();
-
             // TODO only completely deserialize columns we are interested in
             while (file.bytesPastMark(mark) < indexInfo.width)
             {
                 OnDiskAtom column = atomSerializer.deserializeFromSSTable(file, sstable.descriptor.version);
-                ByteBuffer columnName = column.name();
-
                 // we check vs the original Set, not the filtered List, for efficiency
-                if (!(column instanceof IColumn) || columnNames.contains(columnName))
+                if (!(column instanceof IColumn) || columnNames.contains(column.name()))
                     result.add(column);
-
-                // Already consumed all of this block that's going to have columns that apply to this query.
-                if (comparator.compare(columnName, maximalColumnName) >= 0)
-                    break;
             }
         }
     }
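
For reference, a minimal self-contained Java sketch of the name-query loop as it stands after this revert (simplified stand-in types, not the actual SSTableNamesIterator code): requested cells are collected in on-disk order and the scan stops once every requested name has been matched, whereas the reverted CASSANDRA-5966 optimization stopped as soon as the on-disk name passed the largest requested name.

    import java.nio.ByteBuffer;
    import java.util.*;

    public class NameQuerySketch
    {
        static ByteBuffer name(String s) { return ByteBuffer.wrap(s.getBytes()); }

        public static void main(String[] args)
        {
            // hypothetical on-disk cells, already in comparator order
            List<ByteBuffer> onDiskCells = Arrays.asList(name("a"), name("b"), name("c"), name("d"), name("e"));
            SortedSet<ByteBuffer> requested = new TreeSet<>(Arrays.asList(name("b"), name("d")));

            List<ByteBuffer> result = new ArrayList<>();
            int matched = 0;
            for (ByteBuffer cell : onDiskCells)
            {
                if (requested.contains(cell))
                {
                    result.add(cell);
                    if (++matched >= requested.size())   // post-revert: stop once all requested names are found
                        break;
                }
            }
            System.out.println(result.size() + " of " + requested.size() + " requested cells found");
        }
    }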


[03/10] git commit: Fix CQL doc imprecision

Posted by jb...@apache.org.
Fix CQL doc imprecision


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/d28cf3ea
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/d28cf3ea
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/d28cf3ea

Branch: refs/heads/cassandra-2.0
Commit: d28cf3eabda8c78ed6b6bc578392643de5d1476f
Parents: 8d6271d
Author: Sylvain Lebresne <sy...@datastax.com>
Authored: Wed Sep 18 17:53:29 2013 +0200
Committer: Sylvain Lebresne <sy...@datastax.com>
Committed: Wed Sep 18 17:53:29 2013 +0200

----------------------------------------------------------------------
 doc/cql3/CQL.textile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/d28cf3ea/doc/cql3/CQL.textile
----------------------------------------------------------------------
diff --git a/doc/cql3/CQL.textile b/doc/cql3/CQL.textile
index 168a05d..e0cea80 100644
--- a/doc/cql3/CQL.textile
+++ b/doc/cql3/CQL.textile
@@ -602,7 +602,7 @@ bc(syntax)..
 
 <relation> ::= <identifier> ('=' | '<' | '>' | '<=' | '>=') <term>
              | <identifier> IN '(' ( <term> ( ',' <term>)* )? ')'
-             | TOKEN '(' <identifier> ')' ('=' | '<' | '>' | '<=' | '>=') (<term> | TOKEN '(' <term> ')' )
+             | TOKEN '(' <identifier> ( ',' <identifer>)* ')' ('=' | '<' | '>' | '<=' | '>=') <term>
 
 <order-by> ::= <ordering> ( ',' <odering> )*
 <ordering> ::= <identifer> ( ASC | DESC )?
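
For illustration only, two hypothetical CQL3 queries (table and column names invented) of the kind the corrected <relation> production describes: a TOKEN relation may name every column of a composite partition key, and the right-hand side is an ordinary term, so a token(...) call over values still parses as a term.

    SELECT * FROM users  WHERE token(userid) > token(42);
    SELECT * FROM events WHERE token(k1, k2) >= token('a', 1);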


[02/10] git commit: revert #5966

Posted by jb...@apache.org.
revert #5966


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/8d6271dd
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/8d6271dd
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/8d6271dd

Branch: refs/heads/trunk
Commit: 8d6271ddf22115da4032247f1fabbcea1b9a9d07
Parents: fc9b3a7
Author: Jonathan Ellis <jb...@apache.org>
Authored: Wed Sep 18 09:39:35 2013 -0500
Committer: Jonathan Ellis <jb...@apache.org>
Committed: Wed Sep 18 09:45:37 2013 -0500

----------------------------------------------------------------------
 CHANGES.txt                                     |  1 -
 .../db/columniterator/SSTableNamesIterator.java | 37 ++++----------------
 2 files changed, 7 insertions(+), 31 deletions(-)
----------------------------------------------------------------------




[08/10] git commit: merge from 1.2

Posted by jb...@apache.org.
merge from 1.2


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/2648047a
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/2648047a
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/2648047a

Branch: refs/heads/trunk
Commit: 2648047a443306c8f2fc921c353cdbd54d964c5f
Parents: e93578b 7161aec
Author: Jonathan Ellis <jb...@apache.org>
Authored: Wed Sep 18 12:44:31 2013 -0500
Committer: Jonathan Ellis <jb...@apache.org>
Committed: Wed Sep 18 12:44:31 2013 -0500

----------------------------------------------------------------------
 CHANGES.txt                                     |  1 +
 doc/cql3/CQL.textile                            |  2 +-
 .../org/apache/cassandra/db/Directories.java    | 62 +++++---------------
 .../db/compaction/CompactionManager.java        |  2 +-
 .../cassandra/db/compaction/Scrubber.java       |  2 +-
 .../cassandra/io/util/DiskAwareRunnable.java    |  2 +-
 .../cassandra/streaming/StreamReader.java       |  2 +-
 .../cassandra/db/ColumnFamilyStoreTest.java     |  7 +--
 .../apache/cassandra/db/DirectoriesTest.java    |  2 +-
 .../compaction/LegacyLeveledManifestTest.java   |  2 +-
 .../io/sstable/SSTableSimpleWriterTest.java     |  2 +-
 11 files changed, 28 insertions(+), 58 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/2648047a/CHANGES.txt
----------------------------------------------------------------------
diff --cc CHANGES.txt
index 98ea03f,fb9915e..fd8d852
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@@ -1,30 -1,5 +1,31 @@@
 -1.2.10
 +2.0.1
 + * add file_cache_size_in_mb setting (CASSANDRA-5661)
 + * Improve error message when yaml contains invalid properties (CASSANDRA-5958)
 + * Improve leveled compaction's ability to find non-overlapping L0 compactions
 +   to work on concurrently (CASSANDRA-5921)
 + * Notify indexer of columns shadowed by range tombstones (CASSANDRA-5614)
 + * Log Merkle tree stats (CASSANDRA-2698)
 + * Switch from crc32 to adler32 for compressed sstable checksums (CASSANDRA-5862)
 + * Improve offheap memcpy performance (CASSANDRA-5884)
 + * Use a range aware scanner for cleanup (CASSANDRA-2524)
 + * Cleanup doesn't need to inspect sstables that contain only local data 
 +   (CASSANDRA-5722)
 + * Add ability for CQL3 to list partition keys (CASSANDRA-4536)
 + * Improve native protocol serialization (CASSANDRA-5664)
 + * Upgrade Thrift to 0.9.1 (CASSANDRA-5923)
 + * Require superuser status for adding triggers (CASSANDRA-5963)
 + * Make standalone scrubber handle old and new style leveled manifest
 +   (CASSANDRA-6005)
 + * Fix paxos bugs (CASSANDRA-6012, 6013, 6023)
 + * Fix paged ranges with multiple replicas (CASSANDRA-6004)
 + * Fix potential AssertionError during tracing (CASSANDRA-6041)
 + * Fix NPE in sstablesplit (CASSANDRA-6027)
 + * Migrate pre-2.0 key/value/column aliases to system.schema_columns
 +   (CASSANDRA-6009)
 + * Paging filter empty rows too agressively (CASSANDRA-6040)
 + * Support variadic parameters for IN clauses (CASSANDRA-4210)
 +Merged from 1.2:
+  * Avoid second-guessing out-of-space state (CASSANDRA-5605)
   * Tuning knobs for dealing with large blobs and many CFs (CASSANDRA-5982)
   * (Hadoop) Fix CQLRW for thrift tables (CASSANDRA-6002)
   * Fix possible divide-by-zero in HHOM (CASSANDRA-5990)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/2648047a/doc/cql3/CQL.textile
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cassandra/blob/2648047a/src/java/org/apache/cassandra/db/Directories.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/db/Directories.java
index 52f9699,351c0c0..18fc639
--- a/src/java/org/apache/cassandra/db/Directories.java
+++ b/src/java/org/apache/cassandra/db/Directories.java
@@@ -19,9 -19,9 +19,10 @@@ package org.apache.cassandra.db
  
  import java.io.File;
  import java.io.FileFilter;
+ import java.io.IOError;
  import java.io.IOException;
  import java.util.*;
 +import java.util.concurrent.TimeUnit;
  import java.util.concurrent.atomic.AtomicInteger;
  import java.util.concurrent.atomic.AtomicLong;
  
@@@ -149,8 -147,15 +150,8 @@@ public class Directorie
              // retry after GCing has forced unmap of compacted SSTables so they can be deleted
              // Note: GCInspector will do this already, but only sun JVM supports GCInspector so far
              SSTableDeletingTask.rescheduleFailedTasks();
 -            try
 -            {
 -                Thread.sleep(10000);
 -            }
 -            catch (InterruptedException e)
 -            {
 -                throw new AssertionError(e);
 -            }
 +            Uninterruptibles.sleepUninterruptibly(10, TimeUnit.SECONDS);
-             path = getLocationWithMaximumAvailableSpace(estimatedSize);
+             path = getWriteableLocationAsFile();
          }
  
          return path;

http://git-wip-us.apache.org/repos/asf/cassandra/blob/2648047a/src/java/org/apache/cassandra/db/compaction/CompactionManager.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cassandra/blob/2648047a/src/java/org/apache/cassandra/db/compaction/Scrubber.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cassandra/blob/2648047a/src/java/org/apache/cassandra/streaming/StreamReader.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/streaming/StreamReader.java
index 862f5a2,0000000..d72cb5e
mode 100644,000000..100644
--- a/src/java/org/apache/cassandra/streaming/StreamReader.java
+++ b/src/java/org/apache/cassandra/streaming/StreamReader.java
@@@ -1,137 -1,0 +1,137 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.cassandra.streaming;
 +
 +import java.io.DataInput;
 +import java.io.DataInputStream;
 +import java.io.IOException;
 +import java.io.InputStream;
 +import java.nio.channels.Channels;
 +import java.nio.channels.ReadableByteChannel;
 +import java.util.Collection;
 +import java.util.UUID;
 +
 +import com.google.common.base.Throwables;
 +import com.ning.compress.lzf.LZFInputStream;
 +
 +import org.apache.cassandra.config.Schema;
 +import org.apache.cassandra.db.ColumnFamilyStore;
 +import org.apache.cassandra.db.DecoratedKey;
 +import org.apache.cassandra.db.Directories;
 +import org.apache.cassandra.db.Keyspace;
 +import org.apache.cassandra.io.sstable.Component;
 +import org.apache.cassandra.io.sstable.Descriptor;
 +import org.apache.cassandra.io.sstable.SSTableReader;
 +import org.apache.cassandra.io.sstable.SSTableWriter;
 +import org.apache.cassandra.service.StorageService;
 +import org.apache.cassandra.streaming.messages.FileMessageHeader;
 +import org.apache.cassandra.utils.ByteBufferUtil;
 +import org.apache.cassandra.utils.BytesReadTracker;
 +import org.apache.cassandra.utils.Pair;
 +
 +/**
 + * StreamReader reads from stream and writes to SSTable.
 + */
 +public class StreamReader
 +{
 +    protected final UUID cfId;
 +    protected final long estimatedKeys;
 +    protected final Collection<Pair<Long, Long>> sections;
 +    protected final StreamSession session;
 +    protected final Descriptor.Version inputVersion;
 +
 +    protected Descriptor desc;
 +
 +    public StreamReader(FileMessageHeader header, StreamSession session)
 +    {
 +        this.session = session;
 +        this.cfId = header.cfId;
 +        this.estimatedKeys = header.estimatedKeys;
 +        this.sections = header.sections;
 +        this.inputVersion = new Descriptor.Version(header.version);
 +    }
 +
 +    /**
 +     * @param channel where this reads data from
 +     * @return SSTable transferred
 +     * @throws IOException if reading the remote sstable fails. Will throw an RTE if local write fails.
 +     */
 +    public SSTableReader read(ReadableByteChannel channel) throws IOException
 +    {
 +        long totalSize = totalSize();
 +
 +        Pair<String, String> kscf = Schema.instance.getCF(cfId);
 +        ColumnFamilyStore cfs = Keyspace.open(kscf.left).getColumnFamilyStore(kscf.right);
 +
 +        SSTableWriter writer = createWriter(cfs, totalSize);
 +        DataInputStream dis = new DataInputStream(new LZFInputStream(Channels.newInputStream(channel)));
 +        BytesReadTracker in = new BytesReadTracker(dis);
 +        try
 +        {
 +            while (in.getBytesRead() < totalSize)
 +            {
 +                writeRow(writer, in, cfs);
 +                // TODO move this to BytesReadTracker
 +                session.progress(desc, ProgressInfo.Direction.IN, in.getBytesRead(), totalSize);
 +            }
 +            return writer.closeAndOpenReader();
 +        }
 +        catch (Throwable e)
 +        {
 +            writer.abort();
 +            drain(dis, in.getBytesRead());
 +            if (e instanceof IOException)
 +                throw (IOException) e;
 +            else
 +                throw Throwables.propagate(e);
 +        }
 +    }
 +
 +    protected SSTableWriter createWriter(ColumnFamilyStore cfs, long totalSize) throws IOException
 +    {
-         Directories.DataDirectory localDir = cfs.directories.getLocationCapableOfSize(totalSize);
++        Directories.DataDirectory localDir = cfs.directories.getWriteableLocation();
 +        if (localDir == null)
 +            throw new IOException("Insufficient disk space to store " + totalSize + " bytes");
 +        desc = Descriptor.fromFilename(cfs.getTempSSTablePath(cfs.directories.getLocationForDisk(localDir)));
 +
 +        return new SSTableWriter(desc.filenameFor(Component.DATA), estimatedKeys);
 +    }
 +
 +    protected void drain(InputStream dis, long bytesRead) throws IOException
 +    {
 +        long toSkip = totalSize() - bytesRead;
 +        toSkip = toSkip - dis.skip(toSkip);
 +        while (toSkip > 0)
 +            toSkip = toSkip - dis.skip(toSkip);
 +    }
 +
 +    protected long totalSize()
 +    {
 +        long size = 0;
 +        for (Pair<Long, Long> section : sections)
 +            size += section.right - section.left;
 +        return size;
 +    }
 +
 +    protected void writeRow(SSTableWriter writer, DataInput in, ColumnFamilyStore cfs) throws IOException
 +    {
 +        DecoratedKey key = StorageService.getPartitioner().decorateKey(ByteBufferUtil.readWithShortLength(in));
 +        writer.appendFromStream(key, cfs.metadata, in, inputVersion);
 +        cfs.invalidateCachedRow(key);
 +    }
 +}

http://git-wip-us.apache.org/repos/asf/cassandra/blob/2648047a/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java
----------------------------------------------------------------------
diff --cc test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java
index f21b60e,abe3f05..0a78b2a
--- a/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java
+++ b/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java
@@@ -1334,66 -1240,12 +1334,65 @@@ public class ColumnFamilyStoreTest exte
          testMultiRangeSlicesBehavior(prepareMultiRangeSlicesTest(10, false));
      }
  
 +    @Test
 +    public void testRemoveUnifinishedCompactionLeftovers() throws Throwable
 +    {
 +        String ks = "Keyspace1";
 +        String cf = "Standard3"; // should be empty
 +
 +        final CFMetaData cfmeta = Schema.instance.getCFMetaData(ks, cf);
 +        Directories dir = Directories.create(ks, cf);
 +        ByteBuffer key = bytes("key");
 +
 +        // 1st sstable
-         SSTableSimpleWriter writer = new SSTableSimpleWriter(dir.getDirectoryForNewSSTables(100),
-                                                              cfmeta, StorageService.getPartitioner());
++        SSTableSimpleWriter writer = new SSTableSimpleWriter(dir.getDirectoryForNewSSTables(), cfmeta, StorageService.getPartitioner());
 +        writer.newRow(key);
 +        writer.addColumn(bytes("col"), bytes("val"), 1);
 +        writer.close();
 +
 +        Map<Descriptor, Set<Component>> sstables = dir.sstableLister().list();
 +        assert sstables.size() == 1;
 +
 +        Map.Entry<Descriptor, Set<Component>> sstableToOpen = sstables.entrySet().iterator().next();
 +        final SSTableReader sstable1 = SSTableReader.open(sstableToOpen.getKey());
 +
 +        // simulate incomplete compaction
-         writer = new SSTableSimpleWriter(dir.getDirectoryForNewSSTables(100),
++        writer = new SSTableSimpleWriter(dir.getDirectoryForNewSSTables(),
 +                                                             cfmeta, StorageService.getPartitioner())
 +        {
 +            protected SSTableWriter getWriter()
 +            {
 +                SSTableMetadata.Collector collector = SSTableMetadata.createCollector(cfmeta.comparator);
 +                collector.addAncestor(sstable1.descriptor.generation); // add ancestor from previously written sstable
 +                return new SSTableWriter(makeFilename(directory, metadata.ksName, metadata.cfName),
 +                                         0,
 +                                         metadata,
 +                                         StorageService.getPartitioner(),
 +                                         collector);
 +            }
 +        };
 +        writer.newRow(key);
 +        writer.addColumn(bytes("col"), bytes("val"), 1);
 +        writer.close();
 +
 +        // should have 2 sstables now
 +        sstables = dir.sstableLister().list();
 +        assert sstables.size() == 2;
 +
 +        ColumnFamilyStore.removeUnfinishedCompactionLeftovers(ks, cf, Sets.newHashSet(sstable1.descriptor.generation));
 +
 +        // 2nd sstable should be removed (only 1st sstable exists in set of size 1)
 +        sstables = dir.sstableLister().list();
 +        assert sstables.size() == 1;
 +        assert sstables.containsKey(sstable1.descriptor);
 +    }
 +
      private ColumnFamilyStore prepareMultiRangeSlicesTest(int valueSize, boolean flush) throws Throwable
      {
 -        String tableName = "Keyspace1";
 +        String keyspaceName = "Keyspace1";
          String cfName = "Standard1";
 -        Table table = Table.open(tableName);
 -        ColumnFamilyStore cfs = table.getColumnFamilyStore(cfName);
 +        Keyspace keyspace = Keyspace.open(keyspaceName);
 +        ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfName);
          cfs.clearUnsafe();
  
          String[] letters = new String[] { "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l" };

http://git-wip-us.apache.org/repos/asf/cassandra/blob/2648047a/test/unit/org/apache/cassandra/db/DirectoriesTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cassandra/blob/2648047a/test/unit/org/apache/cassandra/db/compaction/LegacyLeveledManifestTest.java
----------------------------------------------------------------------
diff --cc test/unit/org/apache/cassandra/db/compaction/LegacyLeveledManifestTest.java
index d8e8af0,0000000..7fd6c10
mode 100644,000000..100644
--- a/test/unit/org/apache/cassandra/db/compaction/LegacyLeveledManifestTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/LegacyLeveledManifestTest.java
@@@ -1,117 -1,0 +1,117 @@@
 +package org.apache.cassandra.db.compaction;
 +/*
 + * 
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + * 
 + *   http://www.apache.org/licenses/LICENSE-2.0
 + * 
 + * Unless required by applicable law or agreed to in writing,
 + * software distributed under the License is distributed on an
 + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 + * KIND, either express or implied.  See the License for the
 + * specific language governing permissions and limitations
 + * under the License.
 + * 
 + */
 +
 +
 +import java.io.File;
 +import java.io.IOException;
 +import java.util.HashMap;
 +import java.util.Map;
 +import java.util.Set;
 +import org.junit.After;
 +import org.junit.Before;
 +import org.junit.Test;
 +
 +import org.apache.cassandra.db.Directories;
 +import org.apache.cassandra.io.sstable.Descriptor;
 +import org.apache.cassandra.io.sstable.SSTableMetadata;
 +import org.apache.cassandra.io.util.FileUtils;
 +import org.apache.cassandra.utils.Pair;
 +import static org.junit.Assert.assertEquals;
 +import static org.junit.Assert.assertTrue;
 +import static org.apache.cassandra.db.compaction.LegacyLeveledManifestTestHelper.*;
 +
 +public class LegacyLeveledManifestTest
 +{
 +    private final static String LEGACY_VERSION = "ic";
 +
 +    private File destDir;
 +    @Before
 +    public void setup()
 +    {
-         destDir = Directories.create(KS, CF).getDirectoryForNewSSTables(0);
++        destDir = Directories.create(KS, CF).getDirectoryForNewSSTables();
 +        FileUtils.createDirectory(destDir);
 +        for (File srcFile : getLegacySSTableDir(LEGACY_VERSION).listFiles())
 +        {
 +            File destFile = new File(destDir, srcFile.getName());
 +            FileUtils.createHardLink(srcFile,destFile);
 +            assert destFile.exists() : destFile.getAbsoluteFile();
 +        }
 +    }
 +    @After
 +    public void tearDown()
 +    {
 +        FileUtils.deleteRecursive(destDir);
 +    }
 +
 +    @Test
 +    public void migrateTest() throws IOException
 +    {
 +        assertTrue(LegacyLeveledManifest.manifestNeedsMigration(KS, CF));
 +    }
 +
 +    @Test
 +    public void doMigrationTest() throws IOException, InterruptedException
 +    {
 +        LegacyLeveledManifest.migrateManifests(KS, CF);
 +
 +        for (int i = 0; i <= 2; i++)
 +        {
 +            Descriptor descriptor = Descriptor.fromFilename(destDir+File.separator+KS+"-"+CF+"-"+LEGACY_VERSION+"-"+i+"-Statistics.db");
 +            SSTableMetadata metadata = SSTableMetadata.serializer.deserialize(descriptor).left;
 +            assertEquals(metadata.sstableLevel, i);
 +        }
 +    }
 +
 +    /**
 +     * Validate that the rewritten stats file is the same as the original one.
 +     * @throws IOException
 +     */
 +    @Test
 +    public void validateSSTableMetadataTest() throws IOException
 +    {
 +        Map<Descriptor, Pair<SSTableMetadata, Set<Integer>>> beforeMigration = new HashMap<>();
 +        for (int i = 0; i <= 2; i++)
 +        {
 +            Descriptor descriptor = Descriptor.fromFilename(destDir+File.separator+KS+"-"+CF+"-"+LEGACY_VERSION+"-"+i+"-Statistics.db");
 +            beforeMigration.put(descriptor, SSTableMetadata.serializer.deserialize(descriptor, false));
 +        }
 +
 +        LegacyLeveledManifest.migrateManifests(KS, CF);
 +
 +        for (Map.Entry<Descriptor, Pair<SSTableMetadata, Set<Integer>>> entry : beforeMigration.entrySet())
 +        {
 +            Pair<SSTableMetadata, Set<Integer>> newMetaPair = SSTableMetadata.serializer.deserialize(entry.getKey());
 +            SSTableMetadata newMetadata = newMetaPair.left;
 +            SSTableMetadata oldMetadata = entry.getValue().left;
 +            assertEquals(newMetadata.estimatedRowSize, oldMetadata.estimatedRowSize);
 +            assertEquals(newMetadata.estimatedColumnCount, oldMetadata.estimatedColumnCount);
 +            assertEquals(newMetadata.replayPosition, oldMetadata.replayPosition);
 +            assertEquals(newMetadata.minTimestamp, oldMetadata.minTimestamp);
 +            assertEquals(newMetadata.maxTimestamp, oldMetadata.maxTimestamp);
 +            assertEquals(newMetadata.compressionRatio, oldMetadata.compressionRatio, 0.01);
 +            assertEquals(newMetadata.partitioner, oldMetadata.partitioner);
 +            assertEquals(newMetadata.estimatedTombstoneDropTime, oldMetadata.estimatedTombstoneDropTime);
 +            assertEquals(entry.getValue().right, newMetaPair.right);
 +        }
 +    }
 +
 +}

http://git-wip-us.apache.org/repos/asf/cassandra/blob/2648047a/test/unit/org/apache/cassandra/io/sstable/SSTableSimpleWriterTest.java
----------------------------------------------------------------------
diff --cc test/unit/org/apache/cassandra/io/sstable/SSTableSimpleWriterTest.java
index e80d2bb,ce569b9..9e7aa16
--- a/test/unit/org/apache/cassandra/io/sstable/SSTableSimpleWriterTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/SSTableSimpleWriterTest.java
@@@ -39,11 -39,11 +39,11 @@@ public class SSTableSimpleWriterTest ex
          final int INC = 5;
          final int NBCOL = 10;
  
 -        String tablename = "Keyspace1";
 +        String keyspaceName = "Keyspace1";
          String cfname = "StandardInteger1";
  
 -        Table t = Table.open(tablename); // make sure we create the directory
 -        File dir = Directories.create(tablename, cfname).getDirectoryForNewSSTables();
 +        Keyspace t = Keyspace.open(keyspaceName); // make sure we create the directory
-         File dir = Directories.create(keyspaceName, cfname).getDirectoryForNewSSTables(0);
++        File dir = Directories.create(keyspaceName, cfname).getDirectoryForNewSSTables();
          assert dir.exists();
  
          IPartitioner partitioner = StorageService.getPartitioner();


[06/10] git commit: Avoid second-guessing out-of-space state patch by jbellis; reviewed by yukim for CASSANDRA-5605

Posted by jb...@apache.org.
Avoid second-guessing out-of-space state
patch by jbellis; reviewed by yukim for CASSANDRA-5605


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/7161aec4
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/7161aec4
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/7161aec4

Branch: refs/heads/cassandra-1.2
Commit: 7161aec42c5bdb9e007587e20bc71603a505a95d
Parents: d28cf3e
Author: Jonathan Ellis <jb...@apache.org>
Authored: Wed Sep 18 12:12:51 2013 -0500
Committer: Jonathan Ellis <jb...@apache.org>
Committed: Wed Sep 18 12:13:23 2013 -0500

----------------------------------------------------------------------
 CHANGES.txt                                     |  1 +
 .../org/apache/cassandra/db/Directories.java    | 62 +++++---------------
 .../db/compaction/CompactionManager.java        |  2 +-
 .../cassandra/db/compaction/Scrubber.java       |  2 +-
 .../cassandra/io/util/DiskAwareRunnable.java    |  2 +-
 .../apache/cassandra/streaming/StreamIn.java    |  2 +-
 .../cassandra/db/ColumnFamilyStoreTest.java     |  2 +-
 .../apache/cassandra/db/DirectoriesTest.java    |  4 +-
 .../unit/org/apache/cassandra/db/ScrubTest.java |  2 +-
 .../cassandra/io/sstable/SSTableReaderTest.java |  3 +-
 .../io/sstable/SSTableSimpleWriterTest.java     |  2 +-
 11 files changed, 27 insertions(+), 57 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/7161aec4/CHANGES.txt
----------------------------------------------------------------------
diff --git a/CHANGES.txt b/CHANGES.txt
index 47ff752..fb9915e 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -1,4 +1,5 @@
 1.2.10
+ * Avoid second-guessing out-of-space state (CASSANDRA-5605)
  * Tuning knobs for dealing with large blobs and many CFs (CASSANDRA-5982)
  * (Hadoop) Fix CQLRW for thrift tables (CASSANDRA-6002)
  * Fix possible divide-by-zero in HHOM (CASSANDRA-5990)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7161aec4/src/java/org/apache/cassandra/db/Directories.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/Directories.java b/src/java/org/apache/cassandra/db/Directories.java
index 0890d29..351c0c0 100644
--- a/src/java/org/apache/cassandra/db/Directories.java
+++ b/src/java/org/apache/cassandra/db/Directories.java
@@ -19,6 +19,7 @@ package org.apache.cassandra.db;
 
 import java.io.File;
 import java.io.FileFilter;
+import java.io.IOError;
 import java.io.IOException;
 import java.util.*;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -132,9 +133,9 @@ public class Directories
         return null;
     }
 
-    public File getDirectoryForNewSSTables(long estimatedSize)
+    public File getDirectoryForNewSSTables()
     {
-        File path = getLocationWithMaximumAvailableSpace(estimatedSize);
+        File path = getWriteableLocationAsFile();
 
         // Requesting GC has a chance to free space only if we're using mmap and a non SUN jvm
         if (path == null
@@ -154,68 +155,37 @@ public class Directories
             {
                 throw new AssertionError(e);
             }
-            path = getLocationWithMaximumAvailableSpace(estimatedSize);
+            path = getWriteableLocationAsFile();
         }
 
         return path;
     }
 
-    /*
-     * Loop through all the disks to see which disk has the max free space
-     * return the disk with max free space for compactions. If the size of the expected
-     * compacted file is greater than the max disk space available return null, we cannot
-     * do compaction in this case.
-     */
-    public File getLocationWithMaximumAvailableSpace(long estimatedSize)
+    public File getWriteableLocationAsFile()
     {
-        long maxFreeDisk = 0;
-        File maxLocation = null;
-
-        for (File dir : sstableDirectories)
-        {
-            if (BlacklistedDirectories.isUnwritable(dir))
-                continue;
-
-            long usableSpace = dir.getUsableSpace();
-            if (maxFreeDisk < usableSpace)
-            {
-                maxFreeDisk = usableSpace;
-                maxLocation = dir;
-            }
-        }
-        // Load factor of 0.9 we do not want to use the entire disk that is too risky.
-        maxFreeDisk = (long) (0.9 * maxFreeDisk);
-        logger.debug(String.format("expected data files size is %d; largest free partition (%s) has %d bytes free",
-                                   estimatedSize, maxLocation, maxFreeDisk));
-
-        return estimatedSize < maxFreeDisk ? maxLocation : null;
+        return getLocationForDisk(getWriteableLocation());
     }
 
     /**
-     * Finds location which is capable of holding given {@code estimatedSize}.
-     * Picks a non-blacklisted directory with most free space and least current tasks.
-     * If no directory can hold given {@code estimatedSize}, then returns null.
+     * @return a non-blacklisted directory with the most free space and least current tasks.
      *
-     * @param estimatedSize estimated size you need to find location to fit
-     * @return directory capable of given estimated size, or null if none found
+     * @throws IOError if all directories are blacklisted.
      */
-    public DataDirectory getLocationCapableOfSize(long estimatedSize)
+    public DataDirectory getWriteableLocation()
     {
         List<DataDirectory> candidates = new ArrayList<DataDirectory>();
 
         // pick directories with enough space and so that resulting sstable dirs aren't blacklisted for writes.
         for (DataDirectory dataDir : dataFileLocations)
         {
-            File sstableDir = getLocationForDisk(dataDir);
-
-            if (BlacklistedDirectories.isUnwritable(sstableDir))
+            if (BlacklistedDirectories.isUnwritable(getLocationForDisk(dataDir)))
                 continue;
-
-            // need a separate check for sstableDir itself - could be a mounted separate disk or SSD just for this CF.
-            if (dataDir.getEstimatedAvailableSpace() > estimatedSize && sstableDir.getUsableSpace() * 0.9 > estimatedSize)
-                candidates.add(dataDir);
+            candidates.add(dataDir);
         }
 
+        if (candidates.isEmpty())
+            throw new IOError(new IOException("All configured data directories have been blacklisted as unwritable for erroring out"));
+
         // sort directories by free space, in _descending_ order.
         Collections.sort(candidates);
 
@@ -228,7 +198,7 @@ public class Directories
             }
         });
 
-        return candidates.isEmpty() ? null : candidates.get(0);
+        return candidates.get(0);
     }
 
 
@@ -265,7 +235,7 @@ public class Directories
         public long getEstimatedAvailableSpace()
         {
             // Load factor of 0.9 we do not want to use the entire disk that is too risky.
-            return (long)(0.9 * location.getUsableSpace()) - estimatedWorkingSize.get();
+            return location.getUsableSpace() - estimatedWorkingSize.get();
         }
 
         public int compareTo(DataDirectory o)
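
A minimal Java sketch of the directory-selection policy introduced here, using a simplified stand-in for DataDirectory rather than the real class: non-blacklisted candidates are sorted by free space in descending order, then stably by outstanding task count in ascending order, and the first candidate wins; if every directory is blacklisted the method fails instead of returning null, which is what lets callers drop their estimated-size second-guessing.

    import java.util.*;

    public class WriteableLocationSketch
    {
        static class Dir
        {
            final String path; final long freeBytes; final int currentTasks; final boolean blacklisted;
            Dir(String path, long freeBytes, int currentTasks, boolean blacklisted)
            {
                this.path = path; this.freeBytes = freeBytes;
                this.currentTasks = currentTasks; this.blacklisted = blacklisted;
            }
        }

        static Dir getWriteableLocation(List<Dir> dirs)
        {
            List<Dir> candidates = new ArrayList<>();
            for (Dir d : dirs)
                if (!d.blacklisted)                     // skip directories blacklisted for writes
                    candidates.add(d);
            if (candidates.isEmpty())                   // stand-in for the IOError thrown by the real method
                throw new IllegalStateException("All data directories are blacklisted");
            // most free space first ...
            candidates.sort(Comparator.comparingLong((Dir d) -> d.freeBytes).reversed());
            // ... then fewest in-flight tasks; the stable sort keeps the free-space order among ties
            candidates.sort(Comparator.comparingInt(d -> d.currentTasks));
            return candidates.get(0);
        }

        public static void main(String[] args)
        {
            List<Dir> dirs = Arrays.asList(new Dir("/data1", 100L << 30, 2, false),
                                           new Dir("/data2", 200L << 30, 0, false));
            System.out.println(getWriteableLocation(dirs).path);   // prints /data2
        }
    }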

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7161aec4/src/java/org/apache/cassandra/db/compaction/CompactionManager.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/compaction/CompactionManager.java b/src/java/org/apache/cassandra/db/compaction/CompactionManager.java
index 4c9c707..93f3108 100644
--- a/src/java/org/apache/cassandra/db/compaction/CompactionManager.java
+++ b/src/java/org/apache/cassandra/db/compaction/CompactionManager.java
@@ -584,7 +584,7 @@ public class CompactionManager implements CompactionManagerMBean
             logger.info("Cleaning up " + sstable);
             // Calculate the expected compacted filesize
             long expectedRangeFileSize = cfs.getExpectedCompactedFileSize(Arrays.asList(sstable), OperationType.CLEANUP);
-            File compactionFileLocation = cfs.directories.getDirectoryForNewSSTables(expectedRangeFileSize);
+            File compactionFileLocation = cfs.directories.getDirectoryForNewSSTables();
             if (compactionFileLocation == null)
                 throw new IOException("disk full");
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7161aec4/src/java/org/apache/cassandra/db/compaction/Scrubber.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/compaction/Scrubber.java b/src/java/org/apache/cassandra/db/compaction/Scrubber.java
index cb529cb..7b2178b 100644
--- a/src/java/org/apache/cassandra/db/compaction/Scrubber.java
+++ b/src/java/org/apache/cassandra/db/compaction/Scrubber.java
@@ -76,7 +76,7 @@ public class Scrubber implements Closeable
         this.outputHandler = outputHandler;
 
         // Calculate the expected compacted filesize
-        this.destination = cfs.directories.getDirectoryForNewSSTables(sstable.onDiskLength());
+        this.destination = cfs.directories.getDirectoryForNewSSTables();
         if (destination == null)
             throw new IOException("disk full");
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7161aec4/src/java/org/apache/cassandra/io/util/DiskAwareRunnable.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/util/DiskAwareRunnable.java b/src/java/org/apache/cassandra/io/util/DiskAwareRunnable.java
index 1be4803..198a88d 100644
--- a/src/java/org/apache/cassandra/io/util/DiskAwareRunnable.java
+++ b/src/java/org/apache/cassandra/io/util/DiskAwareRunnable.java
@@ -34,7 +34,7 @@ public abstract class DiskAwareRunnable extends WrappedRunnable
         while (true)
         {
             writeSize = getExpectedWriteSize();
-            directory = getDirectories().getLocationCapableOfSize(writeSize);
+            directory = getDirectories().getWriteableLocation();
             if (directory != null || !reduceScopeForLimitedSpace())
                 break;
         }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7161aec4/src/java/org/apache/cassandra/streaming/StreamIn.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/streaming/StreamIn.java b/src/java/org/apache/cassandra/streaming/StreamIn.java
index 740b430..85ea7fa 100644
--- a/src/java/org/apache/cassandra/streaming/StreamIn.java
+++ b/src/java/org/apache/cassandra/streaming/StreamIn.java
@@ -80,7 +80,7 @@ public class StreamIn
         // new local sstable
         Table table = Table.open(remotedesc.ksname);
         ColumnFamilyStore cfStore = table.getColumnFamilyStore(remotedesc.cfname);
-        Directories.DataDirectory localDir = cfStore.directories.getLocationCapableOfSize(remote.size);
+        Directories.DataDirectory localDir = cfStore.directories.getWriteableLocation();
         if (localDir == null)
             throw new RuntimeException("Insufficient disk space to store " + remote.size + " bytes");
         Descriptor localdesc = Descriptor.fromFilename(cfStore.getTempSSTablePath(cfStore.directories.getLocationForDisk(localDir)));

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7161aec4/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java b/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java
index a394644..abe3f05 100644
--- a/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java
+++ b/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java
@@ -837,7 +837,7 @@ public class ColumnFamilyStoreTest extends SchemaLoader
 
         for (int version = 1; version <= 2; ++version)
         {
-            Descriptor existing = new Descriptor(cfs.directories.getDirectoryForNewSSTables(1), "Keyspace2", "Standard1", version, false);
+            Descriptor existing = new Descriptor(cfs.directories.getDirectoryForNewSSTables(), "Keyspace2", "Standard1", version, false);
             Descriptor desc = new Descriptor(Directories.getBackupsDirectory(existing), "Keyspace2", "Standard1", version, false);
             for (Component c : new Component[]{ Component.DATA, Component.PRIMARY_INDEX, Component.FILTER, Component.STATS })
                 assertTrue("can not find backedup file:" + desc.filenameFor(c), new File(desc.filenameFor(c)).exists());

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7161aec4/test/unit/org/apache/cassandra/db/DirectoriesTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/DirectoriesTest.java b/test/unit/org/apache/cassandra/db/DirectoriesTest.java
index 21e183c..dce6f87 100644
--- a/test/unit/org/apache/cassandra/db/DirectoriesTest.java
+++ b/test/unit/org/apache/cassandra/db/DirectoriesTest.java
@@ -107,7 +107,7 @@ public class DirectoriesTest
         for (String cf : CFS)
         {
             Directories directories = Directories.create(KS, cf);
-            Assert.assertEquals(cfDir(cf), directories.getDirectoryForNewSSTables(0));
+            Assert.assertEquals(cfDir(cf), directories.getDirectoryForNewSSTables());
 
             Descriptor desc = new Descriptor(cfDir(cf), KS, cf, 1, false);
             File snapshotDir = new File(cfDir(cf),  File.separator + Directories.SNAPSHOT_SUBDIR + File.separator + "42");
@@ -180,7 +180,7 @@ public class DirectoriesTest
     {
         /* files not matching the pattern should just be ignored, with a log warning */
         Directories directories = Directories.create(KS, "bad");
-        File dir = directories.getDirectoryForNewSSTables(1);
+        File dir = directories.getDirectoryForNewSSTables();
         File f = File.createTempFile("bad", "file", dir.getParentFile());
         Directories.migrateSSTables();
         Assert.assertTrue(f.isFile());

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7161aec4/test/unit/org/apache/cassandra/db/ScrubTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/ScrubTest.java b/test/unit/org/apache/cassandra/db/ScrubTest.java
index 26f0e78..c26939a 100644
--- a/test/unit/org/apache/cassandra/db/ScrubTest.java
+++ b/test/unit/org/apache/cassandra/db/ScrubTest.java
@@ -55,7 +55,7 @@ public class ScrubTest extends SchemaLoader
         File rootDir = new File(root);
         assert rootDir.isDirectory();
 
-        File destDir = Directories.create(TABLE, cf).getDirectoryForNewSSTables(1);
+        File destDir = Directories.create(TABLE, cf).getDirectoryForNewSSTables();
 
         String corruptSSTableName = null;
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7161aec4/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java b/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java
index d0670a0..02b6855 100644
--- a/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java
@@ -28,7 +28,6 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 import java.util.concurrent.ExecutionException;
-import java.util.*;
 
 import org.junit.Test;
 import org.junit.runner.RunWith;
@@ -230,7 +229,7 @@ public class SSTableReaderTest extends SchemaLoader
         File rootDir = new File(root + File.separator + "hb" + File.separator + "Keyspace1");
         assert rootDir.isDirectory();
 
-        File destDir = Directories.create("Keyspace1", "Indexed1").getDirectoryForNewSSTables(0);
+        File destDir = Directories.create("Keyspace1", "Indexed1").getDirectoryForNewSSTables();
         assert destDir != null;
 
         FileUtils.createDirectory(destDir);

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7161aec4/test/unit/org/apache/cassandra/io/sstable/SSTableSimpleWriterTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableSimpleWriterTest.java b/test/unit/org/apache/cassandra/io/sstable/SSTableSimpleWriterTest.java
index 6efdc9b..ce569b9 100644
--- a/test/unit/org/apache/cassandra/io/sstable/SSTableSimpleWriterTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/SSTableSimpleWriterTest.java
@@ -43,7 +43,7 @@ public class SSTableSimpleWriterTest extends SchemaLoader
         String cfname = "StandardInteger1";
 
         Table t = Table.open(tablename); // make sure we create the directory
-        File dir = Directories.create(tablename, cfname).getDirectoryForNewSSTables(0);
+        File dir = Directories.create(tablename, cfname).getDirectoryForNewSSTables();
         assert dir.exists();
 
         IPartitioner partitioner = StorageService.getPartitioner();


[10/10] git commit: merge from 2.0

Posted by jb...@apache.org.
merge from 2.0


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/0eca1652
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/0eca1652
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/0eca1652

Branch: refs/heads/trunk
Commit: 0eca1652e8a7b943b62a78df9b7d1ea0564a1370
Parents: 1c98583 2648047
Author: Jonathan Ellis <jb...@apache.org>
Authored: Wed Sep 18 12:44:56 2013 -0500
Committer: Jonathan Ellis <jb...@apache.org>
Committed: Wed Sep 18 12:44:56 2013 -0500

----------------------------------------------------------------------
 CHANGES.txt                                     |  1 +
 doc/cql3/CQL.textile                            |  2 +-
 .../org/apache/cassandra/db/Directories.java    | 62 +++++---------------
 .../db/compaction/CompactionManager.java        |  2 +-
 .../cassandra/db/compaction/Scrubber.java       |  2 +-
 .../cassandra/io/util/DiskAwareRunnable.java    |  2 +-
 .../cassandra/streaming/StreamReader.java       |  2 +-
 .../cassandra/db/ColumnFamilyStoreTest.java     |  7 +--
 .../apache/cassandra/db/DirectoriesTest.java    |  2 +-
 .../io/sstable/SSTableSimpleWriterTest.java     |  2 +-
 10 files changed, 27 insertions(+), 57 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/0eca1652/CHANGES.txt
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cassandra/blob/0eca1652/src/java/org/apache/cassandra/db/Directories.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cassandra/blob/0eca1652/src/java/org/apache/cassandra/db/compaction/CompactionManager.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/db/compaction/CompactionManager.java
index 47ea97f,2f9aed2..61d187b
--- a/src/java/org/apache/cassandra/db/compaction/CompactionManager.java
+++ b/src/java/org/apache/cassandra/db/compaction/CompactionManager.java
@@@ -549,12 -549,12 +549,12 @@@ public class CompactionManager implemen
              int expectedBloomFilterSize = Math.max(cfs.metadata.getIndexInterval(),
                                                     (int) (SSTableReader.getApproximateKeyCount(Arrays.asList(sstable), cfs.metadata)));
              if (logger.isDebugEnabled())
 -                logger.debug("Expected bloom filter size : " + expectedBloomFilterSize);
 +                logger.debug("Expected bloom filter size : {}", expectedBloomFilterSize);
  
 -            logger.info("Cleaning up " + sstable);
 +            logger.info("Cleaning up {}", sstable);
              // Calculate the expected compacted filesize
              long expectedRangeFileSize = cfs.getExpectedCompactedFileSize(Arrays.asList(sstable), OperationType.CLEANUP);
-             File compactionFileLocation = cfs.directories.getDirectoryForNewSSTables(expectedRangeFileSize);
+             File compactionFileLocation = cfs.directories.getDirectoryForNewSSTables();
              if (compactionFileLocation == null)
                  throw new IOException("disk full");
  

http://git-wip-us.apache.org/repos/asf/cassandra/blob/0eca1652/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cassandra/blob/0eca1652/test/unit/org/apache/cassandra/db/DirectoriesTest.java
----------------------------------------------------------------------


[09/10] git commit: merge from 1.2

Posted by jb...@apache.org.
merge from 1.2


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/2648047a
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/2648047a
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/2648047a

Branch: refs/heads/cassandra-2.0
Commit: 2648047a443306c8f2fc921c353cdbd54d964c5f
Parents: e93578b 7161aec
Author: Jonathan Ellis <jb...@apache.org>
Authored: Wed Sep 18 12:44:31 2013 -0500
Committer: Jonathan Ellis <jb...@apache.org>
Committed: Wed Sep 18 12:44:31 2013 -0500

----------------------------------------------------------------------
 CHANGES.txt                                     |  1 +
 doc/cql3/CQL.textile                            |  2 +-
 .../org/apache/cassandra/db/Directories.java    | 62 +++++---------------
 .../db/compaction/CompactionManager.java        |  2 +-
 .../cassandra/db/compaction/Scrubber.java       |  2 +-
 .../cassandra/io/util/DiskAwareRunnable.java    |  2 +-
 .../cassandra/streaming/StreamReader.java       |  2 +-
 .../cassandra/db/ColumnFamilyStoreTest.java     |  7 +--
 .../apache/cassandra/db/DirectoriesTest.java    |  2 +-
 .../compaction/LegacyLeveledManifestTest.java   |  2 +-
 .../io/sstable/SSTableSimpleWriterTest.java     |  2 +-
 11 files changed, 28 insertions(+), 58 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/2648047a/CHANGES.txt
----------------------------------------------------------------------
diff --cc CHANGES.txt
index 98ea03f,fb9915e..fd8d852
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@@ -1,30 -1,5 +1,31 @@@
 -1.2.10
 +2.0.1
 + * add file_cache_size_in_mb setting (CASSANDRA-5661)
 + * Improve error message when yaml contains invalid properties (CASSANDRA-5958)
 + * Improve leveled compaction's ability to find non-overlapping L0 compactions
 +   to work on concurrently (CASSANDRA-5921)
 + * Notify indexer of columns shadowed by range tombstones (CASSANDRA-5614)
 + * Log Merkle tree stats (CASSANDRA-2698)
 + * Switch from crc32 to adler32 for compressed sstable checksums (CASSANDRA-5862)
 + * Improve offheap memcpy performance (CASSANDRA-5884)
 + * Use a range aware scanner for cleanup (CASSANDRA-2524)
 + * Cleanup doesn't need to inspect sstables that contain only local data 
 +   (CASSANDRA-5722)
 + * Add ability for CQL3 to list partition keys (CASSANDRA-4536)
 + * Improve native protocol serialization (CASSANDRA-5664)
 + * Upgrade Thrift to 0.9.1 (CASSANDRA-5923)
 + * Require superuser status for adding triggers (CASSANDRA-5963)
 + * Make standalone scrubber handle old and new style leveled manifest
 +   (CASSANDRA-6005)
 + * Fix paxos bugs (CASSANDRA-6012, 6013, 6023)
 + * Fix paged ranges with multiple replicas (CASSANDRA-6004)
 + * Fix potential AssertionError during tracing (CASSANDRA-6041)
 + * Fix NPE in sstablesplit (CASSANDRA-6027)
 + * Migrate pre-2.0 key/value/column aliases to system.schema_columns
 +   (CASSANDRA-6009)
 + * Paging filter empty rows too agressively (CASSANDRA-6040)
 + * Support variadic parameters for IN clauses (CASSANDRA-4210)
 +Merged from 1.2:
+  * Avoid second-guessing out-of-space state (CASSANDRA-5605)
   * Tuning knobs for dealing with large blobs and many CFs (CASSANDRA-5982)
   * (Hadoop) Fix CQLRW for thrift tables (CASSANDRA-6002)
   * Fix possible divide-by-zero in HHOM (CASSANDRA-5990)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/2648047a/doc/cql3/CQL.textile
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cassandra/blob/2648047a/src/java/org/apache/cassandra/db/Directories.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/db/Directories.java
index 52f9699,351c0c0..18fc639
--- a/src/java/org/apache/cassandra/db/Directories.java
+++ b/src/java/org/apache/cassandra/db/Directories.java
@@@ -19,9 -19,9 +19,10 @@@ package org.apache.cassandra.db
  
  import java.io.File;
  import java.io.FileFilter;
+ import java.io.IOError;
  import java.io.IOException;
  import java.util.*;
 +import java.util.concurrent.TimeUnit;
  import java.util.concurrent.atomic.AtomicInteger;
  import java.util.concurrent.atomic.AtomicLong;
  
@@@ -149,8 -147,15 +150,8 @@@ public class Directorie
              // retry after GCing has forced unmap of compacted SSTables so they can be deleted
              // Note: GCInspector will do this already, but only sun JVM supports GCInspector so far
              SSTableDeletingTask.rescheduleFailedTasks();
 -            try
 -            {
 -                Thread.sleep(10000);
 -            }
 -            catch (InterruptedException e)
 -            {
 -                throw new AssertionError(e);
 -            }
 +            Uninterruptibles.sleepUninterruptibly(10, TimeUnit.SECONDS);
-             path = getLocationWithMaximumAvailableSpace(estimatedSize);
+             path = getWriteableLocationAsFile();
          }
  
          return path;
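
The Thread.sleep/InterruptedException boilerplate dropped above is replaced by Guava's com.google.common.util.concurrent.Uninterruptibles, which keeps sleeping through interrupts and restores the thread's interrupt flag before returning. A simplified sketch of the retry shape (the real method only retries when a GC-triggered cleanup has a chance of freeing space):

    File path = getWriteableLocationAsFile();
    if (path == null)
    {
        SSTableDeletingTask.rescheduleFailedTasks(); // give pending sstable deletions another chance
        Uninterruptibles.sleepUninterruptibly(10, TimeUnit.SECONDS);
        path = getWriteableLocationAsFile();         // then try once more
    }
    return path;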

http://git-wip-us.apache.org/repos/asf/cassandra/blob/2648047a/src/java/org/apache/cassandra/db/compaction/CompactionManager.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cassandra/blob/2648047a/src/java/org/apache/cassandra/db/compaction/Scrubber.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cassandra/blob/2648047a/src/java/org/apache/cassandra/streaming/StreamReader.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/streaming/StreamReader.java
index 862f5a2,0000000..d72cb5e
mode 100644,000000..100644
--- a/src/java/org/apache/cassandra/streaming/StreamReader.java
+++ b/src/java/org/apache/cassandra/streaming/StreamReader.java
@@@ -1,137 -1,0 +1,137 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.cassandra.streaming;
 +
 +import java.io.DataInput;
 +import java.io.DataInputStream;
 +import java.io.IOException;
 +import java.io.InputStream;
 +import java.nio.channels.Channels;
 +import java.nio.channels.ReadableByteChannel;
 +import java.util.Collection;
 +import java.util.UUID;
 +
 +import com.google.common.base.Throwables;
 +import com.ning.compress.lzf.LZFInputStream;
 +
 +import org.apache.cassandra.config.Schema;
 +import org.apache.cassandra.db.ColumnFamilyStore;
 +import org.apache.cassandra.db.DecoratedKey;
 +import org.apache.cassandra.db.Directories;
 +import org.apache.cassandra.db.Keyspace;
 +import org.apache.cassandra.io.sstable.Component;
 +import org.apache.cassandra.io.sstable.Descriptor;
 +import org.apache.cassandra.io.sstable.SSTableReader;
 +import org.apache.cassandra.io.sstable.SSTableWriter;
 +import org.apache.cassandra.service.StorageService;
 +import org.apache.cassandra.streaming.messages.FileMessageHeader;
 +import org.apache.cassandra.utils.ByteBufferUtil;
 +import org.apache.cassandra.utils.BytesReadTracker;
 +import org.apache.cassandra.utils.Pair;
 +
 +/**
 + * StreamReader reads from stream and writes to SSTable.
 + */
 +public class StreamReader
 +{
 +    protected final UUID cfId;
 +    protected final long estimatedKeys;
 +    protected final Collection<Pair<Long, Long>> sections;
 +    protected final StreamSession session;
 +    protected final Descriptor.Version inputVersion;
 +
 +    protected Descriptor desc;
 +
 +    public StreamReader(FileMessageHeader header, StreamSession session)
 +    {
 +        this.session = session;
 +        this.cfId = header.cfId;
 +        this.estimatedKeys = header.estimatedKeys;
 +        this.sections = header.sections;
 +        this.inputVersion = new Descriptor.Version(header.version);
 +    }
 +
 +    /**
 +     * @param channel where this reads data from
 +     * @return SSTable transferred
 +     * @throws IOException if reading the remote sstable fails. Will throw an RTE if local write fails.
 +     */
 +    public SSTableReader read(ReadableByteChannel channel) throws IOException
 +    {
 +        long totalSize = totalSize();
 +
 +        Pair<String, String> kscf = Schema.instance.getCF(cfId);
 +        ColumnFamilyStore cfs = Keyspace.open(kscf.left).getColumnFamilyStore(kscf.right);
 +
 +        SSTableWriter writer = createWriter(cfs, totalSize);
 +        DataInputStream dis = new DataInputStream(new LZFInputStream(Channels.newInputStream(channel)));
 +        BytesReadTracker in = new BytesReadTracker(dis);
 +        try
 +        {
 +            while (in.getBytesRead() < totalSize)
 +            {
 +                writeRow(writer, in, cfs);
 +                // TODO move this to BytesReadTracker
 +                session.progress(desc, ProgressInfo.Direction.IN, in.getBytesRead(), totalSize);
 +            }
 +            return writer.closeAndOpenReader();
 +        }
 +        catch (Throwable e)
 +        {
 +            writer.abort();
 +            drain(dis, in.getBytesRead());
 +            if (e instanceof IOException)
 +                throw (IOException) e;
 +            else
 +                throw Throwables.propagate(e);
 +        }
 +    }
 +
 +    protected SSTableWriter createWriter(ColumnFamilyStore cfs, long totalSize) throws IOException
 +    {
-         Directories.DataDirectory localDir = cfs.directories.getLocationCapableOfSize(totalSize);
++        Directories.DataDirectory localDir = cfs.directories.getWriteableLocation();
 +        if (localDir == null)
 +            throw new IOException("Insufficient disk space to store " + totalSize + " bytes");
 +        desc = Descriptor.fromFilename(cfs.getTempSSTablePath(cfs.directories.getLocationForDisk(localDir)));
 +
 +        return new SSTableWriter(desc.filenameFor(Component.DATA), estimatedKeys);
 +    }
 +
 +    protected void drain(InputStream dis, long bytesRead) throws IOException
 +    {
 +        long toSkip = totalSize() - bytesRead;
 +        toSkip = toSkip - dis.skip(toSkip);
 +        while (toSkip > 0)
 +            toSkip = toSkip - dis.skip(toSkip);
 +    }
 +
 +    protected long totalSize()
 +    {
 +        long size = 0;
 +        for (Pair<Long, Long> section : sections)
 +            size += section.right - section.left;
 +        return size;
 +    }
 +
 +    protected void writeRow(SSTableWriter writer, DataInput in, ColumnFamilyStore cfs) throws IOException
 +    {
 +        DecoratedKey key = StorageService.getPartitioner().decorateKey(ByteBufferUtil.readWithShortLength(in));
 +        writer.appendFromStream(key, cfs.metadata, in, inputVersion);
 +        cfs.invalidateCachedRow(key);
 +    }
 +}
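
StreamReader exists only on the 2.0 side of this merge (the 1.2 branch uses StreamIn, shown later in this thread). As a rough sketch of how a receiving side might drive it, where the header/session plumbing is illustrative and not taken from the patch:

    // Assumes a FileMessageHeader and StreamSession have already been set up
    // for the incoming file by the surrounding streaming machinery.
    public SSTableReader receiveFile(FileMessageHeader header,
                                     StreamSession session,
                                     ReadableByteChannel channel) throws IOException
    {
        StreamReader reader = new StreamReader(header, session);
        // read() appends rows until the byte count implied by the header's sections
        // has been consumed, then returns the freshly written, opened sstable.
        return reader.read(channel);
    }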

http://git-wip-us.apache.org/repos/asf/cassandra/blob/2648047a/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java
----------------------------------------------------------------------
diff --cc test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java
index f21b60e,abe3f05..0a78b2a
--- a/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java
+++ b/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java
@@@ -1334,66 -1240,12 +1334,65 @@@ public class ColumnFamilyStoreTest exte
          testMultiRangeSlicesBehavior(prepareMultiRangeSlicesTest(10, false));
      }
  
 +    @Test
 +    public void testRemoveUnifinishedCompactionLeftovers() throws Throwable
 +    {
 +        String ks = "Keyspace1";
 +        String cf = "Standard3"; // should be empty
 +
 +        final CFMetaData cfmeta = Schema.instance.getCFMetaData(ks, cf);
 +        Directories dir = Directories.create(ks, cf);
 +        ByteBuffer key = bytes("key");
 +
 +        // 1st sstable
-         SSTableSimpleWriter writer = new SSTableSimpleWriter(dir.getDirectoryForNewSSTables(100),
-                                                              cfmeta, StorageService.getPartitioner());
++        SSTableSimpleWriter writer = new SSTableSimpleWriter(dir.getDirectoryForNewSSTables(), cfmeta, StorageService.getPartitioner());
 +        writer.newRow(key);
 +        writer.addColumn(bytes("col"), bytes("val"), 1);
 +        writer.close();
 +
 +        Map<Descriptor, Set<Component>> sstables = dir.sstableLister().list();
 +        assert sstables.size() == 1;
 +
 +        Map.Entry<Descriptor, Set<Component>> sstableToOpen = sstables.entrySet().iterator().next();
 +        final SSTableReader sstable1 = SSTableReader.open(sstableToOpen.getKey());
 +
 +        // simulate incomplete compaction
-         writer = new SSTableSimpleWriter(dir.getDirectoryForNewSSTables(100),
++        writer = new SSTableSimpleWriter(dir.getDirectoryForNewSSTables(),
 +                                                             cfmeta, StorageService.getPartitioner())
 +        {
 +            protected SSTableWriter getWriter()
 +            {
 +                SSTableMetadata.Collector collector = SSTableMetadata.createCollector(cfmeta.comparator);
 +                collector.addAncestor(sstable1.descriptor.generation); // add ancestor from previously written sstable
 +                return new SSTableWriter(makeFilename(directory, metadata.ksName, metadata.cfName),
 +                                         0,
 +                                         metadata,
 +                                         StorageService.getPartitioner(),
 +                                         collector);
 +            }
 +        };
 +        writer.newRow(key);
 +        writer.addColumn(bytes("col"), bytes("val"), 1);
 +        writer.close();
 +
 +        // should have 2 sstables now
 +        sstables = dir.sstableLister().list();
 +        assert sstables.size() == 2;
 +
 +        ColumnFamilyStore.removeUnfinishedCompactionLeftovers(ks, cf, Sets.newHashSet(sstable1.descriptor.generation));
 +
 +        // 2nd sstable should be removed (only 1st sstable exists in set of size 1)
 +        sstables = dir.sstableLister().list();
 +        assert sstables.size() == 1;
 +        assert sstables.containsKey(sstable1.descriptor);
 +    }
 +
      private ColumnFamilyStore prepareMultiRangeSlicesTest(int valueSize, boolean flush) throws Throwable
      {
 -        String tableName = "Keyspace1";
 +        String keyspaceName = "Keyspace1";
          String cfName = "Standard1";
 -        Table table = Table.open(tableName);
 -        ColumnFamilyStore cfs = table.getColumnFamilyStore(cfName);
 +        Keyspace keyspace = Keyspace.open(keyspaceName);
 +        ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfName);
          cfs.clearUnsafe();
  
          String[] letters = new String[] { "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l" };

http://git-wip-us.apache.org/repos/asf/cassandra/blob/2648047a/test/unit/org/apache/cassandra/db/DirectoriesTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cassandra/blob/2648047a/test/unit/org/apache/cassandra/db/compaction/LegacyLeveledManifestTest.java
----------------------------------------------------------------------
diff --cc test/unit/org/apache/cassandra/db/compaction/LegacyLeveledManifestTest.java
index d8e8af0,0000000..7fd6c10
mode 100644,000000..100644
--- a/test/unit/org/apache/cassandra/db/compaction/LegacyLeveledManifestTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/LegacyLeveledManifestTest.java
@@@ -1,117 -1,0 +1,117 @@@
 +package org.apache.cassandra.db.compaction;
 +/*
 + * 
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + * 
 + *   http://www.apache.org/licenses/LICENSE-2.0
 + * 
 + * Unless required by applicable law or agreed to in writing,
 + * software distributed under the License is distributed on an
 + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 + * KIND, either express or implied.  See the License for the
 + * specific language governing permissions and limitations
 + * under the License.
 + * 
 + */
 +
 +
 +import java.io.File;
 +import java.io.IOException;
 +import java.util.HashMap;
 +import java.util.Map;
 +import java.util.Set;
 +import org.junit.After;
 +import org.junit.Before;
 +import org.junit.Test;
 +
 +import org.apache.cassandra.db.Directories;
 +import org.apache.cassandra.io.sstable.Descriptor;
 +import org.apache.cassandra.io.sstable.SSTableMetadata;
 +import org.apache.cassandra.io.util.FileUtils;
 +import org.apache.cassandra.utils.Pair;
 +import static org.junit.Assert.assertEquals;
 +import static org.junit.Assert.assertTrue;
 +import static org.apache.cassandra.db.compaction.LegacyLeveledManifestTestHelper.*;
 +
 +public class LegacyLeveledManifestTest
 +{
 +    private final static String LEGACY_VERSION = "ic";
 +
 +    private File destDir;
 +    @Before
 +    public void setup()
 +    {
-         destDir = Directories.create(KS, CF).getDirectoryForNewSSTables(0);
++        destDir = Directories.create(KS, CF).getDirectoryForNewSSTables();
 +        FileUtils.createDirectory(destDir);
 +        for (File srcFile : getLegacySSTableDir(LEGACY_VERSION).listFiles())
 +        {
 +            File destFile = new File(destDir, srcFile.getName());
 +            FileUtils.createHardLink(srcFile,destFile);
 +            assert destFile.exists() : destFile.getAbsoluteFile();
 +        }
 +    }
 +    @After
 +    public void tearDown()
 +    {
 +        FileUtils.deleteRecursive(destDir);
 +    }
 +
 +    @Test
 +    public void migrateTest() throws IOException
 +    {
 +        assertTrue(LegacyLeveledManifest.manifestNeedsMigration(KS, CF));
 +    }
 +
 +    @Test
 +    public void doMigrationTest() throws IOException, InterruptedException
 +    {
 +        LegacyLeveledManifest.migrateManifests(KS, CF);
 +
 +        for (int i = 0; i <= 2; i++)
 +        {
 +            Descriptor descriptor = Descriptor.fromFilename(destDir+File.separator+KS+"-"+CF+"-"+LEGACY_VERSION+"-"+i+"-Statistics.db");
 +            SSTableMetadata metadata = SSTableMetadata.serializer.deserialize(descriptor).left;
 +            assertEquals(metadata.sstableLevel, i);
 +        }
 +    }
 +
 +    /**
 +     * Validate that the rewritten stats file is the same as the original one.
 +     * @throws IOException
 +     */
 +    @Test
 +    public void validateSSTableMetadataTest() throws IOException
 +    {
 +        Map<Descriptor, Pair<SSTableMetadata, Set<Integer>>> beforeMigration = new HashMap<>();
 +        for (int i = 0; i <= 2; i++)
 +        {
 +            Descriptor descriptor = Descriptor.fromFilename(destDir+File.separator+KS+"-"+CF+"-"+LEGACY_VERSION+"-"+i+"-Statistics.db");
 +            beforeMigration.put(descriptor, SSTableMetadata.serializer.deserialize(descriptor, false));
 +        }
 +
 +        LegacyLeveledManifest.migrateManifests(KS, CF);
 +
 +        for (Map.Entry<Descriptor, Pair<SSTableMetadata, Set<Integer>>> entry : beforeMigration.entrySet())
 +        {
 +            Pair<SSTableMetadata, Set<Integer>> newMetaPair = SSTableMetadata.serializer.deserialize(entry.getKey());
 +            SSTableMetadata newMetadata = newMetaPair.left;
 +            SSTableMetadata oldMetadata = entry.getValue().left;
 +            assertEquals(newMetadata.estimatedRowSize, oldMetadata.estimatedRowSize);
 +            assertEquals(newMetadata.estimatedColumnCount, oldMetadata.estimatedColumnCount);
 +            assertEquals(newMetadata.replayPosition, oldMetadata.replayPosition);
 +            assertEquals(newMetadata.minTimestamp, oldMetadata.minTimestamp);
 +            assertEquals(newMetadata.maxTimestamp, oldMetadata.maxTimestamp);
 +            assertEquals(newMetadata.compressionRatio, oldMetadata.compressionRatio, 0.01);
 +            assertEquals(newMetadata.partitioner, oldMetadata.partitioner);
 +            assertEquals(newMetadata.estimatedTombstoneDropTime, oldMetadata.estimatedTombstoneDropTime);
 +            assertEquals(entry.getValue().right, newMetaPair.right);
 +        }
 +    }
 +
 +}
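
The two migration tests above boil down to a check-then-migrate pattern; a minimal sketch of that shape (ks and cf are placeholder keyspace and column family names, and both calls may throw IOException):

    if (LegacyLeveledManifest.manifestNeedsMigration(ks, cf))
        LegacyLeveledManifest.migrateManifests(ks, cf);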

http://git-wip-us.apache.org/repos/asf/cassandra/blob/2648047a/test/unit/org/apache/cassandra/io/sstable/SSTableSimpleWriterTest.java
----------------------------------------------------------------------
diff --cc test/unit/org/apache/cassandra/io/sstable/SSTableSimpleWriterTest.java
index e80d2bb,ce569b9..9e7aa16
--- a/test/unit/org/apache/cassandra/io/sstable/SSTableSimpleWriterTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/SSTableSimpleWriterTest.java
@@@ -39,11 -39,11 +39,11 @@@ public class SSTableSimpleWriterTest ex
          final int INC = 5;
          final int NBCOL = 10;
  
 -        String tablename = "Keyspace1";
 +        String keyspaceName = "Keyspace1";
          String cfname = "StandardInteger1";
  
 -        Table t = Table.open(tablename); // make sure we create the directory
 -        File dir = Directories.create(tablename, cfname).getDirectoryForNewSSTables();
 +        Keyspace t = Keyspace.open(keyspaceName); // make sure we create the directory
-         File dir = Directories.create(keyspaceName, cfname).getDirectoryForNewSSTables(0);
++        File dir = Directories.create(keyspaceName, cfname).getDirectoryForNewSSTables();
          assert dir.exists();
  
          IPartitioner partitioner = StorageService.getPartitioner();


[04/10] git commit: Fix CQL doc imprecision

Posted by jb...@apache.org.
Fix CQL doc imprecision


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/d28cf3ea
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/d28cf3ea
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/d28cf3ea

Branch: refs/heads/trunk
Commit: d28cf3eabda8c78ed6b6bc578392643de5d1476f
Parents: 8d6271d
Author: Sylvain Lebresne <sy...@datastax.com>
Authored: Wed Sep 18 17:53:29 2013 +0200
Committer: Sylvain Lebresne <sy...@datastax.com>
Committed: Wed Sep 18 17:53:29 2013 +0200

----------------------------------------------------------------------
 doc/cql3/CQL.textile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/d28cf3ea/doc/cql3/CQL.textile
----------------------------------------------------------------------
diff --git a/doc/cql3/CQL.textile b/doc/cql3/CQL.textile
index 168a05d..e0cea80 100644
--- a/doc/cql3/CQL.textile
+++ b/doc/cql3/CQL.textile
@@ -602,7 +602,7 @@ bc(syntax)..
 
 <relation> ::= <identifier> ('=' | '<' | '>' | '<=' | '>=') <term>
              | <identifier> IN '(' ( <term> ( ',' <term>)* )? ')'
-             | TOKEN '(' <identifier> ')' ('=' | '<' | '>' | '<=' | '>=') (<term> | TOKEN '(' <term> ')' )
+             | TOKEN '(' <identifier> ( ',' <identifer>)* ')' ('=' | '<' | '>' | '<=' | '>=') <term>
 
 <order-by> ::= <ordering> ( ',' <odering> )*
 <ordering> ::= <identifer> ( ASC | DESC )?
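
The corrected rule reflects that TOKEN names the full partition key, which may span several columns. For example, assuming a table whose composite partition key is (k1 int, k2 text), a range query could read WHERE TOKEN(k1, k2) > TOKEN(42, 'abc'), the right-hand side being an ordinary <term> (here a token() call).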


[07/10] git commit: Avoid second-guessing out-of-space state (patch by jbellis; reviewed by yukim for CASSANDRA-5605)

Posted by jb...@apache.org.
Avoid second-guessing out-of-space state
patch by jbellis; reviewed by yukim for CASSANDRA-5605


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/7161aec4
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/7161aec4
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/7161aec4

Branch: refs/heads/trunk
Commit: 7161aec42c5bdb9e007587e20bc71603a505a95d
Parents: d28cf3e
Author: Jonathan Ellis <jb...@apache.org>
Authored: Wed Sep 18 12:12:51 2013 -0500
Committer: Jonathan Ellis <jb...@apache.org>
Committed: Wed Sep 18 12:13:23 2013 -0500

----------------------------------------------------------------------
 CHANGES.txt                                     |  1 +
 .../org/apache/cassandra/db/Directories.java    | 62 +++++---------------
 .../db/compaction/CompactionManager.java        |  2 +-
 .../cassandra/db/compaction/Scrubber.java       |  2 +-
 .../cassandra/io/util/DiskAwareRunnable.java    |  2 +-
 .../apache/cassandra/streaming/StreamIn.java    |  2 +-
 .../cassandra/db/ColumnFamilyStoreTest.java     |  2 +-
 .../apache/cassandra/db/DirectoriesTest.java    |  4 +-
 .../unit/org/apache/cassandra/db/ScrubTest.java |  2 +-
 .../cassandra/io/sstable/SSTableReaderTest.java |  3 +-
 .../io/sstable/SSTableSimpleWriterTest.java     |  2 +-
 11 files changed, 27 insertions(+), 57 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/7161aec4/CHANGES.txt
----------------------------------------------------------------------
diff --git a/CHANGES.txt b/CHANGES.txt
index 47ff752..fb9915e 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -1,4 +1,5 @@
 1.2.10
+ * Avoid second-guessing out-of-space state (CASSANDRA-5605)
  * Tuning knobs for dealing with large blobs and many CFs (CASSANDRA-5982)
  * (Hadoop) Fix CQLRW for thrift tables (CASSANDRA-6002)
  * Fix possible divide-by-zero in HHOM (CASSANDRA-5990)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7161aec4/src/java/org/apache/cassandra/db/Directories.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/Directories.java b/src/java/org/apache/cassandra/db/Directories.java
index 0890d29..351c0c0 100644
--- a/src/java/org/apache/cassandra/db/Directories.java
+++ b/src/java/org/apache/cassandra/db/Directories.java
@@ -19,6 +19,7 @@ package org.apache.cassandra.db;
 
 import java.io.File;
 import java.io.FileFilter;
+import java.io.IOError;
 import java.io.IOException;
 import java.util.*;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -132,9 +133,9 @@ public class Directories
         return null;
     }
 
-    public File getDirectoryForNewSSTables(long estimatedSize)
+    public File getDirectoryForNewSSTables()
     {
-        File path = getLocationWithMaximumAvailableSpace(estimatedSize);
+        File path = getWriteableLocationAsFile();
 
         // Requesting GC has a chance to free space only if we're using mmap and a non SUN jvm
         if (path == null
@@ -154,68 +155,37 @@ public class Directories
             {
                 throw new AssertionError(e);
             }
-            path = getLocationWithMaximumAvailableSpace(estimatedSize);
+            path = getWriteableLocationAsFile();
         }
 
         return path;
     }
 
-    /*
-     * Loop through all the disks to see which disk has the max free space
-     * return the disk with max free space for compactions. If the size of the expected
-     * compacted file is greater than the max disk space available return null, we cannot
-     * do compaction in this case.
-     */
-    public File getLocationWithMaximumAvailableSpace(long estimatedSize)
+    public File getWriteableLocationAsFile()
     {
-        long maxFreeDisk = 0;
-        File maxLocation = null;
-
-        for (File dir : sstableDirectories)
-        {
-            if (BlacklistedDirectories.isUnwritable(dir))
-                continue;
-
-            long usableSpace = dir.getUsableSpace();
-            if (maxFreeDisk < usableSpace)
-            {
-                maxFreeDisk = usableSpace;
-                maxLocation = dir;
-            }
-        }
-        // Load factor of 0.9 we do not want to use the entire disk that is too risky.
-        maxFreeDisk = (long) (0.9 * maxFreeDisk);
-        logger.debug(String.format("expected data files size is %d; largest free partition (%s) has %d bytes free",
-                                   estimatedSize, maxLocation, maxFreeDisk));
-
-        return estimatedSize < maxFreeDisk ? maxLocation : null;
+        return getLocationForDisk(getWriteableLocation());
     }
 
     /**
-     * Finds location which is capable of holding given {@code estimatedSize}.
-     * Picks a non-blacklisted directory with most free space and least current tasks.
-     * If no directory can hold given {@code estimatedSize}, then returns null.
+     * @return a non-blacklisted directory with the most free space and least current tasks.
      *
-     * @param estimatedSize estimated size you need to find location to fit
-     * @return directory capable of given estimated size, or null if none found
+     * @throws IOError if all directories are blacklisted.
      */
-    public DataDirectory getLocationCapableOfSize(long estimatedSize)
+    public DataDirectory getWriteableLocation()
     {
         List<DataDirectory> candidates = new ArrayList<DataDirectory>();
 
         // pick directories with enough space and so that resulting sstable dirs aren't blacklisted for writes.
         for (DataDirectory dataDir : dataFileLocations)
         {
-            File sstableDir = getLocationForDisk(dataDir);
-
-            if (BlacklistedDirectories.isUnwritable(sstableDir))
+            if (BlacklistedDirectories.isUnwritable(getLocationForDisk(dataDir)))
                 continue;
-
-            // need a separate check for sstableDir itself - could be a mounted separate disk or SSD just for this CF.
-            if (dataDir.getEstimatedAvailableSpace() > estimatedSize && sstableDir.getUsableSpace() * 0.9 > estimatedSize)
-                candidates.add(dataDir);
+            candidates.add(dataDir);
         }
 
+        if (candidates.isEmpty())
+            throw new IOError(new IOException("All configured data directories have been blacklisted as unwritable for erroring out"));
+
         // sort directories by free space, in _descending_ order.
         Collections.sort(candidates);
 
@@ -228,7 +198,7 @@ public class Directories
             }
         });
 
-        return candidates.isEmpty() ? null : candidates.get(0);
+        return candidates.get(0);
     }
 
 
@@ -265,7 +235,7 @@ public class Directories
         public long getEstimatedAvailableSpace()
         {
             // Load factor of 0.9 we do not want to use the entire disk that is too risky.
-            return (long)(0.9 * location.getUsableSpace()) - estimatedWorkingSize.get();
+            return location.getUsableSpace() - estimatedWorkingSize.get();
         }
 
         public int compareTo(DataDirectory o)
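
Read together, the Directories changes above amount to: stop second-guessing whether an estimated write will fit, and simply hand back the best currently writeable directory. Approximately, and omitting the "least current tasks" tie-break mentioned in the new javadoc, the resulting method reads:

    public DataDirectory getWriteableLocation()
    {
        List<DataDirectory> candidates = new ArrayList<DataDirectory>();
        for (DataDirectory dataDir : dataFileLocations)
            if (!BlacklistedDirectories.isUnwritable(getLocationForDisk(dataDir)))
                candidates.add(dataDir);

        if (candidates.isEmpty())
            throw new IOError(new IOException("All configured data directories have been blacklisted as unwritable"));

        Collections.sort(candidates); // DataDirectory orders by free space, descending
        return candidates.get(0);
    }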

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7161aec4/src/java/org/apache/cassandra/db/compaction/CompactionManager.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/compaction/CompactionManager.java b/src/java/org/apache/cassandra/db/compaction/CompactionManager.java
index 4c9c707..93f3108 100644
--- a/src/java/org/apache/cassandra/db/compaction/CompactionManager.java
+++ b/src/java/org/apache/cassandra/db/compaction/CompactionManager.java
@@ -584,7 +584,7 @@ public class CompactionManager implements CompactionManagerMBean
             logger.info("Cleaning up " + sstable);
             // Calculate the expected compacted filesize
             long expectedRangeFileSize = cfs.getExpectedCompactedFileSize(Arrays.asList(sstable), OperationType.CLEANUP);
-            File compactionFileLocation = cfs.directories.getDirectoryForNewSSTables(expectedRangeFileSize);
+            File compactionFileLocation = cfs.directories.getDirectoryForNewSSTables();
             if (compactionFileLocation == null)
                 throw new IOException("disk full");
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7161aec4/src/java/org/apache/cassandra/db/compaction/Scrubber.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/compaction/Scrubber.java b/src/java/org/apache/cassandra/db/compaction/Scrubber.java
index cb529cb..7b2178b 100644
--- a/src/java/org/apache/cassandra/db/compaction/Scrubber.java
+++ b/src/java/org/apache/cassandra/db/compaction/Scrubber.java
@@ -76,7 +76,7 @@ public class Scrubber implements Closeable
         this.outputHandler = outputHandler;
 
         // Calculate the expected compacted filesize
-        this.destination = cfs.directories.getDirectoryForNewSSTables(sstable.onDiskLength());
+        this.destination = cfs.directories.getDirectoryForNewSSTables();
         if (destination == null)
             throw new IOException("disk full");
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7161aec4/src/java/org/apache/cassandra/io/util/DiskAwareRunnable.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/util/DiskAwareRunnable.java b/src/java/org/apache/cassandra/io/util/DiskAwareRunnable.java
index 1be4803..198a88d 100644
--- a/src/java/org/apache/cassandra/io/util/DiskAwareRunnable.java
+++ b/src/java/org/apache/cassandra/io/util/DiskAwareRunnable.java
@@ -34,7 +34,7 @@ public abstract class DiskAwareRunnable extends WrappedRunnable
         while (true)
         {
             writeSize = getExpectedWriteSize();
-            directory = getDirectories().getLocationCapableOfSize(writeSize);
+            directory = getDirectories().getWriteableLocation();
             if (directory != null || !reduceScopeForLimitedSpace())
                 break;
         }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7161aec4/src/java/org/apache/cassandra/streaming/StreamIn.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/streaming/StreamIn.java b/src/java/org/apache/cassandra/streaming/StreamIn.java
index 740b430..85ea7fa 100644
--- a/src/java/org/apache/cassandra/streaming/StreamIn.java
+++ b/src/java/org/apache/cassandra/streaming/StreamIn.java
@@ -80,7 +80,7 @@ public class StreamIn
         // new local sstable
         Table table = Table.open(remotedesc.ksname);
         ColumnFamilyStore cfStore = table.getColumnFamilyStore(remotedesc.cfname);
-        Directories.DataDirectory localDir = cfStore.directories.getLocationCapableOfSize(remote.size);
+        Directories.DataDirectory localDir = cfStore.directories.getWriteableLocation();
         if (localDir == null)
             throw new RuntimeException("Insufficient disk space to store " + remote.size + " bytes");
         Descriptor localdesc = Descriptor.fromFilename(cfStore.getTempSSTablePath(cfStore.directories.getLocationForDisk(localDir)));

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7161aec4/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java b/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java
index a394644..abe3f05 100644
--- a/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java
+++ b/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java
@@ -837,7 +837,7 @@ public class ColumnFamilyStoreTest extends SchemaLoader
 
         for (int version = 1; version <= 2; ++version)
         {
-            Descriptor existing = new Descriptor(cfs.directories.getDirectoryForNewSSTables(1), "Keyspace2", "Standard1", version, false);
+            Descriptor existing = new Descriptor(cfs.directories.getDirectoryForNewSSTables(), "Keyspace2", "Standard1", version, false);
             Descriptor desc = new Descriptor(Directories.getBackupsDirectory(existing), "Keyspace2", "Standard1", version, false);
             for (Component c : new Component[]{ Component.DATA, Component.PRIMARY_INDEX, Component.FILTER, Component.STATS })
                 assertTrue("can not find backedup file:" + desc.filenameFor(c), new File(desc.filenameFor(c)).exists());

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7161aec4/test/unit/org/apache/cassandra/db/DirectoriesTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/DirectoriesTest.java b/test/unit/org/apache/cassandra/db/DirectoriesTest.java
index 21e183c..dce6f87 100644
--- a/test/unit/org/apache/cassandra/db/DirectoriesTest.java
+++ b/test/unit/org/apache/cassandra/db/DirectoriesTest.java
@@ -107,7 +107,7 @@ public class DirectoriesTest
         for (String cf : CFS)
         {
             Directories directories = Directories.create(KS, cf);
-            Assert.assertEquals(cfDir(cf), directories.getDirectoryForNewSSTables(0));
+            Assert.assertEquals(cfDir(cf), directories.getDirectoryForNewSSTables());
 
             Descriptor desc = new Descriptor(cfDir(cf), KS, cf, 1, false);
             File snapshotDir = new File(cfDir(cf),  File.separator + Directories.SNAPSHOT_SUBDIR + File.separator + "42");
@@ -180,7 +180,7 @@ public class DirectoriesTest
     {
         /* files not matching the pattern should just be ignored, with a log warning */
         Directories directories = Directories.create(KS, "bad");
-        File dir = directories.getDirectoryForNewSSTables(1);
+        File dir = directories.getDirectoryForNewSSTables();
         File f = File.createTempFile("bad", "file", dir.getParentFile());
         Directories.migrateSSTables();
         Assert.assertTrue(f.isFile());

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7161aec4/test/unit/org/apache/cassandra/db/ScrubTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/ScrubTest.java b/test/unit/org/apache/cassandra/db/ScrubTest.java
index 26f0e78..c26939a 100644
--- a/test/unit/org/apache/cassandra/db/ScrubTest.java
+++ b/test/unit/org/apache/cassandra/db/ScrubTest.java
@@ -55,7 +55,7 @@ public class ScrubTest extends SchemaLoader
         File rootDir = new File(root);
         assert rootDir.isDirectory();
 
-        File destDir = Directories.create(TABLE, cf).getDirectoryForNewSSTables(1);
+        File destDir = Directories.create(TABLE, cf).getDirectoryForNewSSTables();
 
         String corruptSSTableName = null;
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7161aec4/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java b/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java
index d0670a0..02b6855 100644
--- a/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java
@@ -28,7 +28,6 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 import java.util.concurrent.ExecutionException;
-import java.util.*;
 
 import org.junit.Test;
 import org.junit.runner.RunWith;
@@ -230,7 +229,7 @@ public class SSTableReaderTest extends SchemaLoader
         File rootDir = new File(root + File.separator + "hb" + File.separator + "Keyspace1");
         assert rootDir.isDirectory();
 
-        File destDir = Directories.create("Keyspace1", "Indexed1").getDirectoryForNewSSTables(0);
+        File destDir = Directories.create("Keyspace1", "Indexed1").getDirectoryForNewSSTables();
         assert destDir != null;
 
         FileUtils.createDirectory(destDir);

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7161aec4/test/unit/org/apache/cassandra/io/sstable/SSTableSimpleWriterTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableSimpleWriterTest.java b/test/unit/org/apache/cassandra/io/sstable/SSTableSimpleWriterTest.java
index 6efdc9b..ce569b9 100644
--- a/test/unit/org/apache/cassandra/io/sstable/SSTableSimpleWriterTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/SSTableSimpleWriterTest.java
@@ -43,7 +43,7 @@ public class SSTableSimpleWriterTest extends SchemaLoader
         String cfname = "StandardInteger1";
 
         Table t = Table.open(tablename); // make sure we create the directory
-        File dir = Directories.create(tablename, cfname).getDirectoryForNewSSTables(0);
+        File dir = Directories.create(tablename, cfname).getDirectoryForNewSSTables();
         assert dir.exists();
 
         IPartitioner partitioner = StorageService.getPartitioner();


[05/10] git commit: Avoid second-guessing out-of-space state (patch by jbellis; reviewed by yukim for CASSANDRA-5605)

Posted by jb...@apache.org.
Avoid second-guessing out-of-space state
patch by jbellis; reviewed by yukim for CASSANDRA-5605


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/7161aec4
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/7161aec4
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/7161aec4

Branch: refs/heads/cassandra-2.0
Commit: 7161aec42c5bdb9e007587e20bc71603a505a95d
Parents: d28cf3e
Author: Jonathan Ellis <jb...@apache.org>
Authored: Wed Sep 18 12:12:51 2013 -0500
Committer: Jonathan Ellis <jb...@apache.org>
Committed: Wed Sep 18 12:13:23 2013 -0500

----------------------------------------------------------------------
 CHANGES.txt                                     |  1 +
 .../org/apache/cassandra/db/Directories.java    | 62 +++++---------------
 .../db/compaction/CompactionManager.java        |  2 +-
 .../cassandra/db/compaction/Scrubber.java       |  2 +-
 .../cassandra/io/util/DiskAwareRunnable.java    |  2 +-
 .../apache/cassandra/streaming/StreamIn.java    |  2 +-
 .../cassandra/db/ColumnFamilyStoreTest.java     |  2 +-
 .../apache/cassandra/db/DirectoriesTest.java    |  4 +-
 .../unit/org/apache/cassandra/db/ScrubTest.java |  2 +-
 .../cassandra/io/sstable/SSTableReaderTest.java |  3 +-
 .../io/sstable/SSTableSimpleWriterTest.java     |  2 +-
 11 files changed, 27 insertions(+), 57 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/7161aec4/CHANGES.txt
----------------------------------------------------------------------
diff --git a/CHANGES.txt b/CHANGES.txt
index 47ff752..fb9915e 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -1,4 +1,5 @@
 1.2.10
+ * Avoid second-guessing out-of-space state (CASSANDRA-5605)
  * Tuning knobs for dealing with large blobs and many CFs (CASSANDRA-5982)
  * (Hadoop) Fix CQLRW for thrift tables (CASSANDRA-6002)
  * Fix possible divide-by-zero in HHOM (CASSANDRA-5990)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7161aec4/src/java/org/apache/cassandra/db/Directories.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/Directories.java b/src/java/org/apache/cassandra/db/Directories.java
index 0890d29..351c0c0 100644
--- a/src/java/org/apache/cassandra/db/Directories.java
+++ b/src/java/org/apache/cassandra/db/Directories.java
@@ -19,6 +19,7 @@ package org.apache.cassandra.db;
 
 import java.io.File;
 import java.io.FileFilter;
+import java.io.IOError;
 import java.io.IOException;
 import java.util.*;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -132,9 +133,9 @@ public class Directories
         return null;
     }
 
-    public File getDirectoryForNewSSTables(long estimatedSize)
+    public File getDirectoryForNewSSTables()
     {
-        File path = getLocationWithMaximumAvailableSpace(estimatedSize);
+        File path = getWriteableLocationAsFile();
 
         // Requesting GC has a chance to free space only if we're using mmap and a non SUN jvm
         if (path == null
@@ -154,68 +155,37 @@ public class Directories
             {
                 throw new AssertionError(e);
             }
-            path = getLocationWithMaximumAvailableSpace(estimatedSize);
+            path = getWriteableLocationAsFile();
         }
 
         return path;
     }
 
-    /*
-     * Loop through all the disks to see which disk has the max free space
-     * return the disk with max free space for compactions. If the size of the expected
-     * compacted file is greater than the max disk space available return null, we cannot
-     * do compaction in this case.
-     */
-    public File getLocationWithMaximumAvailableSpace(long estimatedSize)
+    public File getWriteableLocationAsFile()
     {
-        long maxFreeDisk = 0;
-        File maxLocation = null;
-
-        for (File dir : sstableDirectories)
-        {
-            if (BlacklistedDirectories.isUnwritable(dir))
-                continue;
-
-            long usableSpace = dir.getUsableSpace();
-            if (maxFreeDisk < usableSpace)
-            {
-                maxFreeDisk = usableSpace;
-                maxLocation = dir;
-            }
-        }
-        // Load factor of 0.9 we do not want to use the entire disk that is too risky.
-        maxFreeDisk = (long) (0.9 * maxFreeDisk);
-        logger.debug(String.format("expected data files size is %d; largest free partition (%s) has %d bytes free",
-                                   estimatedSize, maxLocation, maxFreeDisk));
-
-        return estimatedSize < maxFreeDisk ? maxLocation : null;
+        return getLocationForDisk(getWriteableLocation());
     }
 
     /**
-     * Finds location which is capable of holding given {@code estimatedSize}.
-     * Picks a non-blacklisted directory with most free space and least current tasks.
-     * If no directory can hold given {@code estimatedSize}, then returns null.
+     * @return a non-blacklisted directory with the most free space and least current tasks.
      *
-     * @param estimatedSize estimated size you need to find location to fit
-     * @return directory capable of given estimated size, or null if none found
+     * @throws IOError if all directories are blacklisted.
      */
-    public DataDirectory getLocationCapableOfSize(long estimatedSize)
+    public DataDirectory getWriteableLocation()
     {
         List<DataDirectory> candidates = new ArrayList<DataDirectory>();
 
         // pick directories with enough space and so that resulting sstable dirs aren't blacklisted for writes.
         for (DataDirectory dataDir : dataFileLocations)
         {
-            File sstableDir = getLocationForDisk(dataDir);
-
-            if (BlacklistedDirectories.isUnwritable(sstableDir))
+            if (BlacklistedDirectories.isUnwritable(getLocationForDisk(dataDir)))
                 continue;
-
-            // need a separate check for sstableDir itself - could be a mounted separate disk or SSD just for this CF.
-            if (dataDir.getEstimatedAvailableSpace() > estimatedSize && sstableDir.getUsableSpace() * 0.9 > estimatedSize)
-                candidates.add(dataDir);
+            candidates.add(dataDir);
         }
 
+        if (candidates.isEmpty())
+            throw new IOError(new IOException("All configured data directories have been blacklisted as unwritable for erroring out"));
+
         // sort directories by free space, in _descending_ order.
         Collections.sort(candidates);
 
@@ -228,7 +198,7 @@ public class Directories
             }
         });
 
-        return candidates.isEmpty() ? null : candidates.get(0);
+        return candidates.get(0);
     }
 
 
@@ -265,7 +235,7 @@ public class Directories
         public long getEstimatedAvailableSpace()
         {
             // Load factor of 0.9 we do not want to use the entire disk that is too risky.
-            return (long)(0.9 * location.getUsableSpace()) - estimatedWorkingSize.get();
+            return location.getUsableSpace() - estimatedWorkingSize.get();
         }
 
         public int compareTo(DataDirectory o)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7161aec4/src/java/org/apache/cassandra/db/compaction/CompactionManager.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/compaction/CompactionManager.java b/src/java/org/apache/cassandra/db/compaction/CompactionManager.java
index 4c9c707..93f3108 100644
--- a/src/java/org/apache/cassandra/db/compaction/CompactionManager.java
+++ b/src/java/org/apache/cassandra/db/compaction/CompactionManager.java
@@ -584,7 +584,7 @@ public class CompactionManager implements CompactionManagerMBean
             logger.info("Cleaning up " + sstable);
             // Calculate the expected compacted filesize
             long expectedRangeFileSize = cfs.getExpectedCompactedFileSize(Arrays.asList(sstable), OperationType.CLEANUP);
-            File compactionFileLocation = cfs.directories.getDirectoryForNewSSTables(expectedRangeFileSize);
+            File compactionFileLocation = cfs.directories.getDirectoryForNewSSTables();
             if (compactionFileLocation == null)
                 throw new IOException("disk full");
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7161aec4/src/java/org/apache/cassandra/db/compaction/Scrubber.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/compaction/Scrubber.java b/src/java/org/apache/cassandra/db/compaction/Scrubber.java
index cb529cb..7b2178b 100644
--- a/src/java/org/apache/cassandra/db/compaction/Scrubber.java
+++ b/src/java/org/apache/cassandra/db/compaction/Scrubber.java
@@ -76,7 +76,7 @@ public class Scrubber implements Closeable
         this.outputHandler = outputHandler;
 
         // Calculate the expected compacted filesize
-        this.destination = cfs.directories.getDirectoryForNewSSTables(sstable.onDiskLength());
+        this.destination = cfs.directories.getDirectoryForNewSSTables();
         if (destination == null)
             throw new IOException("disk full");
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7161aec4/src/java/org/apache/cassandra/io/util/DiskAwareRunnable.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/util/DiskAwareRunnable.java b/src/java/org/apache/cassandra/io/util/DiskAwareRunnable.java
index 1be4803..198a88d 100644
--- a/src/java/org/apache/cassandra/io/util/DiskAwareRunnable.java
+++ b/src/java/org/apache/cassandra/io/util/DiskAwareRunnable.java
@@ -34,7 +34,7 @@ public abstract class DiskAwareRunnable extends WrappedRunnable
         while (true)
         {
             writeSize = getExpectedWriteSize();
-            directory = getDirectories().getLocationCapableOfSize(writeSize);
+            directory = getDirectories().getWriteableLocation();
             if (directory != null || !reduceScopeForLimitedSpace())
                 break;
         }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7161aec4/src/java/org/apache/cassandra/streaming/StreamIn.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/streaming/StreamIn.java b/src/java/org/apache/cassandra/streaming/StreamIn.java
index 740b430..85ea7fa 100644
--- a/src/java/org/apache/cassandra/streaming/StreamIn.java
+++ b/src/java/org/apache/cassandra/streaming/StreamIn.java
@@ -80,7 +80,7 @@ public class StreamIn
         // new local sstable
         Table table = Table.open(remotedesc.ksname);
         ColumnFamilyStore cfStore = table.getColumnFamilyStore(remotedesc.cfname);
-        Directories.DataDirectory localDir = cfStore.directories.getLocationCapableOfSize(remote.size);
+        Directories.DataDirectory localDir = cfStore.directories.getWriteableLocation();
         if (localDir == null)
             throw new RuntimeException("Insufficient disk space to store " + remote.size + " bytes");
         Descriptor localdesc = Descriptor.fromFilename(cfStore.getTempSSTablePath(cfStore.directories.getLocationForDisk(localDir)));

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7161aec4/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java b/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java
index a394644..abe3f05 100644
--- a/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java
+++ b/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java
@@ -837,7 +837,7 @@ public class ColumnFamilyStoreTest extends SchemaLoader
 
         for (int version = 1; version <= 2; ++version)
         {
-            Descriptor existing = new Descriptor(cfs.directories.getDirectoryForNewSSTables(1), "Keyspace2", "Standard1", version, false);
+            Descriptor existing = new Descriptor(cfs.directories.getDirectoryForNewSSTables(), "Keyspace2", "Standard1", version, false);
             Descriptor desc = new Descriptor(Directories.getBackupsDirectory(existing), "Keyspace2", "Standard1", version, false);
             for (Component c : new Component[]{ Component.DATA, Component.PRIMARY_INDEX, Component.FILTER, Component.STATS })
                 assertTrue("can not find backedup file:" + desc.filenameFor(c), new File(desc.filenameFor(c)).exists());

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7161aec4/test/unit/org/apache/cassandra/db/DirectoriesTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/DirectoriesTest.java b/test/unit/org/apache/cassandra/db/DirectoriesTest.java
index 21e183c..dce6f87 100644
--- a/test/unit/org/apache/cassandra/db/DirectoriesTest.java
+++ b/test/unit/org/apache/cassandra/db/DirectoriesTest.java
@@ -107,7 +107,7 @@ public class DirectoriesTest
         for (String cf : CFS)
         {
             Directories directories = Directories.create(KS, cf);
-            Assert.assertEquals(cfDir(cf), directories.getDirectoryForNewSSTables(0));
+            Assert.assertEquals(cfDir(cf), directories.getDirectoryForNewSSTables());
 
             Descriptor desc = new Descriptor(cfDir(cf), KS, cf, 1, false);
             File snapshotDir = new File(cfDir(cf),  File.separator + Directories.SNAPSHOT_SUBDIR + File.separator + "42");
@@ -180,7 +180,7 @@ public class DirectoriesTest
     {
         /* files not matching the pattern should just be ignored, with a log warning */
         Directories directories = Directories.create(KS, "bad");
-        File dir = directories.getDirectoryForNewSSTables(1);
+        File dir = directories.getDirectoryForNewSSTables();
         File f = File.createTempFile("bad", "file", dir.getParentFile());
         Directories.migrateSSTables();
         Assert.assertTrue(f.isFile());

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7161aec4/test/unit/org/apache/cassandra/db/ScrubTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/ScrubTest.java b/test/unit/org/apache/cassandra/db/ScrubTest.java
index 26f0e78..c26939a 100644
--- a/test/unit/org/apache/cassandra/db/ScrubTest.java
+++ b/test/unit/org/apache/cassandra/db/ScrubTest.java
@@ -55,7 +55,7 @@ public class ScrubTest extends SchemaLoader
         File rootDir = new File(root);
         assert rootDir.isDirectory();
 
-        File destDir = Directories.create(TABLE, cf).getDirectoryForNewSSTables(1);
+        File destDir = Directories.create(TABLE, cf).getDirectoryForNewSSTables();
 
         String corruptSSTableName = null;
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7161aec4/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java b/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java
index d0670a0..02b6855 100644
--- a/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java
@@ -28,7 +28,6 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 import java.util.concurrent.ExecutionException;
-import java.util.*;
 
 import org.junit.Test;
 import org.junit.runner.RunWith;
@@ -230,7 +229,7 @@ public class SSTableReaderTest extends SchemaLoader
         File rootDir = new File(root + File.separator + "hb" + File.separator + "Keyspace1");
         assert rootDir.isDirectory();
 
-        File destDir = Directories.create("Keyspace1", "Indexed1").getDirectoryForNewSSTables(0);
+        File destDir = Directories.create("Keyspace1", "Indexed1").getDirectoryForNewSSTables();
         assert destDir != null;
 
         FileUtils.createDirectory(destDir);

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7161aec4/test/unit/org/apache/cassandra/io/sstable/SSTableSimpleWriterTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableSimpleWriterTest.java b/test/unit/org/apache/cassandra/io/sstable/SSTableSimpleWriterTest.java
index 6efdc9b..ce569b9 100644
--- a/test/unit/org/apache/cassandra/io/sstable/SSTableSimpleWriterTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/SSTableSimpleWriterTest.java
@@ -43,7 +43,7 @@ public class SSTableSimpleWriterTest extends SchemaLoader
         String cfname = "StandardInteger1";
 
         Table t = Table.open(tablename); // make sure we create the directory
-        File dir = Directories.create(tablename, cfname).getDirectoryForNewSSTables(0);
+        File dir = Directories.create(tablename, cfname).getDirectoryForNewSSTables();
         assert dir.exists();
 
         IPartitioner partitioner = StorageService.getPartitioner();