You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@cassandra.apache.org by zz...@apache.org on 2017/01/10 02:19:41 UTC
cassandra git commit: Fixed checkAvailableDiskSpace to properly
recalculate expected disk usage of compaction task before reducing scope
Repository: cassandra
Updated Branches:
refs/heads/cassandra-2.2 6f360b6d2 -> dbe039f62
Fixed checkAvailableDiskSpace to properly recalculate expected disk usage of compaction task before reducing scope
patch by Jon Haddad; reviewed by Nate McCall for CASSANDRA-12979
Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/dbe039f6
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/dbe039f6
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/dbe039f6
Branch: refs/heads/cassandra-2.2
Commit: dbe039f628220e5141836ecb9aab05d4da60589a
Parents: 6f360b6
Author: Jon Haddad <jo...@jonhaddad.com>
Authored: Wed Jan 4 11:52:54 2017 -0800
Committer: Nate McCall <zz...@gmail.com>
Committed: Tue Jan 10 15:15:47 2017 +1300
----------------------------------------------------------------------
CHANGES.txt | 1 +
NEWS.txt | 6 +++
.../cassandra/db/compaction/CompactionTask.java | 40 +++++++++++++++-----
3 files changed, 38 insertions(+), 9 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/cassandra/blob/dbe039f6/CHANGES.txt
----------------------------------------------------------------------
diff --git a/CHANGES.txt b/CHANGES.txt
index b41313d..4b301ee 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -24,6 +24,7 @@
* Make Collections deserialization more robust (CASSANDRA-12618)
* Better handle invalid system roles table (CASSANDRA-12700)
* Split consistent range movement flag correction (CASSANDRA-12786)
+ * CompactionTask now correctly drops sstables out of compaction when not enough disk space is available (CASSANDRA-12979)
Merged from 2.1:
* cqlsh copy-from: sort user type fields in csv (CASSANDRA-12959)
* Don't skip sstables based on maxLocalDeletionTime (CASSANDRA-12765)
http://git-wip-us.apache.org/repos/asf/cassandra/blob/dbe039f6/NEWS.txt
----------------------------------------------------------------------
diff --git a/NEWS.txt b/NEWS.txt
index 37949a1..06e9eeb 100644
--- a/NEWS.txt
+++ b/NEWS.txt
@@ -16,6 +16,12 @@ using the provided 'sstableupgrade' tool.
2.2.9
=====
+Upgrading
+---------
+ - Compaction now correctly drops sstables out of CompactionTask when there
+ isn't enough disk space to perform the full compaction. This should reduce
+ pending compaction tasks on systems with little remaining disk space.
+
Deprecation
-----------
http://git-wip-us.apache.org/repos/asf/cassandra/blob/dbe039f6/src/java/org/apache/cassandra/db/compaction/CompactionTask.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/compaction/CompactionTask.java b/src/java/org/apache/cassandra/db/compaction/CompactionTask.java
index 20d3dc0..7489b3d 100644
--- a/src/java/org/apache/cassandra/db/compaction/CompactionTask.java
+++ b/src/java/org/apache/cassandra/db/compaction/CompactionTask.java
@@ -75,12 +75,14 @@ public class CompactionTask extends AbstractCompactionTask
return transaction.originals().size();
}
- public boolean reduceScopeForLimitedSpace()
+ public boolean reduceScopeForLimitedSpace(long expectedSize)
{
if (partialCompactionsAcceptable() && transaction.originals().size() > 1)
{
// Try again w/o the largest one.
- logger.warn("insufficient space to compact all requested files {}", StringUtils.join(transaction.originals(), ", "));
+ logger.warn("insufficient space to compact all requested files. {}MB required, {}",
+ (float) expectedSize / 1024 / 1024,
+ StringUtils.join(transaction.originals(), ", "));
// Note that we have removed files that are still marked as compacting.
// This is suboptimal but ok since the caller will unmark all the sstables at the end.
SSTableReader removedSSTable = cfs.getMaxSizeFile(transaction.originals());
@@ -113,9 +115,8 @@ public class CompactionTask extends AbstractCompactionTask
// note that we need to do a rough estimate early if we can fit the compaction on disk - this is pessimistic, but
// since we might remove sstables from the compaction in checkAvailableDiskSpace it needs to be done here
- long expectedWriteSize = cfs.getExpectedCompactedFileSize(transaction.originals(), compactionType);
- long earlySSTableEstimate = Math.max(1, expectedWriteSize / strategy.getMaxSSTableBytes());
- checkAvailableDiskSpace(earlySSTableEstimate, expectedWriteSize);
+
+ checkAvailableDiskSpace();
// sanity check: all sstables must belong to the same cfs
assert !Iterables.any(transaction.originals(), new Predicate<SSTableReader>()
@@ -271,12 +272,33 @@ public class CompactionTask extends AbstractCompactionTask
return minRepairedAt;
}
- protected void checkAvailableDiskSpace(long estimatedSSTables, long expectedWriteSize)
+ /*
+ Checks if we have enough disk space to execute the compaction. Drops the largest sstable out of the Task until
+ there's enough space (in theory) to handle the compaction. Does not take into account space that will be taken by
+ other compactions.
+ */
+ protected void checkAvailableDiskSpace()
{
- while (!cfs.directories.hasAvailableDiskSpace(estimatedSSTables, expectedWriteSize))
+ AbstractCompactionStrategy strategy = cfs.getCompactionStrategy();
+
+ while(true)
{
- if (!reduceScopeForLimitedSpace())
- throw new RuntimeException(String.format("Not enough space for compaction, estimated sstables = %d, expected write size = %d", estimatedSSTables, expectedWriteSize));
+ long expectedWriteSize = cfs.getExpectedCompactedFileSize(transaction.originals(), compactionType);
+ long estimatedSSTables = Math.max(1, expectedWriteSize / strategy.getMaxSSTableBytes());
+
+ if(cfs.directories.hasAvailableDiskSpace(estimatedSSTables, expectedWriteSize))
+ break;
+
+ if (!reduceScopeForLimitedSpace(expectedWriteSize))
+ {
+ // We end up here if we can't take any more sstables out of the compaction,
+ // which usually means we've run out of disk space.
+ String msg = String.format("Not enough space for compaction, estimated sstables = %d, expected write size = %d", estimatedSSTables, expectedWriteSize);
+ logger.warn(msg);
+ throw new RuntimeException(msg);
+ }
+ logger.warn("Not enough space for compaction, {}MB estimated. Reducing scope.",
+ (float) expectedWriteSize / 1024 / 1024);
}
}