Posted to commits@cassandra.apache.org by al...@apache.org on 2014/02/03 21:32:35 UTC
[1/4] git commit: Let scrub optionally skip broken counter partitions
Updated Branches:
refs/heads/trunk fc91071c0 -> 63f110b5e
Let scrub optionally skip broken counter partitions
patch by Tyler Hobbs; reviewed by Aleksey Yeschenko for CASSANDRA-5930
Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/728c4fa9
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/728c4fa9
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/728c4fa9
Branch: refs/heads/trunk
Commit: 728c4fa9bf2b2c11dbc61c8e5536b1542abc1ccb
Parents: b713721
Author: Aleksey Yeschenko <al...@apache.org>
Authored: Mon Feb 3 23:01:31 2014 +0300
Committer: Aleksey Yeschenko <al...@apache.org>
Committed: Mon Feb 3 23:01:31 2014 +0300
----------------------------------------------------------------------
CHANGES.txt | 4 +
NEWS.txt | 12 ++-
.../apache/cassandra/db/ColumnFamilyStore.java | 4 +-
.../db/compaction/CompactionManager.java | 12 +--
.../cassandra/db/compaction/Scrubber.java | 37 ++++++---
.../cassandra/service/StorageService.java | 4 +-
.../cassandra/service/StorageServiceMBean.java | 2 +-
.../org/apache/cassandra/tools/NodeCmd.java | 6 +-
.../org/apache/cassandra/tools/NodeProbe.java | 4 +-
.../cassandra/tools/StandaloneScrubber.java | 6 +-
.../apache/cassandra/tools/NodeToolHelp.yaml | 6 +-
.../unit/org/apache/cassandra/db/ScrubTest.java | 81 ++++++++++++++++++--
12 files changed, 140 insertions(+), 38 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/cassandra/blob/728c4fa9/CHANGES.txt
----------------------------------------------------------------------
diff --git a/CHANGES.txt b/CHANGES.txt
index 13b4c5b..a1a58a3 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -1,3 +1,7 @@
+2.0.6
+ * Let scrub optionally skip broken counter partitions (CASSANDRA-5930)
+
+
2.0.5
* Reduce garbage generated by bloom filter lookups (CASSANDRA-6609)
* Add ks.cf names to tombstone logging (CASSANDRA-6597)
http://git-wip-us.apache.org/repos/asf/cassandra/blob/728c4fa9/NEWS.txt
----------------------------------------------------------------------
diff --git a/NEWS.txt b/NEWS.txt
index 92446c8..b21fbaa 100644
--- a/NEWS.txt
+++ b/NEWS.txt
@@ -14,11 +14,21 @@ restore snapshots created with the previous major version using the
using the provided 'sstableupgrade' tool.
+2.0.6
+=====
+
+New features
+------------
+ - Scrub can now optionally skip corrupt counter partitions. Please note
+ that this will lead to the loss of all the counter updates in the skipped
+ partition. See the --skip-corrupted option.
+
+
2.0.5
=====
New features
---------
+------------
- Batchlog replay can be, and is throttled by default now.
See batchlog_replay_throttle_in_kb setting in cassandra.yaml.
http://git-wip-us.apache.org/repos/asf/cassandra/blob/728c4fa9/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/ColumnFamilyStore.java b/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
index 8750026..38d87db 100644
--- a/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
+++ b/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
@@ -1115,12 +1115,12 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean
CompactionManager.instance.performCleanup(ColumnFamilyStore.this, renewer);
}
- public void scrub(boolean disableSnapshot) throws ExecutionException, InterruptedException
+ public void scrub(boolean disableSnapshot, boolean skipCorrupted) throws ExecutionException, InterruptedException
{
// skip snapshot creation during scrub, SEE JIRA 5891
if(!disableSnapshot)
snapshotWithoutFlush("pre-scrub-" + System.currentTimeMillis());
- CompactionManager.instance.performScrub(ColumnFamilyStore.this);
+ CompactionManager.instance.performScrub(ColumnFamilyStore.this, skipCorrupted);
}
public void sstablesRewrite(boolean excludeCurrentVersion) throws ExecutionException, InterruptedException
http://git-wip-us.apache.org/repos/asf/cassandra/blob/728c4fa9/src/java/org/apache/cassandra/db/compaction/CompactionManager.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/compaction/CompactionManager.java b/src/java/org/apache/cassandra/db/compaction/CompactionManager.java
index 168ee02..48900c8 100644
--- a/src/java/org/apache/cassandra/db/compaction/CompactionManager.java
+++ b/src/java/org/apache/cassandra/db/compaction/CompactionManager.java
@@ -227,13 +227,13 @@ public class CompactionManager implements CompactionManagerMBean
executor.submit(runnable).get();
}
- public void performScrub(ColumnFamilyStore cfStore) throws InterruptedException, ExecutionException
+ public void performScrub(ColumnFamilyStore cfStore, final boolean skipCorrupted) throws InterruptedException, ExecutionException
{
performAllSSTableOperation(cfStore, new AllSSTablesOperation()
{
public void perform(ColumnFamilyStore store, Iterable<SSTableReader> sstables) throws IOException
{
- doScrub(store, sstables);
+ doScrub(store, sstables, skipCorrupted);
}
});
}
@@ -425,16 +425,16 @@ public class CompactionManager implements CompactionManagerMBean
*
* @throws IOException
*/
- private void doScrub(ColumnFamilyStore cfs, Iterable<SSTableReader> sstables) throws IOException
+ private void doScrub(ColumnFamilyStore cfs, Iterable<SSTableReader> sstables, boolean skipCorrupted) throws IOException
{
assert !cfs.isIndex();
for (final SSTableReader sstable : sstables)
- scrubOne(cfs, sstable);
+ scrubOne(cfs, sstable, skipCorrupted);
}
- private void scrubOne(ColumnFamilyStore cfs, SSTableReader sstable) throws IOException
+ private void scrubOne(ColumnFamilyStore cfs, SSTableReader sstable, boolean skipCorrupted) throws IOException
{
- Scrubber scrubber = new Scrubber(cfs, sstable);
+ Scrubber scrubber = new Scrubber(cfs, sstable, skipCorrupted);
CompactionInfo.Holder scrubInfo = scrubber.getScrubInfo();
metrics.beginCompaction(scrubInfo);
http://git-wip-us.apache.org/repos/asf/cassandra/blob/728c4fa9/src/java/org/apache/cassandra/db/compaction/Scrubber.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/compaction/Scrubber.java b/src/java/org/apache/cassandra/db/compaction/Scrubber.java
index 708e929..820761c 100644
--- a/src/java/org/apache/cassandra/db/compaction/Scrubber.java
+++ b/src/java/org/apache/cassandra/db/compaction/Scrubber.java
@@ -35,6 +35,7 @@ public class Scrubber implements Closeable
public final ColumnFamilyStore cfs;
public final SSTableReader sstable;
public final File destination;
+ public final boolean skipCorrupted;
private final CompactionController controller;
private final boolean isCommutative;
@@ -63,16 +64,17 @@ public class Scrubber implements Closeable
};
private final SortedSet<Row> outOfOrderRows = new TreeSet<>(rowComparator);
- public Scrubber(ColumnFamilyStore cfs, SSTableReader sstable) throws IOException
+ public Scrubber(ColumnFamilyStore cfs, SSTableReader sstable, boolean skipCorrupted) throws IOException
{
- this(cfs, sstable, new OutputHandler.LogOutput(), false);
+ this(cfs, sstable, skipCorrupted, new OutputHandler.LogOutput(), false);
}
- public Scrubber(ColumnFamilyStore cfs, SSTableReader sstable, OutputHandler outputHandler, boolean isOffline) throws IOException
+ public Scrubber(ColumnFamilyStore cfs, SSTableReader sstable, boolean skipCorrupted, OutputHandler outputHandler, boolean isOffline) throws IOException
{
this.cfs = cfs;
this.sstable = sstable;
this.outputHandler = outputHandler;
+ this.skipCorrupted = skipCorrupted;
// Calculate the expected compacted filesize
this.destination = cfs.directories.getDirectoryForNewSSTables();
@@ -166,7 +168,9 @@ public class Scrubber implements Closeable
if (!sstable.descriptor.version.hasRowSizeAndColumnCount)
{
dataSize = dataSizeFromIndex;
- outputHandler.debug(String.format("row %s is %s bytes", ByteBufferUtil.bytesToHex(key.key), dataSize));
+ // avoid an NPE if key is null
+ String keyName = key == null ? "(unreadable key)" : ByteBufferUtil.bytesToHex(key.key);
+ outputHandler.debug(String.format("row %s is %s bytes", keyName, dataSize));
}
else
{
@@ -203,7 +207,7 @@ public class Scrubber implements Closeable
catch (Throwable th)
{
throwIfFatal(th);
- outputHandler.warn("Non-fatal error reading row (stacktrace follows)", th);
+ outputHandler.warn("Error reading row (stacktrace follows):", th);
writer.resetAndTruncate();
if (currentIndexKey != null
@@ -231,9 +235,7 @@ public class Scrubber implements Closeable
catch (Throwable th2)
{
throwIfFatal(th2);
- // Skipping rows is dangerous for counters (see CASSANDRA-2759)
- if (isCommutative)
- throw new IOError(th2);
+ throwIfCommutative(key, th2);
outputHandler.warn("Retry failed too. Skipping to next row (retry's stacktrace follows)", th2);
writer.resetAndTruncate();
@@ -243,11 +245,9 @@ public class Scrubber implements Closeable
}
else
{
- // Skipping rows is dangerous for counters (see CASSANDRA-2759)
- if (isCommutative)
- throw new IOError(th);
+ throwIfCommutative(key, th);
- outputHandler.warn("Row at " + dataStart + " is unreadable; skipping to next");
+ outputHandler.warn("Row starting at position " + dataStart + " is unreadable; skipping to next");
if (currentIndexKey != null)
dataFile.seek(nextRowPositionFromIndex);
badRows++;
@@ -324,6 +324,19 @@ public class Scrubber implements Closeable
throw (Error) th;
}
+ private void throwIfCommutative(DecoratedKey key, Throwable th)
+ {
+ if (isCommutative && !skipCorrupted)
+ {
+ outputHandler.warn(String.format("An error occurred while scrubbing the row with key '%s'. Skipping corrupt " +
+ "rows in counter tables will result in undercounts for the affected " +
+ "counters (see CASSANDRA-2759 for more details), so by default the scrub will " +
+ "stop at this point. If you would like to skip the row anyway and continue " +
+ "scrubbing, re-run the scrub with the --skip-corrupted option.", key));
+ throw new IOError(th);
+ }
+ }
+
public void close()
{
FileUtils.closeQuietly(dataFile);
http://git-wip-us.apache.org/repos/asf/cassandra/blob/728c4fa9/src/java/org/apache/cassandra/service/StorageService.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/service/StorageService.java b/src/java/org/apache/cassandra/service/StorageService.java
index 700966f..f46ae66 100644
--- a/src/java/org/apache/cassandra/service/StorageService.java
+++ b/src/java/org/apache/cassandra/service/StorageService.java
@@ -2155,10 +2155,10 @@ public class StorageService extends NotificationBroadcasterSupport implements IE
}
}
- public void scrub(boolean disableSnapshot, String keyspaceName, String... columnFamilies) throws IOException, ExecutionException, InterruptedException
+ public void scrub(boolean disableSnapshot, boolean skipCorrupted, String keyspaceName, String... columnFamilies) throws IOException, ExecutionException, InterruptedException
{
for (ColumnFamilyStore cfStore : getValidColumnFamilies(false, false, keyspaceName, columnFamilies))
- cfStore.scrub(disableSnapshot);
+ cfStore.scrub(disableSnapshot, skipCorrupted);
}
public void upgradeSSTables(String keyspaceName, boolean excludeCurrentVersion, String... columnFamilies) throws IOException, ExecutionException, InterruptedException
http://git-wip-us.apache.org/repos/asf/cassandra/blob/728c4fa9/src/java/org/apache/cassandra/service/StorageServiceMBean.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/service/StorageServiceMBean.java b/src/java/org/apache/cassandra/service/StorageServiceMBean.java
index df85901..d31e8b9 100644
--- a/src/java/org/apache/cassandra/service/StorageServiceMBean.java
+++ b/src/java/org/apache/cassandra/service/StorageServiceMBean.java
@@ -231,7 +231,7 @@ public interface StorageServiceMBean extends NotificationEmitter
*
* Scrubbed CFs will be snapshotted first, if disableSnapshot is false
*/
- public void scrub(boolean disableSnapshot, String keyspaceName, String... columnFamilies) throws IOException, ExecutionException, InterruptedException;
+ public void scrub(boolean disableSnapshot, boolean skipCorrupted, String keyspaceName, String... columnFamilies) throws IOException, ExecutionException, InterruptedException;
/**
* Rewrite all sstables to the latest version.
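A hedged illustration of the changed MBean signature (not part of this patch; the JMX host, port, and keyspace/table names below are placeholders, and the object name follows the usual org.apache.cassandra.db:type=StorageService convention):

    import javax.management.JMX;
    import javax.management.MBeanServerConnection;
    import javax.management.ObjectName;
    import javax.management.remote.JMXConnector;
    import javax.management.remote.JMXConnectorFactory;
    import javax.management.remote.JMXServiceURL;
    import org.apache.cassandra.service.StorageServiceMBean;

    public class SkipCorruptedScrubExample
    {
        public static void main(String[] args) throws Exception
        {
            // connect to the local node's JMX agent (default port 7199)
            JMXServiceURL url = new JMXServiceURL("service:jmx:rmi:///jndi/rmi://127.0.0.1:7199/jmxrmi");
            try (JMXConnector connector = JMXConnectorFactory.connect(url))
            {
                MBeanServerConnection mbs = connector.getMBeanServerConnection();
                StorageServiceMBean ss = JMX.newMBeanProxy(mbs,
                        new ObjectName("org.apache.cassandra.db:type=StorageService"),
                        StorageServiceMBean.class);
                // disableSnapshot = false, skipCorrupted = true
                ss.scrub(false, true, "Keyspace1", "Counter1");
            }
        }
    }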
http://git-wip-us.apache.org/repos/asf/cassandra/blob/728c4fa9/src/java/org/apache/cassandra/tools/NodeCmd.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/tools/NodeCmd.java b/src/java/org/apache/cassandra/tools/NodeCmd.java
index 0cc7320..ab05d16 100644
--- a/src/java/org/apache/cassandra/tools/NodeCmd.java
+++ b/src/java/org/apache/cassandra/tools/NodeCmd.java
@@ -74,6 +74,8 @@ public class NodeCmd
private static final Pair<String, String> NO_SNAPSHOT = Pair.create("ns", "no-snapshot");
private static final Pair<String, String> CFSTATS_IGNORE_OPT = Pair.create("i", "ignore");
private static final Pair<String, String> RESOLVE_IP = Pair.create("r", "resolve-ip");
+ private static final Pair<String, String> SCRUB_SKIP_CORRUPTED_OPT = Pair.create("s", "skip-corrupted");
+
private static final String DEFAULT_HOST = "127.0.0.1";
private static final int DEFAULT_PORT = 7199;
@@ -101,6 +103,7 @@ public class NodeCmd
options.addOption(NO_SNAPSHOT, false, "disables snapshot creation for scrub");
options.addOption(CFSTATS_IGNORE_OPT, false, "ignore the supplied list of keyspace.columnfamiles in statistics");
options.addOption(RESOLVE_IP, false, "show node domain names instead of IPs");
+ options.addOption(SCRUB_SKIP_CORRUPTED_OPT, false, "when scrubbing counter tables, skip corrupted rows");
}
public NodeCmd(NodeProbe probe)
@@ -1562,7 +1565,8 @@ public class NodeCmd
break;
case SCRUB :
boolean disableSnapshot = cmd.hasOption(NO_SNAPSHOT.left);
- try { probe.scrub(disableSnapshot, keyspace, columnFamilies); }
+ boolean skipCorrupted = cmd.hasOption(SCRUB_SKIP_CORRUPTED_OPT.left);
+ try { probe.scrub(disableSnapshot, skipCorrupted, keyspace, columnFamilies); }
catch (ExecutionException ee) { err(ee, "Error occurred while scrubbing keyspace " + keyspace); }
break;
case UPGRADESSTABLES :
http://git-wip-us.apache.org/repos/asf/cassandra/blob/728c4fa9/src/java/org/apache/cassandra/tools/NodeProbe.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/tools/NodeProbe.java b/src/java/org/apache/cassandra/tools/NodeProbe.java
index 1bb9d4e..0fbb12a 100644
--- a/src/java/org/apache/cassandra/tools/NodeProbe.java
+++ b/src/java/org/apache/cassandra/tools/NodeProbe.java
@@ -190,9 +190,9 @@ public class NodeProbe
ssProxy.forceKeyspaceCleanup(keyspaceName, columnFamilies);
}
- public void scrub(boolean disableSnapshot, String keyspaceName, String... columnFamilies) throws IOException, ExecutionException, InterruptedException
+ public void scrub(boolean disableSnapshot, boolean skipCorrupted, String keyspaceName, String... columnFamilies) throws IOException, ExecutionException, InterruptedException
{
- ssProxy.scrub(disableSnapshot, keyspaceName, columnFamilies);
+ ssProxy.scrub(disableSnapshot, skipCorrupted, keyspaceName, columnFamilies);
}
public void upgradeSSTables(String keyspaceName, boolean excludeCurrentVersion, String... columnFamilies) throws IOException, ExecutionException, InterruptedException
http://git-wip-us.apache.org/repos/asf/cassandra/blob/728c4fa9/src/java/org/apache/cassandra/tools/StandaloneScrubber.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/tools/StandaloneScrubber.java b/src/java/org/apache/cassandra/tools/StandaloneScrubber.java
index 00e0a5a..6556c3a 100644
--- a/src/java/org/apache/cassandra/tools/StandaloneScrubber.java
+++ b/src/java/org/apache/cassandra/tools/StandaloneScrubber.java
@@ -49,6 +49,7 @@ public class StandaloneScrubber
private static final String DEBUG_OPTION = "debug";
private static final String HELP_OPTION = "help";
private static final String MANIFEST_CHECK_OPTION = "manifest-check";
+ private static final String SKIP_CORRUPTED_OPTION = "skip-corrupted";
public static void main(String args[])
{
@@ -119,7 +120,7 @@ public class StandaloneScrubber
{
try
{
- Scrubber scrubber = new Scrubber(cfs, sstable, handler, true);
+ Scrubber scrubber = new Scrubber(cfs, sstable, options.skipCorrupted, handler, true);
try
{
scrubber.scrub();
@@ -184,6 +185,7 @@ public class StandaloneScrubber
public boolean debug;
public boolean verbose;
public boolean manifestCheckOnly;
+ public boolean skipCorrupted;
private Options(String keyspaceName, String cfName)
{
@@ -222,6 +224,7 @@ public class StandaloneScrubber
opts.debug = cmd.hasOption(DEBUG_OPTION);
opts.verbose = cmd.hasOption(VERBOSE_OPTION);
opts.manifestCheckOnly = cmd.hasOption(MANIFEST_CHECK_OPTION);
+ opts.skipCorrupted = cmd.hasOption(SKIP_CORRUPTED_OPTION);
return opts;
}
@@ -246,6 +249,7 @@ public class StandaloneScrubber
options.addOption("v", VERBOSE_OPTION, "verbose output");
options.addOption("h", HELP_OPTION, "display this help message");
options.addOption("m", MANIFEST_CHECK_OPTION, "only check and repair the leveled manifest, without actually scrubbing the sstables");
+ options.addOption("s", SKIP_CORRUPTED_OPTION, "skip corrupt rows in counter tables");
return options;
}
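For the offline path, a hedged usage sketch (assuming the usual sstablescrub wrapper script that launches StandaloneScrubber; the keyspace and column family names are the ones used by ScrubTest and are illustrative only):

    sstablescrub --skip-corrupted Keyspace1 Counter1

Without -s/--skip-corrupted the standalone scrubber keeps the old behavior and aborts on the first corrupt row it finds in a counter table.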
http://git-wip-us.apache.org/repos/asf/cassandra/blob/728c4fa9/src/resources/org/apache/cassandra/tools/NodeToolHelp.yaml
----------------------------------------------------------------------
diff --git a/src/resources/org/apache/cassandra/tools/NodeToolHelp.yaml b/src/resources/org/apache/cassandra/tools/NodeToolHelp.yaml
index 42fda0d..b28e300 100644
--- a/src/resources/org/apache/cassandra/tools/NodeToolHelp.yaml
+++ b/src/resources/org/apache/cassandra/tools/NodeToolHelp.yaml
@@ -163,9 +163,11 @@ commands:
- name: compact [keyspace] [cfnames]
help: |
Force a (major) compaction on one or more column families
- - name: scrub [keyspace] [cfnames]
+ - name: scrub [keyspace] [cfnames] [-s|--skip-corrupted]
help: |
- Scrub (rebuild sstables for) one or more column families
+ Scrub (rebuild sstables for) one or more column families.
+ Use -s/--skip-corrupted to skip corrupted rows even when scrubbing
+ tables that use counters.
- name: upgradesstables [-a|--include-all-sstables] [keyspace] [cfnames]
help: |
Rewrite sstables (for the requested column families) that are not on the current version (thus upgrading them to said current version).
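For the online path, a hedged usage sketch following the help entry above (the keyspace and column family names are the ones used by ScrubTest and are illustrative only):

    nodetool scrub Keyspace1 Counter1 --skip-corrupted
    nodetool scrub Keyspace1 Counter1 -ns -s

The second form also disables the pre-scrub snapshot via -ns/--no-snapshot. Without -s/--skip-corrupted, scrub still stops with an error as soon as it hits a corrupt row in a counter table, since skipping it would undercount the affected counters (see CASSANDRA-2759).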
http://git-wip-us.apache.org/repos/asf/cassandra/blob/728c4fa9/test/unit/org/apache/cassandra/db/ScrubTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/ScrubTest.java b/test/unit/org/apache/cassandra/db/ScrubTest.java
index a83d3c6..08dd435 100644
--- a/test/unit/org/apache/cassandra/db/ScrubTest.java
+++ b/test/unit/org/apache/cassandra/db/ScrubTest.java
@@ -20,13 +20,15 @@ package org.apache.cassandra.db;
*
*/
-import java.io.File;
-import java.io.IOException;
+import java.io.*;
+import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ExecutionException;
+import org.apache.cassandra.db.compaction.OperationType;
+import org.apache.commons.lang3.StringUtils;
import org.junit.Test;
import org.junit.runner.RunWith;
@@ -52,6 +54,7 @@ public class ScrubTest extends SchemaLoader
public String KEYSPACE = "Keyspace1";
public String CF = "Standard1";
public String CF3 = "Standard2";
+ public String COUNTER_CF = "Counter1";
@Test
public void testScrubOneRow() throws IOException, ExecutionException, InterruptedException, ConfigurationException
@@ -68,7 +71,7 @@ public class ScrubTest extends SchemaLoader
rows = cfs.getRangeSlice(Util.range("", ""), null, new IdentityQueryFilter(), 1000);
assertEquals(1, rows.size());
- CompactionManager.instance.performScrub(cfs);
+ CompactionManager.instance.performScrub(cfs, false);
// check data is still there
rows = cfs.getRangeSlice(Util.range("", ""), null, new IdentityQueryFilter(), 1000);
@@ -76,6 +79,53 @@ public class ScrubTest extends SchemaLoader
}
@Test
+ public void testScrubCorruptedCounterRow() throws IOException, InterruptedException, ExecutionException
+ {
+ CompactionManager.instance.disableAutoCompaction();
+ Keyspace keyspace = Keyspace.open(KEYSPACE);
+ ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(COUNTER_CF);
+ cfs.clearUnsafe();
+
+ fillCounterCF(cfs, 2);
+
+ List<Row> rows = cfs.getRangeSlice(Util.range("", ""), null, new IdentityQueryFilter(), 1000);
+ assertEquals(2, rows.size());
+
+ SSTableReader sstable = cfs.getSSTables().iterator().next();
+
+ // overwrite one row with garbage
+ long row0Start = sstable.getPosition(RowPosition.forKey(ByteBufferUtil.bytes("0"), sstable.partitioner), SSTableReader.Operator.EQ).position;
+ long row1Start = sstable.getPosition(RowPosition.forKey(ByteBufferUtil.bytes("1"), sstable.partitioner), SSTableReader.Operator.EQ).position;
+ long startPosition = row0Start < row1Start ? row0Start : row1Start;
+ long endPosition = row0Start < row1Start ? row1Start : row0Start;
+
+ RandomAccessFile file = new RandomAccessFile(sstable.getFilename(), "rw");
+ file.seek(startPosition);
+ file.writeBytes(StringUtils.repeat('z', (int) (endPosition - startPosition)));
+ file.close();
+
+ // with skipCorrupted == false, the scrub is expected to fail
+ Scrubber scrubber = new Scrubber(cfs, sstable, false);
+ try
+ {
+ scrubber.scrub();
+ fail("Expected a CorruptSSTableException to be thrown");
+ }
+ catch (IOError err) {}
+
+ // with skipCorrupted == true, the corrupt row will be skipped
+ scrubber = new Scrubber(cfs, sstable, true);
+ scrubber.scrub();
+ scrubber.close();
+ cfs.replaceCompactedSSTables(Collections.singletonList(sstable), Collections.singletonList(scrubber.getNewSSTable()), OperationType.SCRUB);
+ assertEquals(1, cfs.getSSTables().size());
+
+ // verify that we can read all of the rows, and there is now one less row
+ rows = cfs.getRangeSlice(Util.range("", ""), null, new IdentityQueryFilter(), 1000);
+ assertEquals(1, rows.size());
+ }
+
+ @Test
public void testScrubDeletedRow() throws IOException, ExecutionException, InterruptedException, ConfigurationException
{
CompactionManager.instance.disableAutoCompaction();
@@ -89,7 +139,7 @@ public class ScrubTest extends SchemaLoader
rm.applyUnsafe();
cfs.forceBlockingFlush();
- CompactionManager.instance.performScrub(cfs);
+ CompactionManager.instance.performScrub(cfs, false);
assert cfs.getSSTables().isEmpty();
}
@@ -108,7 +158,7 @@ public class ScrubTest extends SchemaLoader
rows = cfs.getRangeSlice(Util.range("", ""), null, new IdentityQueryFilter(), 1000);
assertEquals(10, rows.size());
- CompactionManager.instance.performScrub(cfs);
+ CompactionManager.instance.performScrub(cfs, false);
// check data is still there
rows = cfs.getRangeSlice(Util.range("", ""), null, new IdentityQueryFilter(), 1000);
@@ -145,7 +195,6 @@ public class ScrubTest extends SchemaLoader
writer.closeAndOpenReader();
*/
-
String root = System.getProperty("corrupt-sstable-root");
assert root != null;
File rootDir = new File(root);
@@ -171,7 +220,7 @@ public class ScrubTest extends SchemaLoader
components.add(Component.TOC);
SSTableReader sstable = SSTableReader.openNoValidation(desc, components, metadata);
- Scrubber scrubber = new Scrubber(cfs, sstable);
+ Scrubber scrubber = new Scrubber(cfs, sstable, false);
scrubber.scrub();
cfs.loadNewSSTables();
@@ -207,4 +256,20 @@ public class ScrubTest extends SchemaLoader
cfs.forceBlockingFlush();
}
-}
+
+ protected void fillCounterCF(ColumnFamilyStore cfs, int rowsPerSSTable) throws ExecutionException, InterruptedException, IOException
+ {
+ for (int i = 0; i < rowsPerSSTable; i++)
+ {
+ String key = String.valueOf(i);
+ ColumnFamily cf = TreeMapBackedSortedColumns.factory.create(KEYSPACE, COUNTER_CF);
+ RowMutation rm = new RowMutation(KEYSPACE, ByteBufferUtil.bytes(key), cf);
+ rm.addCounter(COUNTER_CF, ByteBufferUtil.bytes("Column1"), 100);
+ CounterMutation cm = new CounterMutation(rm, ConsistencyLevel.ONE);
+ cm.apply();
+ }
+
+ cfs.forceBlockingFlush();
+ }
+
+}
\ No newline at end of file
[4/4] git commit: Merge branch 'cassandra-2.0' into trunk
Posted by al...@apache.org.
Merge branch 'cassandra-2.0' into trunk
Conflicts:
CHANGES.txt
src/java/org/apache/cassandra/tools/NodeCmd.java
src/resources/org/apache/cassandra/tools/NodeToolHelp.yaml
Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/63f110b5
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/63f110b5
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/63f110b5
Branch: refs/heads/trunk
Commit: 63f110b5e058217c1d7e3d178b367b918ca2f856
Parents: fc91071 728c4fa
Author: Aleksey Yeschenko <al...@apache.org>
Authored: Mon Feb 3 23:32:23 2014 +0300
Committer: Aleksey Yeschenko <al...@apache.org>
Committed: Mon Feb 3 23:32:23 2014 +0300
----------------------------------------------------------------------
CHANGES.txt | 4 +
NEWS.txt | 12 ++-
.../apache/cassandra/db/ColumnFamilyStore.java | 4 +-
.../db/compaction/CompactionManager.java | 12 +--
.../cassandra/db/compaction/Scrubber.java | 37 ++++++---
.../cassandra/service/StorageService.java | 4 +-
.../cassandra/service/StorageServiceMBean.java | 2 +-
.../org/apache/cassandra/tools/NodeProbe.java | 4 +-
.../org/apache/cassandra/tools/NodeTool.java | 11 ++-
.../cassandra/tools/StandaloneScrubber.java | 6 +-
.../unit/org/apache/cassandra/db/ScrubTest.java | 81 ++++++++++++++++++--
11 files changed, 141 insertions(+), 36 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/cassandra/blob/63f110b5/CHANGES.txt
----------------------------------------------------------------------
diff --cc CHANGES.txt
index 6ca163a,a1a58a3..f9da65c
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@@ -1,32 -1,7 +1,36 @@@
+2.1
+ * add listsnapshots command to nodetool (CASSANDRA-5742)
+ * Introduce AtomicBTreeColumns (CASSANDRA-6271)
+ * Multithreaded commitlog (CASSANDRA-3578)
+ * allocate fixed index summary memory pool and resample cold index summaries
+ to use less memory (CASSANDRA-5519)
+ * Removed multithreaded compaction (CASSANDRA-6142)
+ * Parallelize fetching rows for low-cardinality indexes (CASSANDRA-1337)
+ * change logging from log4j to logback (CASSANDRA-5883)
+ * switch to LZ4 compression for internode communication (CASSANDRA-5887)
+ * Stop using Thrift-generated Index* classes internally (CASSANDRA-5971)
+ * Remove 1.2 network compatibility code (CASSANDRA-5960)
+ * Remove leveled json manifest migration code (CASSANDRA-5996)
+ * Remove CFDefinition (CASSANDRA-6253)
+ * Use AtomicIntegerFieldUpdater in RefCountedMemory (CASSANDRA-6278)
+ * User-defined types for CQL3 (CASSANDRA-5590)
+ * Use of o.a.c.metrics in nodetool (CASSANDRA-5871, 6406)
+ * Batch read from OTC's queue and cleanup (CASSANDRA-1632)
+ * Secondary index support for collections (CASSANDRA-4511, 6383)
+ * SSTable metadata(Stats.db) format change (CASSANDRA-6356)
+ * Push composites support in the storage engine
+ (CASSANDRA-5417, CASSANDRA-6520)
+ * Add snapshot space used to cfstats (CASSANDRA-6231)
+ * Add cardinality estimator for key count estimation (CASSANDRA-5906)
+ * CF id is changed to be non-deterministic. Data dir/key cache are created
+ uniquely for CF id (CASSANDRA-5202)
+ * New counters implementation (CASSANDRA-6504)
+
+
+ 2.0.6
+ * Let scrub optionally skip broken counter partitions (CASSANDRA-5930)
+
+
2.0.5
* Reduce garbage generated by bloom filter lookups (CASSANDRA-6609)
* Add ks.cf names to tombstone logging (CASSANDRA-6597)
http://git-wip-us.apache.org/repos/asf/cassandra/blob/63f110b5/NEWS.txt
----------------------------------------------------------------------
diff --cc NEWS.txt
index 72b898e,b21fbaa..185f60c
--- a/NEWS.txt
+++ b/NEWS.txt
@@@ -13,37 -13,17 +13,47 @@@ restore snapshots created with the prev
'sstableloader' tool. You can upgrade the file format of your snapshots
using the provided 'sstableupgrade' tool.
+2.1
+===
+
+New features
+------------
+ - SSTable data directory name is slightly changed. Each directory will
+ have hex string appended after CF name, e.g.
+ ks/cf-5be396077b811e3a3ab9dc4b9ac088d/
+ This hex string part represents unique ColumnFamily ID.
+ Note that existing directories are used as is, so only newly created
+ directories after upgrade have new directory name format.
+ - Saved key cache files also have ColumnFamily ID in their file name.
+
+Upgrading
+---------
+ - Rolling upgrades from anything pre-2.0.5 is not supported.
+ - For leveled compaction users, 2.0 must be atleast started before
+ upgrading to 2.1 due to the fact that the old JSON leveled
+ manifest is migrated into the sstable metadata files on startup
+ in 2.0 and this code is gone from 2.1.
+ - For size-tiered compaction users, Cassandra now defaults to ignoring
+ the coldest 5% of sstables. This can be customized with the
+ cold_reads_to_omit compaction option; 0.0 omits nothing (the old
+ behavior) and 1.0 omits everything.
+ - Multithreaded compaction has been removed.
+ - Counters implementation has been changed, replaced by a safer one with
+ less caveats, but different performance characteristics. You might have
+ to change your data model to accomodate the new implementation.
+ (See https://issues.apache.org/jira/browse/CASSANDRA-6504 and the dev
+ blog post at http://www.datastax.com/dev/blog/<PLACEHOLDER> for details).
+ 2.0.6
+ =====
+
+ New features
+ ------------
+ - Scrub can now optionally skip corrupt counter partitions. Please note
+ that this will lead to the loss of all the counter updates in the skipped
+ partition. See the --skip-corrupted option.
+
+
2.0.5
=====
http://git-wip-us.apache.org/repos/asf/cassandra/blob/63f110b5/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/db/ColumnFamilyStore.java
index a46cde5,38d87db..14b3571
--- a/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
+++ b/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
@@@ -1298,12 -1110,12 +1298,12 @@@ public class ColumnFamilyStore implemen
return maxFile;
}
- public void forceCleanup(CounterId.OneShotRenewer renewer) throws ExecutionException, InterruptedException
+ public void forceCleanup() throws ExecutionException, InterruptedException
{
- CompactionManager.instance.performCleanup(ColumnFamilyStore.this, renewer);
+ CompactionManager.instance.performCleanup(ColumnFamilyStore.this);
}
- public void scrub(boolean disableSnapshot) throws ExecutionException, InterruptedException
+ public void scrub(boolean disableSnapshot, boolean skipCorrupted) throws ExecutionException, InterruptedException
{
// skip snapshot creation during scrub, SEE JIRA 5891
if(!disableSnapshot)
http://git-wip-us.apache.org/repos/asf/cassandra/blob/63f110b5/src/java/org/apache/cassandra/db/compaction/CompactionManager.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/cassandra/blob/63f110b5/src/java/org/apache/cassandra/db/compaction/Scrubber.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/cassandra/blob/63f110b5/src/java/org/apache/cassandra/service/StorageService.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/cassandra/blob/63f110b5/src/java/org/apache/cassandra/service/StorageServiceMBean.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/cassandra/blob/63f110b5/src/java/org/apache/cassandra/tools/NodeProbe.java
----------------------------------------------------------------------
[3/4] Merge branch 'cassandra-2.0' into trunk
Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/cassandra/blob/63f110b5/src/java/org/apache/cassandra/tools/NodeTool.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/tools/NodeTool.java
index 7c49e23,0000000..cc94b35
mode 100644,000000..100644
--- a/src/java/org/apache/cassandra/tools/NodeTool.java
+++ b/src/java/org/apache/cassandra/tools/NodeTool.java
@@@ -1,2216 -1,0 +1,2223 @@@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.tools;
+
+import java.io.File;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.lang.management.MemoryUsage;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.text.DecimalFormat;
+import java.text.SimpleDateFormat;
+import java.util.*;
+import java.util.concurrent.ExecutionException;
+import javax.management.openmbean.TabularData;
+
+import com.google.common.base.Joiner;
+import com.google.common.base.Throwables;
+import com.google.common.collect.LinkedHashMultimap;
+import com.google.common.collect.Maps;
+
+import com.yammer.metrics.reporting.JmxReporter;
+import io.airlift.command.*;
+import org.apache.cassandra.concurrent.JMXEnabledThreadPoolExecutorMBean;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.ColumnFamilyStoreMBean;
+import org.apache.cassandra.db.Keyspace;
+import org.apache.cassandra.db.compaction.CompactionManagerMBean;
+import org.apache.cassandra.db.compaction.OperationType;
+import org.apache.cassandra.io.util.FileUtils;
+import org.apache.cassandra.locator.EndpointSnitchInfoMBean;
+import org.apache.cassandra.net.MessagingServiceMBean;
+import org.apache.cassandra.service.CacheServiceMBean;
+import org.apache.cassandra.streaming.ProgressInfo;
+import org.apache.cassandra.streaming.SessionInfo;
+import org.apache.cassandra.streaming.StreamState;
+import org.apache.cassandra.utils.EstimatedHistogram;
+import org.apache.cassandra.utils.FBUtilities;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static com.google.common.base.Preconditions.checkState;
+import static com.google.common.base.Throwables.getStackTraceAsString;
+import static com.google.common.collect.Iterables.toArray;
+import static com.google.common.collect.Lists.newArrayList;
+import static java.lang.Integer.parseInt;
+import static java.lang.String.format;
+import static org.apache.commons.lang3.ArrayUtils.EMPTY_STRING_ARRAY;
+import static org.apache.commons.lang3.StringUtils.EMPTY;
+import static org.apache.commons.lang3.StringUtils.join;
+
+public class NodeTool
+{
+ private static final String HISTORYFILE = "nodetool.history";
+
+ public static void main(String... args)
+ {
+ List<Class<? extends Runnable>> commands = newArrayList(
+ Help.class,
+ Info.class,
+ Ring.class,
+ NetStats.class,
+ CfStats.class,
+ CfHistograms.class,
+ Cleanup.class,
+ ClearSnapshot.class,
+ Compact.class,
+ Scrub.class,
+ Flush.class,
+ UpgradeSSTable.class,
+ DisableAutoCompaction.class,
+ EnableAutoCompaction.class,
+ CompactionStats.class,
+ CompactionHistory.class,
+ Decommission.class,
+ DescribeCluster.class,
+ DisableBinary.class,
+ EnableBinary.class,
+ EnableGossip.class,
+ DisableGossip.class,
+ EnableHandoff.class,
+ EnableThrift.class,
+ GetCompactionThreshold.class,
+ GetCompactionThroughput.class,
+ GetStreamThroughput.class,
+ GetEndpoints.class,
+ GetSSTables.class,
+ GossipInfo.class,
+ InvalidateKeyCache.class,
+ InvalidateRowCache.class,
+ InvalidateCounterCache.class,
+ Join.class,
+ Move.class,
+ PauseHandoff.class,
+ ResumeHandoff.class,
+ ProxyHistograms.class,
+ Rebuild.class,
+ Refresh.class,
+ RemoveToken.class,
+ RemoveNode.class,
+ Repair.class,
+ SetCacheCapacity.class,
+ SetCompactionThreshold.class,
+ SetCompactionThroughput.class,
+ SetStreamThroughput.class,
+ SetTraceProbability.class,
+ Snapshot.class,
+ ListSnapshots.class,
+ Status.class,
+ StatusBinary.class,
+ StatusThrift.class,
+ Stop.class,
+ StopDaemon.class,
+ Version.class,
+ DescribeRing.class,
+ RebuildIndex.class,
+ RangeKeySample.class,
+ EnableBackup.class,
+ DisableBackup.class,
+ ResetLocalSchema.class,
+ ReloadTriggers.class,
+ SetCacheKeysToSave.class,
+ DisableThrift.class,
+ DisableHandoff.class,
+ Drain.class,
+ TruncateHints.class,
+ TpStats.class
+ );
+
+ Cli<Runnable> parser = Cli.<Runnable>builder("nodetool")
+ .withDescription("Manage your Cassandra cluster")
+ .withDefaultCommand(Help.class)
+ .withCommands(commands)
+ .build();
+
+ int status = 0;
+ try
+ {
+ Runnable parse = parser.parse(args);
+ printHistory(args);
+ parse.run();
+ } catch (IllegalArgumentException |
+ IllegalStateException |
+ ParseArgumentsMissingException |
+ ParseArgumentsUnexpectedException |
+ ParseOptionConversionException |
+ ParseOptionMissingException |
+ ParseOptionMissingValueException |
+ ParseCommandMissingException |
+ ParseCommandUnrecognizedException e)
+ {
+ badUse(e);
+ status = 1;
+ } catch (Throwable throwable)
+ {
+ err(Throwables.getRootCause(throwable));
+ status = 2;
+ }
+
+ System.exit(status);
+ }
+
+ private static void printHistory(String... args)
+ {
+ //don't bother to print if no args passed (meaning, nodetool is just printing out the sub-commands list)
+ if (args.length == 0)
+ return;
+
+ String cmdLine = Joiner.on(" ").skipNulls().join(args);
+ cmdLine = cmdLine.replaceFirst("(?<=(-pw|--password))\\s+\\S+", " <hidden>");
+
+ try (FileWriter writer = new FileWriter(new File(FBUtilities.getToolsOutputDirectory(), HISTORYFILE), true))
+ {
+ SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss,SSS");
+ writer.append(sdf.format(new Date())).append(": ").append(cmdLine).append(System.lineSeparator());
+ }
+ catch (IOException ioe)
+ {
+ //quietly ignore any errors about not being able to write out history
+ }
+ }
+
+ private static void badUse(Exception e)
+ {
+ System.out.println("nodetool: " + e.getMessage());
+ System.out.println("See 'nodetool help' or 'nodetool help <command>'.");
+ }
+
+ private static void err(Throwable e)
+ {
+ System.err.println("error: " + e.getMessage());
+ System.err.println("-- StackTrace --");
+ System.err.println(getStackTraceAsString(e));
+ }
+
+ public static abstract class NodeToolCmd implements Runnable
+ {
+
+ @Option(type = OptionType.GLOBAL, name = {"-h", "--host"}, description = "Node hostname or ip address")
+ private String host = "127.0.0.1";
+
+ @Option(type = OptionType.GLOBAL, name = {"-p", "--port"}, description = "Remote jmx agent port number")
+ private String port = "7199";
+
+ @Option(type = OptionType.GLOBAL, name = {"-u", "--username"}, description = "Remote jmx agent username")
+ private String username = EMPTY;
+
+ @Option(type = OptionType.GLOBAL, name = {"-pw", "--password"}, description = "Remote jmx agent password")
+ private String password = EMPTY;
+
+ @Override
+ public void run()
+ {
+ try (NodeProbe probe = connect())
+ {
+ execute(probe);
+ } catch (IOException e)
+ {
+ throw new RuntimeException("Error while closing JMX connection", e);
+ }
+
+ }
+
+ protected abstract void execute(NodeProbe probe);
+
+ private NodeProbe connect()
+ {
+ NodeProbe nodeClient = null;
+
+ try
+ {
+ if (username.isEmpty())
+ nodeClient = new NodeProbe(host, parseInt(port));
+ else
+ nodeClient = new NodeProbe(host, parseInt(port), username, password);
+ } catch (IOException e)
+ {
+ Throwable rootCause = Throwables.getRootCause(e);
+ System.err.println(format("nodetool: Failed to connect to '%s:%s' - %s: '%s'.", host, port, rootCause.getClass().getSimpleName(), rootCause.getMessage()));
+ System.exit(1);
+ }
+
+ return nodeClient;
+ }
+
+ protected List<String> parseOptionalKeyspace(List<String> cmdArgs, NodeProbe nodeProbe)
+ {
+ List<String> keyspaces = new ArrayList<>();
+
+ if (cmdArgs == null || cmdArgs.isEmpty())
+ keyspaces.addAll(nodeProbe.getKeyspaces());
+ else
+ keyspaces.add(cmdArgs.get(0));
+
+ for (String keyspace : keyspaces)
+ {
+ if (!nodeProbe.getKeyspaces().contains(keyspace))
+ throw new IllegalArgumentException("Keyspace [" + keyspace + "] does not exist.");
+ }
+
+ return Collections.unmodifiableList(keyspaces);
+ }
+
+ protected String[] parseOptionalColumnFamilies(List<String> cmdArgs)
+ {
+ return cmdArgs.size() <= 1 ? EMPTY_STRING_ARRAY : toArray(cmdArgs.subList(1, cmdArgs.size()), String.class);
+ }
+ }
+
+ @Command(name = "info", description = "Print node information (uptime, load, ...)")
+ public static class Info extends NodeToolCmd
+ {
+ @Option(name = {"-T", "--tokens"}, description = "Display all tokens")
+ private boolean tokens = false;
+
+ @Override
+ public void execute(NodeProbe probe)
+ {
+ boolean gossipInitialized = probe.isInitialized();
+
+ System.out.printf("%-17s: %s%n", "ID", probe.getLocalHostId());
+ System.out.printf("%-17s: %s%n", "Gossip active", gossipInitialized);
+ System.out.printf("%-17s: %s%n", "Thrift active", probe.isThriftServerRunning());
+ System.out.printf("%-17s: %s%n", "Native Transport active", probe.isNativeTransportRunning());
+ System.out.printf("%-17s: %s%n", "Load", probe.getLoadString());
+ if (gossipInitialized)
+ System.out.printf("%-17s: %s%n", "Generation No", probe.getCurrentGenerationNumber());
+ else
+ System.out.printf("%-17s: %s%n", "Generation No", 0);
+
+ // Uptime
+ long secondsUp = probe.getUptime() / 1000;
+ System.out.printf("%-17s: %d%n", "Uptime (seconds)", secondsUp);
+
+ // Memory usage
+ MemoryUsage heapUsage = probe.getHeapMemoryUsage();
+ double memUsed = (double) heapUsage.getUsed() / (1024 * 1024);
+ double memMax = (double) heapUsage.getMax() / (1024 * 1024);
+ System.out.printf("%-17s: %.2f / %.2f%n", "Heap Memory (MB)", memUsed, memMax);
+
+ // Data Center/Rack
+ System.out.printf("%-17s: %s%n", "Data Center", probe.getDataCenter());
+ System.out.printf("%-17s: %s%n", "Rack", probe.getRack());
+
+ // Exceptions
+ System.out.printf("%-17s: %s%n", "Exceptions", probe.getStorageMetric("Exceptions"));
+
+ CacheServiceMBean cacheService = probe.getCacheServiceMBean();
+
+ // Key Cache: Hits, Requests, RecentHitRate, SavePeriodInSeconds
+ System.out.printf("%-17s: entries %d, size %d (bytes), capacity %d (bytes), %d hits, %d requests, %.3f recent hit rate, %d save period in seconds%n",
+ "Key Cache",
+ probe.getCacheMetric("KeyCache", "Entries"),
+ probe.getCacheMetric("KeyCache", "Size"),
+ probe.getCacheMetric("KeyCache", "Capacity"),
+ probe.getCacheMetric("KeyCache", "Hits"),
+ probe.getCacheMetric("KeyCache", "Requests"),
+ probe.getCacheMetric("KeyCache", "HitRate"),
+ cacheService.getKeyCacheSavePeriodInSeconds());
+
+ // Row Cache: Hits, Requests, RecentHitRate, SavePeriodInSeconds
+ System.out.printf("%-17s: entries %d, size %d (bytes), capacity %d (bytes), %d hits, %d requests, %.3f recent hit rate, %d save period in seconds%n",
+ "Row Cache",
+ probe.getCacheMetric("RowCache", "Entries"),
+ probe.getCacheMetric("RowCache", "Size"),
+ probe.getCacheMetric("RowCache", "Capacity"),
+ probe.getCacheMetric("RowCache", "Hits"),
+ probe.getCacheMetric("RowCache", "Requests"),
+ probe.getCacheMetric("RowCache", "HitRate"),
+ cacheService.getRowCacheSavePeriodInSeconds());
+
+ // Counter Cache: Hits, Requests, RecentHitRate, SavePeriodInSeconds
+ System.out.printf("%-17s: entries %d, size %d (bytes), capacity %d (bytes), %d hits, %d requests, %.3f recent hit rate, %d save period in seconds%n",
+ "Counter Cache",
+ probe.getCacheMetric("CounterCache", "Entries"),
+ probe.getCacheMetric("CounterCache", "Size"),
+ probe.getCacheMetric("CounterCache", "Capacity"),
+ probe.getCacheMetric("CounterCache", "Hits"),
+ probe.getCacheMetric("CounterCache", "Requests"),
+ probe.getCacheMetric("CounterCache", "HitRate"),
+ cacheService.getCounterCacheSavePeriodInSeconds());
+
+ // Tokens
+ List<String> tokens = probe.getTokens();
+ if (tokens.size() == 1 || this.tokens)
+ for (String token : tokens)
+ System.out.printf("%-17s: %s%n", "Token", token);
+ else
+ System.out.printf("%-17s: (invoke with -T/--tokens to see all %d tokens)%n", "Token", tokens.size());
+ }
+ }
+
+ @Command(name = "ring", description = "Print information about the token ring")
+ public static class Ring extends NodeToolCmd
+ {
+ @Arguments(description = "Specify a keyspace for accurate ownership information (topology awareness)")
+ private String keyspace = null;
+
+ @Override
+ public void execute(NodeProbe probe)
+ {
+ Map<String, String> tokensToEndpoints = probe.getTokenToEndpointMap();
+ LinkedHashMultimap<String, String> endpointsToTokens = LinkedHashMultimap.create();
+ for (Map.Entry<String, String> entry : tokensToEndpoints.entrySet())
+ endpointsToTokens.put(entry.getValue(), entry.getKey());
+
+ int maxAddressLength = Collections.max(endpointsToTokens.keys(), new Comparator<String>()
+ {
+ @Override
+ public int compare(String first, String second)
+ {
+ return ((Integer) first.length()).compareTo(second.length());
+ }
+ }).length();
+
+ String formatPlaceholder = "%%-%ds %%-12s%%-7s%%-8s%%-16s%%-20s%%-44s%%n";
+ String format = format(formatPlaceholder, maxAddressLength);
+
+ // Calculate per-token ownership of the ring
+ Map<InetAddress, Float> ownerships;
+ try
+ {
+ ownerships = probe.effectiveOwnership(keyspace);
+ } catch (IllegalStateException ex)
+ {
+ ownerships = probe.getOwnership();
+ System.out.printf("Note: Ownership information does not include topology; for complete information, specify a keyspace%n");
+ }
+ try
+ {
+ System.out.println();
+ Map<String, Map<InetAddress, Float>> perDcOwnerships = Maps.newLinkedHashMap();
+ // get the different datasets and map to tokens
+ for (Map.Entry<InetAddress, Float> ownership : ownerships.entrySet())
+ {
+ String dc = probe.getEndpointSnitchInfoProxy().getDatacenter(ownership.getKey().getHostAddress());
+ if (!perDcOwnerships.containsKey(dc))
+ perDcOwnerships.put(dc, new LinkedHashMap<InetAddress, Float>());
+ perDcOwnerships.get(dc).put(ownership.getKey(), ownership.getValue());
+ }
+ for (Map.Entry<String, Map<InetAddress, Float>> entry : perDcOwnerships.entrySet())
+ printDc(probe, format, entry.getKey(), endpointsToTokens, entry.getValue());
+ } catch (UnknownHostException e)
+ {
+ throw new RuntimeException(e);
+ }
+
+ if (DatabaseDescriptor.getNumTokens() > 1)
+ {
+ System.out.println(" Warning: \"nodetool ring\" is used to output all the tokens of a node.");
+ System.out.println(" To view status related info of a node use \"nodetool status\" instead.\n");
+ }
+ }
+
+ private void printDc(NodeProbe probe, String format,
+ String dc,
+ LinkedHashMultimap<String, String> endpointsToTokens,
+ Map<InetAddress, Float> filteredOwnerships)
+ {
+ Collection<String> liveNodes = probe.getLiveNodes();
+ Collection<String> deadNodes = probe.getUnreachableNodes();
+ Collection<String> joiningNodes = probe.getJoiningNodes();
+ Collection<String> leavingNodes = probe.getLeavingNodes();
+ Collection<String> movingNodes = probe.getMovingNodes();
+ Map<String, String> loadMap = probe.getLoadMap();
+
+ System.out.println("Datacenter: " + dc);
+ System.out.println("==========");
+
+ // get the total amount of replicas for this dc and the last token in this dc's ring
+ List<String> tokens = new ArrayList<>();
+ String lastToken = "";
+
+ for (Map.Entry<InetAddress, Float> entry : filteredOwnerships.entrySet())
+ {
+ tokens.addAll(endpointsToTokens.get(entry.getKey().getHostAddress()));
+ lastToken = tokens.get(tokens.size() - 1);
+ }
+
+
+ System.out.printf(format, "Address", "Rack", "Status", "State", "Load", "Owns", "Token");
+
+ if (filteredOwnerships.size() > 1)
+ System.out.printf(format, "", "", "", "", "", "", lastToken);
+ else
+ System.out.println();
+
+ for (Map.Entry<InetAddress, Float> entry : filteredOwnerships.entrySet())
+ {
+ String endpoint = entry.getKey().getHostAddress();
+ for (String token : endpointsToTokens.get(endpoint))
+ {
+ String rack;
+ try
+ {
+ rack = probe.getEndpointSnitchInfoProxy().getRack(endpoint);
+ } catch (UnknownHostException e)
+ {
+ rack = "Unknown";
+ }
+
+ String status = liveNodes.contains(endpoint)
+ ? "Up"
+ : deadNodes.contains(endpoint)
+ ? "Down"
+ : "?";
+
+ String state = "Normal";
+
+ if (joiningNodes.contains(endpoint))
+ state = "Joining";
+ else if (leavingNodes.contains(endpoint))
+ state = "Leaving";
+ else if (movingNodes.contains(endpoint))
+ state = "Moving";
+
+ String load = loadMap.containsKey(endpoint)
+ ? loadMap.get(endpoint)
+ : "?";
+ String owns = new DecimalFormat("##0.00%").format(entry.getValue());
+ System.out.printf(format, endpoint, rack, status, state, load, owns, token);
+ }
+ }
+ System.out.println();
+ }
+ }
+
+ @Command(name = "netstats", description = "Print network information on provided host (connecting node by default)")
+ public static class NetStats extends NodeToolCmd
+ {
+ @Override
+ public void execute(NodeProbe probe)
+ {
+ System.out.printf("Mode: %s%n", probe.getOperationMode());
+ Set<StreamState> statuses = probe.getStreamStatus();
+ if (statuses.isEmpty())
+ System.out.println("Not sending any streams.");
+ for (StreamState status : statuses)
+ {
+ System.out.printf("%s %s%n", status.description, status.planId.toString());
+ for (SessionInfo info : status.sessions)
+ {
+ System.out.printf(" %s%n", info.peer.toString());
+ if (!info.receivingSummaries.isEmpty())
+ {
+ System.out.printf(" Receiving %d files, %d bytes total%n", info.getTotalFilesToReceive(), info.getTotalSizeToReceive());
+ for (ProgressInfo progress : info.getReceivingFiles())
+ {
+ System.out.printf(" %s%n", progress.toString());
+ }
+ }
+ if (!info.sendingSummaries.isEmpty())
+ {
+ System.out.printf(" Sending %d files, %d bytes total%n", info.getTotalFilesToSend(), info.getTotalSizeToSend());
+ for (ProgressInfo progress : info.getSendingFiles())
+ {
+ System.out.printf(" %s%n", progress.toString());
+ }
+ }
+ }
+ }
+
+ System.out.printf("Read Repair Statistics:%nAttempted: %d%nMismatch (Blocking): %d%nMismatch (Background): %d%n", probe.getReadRepairAttempted(), probe.getReadRepairRepairedBlocking(), probe.getReadRepairRepairedBackground());
+
+ MessagingServiceMBean ms = probe.msProxy;
+ System.out.printf("%-25s", "Pool Name");
+ System.out.printf("%10s", "Active");
+ System.out.printf("%10s", "Pending");
+ System.out.printf("%15s%n", "Completed");
+
+ int pending;
+ long completed;
+
+ pending = 0;
+ for (int n : ms.getCommandPendingTasks().values())
+ pending += n;
+ completed = 0;
+ for (long n : ms.getCommandCompletedTasks().values())
+ completed += n;
+ System.out.printf("%-25s%10s%10s%15s%n", "Commands", "n/a", pending, completed);
+
+ pending = 0;
+ for (int n : ms.getResponsePendingTasks().values())
+ pending += n;
+ completed = 0;
+ for (long n : ms.getResponseCompletedTasks().values())
+ completed += n;
+ System.out.printf("%-25s%10s%10s%15s%n", "Responses", "n/a", pending, completed);
+ }
+ }
+
+ @Command(name = "cfstats", description = "Print statistics on column families")
+ public static class CfStats extends NodeToolCmd
+ {
+ @Arguments(usage = "[<keyspace.cfname>...]", description = "List of column families (or keyspace) names")
+ private List<String> cfnames = new ArrayList<>();
+
+ @Option(name = "-i", description = "Ignore the list of column families and display the remaining cfs")
+ private boolean ignore = false;
+
+ @Override
+ public void execute(NodeProbe probe)
+ {
+ OptionFilter filter = new OptionFilter(ignore, cfnames);
+ Map<String, List<ColumnFamilyStoreMBean>> cfstoreMap = new HashMap<>();
+
+ // get a list of column family stores
+ Iterator<Map.Entry<String, ColumnFamilyStoreMBean>> cfamilies = probe.getColumnFamilyStoreMBeanProxies();
+
+ while (cfamilies.hasNext())
+ {
+ Map.Entry<String, ColumnFamilyStoreMBean> entry = cfamilies.next();
+ String keyspaceName = entry.getKey();
+ ColumnFamilyStoreMBean cfsProxy = entry.getValue();
+
+ if (!cfstoreMap.containsKey(keyspaceName) && filter.isColumnFamilyIncluded(entry.getKey(), cfsProxy.getColumnFamilyName()))
+ {
+ List<ColumnFamilyStoreMBean> columnFamilies = new ArrayList<>();
+ columnFamilies.add(cfsProxy);
+ cfstoreMap.put(keyspaceName, columnFamilies);
+ } else if (filter.isColumnFamilyIncluded(entry.getKey(), cfsProxy.getColumnFamilyName()))
+ {
+ cfstoreMap.get(keyspaceName).add(cfsProxy);
+ }
+ }
+
+ // make sure all specified kss and cfs exist
+ filter.verifyKeyspaces(probe.getKeyspaces());
+ filter.verifyColumnFamilies();
+
+ // print out the table statistics
+ for (Map.Entry<String, List<ColumnFamilyStoreMBean>> entry : cfstoreMap.entrySet())
+ {
+ String keyspaceName = entry.getKey();
+ List<ColumnFamilyStoreMBean> columnFamilies = entry.getValue();
+ long keyspaceReadCount = 0;
+ long keyspaceWriteCount = 0;
+ int keyspacePendingTasks = 0;
+ double keyspaceTotalReadTime = 0.0f;
+ double keyspaceTotalWriteTime = 0.0f;
+
+ System.out.println("Keyspace: " + keyspaceName);
+ for (ColumnFamilyStoreMBean cfstore : columnFamilies)
+ {
+ String cfName = cfstore.getColumnFamilyName();
+ long writeCount = ((JmxReporter.TimerMBean) probe.getColumnFamilyMetric(keyspaceName, cfName, "WriteLatency")).getCount();
+ long readCount = ((JmxReporter.TimerMBean) probe.getColumnFamilyMetric(keyspaceName, cfName, "ReadLatency")).getCount();
+
+ if (readCount > 0)
+ {
+ keyspaceReadCount += readCount;
+ keyspaceTotalReadTime += (long) probe.getColumnFamilyMetric(keyspaceName, cfName, "ReadTotalLatency");
+ }
+ if (writeCount > 0)
+ {
+ keyspaceWriteCount += writeCount;
+ keyspaceTotalWriteTime += (long) probe.getColumnFamilyMetric(keyspaceName, cfName, "WriteTotalLatency");
+ }
+ keyspacePendingTasks += (int) probe.getColumnFamilyMetric(keyspaceName, cfName, "PendingTasks");
+ }
+
+ double keyspaceReadLatency = keyspaceReadCount > 0
+ ? keyspaceTotalReadTime / keyspaceReadCount / 1000
+ : Double.NaN;
+ double keyspaceWriteLatency = keyspaceWriteCount > 0
+ ? keyspaceTotalWriteTime / keyspaceWriteCount / 1000
+ : Double.NaN;
+
+ System.out.println("\tRead Count: " + keyspaceReadCount);
+ System.out.println("\tRead Latency: " + format("%s", keyspaceReadLatency) + " ms.");
+ System.out.println("\tWrite Count: " + keyspaceWriteCount);
+ System.out.println("\tWrite Latency: " + format("%s", keyspaceWriteLatency) + " ms.");
+ System.out.println("\tPending Tasks: " + keyspacePendingTasks);
+
+ // print out column family statistics for this keyspace
+ for (ColumnFamilyStoreMBean cfstore : columnFamilies)
+ {
+ String cfName = cfstore.getColumnFamilyName();
+ if (cfName.contains("."))
+ System.out.println("\t\tTable (index): " + cfName);
+ else
+ System.out.println("\t\tTable: " + cfName);
+
+ System.out.println("\t\tSSTable count: " + probe.getColumnFamilyMetric(keyspaceName, cfName, "LiveSSTableCount"));
+
+ int[] leveledSStables = cfstore.getSSTableCountPerLevel();
+ if (leveledSStables != null)
+ {
+ System.out.print("\t\tSSTables in each level: [");
+ for (int level = 0; level < leveledSStables.length; level++)
+ {
+ int count = leveledSStables[level];
+ System.out.print(count);
+ long maxCount = 4L; // for L0
+ if (level > 0)
+ maxCount = (long) Math.pow(10, level);
+ // show max threshold for level when exceeded
+ if (count > maxCount)
+ System.out.print("/" + maxCount);
+
+ if (level < leveledSStables.length - 1)
+ System.out.print(", ");
+ else
+ System.out.println("]");
+ }
+ }
+ System.out.println("\t\tSpace used (live), bytes: " + probe.getColumnFamilyMetric(keyspaceName, cfName, "LiveDiskSpaceUsed"));
+ System.out.println("\t\tSpace used (total), bytes: " + probe.getColumnFamilyMetric(keyspaceName, cfName, "TotalDiskSpaceUsed"));
+ System.out.println("\t\tSpace used by snapshots (total), bytes: " + probe.getColumnFamilyMetric(keyspaceName, cfName, "SnapshotsSize"));
+ System.out.println("\t\tSSTable Compression Ratio: " + probe.getColumnFamilyMetric(keyspaceName, cfName, "CompressionRatio"));
+ System.out.println("\t\tMemtable cell count: " + probe.getColumnFamilyMetric(keyspaceName, cfName, "MemtableColumnsCount"));
+ System.out.println("\t\tMemtable data size, bytes: " + probe.getColumnFamilyMetric(keyspaceName, cfName, "MemtableDataSize"));
+ System.out.println("\t\tMemtable switch count: " + probe.getColumnFamilyMetric(keyspaceName, cfName, "MemtableSwitchCount"));
+ System.out.println("\t\tLocal read count: " + ((JmxReporter.TimerMBean) probe.getColumnFamilyMetric(keyspaceName, cfName, "ReadLatency")).getCount());
+ double localReadLatency = ((JmxReporter.TimerMBean) probe.getColumnFamilyMetric(keyspaceName, cfName, "ReadLatency")).getMean() / 1000;
+ double localRLatency = localReadLatency > 0 ? localReadLatency : Double.NaN;
+ System.out.printf("\t\tLocal read latency: %01.3f ms%n", localRLatency);
+ System.out.println("\t\tLocal write count: " + ((JmxReporter.TimerMBean) probe.getColumnFamilyMetric(keyspaceName, cfName, "WriteLatency")).getCount());
+ double localWriteLatency = ((JmxReporter.TimerMBean) probe.getColumnFamilyMetric(keyspaceName, cfName, "WriteLatency")).getMean() / 1000;
+ double localWLatency = localWriteLatency > 0 ? localWriteLatency : Double.NaN;
+ System.out.printf("\t\tLocal write latency: %01.3f ms%n", localWLatency);
+ System.out.println("\t\tPending tasks: " + probe.getColumnFamilyMetric(keyspaceName, cfName, "PendingTasks"));
+ System.out.println("\t\tBloom filter false positives: " + probe.getColumnFamilyMetric(keyspaceName, cfName, "BloomFilterFalsePositives"));
+ System.out.println("\t\tBloom filter false ratio: " + format("%01.5f", probe.getColumnFamilyMetric(keyspaceName, cfName, "RecentBloomFilterFalseRatio")));
+ System.out.println("\t\tBloom filter space used, bytes: " + probe.getColumnFamilyMetric(keyspaceName, cfName, "BloomFilterDiskSpaceUsed"));
+ System.out.println("\t\tCompacted partition minimum bytes: " + probe.getColumnFamilyMetric(keyspaceName, cfName, "MinRowSize"));
+ System.out.println("\t\tCompacted partition maximum bytes: " + probe.getColumnFamilyMetric(keyspaceName, cfName, "MaxRowSize"));
+ System.out.println("\t\tCompacted partition mean bytes: " + probe.getColumnFamilyMetric(keyspaceName, cfName, "MeanRowSize"));
+ System.out.println("\t\tAverage live cells per slice (last five minutes): " + ((JmxReporter.HistogramMBean) probe.getColumnFamilyMetric(keyspaceName, cfName, "LiveScannedHistogram")).getMean());
+ System.out.println("\t\tAverage tombstones per slice (last five minutes): " + ((JmxReporter.HistogramMBean) probe.getColumnFamilyMetric(keyspaceName, cfName, "TombstoneScannedHistogram")).getMean());
+
+ System.out.println("");
+ }
+ System.out.println("----------------");
+ }
+ }
+
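The per-keyspace latencies printed above are derived rather than read directly: the *TotalLatency metrics are sums in microseconds, so dividing by the operation count and then by 1000 gives the average in milliseconds. A minimal arithmetic sketch with made-up numbers (not taken from any real node):

    public class KeyspaceLatencySketch
    {
        public static void main(String[] args)
        {
            long totalReadLatencyMicros = 1500000L; // sum of ReadTotalLatency over the keyspace's tables (made up)
            long readCount = 500;                   // sum of ReadLatency counts over the same tables (made up)
            double readLatencyMs = (double) totalReadLatencyMicros / readCount / 1000; // 3.0
            System.out.println("Read Latency: " + readLatencyMs + " ms.");
        }
    }
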
+ /**
+ * Used for filtering keyspaces and columnfamilies to be displayed using the cfstats command.
+ */
+ private static class OptionFilter
+ {
+ private Map<String, List<String>> filter = new HashMap<>();
+ private Map<String, List<String>> verifier = new HashMap<>();
+ private List<String> filterList = new ArrayList<>();
+ private boolean ignoreMode;
+
+ public OptionFilter(boolean ignoreMode, List<String> filterList)
+ {
+ this.filterList.addAll(filterList);
+ this.ignoreMode = ignoreMode;
+
+ for (String s : filterList)
+ {
+ String[] keyValues = s.split("\\.", 2);
+
+ // build the map that stores the ks' and cfs to use
+ if (!filter.containsKey(keyValues[0]))
+ {
+ filter.put(keyValues[0], new ArrayList<String>());
+ verifier.put(keyValues[0], new ArrayList<String>());
+
+ if (keyValues.length == 2)
+ {
+ filter.get(keyValues[0]).add(keyValues[1]);
+ verifier.get(keyValues[0]).add(keyValues[1]);
+ }
+ } else
+ {
+ if (keyValues.length == 2)
+ {
+ filter.get(keyValues[0]).add(keyValues[1]);
+ verifier.get(keyValues[0]).add(keyValues[1]);
+ }
+ }
+ }
+ }
+
+ public boolean isColumnFamilyIncluded(String keyspace, String columnFamily)
+ {
+ // supplying empty params list is treated as wanting to display all kss & cfs
+ if (filterList.isEmpty())
+ return !ignoreMode;
+
+ List<String> cfs = filter.get(keyspace);
+
+ // no such keyspace is in the map
+ if (cfs == null)
+ return ignoreMode;
+ // only a keyspace with no cfs was supplied
+ // so ignore or include (based on the flag) every column family in specified keyspace
+ else if (cfs.size() == 0)
+ return !ignoreMode;
+
+ // keyspace exists, and it contains specific cfs
+ verifier.get(keyspace).remove(columnFamily);
+ return ignoreMode ^ cfs.contains(columnFamily);
+ }
+
+ public void verifyKeyspaces(List<String> keyspaces)
+ {
+ for (String ks : verifier.keySet())
+ if (!keyspaces.contains(ks))
+ throw new IllegalArgumentException("Unknown keyspace: " + ks);
+ }
+
+ public void verifyColumnFamilies()
+ {
+ for (String ks : filter.keySet())
+ if (verifier.get(ks).size() > 0)
+ throw new IllegalArgumentException("Unknown column families: " + verifier.get(ks).toString() + " in keyspace: " + ks);
+ }
+ }
+ }
+
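OptionFilter above interprets the cfstats arguments: a bare keyspace argument matches every column family in it, a dotted "ks.cf" argument matches exactly one, and ignoreMode inverts the decision. A standalone restatement of that decision logic, assuming a filter built from arguments like "ks1" and "ks2.cf1" (this is a sketch, not the private class itself):

    import java.util.*;

    public class FilterSketch
    {
        // Mirrors isColumnFamilyIncluded: an empty filter shows everything, a bare
        // keyspace admits all of its cfs, "ks.cf" admits only that cf, and
        // ignoreMode flips include into exclude.
        static boolean included(Map<String, List<String>> filter, boolean ignoreMode,
                                String keyspace, String columnFamily)
        {
            if (filter.isEmpty())
                return !ignoreMode;
            List<String> cfs = filter.get(keyspace);
            if (cfs == null)
                return ignoreMode;
            if (cfs.isEmpty())
                return !ignoreMode;
            return ignoreMode ^ cfs.contains(columnFamily);
        }

        public static void main(String[] args)
        {
            Map<String, List<String>> filter = new HashMap<>();
            filter.put("ks1", new ArrayList<String>());   // from argument "ks1"
            filter.put("ks2", Arrays.asList("cf1"));      // from argument "ks2.cf1"

            System.out.println(included(filter, false, "ks1", "any_cf")); // true
            System.out.println(included(filter, false, "ks2", "cf2"));    // false
            System.out.println(included(filter, true,  "ks2", "cf2"));    // true
        }
    }
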
+ @Command(name = "cfhistograms", description = "Print statistic histograms for a given column family")
+ public static class CfHistograms extends NodeToolCmd
+ {
+ @Arguments(usage = "<keyspace> <cfname>", description = "The keyspace and column family name")
+ private List<String> args = new ArrayList<>();
+
+ @Override
+ public void execute(NodeProbe probe)
+ {
+ checkArgument(args.size() == 2, "cfhistograms requires ks and cf args");
+
+ String keyspace = args.get(0);
+ String cfname = args.get(1);
+
+ // calculate percentile of row size and column count
+ long[] estimatedRowSize = (long[]) probe.getColumnFamilyMetric(keyspace, cfname, "EstimatedRowSizeHistogram");
+ long[] estimatedColumnCount = (long[]) probe.getColumnFamilyMetric(keyspace, cfname, "EstimatedColumnCountHistogram");
+
+ long[] bucketOffsets = new EstimatedHistogram().getBucketOffsets();
+ EstimatedHistogram rowSizeHist = new EstimatedHistogram(bucketOffsets, estimatedRowSize);
+ EstimatedHistogram columnCountHist = new EstimatedHistogram(bucketOffsets, estimatedColumnCount);
+
+ // build arrays to store percentile values
+ double[] estimatedRowSizePercentiles = new double[7];
+ double[] estimatedColumnCountPercentiles = new double[7];
+ double[] offsetPercentiles = new double[]{0.5, 0.75, 0.95, 0.98, 0.99};
+ for (int i = 0; i < offsetPercentiles.length; i++)
+ {
+ estimatedRowSizePercentiles[i] = rowSizeHist.percentile(offsetPercentiles[i]);
+ estimatedColumnCountPercentiles[i] = columnCountHist.percentile(offsetPercentiles[i]);
+ }
+
+ // min value
+ estimatedRowSizePercentiles[5] = rowSizeHist.min();
+ estimatedColumnCountPercentiles[5] = columnCountHist.min();
+ // max value
+ estimatedRowSizePercentiles[6] = rowSizeHist.max();
+ estimatedColumnCountPercentiles[6] = columnCountHist.max();
+
+ String[] percentiles = new String[]{"50%", "75%", "95%", "98%", "99%", "Min", "Max"};
+ double[] readLatency = probe.metricPercentilesAsArray((JmxReporter.HistogramMBean) probe.getColumnFamilyMetric(keyspace, cfname, "ReadLatency"));
+ double[] writeLatency = probe.metricPercentilesAsArray((JmxReporter.TimerMBean) probe.getColumnFamilyMetric(keyspace, cfname, "WriteLatency"));
+ double[] sstablesPerRead = probe.metricPercentilesAsArray((JmxReporter.HistogramMBean) probe.getColumnFamilyMetric(keyspace, cfname, "SSTablesPerReadHistogram"));
+
+ System.out.println(format("%s/%s histograms", keyspace, cfname));
+ System.out.println(format("%-10s%10s%18s%18s%18s%18s",
+ "Percentile", "SSTables", "Write Latency", "Read Latency", "Partition Size", "Cell Count"));
+ System.out.println(format("%-10s%10s%18s%18s%18s%18s",
+ "", "", "(micros)", "(micros)", "(bytes)", ""));
+
+ for (int i = 0; i < percentiles.length; i++)
+ {
+ System.out.println(format("%-10s%10.2f%18.2f%18.2f%18.0f%18.0f",
+ percentiles[i],
+ sstablesPerRead[i],
+ writeLatency[i],
+ readLatency[i],
+ estimatedRowSizePercentiles[i],
+ estimatedColumnCountPercentiles[i]));
+ }
+ System.out.println();
+ }
+ }
+
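cfhistograms rebuilds EstimatedHistogram objects on the client from the raw bucket counts exposed over JMX, then reads percentiles, min and max out of them. A hedged sketch of the same fetch-and-read round trip; the host, port, keyspace and table names are placeholders:

    import org.apache.cassandra.tools.NodeProbe;
    import org.apache.cassandra.utils.EstimatedHistogram;

    public class PartitionSizeP99
    {
        public static void main(String[] args) throws Exception
        {
            NodeProbe probe = new NodeProbe("127.0.0.1", 7199); // placeholder JMX endpoint
            long[] buckets = (long[]) probe.getColumnFamilyMetric("myks", "mycf", "EstimatedRowSizeHistogram");
            EstimatedHistogram sizes = new EstimatedHistogram(new EstimatedHistogram().getBucketOffsets(), buckets);
            System.out.println("p99 partition size (bytes): " + sizes.percentile(0.99));
            System.out.println("min/max (bytes): " + sizes.min() + " / " + sizes.max());
        }
    }
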
+ @Command(name = "cleanup", description = "Triggers the immediate cleanup of keys no longer belonging to a node. By default, clean all keyspaces")
+ public static class Cleanup extends NodeToolCmd
+ {
+ @Arguments(usage = "[<keyspace> <cfnames>...]", description = "The keyspace followed by one or many column families")
+ private List<String> args = new ArrayList<>();
+
+ @Override
+ public void execute(NodeProbe probe)
+ {
+ List<String> keyspaces = parseOptionalKeyspace(args, probe);
+ String[] cfnames = parseOptionalColumnFamilies(args);
+
+ for (String keyspace : keyspaces)
+ {
+ if (Keyspace.SYSTEM_KS.equals(keyspace))
+ continue;
+
+ try
+ {
+ probe.forceKeyspaceCleanup(keyspace, cfnames);
+ } catch (Exception e)
+ {
+ throw new RuntimeException("Error occurred during cleanup", e);
+ }
+ }
+ }
+ }
+
+ @Command(name = "clearsnapshot", description = "Remove the snapshot with the given name from the given keyspaces. If no snapshotName is specified we will remove all snapshots")
+ public static class ClearSnapshot extends NodeToolCmd
+ {
+ @Arguments(usage = "[<keyspaces>...] ", description = "Remove snapshots from the given keyspaces")
+ private List<String> keyspaces = new ArrayList<>();
+
+ @Option(title = "snapshot_name", name = "-t", description = "Remove the snapshot with a given name")
+ private String snapshotName = EMPTY;
+
+ @Override
+ public void execute(NodeProbe probe)
+ {
+ StringBuilder sb = new StringBuilder();
+
+ sb.append("Requested clearing snapshot(s) for ");
+
+ if (keyspaces.isEmpty())
+ sb.append("[all keyspaces]");
+ else
+ sb.append("[").append(join(keyspaces, ", ")).append("]");
+
+ if (!snapshotName.isEmpty())
+ sb.append(" with snapshot name [").append(snapshotName).append("]");
+
+ System.out.println(sb.toString());
+
+ try
+ {
+ probe.clearSnapshot(snapshotName, toArray(keyspaces, String.class));
+ } catch (IOException e)
+ {
+ throw new RuntimeException("Error during clearing snapshots", e);
+ }
+ }
+ }
+
+ @Command(name = "compact", description = "Force a (major) compaction on one or more column families")
+ public static class Compact extends NodeToolCmd
+ {
+ @Arguments(usage = "[<keyspace> <cfnames>...]", description = "The keyspace followed by one or many column families")
+ private List<String> args = new ArrayList<>();
+
+ @Override
+ public void execute(NodeProbe probe)
+ {
+ List<String> keyspaces = parseOptionalKeyspace(args, probe);
+ String[] cfnames = parseOptionalColumnFamilies(args);
+
+ for (String keyspace : keyspaces)
+ {
+ try
+ {
+ probe.forceKeyspaceCompaction(keyspace, cfnames);
+ } catch (Exception e)
+ {
+ throw new RuntimeException("Error occurred during compaction", e);
+ }
+ }
+ }
+ }
+
+ @Command(name = "flush", description = "Flush one or more column families")
+ public static class Flush extends NodeToolCmd
+ {
+ @Arguments(usage = "[<keyspace> <cfnames>...]", description = "The keyspace followed by one or many column families")
+ private List<String> args = new ArrayList<>();
+
+ @Override
+ public void execute(NodeProbe probe)
+ {
+ List<String> keyspaces = parseOptionalKeyspace(args, probe);
+ String[] cfnames = parseOptionalColumnFamilies(args);
+
+ for (String keyspace : keyspaces)
+ {
+ try
+ {
+ probe.forceKeyspaceFlush(keyspace, cfnames);
+ } catch (Exception e)
+ {
+ throw new RuntimeException("Error occurred during flushing", e);
+ }
+ }
+ }
+ }
+
+ @Command(name = "scrub", description = "Scrub (rebuild sstables for) one or more column families")
+ public static class Scrub extends NodeToolCmd
+ {
+ @Arguments(usage = "[<keyspace> <cfnames>...]", description = "The keyspace followed by one or many column families")
+ private List<String> args = new ArrayList<>();
+
- @Option(title = "disable_snapshot", name = {"-ns", "--no-snapshot"}, description = "Scrubbed CFs will be snapshotted first, if disableSnapshot is false. (default false)")
++ @Option(title = "disable_snapshot",
++ name = {"-ns", "--no-snapshot"},
++ description = "Scrubbed CFs will be snapshotted first, if disableSnapshot is false. (default false)")
+ private boolean disableSnapshot = false;
+
++ @Option(title = "skip_corrupted",
++ name = {"-s", "--skip-corrupted"},
++ description = "Skip corrupted partitions even when scrubbing counter tables. (default false)")
++ private boolean skipCorrupted = false;
++
+ @Override
+ public void execute(NodeProbe probe)
+ {
+ List<String> keyspaces = parseOptionalKeyspace(args, probe);
+ String[] cfnames = parseOptionalColumnFamilies(args);
+
+ for (String keyspace : keyspaces)
+ {
+ try
+ {
- probe.scrub(disableSnapshot, keyspace, cfnames);
++ probe.scrub(disableSnapshot, skipCorrupted, keyspace, cfnames);
+ } catch (Exception e)
+ {
+                throw new RuntimeException("Error occurred during scrubbing", e);
+ }
+ }
+ }
+ }
+
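This is the nodetool side of CASSANDRA-5930: with the new flag, 'nodetool scrub --skip-corrupted <keyspace> <cf>' drops unreadable counter partitions instead of aborting the scrub. The same request can be made programmatically through the widened NodeProbe signature; a minimal sketch, with placeholder host, port and table names:

    import org.apache.cassandra.tools.NodeProbe;

    public class SkipCorruptedScrub
    {
        public static void main(String[] args) throws Exception
        {
            NodeProbe probe = new NodeProbe("127.0.0.1", 7199); // placeholder JMX endpoint
            boolean disableSnapshot = false; // keep the pre-scrub snapshot
            boolean skipCorrupted = true;    // skip broken counter partitions instead of failing
            probe.scrub(disableSnapshot, skipCorrupted, "myks", "mycounters");
        }
    }
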
+ @Command(name = "disableautocompaction", description = "Disable autocompaction for the given keyspace and column family")
+ public static class DisableAutoCompaction extends NodeToolCmd
+ {
+ @Arguments(usage = "[<keyspace> <cfnames>...]", description = "The keyspace followed by one or many column families")
+ private List<String> args = new ArrayList<>();
+
+ @Override
+ public void execute(NodeProbe probe)
+ {
+ List<String> keyspaces = parseOptionalKeyspace(args, probe);
+ String[] cfnames = parseOptionalColumnFamilies(args);
+
+ for (String keyspace : keyspaces)
+ {
+ try
+ {
+ probe.disableAutoCompaction(keyspace, cfnames);
+ } catch (IOException e)
+ {
+ throw new RuntimeException("Error occurred during disabling auto-compaction", e);
+ }
+ }
+ }
+ }
+
+ @Command(name = "enableautocompaction", description = "Enable autocompaction for the given keyspace and column family")
+ public static class EnableAutoCompaction extends NodeToolCmd
+ {
+ @Arguments(usage = "[<keyspace> <cfnames>...]", description = "The keyspace followed by one or many column families")
+ private List<String> args = new ArrayList<>();
+
+ @Override
+ public void execute(NodeProbe probe)
+ {
+ List<String> keyspaces = parseOptionalKeyspace(args, probe);
+ String[] cfnames = parseOptionalColumnFamilies(args);
+
+ for (String keyspace : keyspaces)
+ {
+ try
+ {
+ probe.enableAutoCompaction(keyspace, cfnames);
+ } catch (IOException e)
+ {
+ throw new RuntimeException("Error occurred during enabling auto-compaction", e);
+ }
+ }
+ }
+ }
+
+ @Command(name = "upgradesstables", description = "Rewrite sstables (for the requested column families) that are not on the current version (thus upgrading them to said current version)")
+ public static class UpgradeSSTable extends NodeToolCmd
+ {
+ @Arguments(usage = "[<keyspace> <cfnames>...]", description = "The keyspace followed by one or many column families")
+ private List<String> args = new ArrayList<>();
+
+ @Option(title = "include_all", name = {"-a", "--include-all-sstables"}, description = "Use -a to include all sstables, even those already on the current version")
+ private boolean includeAll = false;
+
+ @Override
+ public void execute(NodeProbe probe)
+ {
+ List<String> keyspaces = parseOptionalKeyspace(args, probe);
+ String[] cfnames = parseOptionalColumnFamilies(args);
+
+ for (String keyspace : keyspaces)
+ {
+ try
+ {
+ probe.upgradeSSTables(keyspace, !includeAll, cfnames);
+ } catch (Exception e)
+ {
+                throw new RuntimeException("Error occurred during upgrading sstables", e);
+ }
+ }
+ }
+ }
+
+ @Command(name = "compactionstats", description = "Print statistics on compactions")
+ public static class CompactionStats extends NodeToolCmd
+ {
+ @Override
+ public void execute(NodeProbe probe)
+ {
+ int compactionThroughput = probe.getCompactionThroughput();
+ CompactionManagerMBean cm = probe.getCompactionManagerProxy();
+ System.out.println("pending tasks: " + probe.getCompactionMetric("PendingTasks"));
+ if (cm.getCompactions().size() > 0)
+ System.out.printf("%25s%16s%16s%16s%16s%10s%10s%n", "compaction type", "keyspace", "table", "completed", "total", "unit", "progress");
+ long remainingBytes = 0;
+ for (Map<String, String> c : cm.getCompactions())
+ {
+ String percentComplete = new Long(c.get("total")) == 0
+ ? "n/a"
+ : new DecimalFormat("0.00").format((double) new Long(c.get("completed")) / new Long(c.get("total")) * 100) + "%";
+ System.out.printf("%25s%16s%16s%16s%16s%10s%10s%n", c.get("taskType"), c.get("keyspace"), c.get("columnfamily"), c.get("completed"), c.get("total"), c.get("unit"), percentComplete);
+ if (c.get("taskType").equals(OperationType.COMPACTION.toString()))
+ remainingBytes += (new Long(c.get("total")) - new Long(c.get("completed")));
+ }
+ long remainingTimeInSecs = compactionThroughput == 0 || remainingBytes == 0
+ ? -1
+ : (remainingBytes) / (1024L * 1024L * compactionThroughput);
+ String remainingTime = remainingTimeInSecs < 0
+ ? "n/a"
+ : format("%dh%02dm%02ds", remainingTimeInSecs / 3600, (remainingTimeInSecs % 3600) / 60, (remainingTimeInSecs % 60));
+
+ System.out.printf("%25s%10s%n", "Active compaction remaining time : ", remainingTime);
+ }
+ }
+
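The remaining-time estimate above is plain arithmetic: outstanding compaction bytes divided by the configured throughput cap in MB/s, reported as "n/a" when either value is zero. A worked example with made-up numbers:

    public class RemainingTimeSketch
    {
        public static void main(String[] args)
        {
            long remainingBytes = 3L * 1024 * 1024 * 1024; // 3 GiB of compaction work left (made up)
            int throughputMbPerSec = 16;                   // compaction throughput cap (made up)
            long secs = remainingBytes / (1024L * 1024L * throughputMbPerSec); // 192
            System.out.println(String.format("%dh%02dm%02ds", secs / 3600, (secs % 3600) / 60, secs % 60)); // 0h03m12s
        }
    }
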
+ @Command(name = "compactionhistory", description = "Print history of compaction")
+ public static class CompactionHistory extends NodeToolCmd
+ {
+ @Override
+ public void execute(NodeProbe probe)
+ {
+ System.out.println("Compaction History: ");
+
+ TabularData tabularData = probe.getCompactionHistory();
+ if (tabularData.isEmpty())
+ {
+ System.out.printf("There is no compaction history");
+ return;
+ }
+
+ String format = "%-41s%-19s%-29s%-26s%-15s%-15s%s%n";
+ List<String> indexNames = tabularData.getTabularType().getIndexNames();
+ System.out.printf(format, toArray(indexNames, Object.class));
+
+ Set<?> values = tabularData.keySet();
+ for (Object eachValue : values)
+ {
+ List<?> value = (List<?>) eachValue;
+ System.out.printf(format, toArray(value, Object.class));
+ }
+ }
+ }
+
+ @Command(name = "decommission", description = "Decommission the *node I am connecting to*")
+ public static class Decommission extends NodeToolCmd
+ {
+ @Override
+ public void execute(NodeProbe probe)
+ {
+ try
+ {
+ probe.decommission();
+ } catch (InterruptedException e)
+ {
+ throw new RuntimeException("Error decommissioning node", e);
+ }
+ }
+ }
+
+ @Command(name = "describecluster", description = "Print the name, snitch, partitioner and schema version of a cluster")
+ public static class DescribeCluster extends NodeToolCmd
+ {
+ @Override
+ public void execute(NodeProbe probe)
+ {
+ // display cluster name, snitch and partitioner
+ System.out.println("Cluster Information:");
+ System.out.println("\tName: " + probe.getClusterName());
+ System.out.println("\tSnitch: " + probe.getEndpointSnitchInfoProxy().getSnitchName());
+ System.out.println("\tPartitioner: " + probe.getPartitioner());
+
+ // display schema version for each node
+ System.out.println("\tSchema versions:");
+ Map<String, List<String>> schemaVersions = probe.getSpProxy().getSchemaVersions();
+ for (String version : schemaVersions.keySet())
+ {
+ System.out.println(format("\t\t%s: %s%n", version, schemaVersions.get(version)));
+ }
+ }
+ }
+
+ @Command(name = "disablebinary", description = "Disable native transport (binary protocol)")
+ public static class DisableBinary extends NodeToolCmd
+ {
+ @Override
+ public void execute(NodeProbe probe)
+ {
+ probe.stopNativeTransport();
+ }
+ }
+
+ @Command(name = "enablebinary", description = "Reenable native transport (binary protocol)")
+ public static class EnableBinary extends NodeToolCmd
+ {
+ @Override
+ public void execute(NodeProbe probe)
+ {
+ probe.startNativeTransport();
+ }
+ }
+
+ @Command(name = "enablegossip", description = "Reenable gossip")
+ public static class EnableGossip extends NodeToolCmd
+ {
+ @Override
+ public void execute(NodeProbe probe)
+ {
+ probe.startGossiping();
+ }
+ }
+
+ @Command(name = "disablegossip", description = "Disable gossip (effectively marking the node down)")
+ public static class DisableGossip extends NodeToolCmd
+ {
+ @Override
+ public void execute(NodeProbe probe)
+ {
+ probe.stopGossiping();
+ }
+ }
+
+    @Command(name = "enablehandoff", description = "Reenable storing of future hints on the current node")
+ public static class EnableHandoff extends NodeToolCmd
+ {
+ @Override
+ public void execute(NodeProbe probe)
+ {
+ probe.enableHintedHandoff();
+ }
+ }
+
+ @Command(name = "enablethrift", description = "Reenable thrift server")
+ public static class EnableThrift extends NodeToolCmd
+ {
+ @Override
+ public void execute(NodeProbe probe)
+ {
+ probe.startThriftServer();
+ }
+ }
+
+ @Command(name = "getcompactionthreshold", description = "Print min and max compaction thresholds for a given column family")
+ public static class GetCompactionThreshold extends NodeToolCmd
+ {
+ @Arguments(usage = "<keyspace> <cfname>", description = "The keyspace with a column family")
+ private List<String> args = new ArrayList<>();
+
+ @Override
+ public void execute(NodeProbe probe)
+ {
+ checkArgument(args.size() == 2, "getcompactionthreshold requires ks and cf args");
+ String ks = args.get(0);
+ String cf = args.get(1);
+
+ ColumnFamilyStoreMBean cfsProxy = probe.getCfsProxy(ks, cf);
+ System.out.println("Current compaction thresholds for " + ks + "/" + cf + ": \n" +
+ " min = " + cfsProxy.getMinimumCompactionThreshold() + ", " +
+ " max = " + cfsProxy.getMaximumCompactionThreshold());
+ }
+ }
+
+ @Command(name = "getcompactionthroughput", description = "Print the MB/s throughput cap for compaction in the system")
+ public static class GetCompactionThroughput extends NodeToolCmd
+ {
+ @Override
+ public void execute(NodeProbe probe)
+ {
+ System.out.println("Current compaction throughput: " + probe.getCompactionThroughput() + " MB/s");
+ }
+ }
+
+ @Command(name = "getstreamthroughput", description = "Print the MB/s throughput cap for streaming in the system")
+ public static class GetStreamThroughput extends NodeToolCmd
+ {
+ @Override
+ public void execute(NodeProbe probe)
+ {
+ System.out.println("Current stream throughput: " + probe.getStreamThroughput() + " MB/s");
+ }
+ }
+
+    @Command(name = "getendpoints", description = "Print the endpoints that own the key")
+ public static class GetEndpoints extends NodeToolCmd
+ {
+ @Arguments(usage = "<keyspace> <cfname> <key>", description = "The keyspace, the column family, and the key for which we need to find the endpoint")
+ private List<String> args = new ArrayList<>();
+
+ @Override
+ public void execute(NodeProbe probe)
+ {
+ checkArgument(args.size() == 3, "getendpoints requires ks, cf and key args");
+ String ks = args.get(0);
+ String cf = args.get(1);
+ String key = args.get(2);
+
+ List<InetAddress> endpoints = probe.getEndpoints(ks, cf, key);
+ for (InetAddress endpoint : endpoints)
+ {
+ System.out.println(endpoint.getHostAddress());
+ }
+ }
+ }
+
+ @Command(name = "getsstables", description = "Print the sstable filenames that own the key")
+ public static class GetSSTables extends NodeToolCmd
+ {
+ @Arguments(usage = "<keyspace> <cfname> <key>", description = "The keyspace, the column family, and the key")
+ private List<String> args = new ArrayList<>();
+
+ @Override
+ public void execute(NodeProbe probe)
+ {
+ checkArgument(args.size() == 3, "getsstables requires ks, cf and key args");
+ String ks = args.get(0);
+ String cf = args.get(1);
+ String key = args.get(2);
+
+ List<String> sstables = probe.getSSTables(ks, cf, key);
+ for (String sstable : sstables)
+ {
+ System.out.println(sstable);
+ }
+ }
+ }
+
+ @Command(name = "gossipinfo", description = "Shows the gossip information for the cluster")
+ public static class GossipInfo extends NodeToolCmd
+ {
+ @Override
+ public void execute(NodeProbe probe)
+ {
+ System.out.println(probe.getGossipInfo());
+ }
+ }
+
+ @Command(name = "invalidatekeycache", description = "Invalidate the key cache")
+ public static class InvalidateKeyCache extends NodeToolCmd
+ {
+ @Override
+ public void execute(NodeProbe probe)
+ {
+ probe.invalidateKeyCache();
+ }
+ }
+
+ @Command(name = "invalidaterowcache", description = "Invalidate the row cache")
+ public static class InvalidateRowCache extends NodeToolCmd
+ {
+ @Override
+ public void execute(NodeProbe probe)
+ {
+ probe.invalidateRowCache();
+ }
+ }
+
+ @Command(name = "invalidatecountercache", description = "Invalidate the counter cache")
+ public static class InvalidateCounterCache extends NodeToolCmd
+ {
+ @Override
+ public void execute(NodeProbe probe)
+ {
+ probe.invalidateCounterCache();
+ }
+ }
+
+ @Command(name = "join", description = "Join the ring")
+ public static class Join extends NodeToolCmd
+ {
+ @Override
+ public void execute(NodeProbe probe)
+ {
+ checkState(!probe.isJoined(), "This node has already joined the ring.");
+
+ try
+ {
+ probe.joinRing();
+ } catch (IOException e)
+ {
+ throw new RuntimeException("Error during joining the ring", e);
+ }
+ }
+ }
+
+ @Command(name = "move", description = "Move node on the token ring to a new token")
+ public static class Move extends NodeToolCmd
+ {
+ @Arguments(usage = "<new token>", description = "The new token. (for negative tokens)", required = true)
+ private String newToken = EMPTY;
+
+ @Override
+ public void execute(NodeProbe probe)
+ {
+ try
+ {
+ probe.move(newToken);
+ } catch (IOException e)
+ {
+ throw new RuntimeException("Error during moving node", e);
+ }
+ }
+ }
+
+ @Command(name = "pausehandoff", description = "Pause hints delivery process")
+ public static class PauseHandoff extends NodeToolCmd
+ {
+ @Override
+ public void execute(NodeProbe probe)
+ {
+ probe.pauseHintsDelivery();
+ }
+ }
+
+ @Command(name = "resumehandoff", description = "Resume hints delivery process")
+ public static class ResumeHandoff extends NodeToolCmd
+ {
+ @Override
+ public void execute(NodeProbe probe)
+ {
+ probe.resumeHintsDelivery();
+ }
+ }
+
+
+ @Command(name = "proxyhistograms", description = "Print statistic histograms for network operations")
+ public static class ProxyHistograms extends NodeToolCmd
+ {
+ @Override
+ public void execute(NodeProbe probe)
+ {
+ String[] percentiles = new String[]{"50%", "75%", "95%", "98%", "99%", "Min", "Max"};
+ double[] readLatency = probe.metricPercentilesAsArray(probe.getProxyMetric("Read"));
+ double[] writeLatency = probe.metricPercentilesAsArray(probe.getProxyMetric("Write"));
+ double[] rangeLatency = probe.metricPercentilesAsArray(probe.getProxyMetric("RangeSlice"));
+
+ System.out.println("proxy histograms");
+ System.out.println(format("%-10s%18s%18s%18s",
+ "Percentile", "Read Latency", "Write Latency", "Range Latency"));
+ System.out.println(format("%-10s%18s%18s%18s",
+ "", "(micros)", "(micros)", "(micros)"));
+ for (int i = 0; i < percentiles.length; i++)
+ {
+ System.out.println(format("%-10s%18.2f%18.2f%18.2f",
+ percentiles[i],
+ readLatency[i],
+ writeLatency[i],
+ rangeLatency[i]));
+ }
+ System.out.println();
+ }
+ }
+
+ @Command(name = "rebuild", description = "Rebuild data by streaming from other nodes (similarly to bootstrap)")
+ public static class Rebuild extends NodeToolCmd
+ {
+ @Arguments(usage = "<src-dc-name>", description = "Name of DC from which to select sources for streaming. By default, pick any DC")
+ private String sourceDataCenterName = null;
+
+ @Override
+ public void execute(NodeProbe probe)
+ {
+ probe.rebuild(sourceDataCenterName);
+ }
+ }
+
+ @Command(name = "refresh", description = "Load newly placed SSTables to the system without restart")
+ public static class Refresh extends NodeToolCmd
+ {
+ @Arguments(usage = "<keyspace> <cfname>", description = "The keyspace and column family name")
+ private List<String> args = new ArrayList<>();
+
+ @Override
+ public void execute(NodeProbe probe)
+ {
+ checkArgument(args.size() == 2, "refresh requires ks and cf args");
+ probe.loadNewSSTables(args.get(0), args.get(1));
+ }
+ }
+
+ @Deprecated
+ @Command(name = "removetoken", description = "DEPRECATED (see removenode)", hidden = true)
+ public static class RemoveToken extends NodeToolCmd
+ {
+ @Override
+ public void execute(NodeProbe probe)
+ {
+ System.err.println("Warn: removetoken is deprecated, please use removenode instead");
+ }
+ }
+
+ @Command(name = "removenode", description = "Show status of current node removal, force completion of pending removal or remove provided ID")
+ public static class RemoveNode extends NodeToolCmd
+ {
+ @Arguments(title = "remove_operation", usage = "<status>|<force>|<ID>", description = "Show status of current node removal, force completion of pending removal, or remove provided ID", required = true)
+ private String removeOperation = EMPTY;
+
+ @Override
+ public void execute(NodeProbe probe)
+ {
+ switch (removeOperation)
+ {
+ case "status":
+ System.out.println("RemovalStatus: " + probe.getRemovalStatus());
+ break;
+ case "force":
+ System.out.println("RemovalStatus: " + probe.getRemovalStatus());
+ probe.forceRemoveCompletion();
+ break;
+ default:
+ probe.removeNode(removeOperation);
+ break;
+ }
+ }
+ }
+
+ @Command(name = "repair", description = "Repair one or more column families")
+ public static class Repair extends NodeToolCmd
+ {
+ @Arguments(usage = "[<keyspace> <cfnames>...]", description = "The keyspace followed by one or many column families")
+ private List<String> args = new ArrayList<>();
+
+ @Option(title = "parallel", name = {"-par", "--parallel"}, description = "Use -par to carry out a parallel repair")
+ private boolean parallel = false;
+
+ @Option(title = "local_dc", name = {"-local", "--in-local-dc"}, description = "Use -local to only repair against nodes in the same datacenter")
+ private boolean localDC = false;
+
+ @Option(title = "specific_dc", name = {"-dc", "--in-dc"}, description = "Use -dc to repair specific datacenters")
+ private List<String> specificDataCenters = new ArrayList<>();
+
+ @Option(title = "start_token", name = {"-st", "--start-token"}, description = "Use -st to specify a token at which the repair range starts")
+ private String startToken = EMPTY;
+
+ @Option(title = "end_token", name = {"-et", "--end-token"}, description = "Use -et to specify a token at which repair range ends")
+ private String endToken = EMPTY;
+
+ @Option(title = "primary_range", name = {"-pr", "--partitioner-range"}, description = "Use -pr to repair only the first range returned by the partitioner")
+ private boolean primaryRange = false;
+
+ @Override
+ public void execute(NodeProbe probe)
+ {
+ List<String> keyspaces = parseOptionalKeyspace(args, probe);
+ String[] cfnames = parseOptionalColumnFamilies(args);
+
+ for (String keyspace : keyspaces)
+ {
+ try
+ {
+ Collection<String> dataCenters = null;
+ if (!specificDataCenters.isEmpty())
+ dataCenters = newArrayList(specificDataCenters);
+ else if (localDC)
+ dataCenters = newArrayList(probe.getDataCenter());
+
+ if (!startToken.isEmpty() || !endToken.isEmpty())
+ probe.forceRepairRangeAsync(System.out, keyspace, !parallel, dataCenters, startToken, endToken);
+ else
+ probe.forceRepairAsync(System.out, keyspace, !parallel, dataCenters, primaryRange, cfnames);
+ } catch (Exception e)
+ {
+ throw new RuntimeException("Error occurred during repair", e);
+ }
+ }
+ }
+ }
+
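The Repair command mostly translates flags into one of two NodeProbe calls: token-bounded repairs go through forceRepairRangeAsync, everything else through forceRepairAsync, with -local/-dc narrowing the source datacenters. A hedged sketch of the -par plus -local combination; the endpoint and keyspace/table names are placeholders:

    import java.util.Arrays;
    import java.util.Collection;
    import org.apache.cassandra.tools.NodeProbe;

    public class LocalDcParallelRepair
    {
        public static void main(String[] args) throws Exception
        {
            NodeProbe probe = new NodeProbe("127.0.0.1", 7199); // placeholder JMX endpoint
            Collection<String> dataCenters = Arrays.asList(probe.getDataCenter()); // -local
            boolean isSequential = false;  // -par: the command passes !parallel
            boolean primaryRange = false;  // no -pr
            probe.forceRepairAsync(System.out, "myks", isSequential, dataCenters, primaryRange,
                                   new String[]{ "mycf" });
        }
    }
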
+ @Command(name = "setcachecapacity", description = "Set global key, row, and counter cache capacities (in MB units)")
+ public static class SetCacheCapacity extends NodeToolCmd
+ {
+ @Arguments(title = "<key-cache-capacity> <row-cache-capacity> <counter-cache-capacity>",
+ usage = "<key-cache-capacity> <row-cache-capacity> <counter-cache-capacity>",
+ description = "Key cache, row cache, and counter cache (in MB)",
+ required = true)
+ private List<Integer> args = new ArrayList<>();
+
+ @Override
+ public void execute(NodeProbe probe)
+ {
+ checkArgument(args.size() == 3, "setcachecapacity requires key-cache-capacity, row-cache-capacity, and counter-cache-capacity args.");
+ probe.setCacheCapacities(args.get(0), args.get(1), args.get(2));
+ }
+ }
+
+ @Command(name = "setcompactionthreshold", description = "Set min and max compaction thresholds for a given column family")
+ public static class SetCompactionThreshold extends NodeToolCmd
+ {
+ @Arguments(title = "<keyspace> <cfname> <minthreshold> <maxthreshold>", usage = "<keyspace> <cfname> <minthreshold> <maxthreshold>", description = "The keyspace, the column family, min and max threshold", required = true)
+ private List<String> args = new ArrayList<>();
+
+ @Override
+ public void execute(NodeProbe probe)
+ {
+ checkArgument(args.size() == 4, "setcompactionthreshold requires ks, cf, min, and max threshold args.");
+
+ int minthreshold = parseInt(args.get(2));
+ int maxthreshold = parseInt(args.get(3));
+ checkArgument(minthreshold >= 0 && maxthreshold >= 0, "Thresholds must be positive integers");
+ checkArgument(minthreshold <= maxthreshold, "Min threshold cannot be greater than max.");
+ checkArgument(minthreshold >= 2 || maxthreshold == 0, "Min threshold must be at least 2");
+
+ probe.setCompactionThreshold(args.get(0), args.get(1), minthreshold, maxthreshold);
+ }
+ }
+
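setcompactionthreshold enforces three checks before calling through to JMX: both values non-negative, min not above max, and min at least 2 unless max is 0 (which disables the thresholds). A standalone restatement of those checks with a few example inputs:

    public class ThresholdChecks
    {
        static void validate(int min, int max)
        {
            if (min < 0 || max < 0)  throw new IllegalArgumentException("Thresholds must be positive integers");
            if (min > max)           throw new IllegalArgumentException("Min threshold cannot be greater than max.");
            if (min < 2 && max != 0) throw new IllegalArgumentException("Min threshold must be at least 2");
        }

        public static void main(String[] args)
        {
            validate(4, 32); // ok: the usual defaults
            validate(0, 0);  // ok: disables the thresholds
            validate(1, 32); // throws: min must be at least 2
        }
    }
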
+ @Command(name = "setcompactionthroughput", description = "Set the MB/s throughput cap for compaction in the system, or 0 to disable throttling")
+ public static class SetCompactionThroughput extends NodeToolCmd
+ {
+ @Arguments(title = "compaction_throughput", usage = "<value_in_mb>", description = "Value in MB, 0 to disable throttling", required = true)
+ private Integer compactionThroughput = null;
+
+ @Override
+ public void execute(NodeProbe probe)
+ {
+ probe.setCompactionThroughput(compactionThroughput);
+ }
+ }
+
+ @Command(name = "setstreamthroughput", description = "Set the MB/s throughput cap for streaming in the system, or 0 to disable throttling")
+ public static class SetStreamThroughput extends NodeToolCmd
+ {
+ @Arguments(title = "stream_throughput", usage = "<value_in_mb>", description = "Value in MB, 0 to disable throttling", required = true)
+ private Integer streamThroughput = null;
+
+ @Override
+ public void execute(NodeProbe probe)
+ {
+ probe.setStreamThroughput(streamThroughput);
+ }
+ }
+
+ @Command(name = "settraceprobability", description = "Sets the probability for tracing any given request to value. 0 disables, 1 enables for all requests, 0 is the default")
+ public static class SetTraceProbability extends NodeToolCmd
+ {
+ @Arguments(title = "trace_probability", usage = "<value>", description = "Trace probability between 0 and 1 (ex: 0.2)", required = true)
+ private Double traceProbability = null;
+
+ @Override
+ public void execute(NodeProbe probe)
+ {
+ checkArgument(traceProbability >= 0 && traceProbability <= 1, "Trace probability must be between 0 and 1");
+ probe.setTraceProbability(traceProbability);
+ }
+ }
+
+ @Command(name = "snapshot", description = "Take a snapshot of specified keyspaces or a snapshot of the specified column family")
+ public static class Snapshot extends NodeToolCmd
+ {
+ @Arguments(usage = "[<keyspaces...>]", description = "List of keyspaces. By default, all keyspaces")
+ private List<String> keyspaces = new ArrayList<>();
+
+ @Option(title = "cfname", name = {"-cf", "--column-family"}, description = "The column family name (you must specify one and only one keyspace for using this option)")
+ private String columnFamily = null;
+
+ @Option(title = "tag", name = {"-t", "--tag"}, description = "The name of the snapshot")
+ private String snapshotName = Long.toString(System.currentTimeMillis());
+
+ @Override
+ public void execute(NodeProbe probe)
+ {
+ try
+ {
+ StringBuilder sb = new StringBuilder();
+
+ sb.append("Requested creating snapshot(s) for ");
+
+ if (keyspaces.isEmpty())
+ sb.append("[all keyspaces]");
+ else
+ sb.append("[").append(join(keyspaces, ", ")).append("]");
+
+ if (!snapshotName.isEmpty())
+ sb.append(" with snapshot name [").append(snapshotName).append("]");
+
+ System.out.println(sb.toString());
+
+ probe.takeSnapshot(snapshotName, columnFamily, toArray(keyspaces, String.class));
+ System.out.println("Snapshot directory: " + snapshotName);
+ } catch (IOException e)
+ {
+ throw new RuntimeException("Error during taking a snapshot", e);
+ }
+ }
+ }
+
+ @Command(name = "listsnapshots", description = "Lists all the snapshots along with the size on disk and true size.")
+ public static class ListSnapshots extends NodeToolCmd
+ {
+ @Override
+ public void execute(NodeProbe probe)
+ {
+ try
+ {
+ System.out.println("Snapshot Details: ");
+
+ final Map<String,TabularData> snapshotDetails = probe.getSnapshotDetails();
+ if (snapshotDetails.isEmpty())
+ {
+ System.out.printf("There are no snapshots");
+ return;
+ }
+
+ final long trueSnapshotsSize = probe.trueSnapshotsSize();
+ final String format = "%-20s%-29s%-29s%-19s%-19s%n";
+ // display column names only once
+ final List<String> indexNames = snapshotDetails.entrySet().iterator().next().getValue().getTabularType().getIndexNames();
+ System.out.printf(format, (Object[]) indexNames.toArray(new String[indexNames.size()]));
+
+ for (final Map.Entry<String, TabularData> snapshotDetail : snapshotDetails.entrySet())
+ {
+ Set<?> values = snapshotDetail.getValue().keySet();
+ for (Object eachValue : values)
+ {
+ final List<?> value = (List<?>) eachValue;
+ System.out.printf(format, value.toArray(new Object[value.size()]));
+ }
+ }
+
+ System.out.println("\nTotal TrueDiskSpaceUsed: " + FileUtils.stringifyFileSize(trueSnapshotsSize) + "\n");
+ }
+ catch (Exception e)
+ {
+ throw new RuntimeException("Error during list snapshot", e);
+ }
+ }
+ }
+
+ @Command(name = "status", description = "Print cluster information (state, load, IDs, ...)")
+ public static class Status extends NodeToolCmd
+ {
+ @Arguments(usage = "[<keyspace>]", description = "The keyspace name")
+ private String keyspace = null;
+
+ @Option(title = "resolve_ip", name = {"-r", "--resolve-ip"}, description = "Show node domain names instead of IPs")
+ private boolean resolveIp = false;
+
+ private boolean hasEffectiveOwns = false;
+ private boolean isTokenPerNode = true;
+ private int maxAddressLength = 0;
+ private String format = null;
+ private Collection<String> joiningNodes, leavingNodes, movingNodes, liveNodes, unreachableNodes;
+ private Map<String, String> loadMap, hostIDMap, tokensToEndpoints;
+ private EndpointSnitchInfoMBean epSnitchInfo;
+
+ @Override
+ public void execute(NodeProbe probe)
+ {
+ joiningNodes = probe.getJoiningNodes();
+ leavingNodes = probe.getLeavingNodes();
+ movingNodes = probe.getMovingNodes();
+ loadMap = probe.getLoadMap();
+ tokensToEndpoints = probe.getTokenToEndpointMap();
+ liveNodes = probe.getLiveNodes();
+ unreachableNodes = probe.getUnreachableNodes();
+ hostIDMap = probe.getHostIdMap();
+ epSnitchInfo = probe.getEndpointSnitchInfoProxy();
+
+ SetHostStat ownerships;
+ try
+ {
+ ownerships = new SetHostStat(probe.effectiveOwnership(keyspace));
+ hasEffectiveOwns = true;
+ } catch (IllegalStateException e)
+ {
+ ownerships = new SetHostStat(probe.getOwnership());
+ }
+
+            // More tokens than nodes (aka vnodes)?
+ if (new HashSet<>(tokensToEndpoints.values()).size() < tokensToEndpoints.keySet().size())
+ isTokenPerNode = false;
+
+ Map<String, SetHostStat> dcs = getOwnershipByDc(probe, ownerships);
+
+ findMaxAddressLength(dcs);
+
+ // Datacenters
+ for (Map.Entry<String, SetHostStat> dc : dcs.entrySet())
+ {
+ String dcHeader = String.format("Datacenter: %s%n", dc.getKey());
+ System.out.printf(dcHeader);
+ for (int i = 0; i < (dcHeader.length() - 1); i++) System.out.print('=');
+ System.out.println();
+
+ // Legend
+ System.out.println("Status=Up/Down");
+ System.out.println("|/ State=Normal/Leaving/Joining/Moving");
+
+ printNodesHeader(hasEffectiveOwns, isTokenPerNode);
+
+ // Nodes
+ for (HostStat entry : dc.getValue())
+ printNode(probe, entry, hasEffectiveOwns, isTokenPerNode);
+ }
+
+ }
+
+ private void findMaxAddressLength(Map<String, SetHostStat> dcs)
+ {
+ maxAddressLength = 0;
+ for (Map.Entry<String, SetHostStat> dc : dcs.entrySet())
+ {
+ for (HostStat stat : dc.getValue())
+ {
+ maxAddressLength = Math.max(maxAddressLength, stat.ipOrDns().length());
+ }
+ }
+ }
+
+ private void printNodesHeader(boolean hasEffectiveOwns, boolean isTokenPerNode)
+ {
+ String fmt = getFormat(hasEffectiveOwns, isTokenPerNode);
+ String owns = hasEffectiveOwns ? "Owns (effective)" : "Owns";
+
+ if (isTokenPerNode)
+ System.out.printf(fmt, "-", "-", "Address", "Load", owns, "Host ID", "Token", "Rack");
+ else
+ System.out.printf(fmt, "-", "-", "Address", "Load", "Tokens", owns, "Host ID", "Rack");
+ }
+
+ private void printNode(NodeProbe probe, HostStat hostStat, boolean hasEffectiveOwns, boolean isTokenPerNode)
+ {
+ String status, state, load, strOwns, hostID, rack, fmt;
+ fmt = getFormat(hasEffectiveOwns, isTokenPerNode);
+ String endpoint = hostStat.ip;
+ if (liveNodes.contains(endpoint)) status = "U";
+ else if (unreachableNodes.contains(endpoint)) status = "D";
+ else status = "?";
+ if (joiningNodes.contains(endpoint)) state = "J";
+ else if (leavingNodes.contains(endpoint)) state = "L";
+ else if (movingNodes.contains(endpoint)) state = "M";
+ else state = "N";
+
+ load = loadMap.containsKey(endpoint) ? loadMap.get(endpoint) : "?";
+ strOwns = new DecimalFormat("##0.0%").format(hostStat.owns);
+ hostID = hostIDMap.get(endpoint);
+
+ try
+ {
+ rack = epSnitchInfo.getRack(endpoint);
+ } catch (UnknownHostException e)
+ {
+ throw new RuntimeException(e);
+ }
+
+ if (isTokenPerNode)
+ {
+ System.out.printf(fmt, status, state, hostStat.ipOrDns(), load, strOwns, hostID, probe.getTokens(endpoint).get(0), rack);
+ } else
+ {
+ int tokens = probe.getTokens(endpoint).size();
+ System.out.printf(fmt, status, state, hostStat.ipOrDns(), load, tokens, strOwns, hostID, rack);
+ }
+ }
+
+ private String getFormat(
+ boolean hasEffectiveOwns,
+ boolean isTokenPerNode)
+ {
+ if (format == null)
+ {
+ StringBuilder buf = new StringBuilder();
+ String addressPlaceholder = String.format("%%-%ds ", maxAddressLength);
+ buf.append("%s%s "); // status
+ buf.append(addressPlaceholder); // address
+ buf.append("%-9s "); // load
+ if (!isTokenPerNode)
+ buf.append("%-6s "); // "Tokens"
+ if (hasEffectiveOwns)
+ buf.append("%-16s "); // "Owns (effective)"
+ else
+                    buf.append("%-6s ");  // "Owns"
+ buf.append("%-36s "); // Host ID
+ if (isTokenPerNode)
+ buf.append("%-39s "); // token
+ buf.append("%s%n"); // "Rack"
+
+ format = buf.toString();
+ }
+
+ return format;
+ }
+
+ private Map<String, SetHostStat> getOwnershipByDc(NodeProbe probe, SetHostStat ownerships)
+ {
+ Map<String, SetHostStat> ownershipByDc = Maps.newLinkedHashMap();
+ EndpointSnitchInfoMBean epSnitchInfo = probe.getEndpointSnitchInfoProxy();
+
+ try
+ {
+ for (HostStat ownership : ownerships)
+ {
+ String dc = epSnitchInfo.getDatacenter(ownership.ip);
+ if (!ownershipByDc.containsKey(dc))
+ ownershipByDc.put(dc, new SetHostStat());
+ ownershipByDc.get(dc).add(ownership);
+ }
+ } catch (UnknownHostException e)
+ {
+ throw new RuntimeException(e);
+ }
+
+ return ownershipByDc;
+ }
+
+ class SetHostStat implements Iterable<HostStat>
+ {
+ final List<HostStat> hostStats = new ArrayList<>();
+
+ public SetHostStat()
+ {
+ }
+
+ public SetHostStat(Map<InetAddress, Float> ownerships)
+ {
+ for (Map.Entry<InetAddress, Float> entry : ownerships.entrySet())
+ {
+ hostStats.add(new HostStat(entry));
+ }
+ }
+
+ @Override
+ public Iterator<HostStat> iterator()
+ {
+ return hostStats.iterator();
+ }
+
+ public void add(HostStat entry)
+ {
+ hostStats.add(entry);
+ }
+ }
+
+ class HostStat
+ {
+ public final String ip;
+ public final String dns;
+ public final Float owns;
+
+ public HostStat(Map.Entry<InetAddress, Float> ownership)
+ {
+ this.ip = ownership.getKey().getHostAddress();
+ this.dns = ownership.getKey().getHostName();
+ this.owns = ownership.getValue();
+ }
+
+ public String ipOrDns()
+ {
+ if (resolveIp)
+ {
+ return dns;
+ }
+ return ip;
+ }
+ }
+ }
+
+ @Command(name = "statusbinary", description = "Status of native transport (binary protocol)")
+ public static class StatusBinary extends NodeToolCmd
+ {
+ @Override
+ public void execute(NodeProbe probe)
<TRUNCATED>
http://git-wip-us.apache.org/repos/asf/cassandra/blob/63f110b5/src/java/org/apache/cassandra/tools/StandaloneScrubber.java
----------------------------------------------------------------------
[2/4] Merge branch 'cassandra-2.0' into trunk
Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/cassandra/blob/63f110b5/test/unit/org/apache/cassandra/db/ScrubTest.java
----------------------------------------------------------------------
diff --cc test/unit/org/apache/cassandra/db/ScrubTest.java
index 38c8b62,08dd435..d8ab9ff
--- a/test/unit/org/apache/cassandra/db/ScrubTest.java
+++ b/test/unit/org/apache/cassandra/db/ScrubTest.java
@@@ -39,9 -41,9 +41,11 @@@ import org.apache.cassandra.db.compacti
import org.apache.cassandra.exceptions.ConfigurationException;
import org.apache.cassandra.db.columniterator.IdentityQueryFilter;
import org.apache.cassandra.db.compaction.CompactionManager;
++import org.apache.cassandra.exceptions.WriteTimeoutException;
import org.apache.cassandra.io.sstable.*;
import org.apache.cassandra.utils.ByteBufferUtil;
++import static org.apache.cassandra.Util.cellname;
import static org.apache.cassandra.Util.column;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
@@@ -76,6 -79,53 +81,53 @@@ public class ScrubTest extends SchemaLo
}
@Test
- public void testScrubCorruptedCounterRow() throws IOException, InterruptedException, ExecutionException
++ public void testScrubCorruptedCounterRow() throws IOException, InterruptedException, ExecutionException, WriteTimeoutException
+ {
+ CompactionManager.instance.disableAutoCompaction();
+ Keyspace keyspace = Keyspace.open(KEYSPACE);
+ ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(COUNTER_CF);
+ cfs.clearUnsafe();
+
+ fillCounterCF(cfs, 2);
+
+ List<Row> rows = cfs.getRangeSlice(Util.range("", ""), null, new IdentityQueryFilter(), 1000);
+ assertEquals(2, rows.size());
+
+ SSTableReader sstable = cfs.getSSTables().iterator().next();
+
+ // overwrite one row with garbage
+ long row0Start = sstable.getPosition(RowPosition.forKey(ByteBufferUtil.bytes("0"), sstable.partitioner), SSTableReader.Operator.EQ).position;
+ long row1Start = sstable.getPosition(RowPosition.forKey(ByteBufferUtil.bytes("1"), sstable.partitioner), SSTableReader.Operator.EQ).position;
+ long startPosition = row0Start < row1Start ? row0Start : row1Start;
+ long endPosition = row0Start < row1Start ? row1Start : row0Start;
+
+ RandomAccessFile file = new RandomAccessFile(sstable.getFilename(), "rw");
+ file.seek(startPosition);
+ file.writeBytes(StringUtils.repeat('z', (int) (endPosition - startPosition)));
+ file.close();
+
+ // with skipCorrupted == false, the scrub is expected to fail
+ Scrubber scrubber = new Scrubber(cfs, sstable, false);
+ try
+ {
+ scrubber.scrub();
+ fail("Expected a CorruptSSTableException to be thrown");
+ }
+ catch (IOError err) {}
+
+ // with skipCorrupted == true, the corrupt row will be skipped
+ scrubber = new Scrubber(cfs, sstable, true);
+ scrubber.scrub();
+ scrubber.close();
+ cfs.replaceCompactedSSTables(Collections.singletonList(sstable), Collections.singletonList(scrubber.getNewSSTable()), OperationType.SCRUB);
+ assertEquals(1, cfs.getSSTables().size());
+
+ // verify that we can read all of the rows, and there is now one less row
+ rows = cfs.getRangeSlice(Util.range("", ""), null, new IdentityQueryFilter(), 1000);
+ assertEquals(1, rows.size());
+ }
+
+ @Test
public void testScrubDeletedRow() throws IOException, ExecutionException, InterruptedException, ConfigurationException
{
CompactionManager.instance.disableAutoCompaction();
@@@ -207,4 -256,20 +258,20 @@@
cfs.forceBlockingFlush();
}
+
- protected void fillCounterCF(ColumnFamilyStore cfs, int rowsPerSSTable) throws ExecutionException, InterruptedException, IOException
++ protected void fillCounterCF(ColumnFamilyStore cfs, int rowsPerSSTable) throws ExecutionException, InterruptedException, IOException, WriteTimeoutException
+ {
+ for (int i = 0; i < rowsPerSSTable; i++)
+ {
+ String key = String.valueOf(i);
+ ColumnFamily cf = TreeMapBackedSortedColumns.factory.create(KEYSPACE, COUNTER_CF);
- RowMutation rm = new RowMutation(KEYSPACE, ByteBufferUtil.bytes(key), cf);
- rm.addCounter(COUNTER_CF, ByteBufferUtil.bytes("Column1"), 100);
++ Mutation rm = new Mutation(KEYSPACE, ByteBufferUtil.bytes(key), cf);
++ rm.addCounter(COUNTER_CF, cellname("Column1"), 100);
+ CounterMutation cm = new CounterMutation(rm, ConsistencyLevel.ONE);
+ cm.apply();
+ }
+
+ cfs.forceBlockingFlush();
+ }
+
-}
+}