You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@cassandra.apache.org by db...@apache.org on 2016/09/14 03:36:33 UTC
cassandra git commit: remove extraneous String.format calls inside of
logger statements
Repository: cassandra
Updated Branches:
refs/heads/trunk 57b6bbc72 -> 2dc2bb808
remove extraneous String.format calls inside of logger statements
Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/2dc2bb80
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/2dc2bb80
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/2dc2bb80
Branch: refs/heads/trunk
Commit: 2dc2bb808634be9aaf5ee1d65eb968f8c5514305
Parents: 57b6bbc
Author: Dave Brosius <db...@mebigfatguy.com>
Authored: Tue Sep 13 23:36:15 2016 -0400
Committer: Dave Brosius <db...@mebigfatguy.com>
Committed: Tue Sep 13 23:36:15 2016 -0400
----------------------------------------------------------------------
.../apache/cassandra/cache/AutoSavingCache.java | 2 +-
src/java/org/apache/cassandra/db/Memtable.java | 10 +++----
.../org/apache/cassandra/db/ReadCommand.java | 11 ++++----
.../db/commitlog/CommitLogReplayer.java | 2 +-
.../db/compaction/CompactionManager.java | 6 ++---
.../cassandra/db/compaction/CompactionTask.java | 2 +-
.../db/compaction/LeveledManifest.java | 6 ++---
.../apache/cassandra/db/lifecycle/LogFile.java | 7 +++--
.../org/apache/cassandra/dht/RangeStreamer.java | 4 +--
.../io/sstable/format/SSTableReader.java | 10 +++----
.../org/apache/cassandra/io/util/FileUtils.java | 2 +-
.../locator/ReconnectableSnitchHelper.java | 4 +--
.../org/apache/cassandra/repair/RepairJob.java | 4 +--
.../apache/cassandra/repair/RepairSession.java | 4 +--
.../cassandra/repair/StreamingRepairTask.java | 4 +--
.../org/apache/cassandra/repair/Validator.java | 4 +--
.../cassandra/schema/CompressionParams.java | 8 +++---
.../cassandra/service/MigrationManager.java | 28 ++++++++++----------
.../apache/cassandra/service/ReadCallback.java | 6 ++---
.../cassandra/service/StorageService.java | 24 ++++++++---------
.../cassandra/thrift/TServerCustomFactory.java | 6 ++---
.../apache/cassandra/thrift/ThriftServer.java | 2 +-
.../org/apache/cassandra/utils/CLibrary.java | 8 +++---
.../apache/cassandra/utils/FilterFactory.java | 2 +-
24 files changed, 82 insertions(+), 84 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/cassandra/blob/2dc2bb80/src/java/org/apache/cassandra/cache/AutoSavingCache.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cache/AutoSavingCache.java b/src/java/org/apache/cassandra/cache/AutoSavingCache.java
index 4d934f5..aa13ca3 100644
--- a/src/java/org/apache/cassandra/cache/AutoSavingCache.java
+++ b/src/java/org/apache/cassandra/cache/AutoSavingCache.java
@@ -194,7 +194,7 @@ public class AutoSavingCache<K extends CacheKey, V> extends InstrumentingCache<K
DataInputStreamPlus in = null;
try
{
- logger.info(String.format("reading saved cache %s", dataPath));
+ logger.info("reading saved cache {}", dataPath);
in = new DataInputStreamPlus(new LengthAvailableInputStream(new BufferedInputStream(streamFactory.getInputStream(dataPath, crcPath)), dataPath.length()));
//Check the schema has not changed since CFs are looked up by name which is ambiguous
http://git-wip-us.apache.org/repos/asf/cassandra/blob/2dc2bb80/src/java/org/apache/cassandra/db/Memtable.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/Memtable.java b/src/java/org/apache/cassandra/db/Memtable.java
index 6bf7db4..a063bf4 100644
--- a/src/java/org/apache/cassandra/db/Memtable.java
+++ b/src/java/org/apache/cassandra/db/Memtable.java
@@ -115,7 +115,7 @@ public class Memtable implements Comparable<Memtable>
public static final class LastCommitLogPosition extends CommitLogPosition
{
- public LastCommitLogPosition(CommitLogPosition copy)
+ public LastCommitLogPosition(CommitLogPosition copy)
{
super(copy.segmentId, copy.position);
}
@@ -491,15 +491,15 @@ public class Memtable implements Comparable<Memtable>
}
long bytesFlushed = writer.getFilePointer();
- logger.debug(String.format("Completed flushing %s (%s) for commitlog position %s",
+ logger.debug("Completed flushing {} ({}) for commitlog position {}",
writer.getFilename(),
FBUtilities.prettyPrintMemory(bytesFlushed),
- commitLogUpperBound));
+ commitLogUpperBound);
// Update the metrics
cfs.metric.bytesFlushed.inc(bytesFlushed);
if (heavilyContendedRowCount > 0)
- logger.trace(String.format("High update contention in %d/%d partitions of %s ", heavilyContendedRowCount, toFlush.size(), Memtable.this.toString()));
+ logger.trace("High update contention in {}/{} partitions of {} ", heavilyContendedRowCount, toFlush.size(), Memtable.this);
}
public SSTableMultiWriter createFlushWriter(LifecycleTransaction txn,
@@ -511,7 +511,7 @@ public class Memtable implements Comparable<Memtable>
.commitLogIntervals(new IntervalSet<>(commitLogLowerBound.get(), commitLogUpperBound.get()));
return cfs.createSSTableMultiWriter(Descriptor.fromFilename(filename),
- (long)toFlush.size(),
+ toFlush.size(),
ActiveRepairService.UNREPAIRED_SSTABLE,
sstableMetadataCollector,
new SerializationHeader(true, cfs.metadata, columns, stats), txn);
http://git-wip-us.apache.org/repos/asf/cassandra/blob/2dc2bb80/src/java/org/apache/cassandra/db/ReadCommand.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/ReadCommand.java b/src/java/org/apache/cassandra/db/ReadCommand.java
index 226b920..c6722e5 100644
--- a/src/java/org/apache/cassandra/db/ReadCommand.java
+++ b/src/java/org/apache/cassandra/db/ReadCommand.java
@@ -712,12 +712,11 @@ public abstract class ReadCommand extends MonitorableImpl implements ReadQuery
}
catch (UnknownIndexException e)
{
- String message = String.format("Couldn't find a defined index on %s.%s with the id %s. " +
- "If an index was just created, this is likely due to the schema not " +
- "being fully propagated. Local read will proceed without using the " +
- "index. Please wait for schema agreement after index creation.",
- cfm.ksName, cfm.cfName, e.indexId.toString());
- logger.info(message);
+ logger.info("Couldn't find a defined index on {}.{} with the id {}. " +
+ "If an index was just created, this is likely due to the schema not " +
+ "being fully propagated. Local read will proceed without using the " +
+ "index. Please wait for schema agreement after index creation.",
+ cfm.ksName, cfm.cfName, e.indexId);
return Optional.empty();
}
}
http://git-wip-us.apache.org/repos/asf/cassandra/blob/2dc2bb80/src/java/org/apache/cassandra/db/commitlog/CommitLogReplayer.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/commitlog/CommitLogReplayer.java b/src/java/org/apache/cassandra/db/commitlog/CommitLogReplayer.java
index 091ef15..2c323b1 100644
--- a/src/java/org/apache/cassandra/db/commitlog/CommitLogReplayer.java
+++ b/src/java/org/apache/cassandra/db/commitlog/CommitLogReplayer.java
@@ -146,7 +146,7 @@ public class CommitLogReplayer implements CommitLogReadHandler
public int blockForWrites()
{
for (Map.Entry<UUID, AtomicInteger> entry : commitLogReader.getInvalidMutations())
- logger.warn(String.format("Skipped %d mutations from unknown (probably removed) CF with id %s", entry.getValue().intValue(), entry.getKey()));
+ logger.warn("Skipped {} mutations from unknown (probably removed) CF with id {}", entry.getValue(), entry.getKey());
// wait for all the writes to finish on the mutation stage
FBUtilities.waitOnFutures(futures);
http://git-wip-us.apache.org/repos/asf/cassandra/blob/2dc2bb80/src/java/org/apache/cassandra/db/compaction/CompactionManager.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/compaction/CompactionManager.java b/src/java/org/apache/cassandra/db/compaction/CompactionManager.java
index 39e6efc..27058d2 100644
--- a/src/java/org/apache/cassandra/db/compaction/CompactionManager.java
+++ b/src/java/org/apache/cassandra/db/compaction/CompactionManager.java
@@ -570,7 +570,7 @@ public class CompactionManager implements CompactionManagerMBean
final Refs<SSTableReader> sstables,
final long repairedAt)
{
- Runnable runnable = new WrappedRunnable()
+ Runnable runnable = new WrappedRunnable()
{
@Override
@SuppressWarnings("resource")
@@ -856,7 +856,7 @@ public class CompactionManager implements CompactionManagerMBean
}
catch (IOException e)
{
- logger.error(String.format("forceUserDefinedCleanup failed: %s", e.getLocalizedMessage()));
+ logger.error("forceUserDefinedCleanup failed: {}", e.getLocalizedMessage());
}
}
}
@@ -1744,7 +1744,7 @@ public class CompactionManager implements CompactionManagerMBean
public void afterExecute(Runnable r, Throwable t)
{
DebuggableThreadPoolExecutor.maybeResetTraceSessionWrapper(r);
-
+
if (t == null)
t = DebuggableThreadPoolExecutor.extractThrowable(r);
http://git-wip-us.apache.org/repos/asf/cassandra/blob/2dc2bb80/src/java/org/apache/cassandra/db/compaction/CompactionTask.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/compaction/CompactionTask.java b/src/java/org/apache/cassandra/db/compaction/CompactionTask.java
index 036d522..3ddd544 100644
--- a/src/java/org/apache/cassandra/db/compaction/CompactionTask.java
+++ b/src/java/org/apache/cassandra/db/compaction/CompactionTask.java
@@ -261,7 +261,7 @@ public class CompactionTask extends AbstractCompactionTask
totalSourceRows,
totalKeysWritten,
mergeSummary));
- logger.trace(String.format("CF Total Bytes Compacted: %s", FBUtilities.prettyPrintMemory(CompactionTask.addToTotalBytesCompacted(endsize))));
+ logger.trace("CF Total Bytes Compacted: {}", FBUtilities.prettyPrintMemory(CompactionTask.addToTotalBytesCompacted(endsize)));
logger.trace("Actual #keys: {}, Estimated #keys:{}, Err%: {}", totalKeysWritten, estimatedKeys, ((double)(totalKeysWritten - estimatedKeys)/totalKeysWritten));
cfs.getCompactionStrategyManager().compactionLogger.compaction(startTime, transaction.originals(), System.currentTimeMillis(), newSStables);
http://git-wip-us.apache.org/repos/asf/cassandra/blob/2dc2bb80/src/java/org/apache/cassandra/db/compaction/LeveledManifest.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/compaction/LeveledManifest.java b/src/java/org/apache/cassandra/db/compaction/LeveledManifest.java
index 094bb61..5a8f153 100644
--- a/src/java/org/apache/cassandra/db/compaction/LeveledManifest.java
+++ b/src/java/org/apache/cassandra/db/compaction/LeveledManifest.java
@@ -209,9 +209,9 @@ public class LeveledManifest
{
if (previous != null && current.first.compareTo(previous.last) <= 0)
{
- logger.warn(String.format("At level %d, %s [%s, %s] overlaps %s [%s, %s]. This could be caused by a bug in Cassandra 1.1.0 .. 1.1.3 or due to the fact that you have dropped sstables from another node into the data directory. " +
- "Sending back to L0. If you didn't drop in sstables, and have not yet run scrub, you should do so since you may also have rows out-of-order within an sstable",
- level, previous, previous.first, previous.last, current, current.first, current.last));
+ logger.warn("At level {}, {} [{}, {}] overlaps {} [{}, {}]. This could be caused by a bug in Cassandra 1.1.0 .. 1.1.3 or due to the fact that you have dropped sstables from another node into the data directory. " +
+ "Sending back to L0. If you didn't drop in sstables, and have not yet run scrub, you should do so since you may also have rows out-of-order within an sstable",
+ level, previous, previous.first, previous.last, current, current.first, current.last);
outOfOrderSSTables.add(current);
}
else
http://git-wip-us.apache.org/repos/asf/cassandra/blob/2dc2bb80/src/java/org/apache/cassandra/db/lifecycle/LogFile.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/lifecycle/LogFile.java b/src/java/org/apache/cassandra/db/lifecycle/LogFile.java
index f23613f..9691ee9 100644
--- a/src/java/org/apache/cassandra/db/lifecycle/LogFile.java
+++ b/src/java/org/apache/cassandra/db/lifecycle/LogFile.java
@@ -187,10 +187,9 @@ final class LogFile implements AutoCloseable
// if only the last record is corrupt and all other records have matching files on disk, @see verifyRecord,
// then we simply exited whilst serializing the last record and we carry on
- logger.warn(String.format("Last record of transaction %s is corrupt or incomplete [%s], " +
- "but all previous records match state on disk; continuing",
- id,
- failedOn.error()));
+ logger.warn("Last record of transaction {} is corrupt or incomplete [{}], " +
+ "but all previous records match state on disk; continuing",
+ id, failedOn.error());
return true;
}
http://git-wip-us.apache.org/repos/asf/cassandra/blob/2dc2bb80/src/java/org/apache/cassandra/dht/RangeStreamer.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/dht/RangeStreamer.java b/src/java/org/apache/cassandra/dht/RangeStreamer.java
index 282ff04..7805120 100644
--- a/src/java/org/apache/cassandra/dht/RangeStreamer.java
+++ b/src/java/org/apache/cassandra/dht/RangeStreamer.java
@@ -179,7 +179,7 @@ public class RangeStreamer
if (logger.isTraceEnabled())
{
for (Map.Entry<Range<Token>, InetAddress> entry : rangesForKeyspace.entries())
- logger.trace(String.format("%s: range %s exists on %s", description, entry.getKey(), entry.getValue()));
+ logger.trace("{}: range {} exists on {}", description, entry.getKey(), entry.getValue());
}
for (Map.Entry<InetAddress, Collection<Range<Token>>> entry : getRangeFetchMap(rangesForKeyspace, sourceFilters, keyspaceName, useStrictConsistency).asMap().entrySet())
@@ -187,7 +187,7 @@ public class RangeStreamer
if (logger.isTraceEnabled())
{
for (Range<Token> r : entry.getValue())
- logger.trace(String.format("%s: range %s from source %s for keyspace %s", description, r, entry.getKey(), keyspaceName));
+ logger.trace("{}: range {} from source {} for keyspace {}", description, r, entry.getKey(), keyspaceName);
}
toFetch.put(keyspaceName, entry);
}
http://git-wip-us.apache.org/repos/asf/cassandra/blob/2dc2bb80/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java b/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java
index 5bfae62..b2e3eba 100644
--- a/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java
+++ b/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java
@@ -435,8 +435,8 @@ public abstract class SSTableReader extends SSTable implements SelfRefCounted<SS
String partitionerName = metadata.partitioner.getClass().getCanonicalName();
if (validationMetadata != null && !partitionerName.equals(validationMetadata.partitioner))
{
- logger.error(String.format("Cannot open %s; partitioner %s does not match system partitioner %s. Note that the default partitioner starting with Cassandra 1.2 is Murmur3Partitioner, so you will need to edit that to match your old partitioner if upgrading.",
- descriptor, validationMetadata.partitioner, partitionerName));
+ logger.error("Cannot open {}; partitioner {} does not match system partitioner {}. Note that the default partitioner starting with Cassandra 1.2 is Murmur3Partitioner, so you will need to edit that to match your old partitioner if upgrading.",
+ descriptor, validationMetadata.partitioner, partitionerName);
System.exit(1);
}
@@ -496,8 +496,8 @@ public abstract class SSTableReader extends SSTable implements SelfRefCounted<SS
String partitionerName = metadata.partitioner.getClass().getCanonicalName();
if (validationMetadata != null && !partitionerName.equals(validationMetadata.partitioner))
{
- logger.error(String.format("Cannot open %s; partitioner %s does not match system partitioner %s. Note that the default partitioner starting with Cassandra 1.2 is Murmur3Partitioner, so you will need to edit that to match your old partitioner if upgrading.",
- descriptor, validationMetadata.partitioner, partitionerName));
+ logger.error("Cannot open {}; partitioner {} does not match system partitioner {}. Note that the default partitioner starting with Cassandra 1.2 is Murmur3Partitioner, so you will need to edit that to match your old partitioner if upgrading.",
+ descriptor, validationMetadata.partitioner, partitionerName);
System.exit(1);
}
@@ -1501,7 +1501,7 @@ public abstract class SSTableReader extends SSTable implements SelfRefCounted<SS
protected RowIndexEntry getCachedPosition(KeyCacheKey unifiedKey, boolean updateStats)
{
- if (keyCache != null && keyCache.getCapacity() > 0 && metadata.params.caching.cacheKeys())
+ if (keyCache != null && keyCache.getCapacity() > 0 && metadata.params.caching.cacheKeys())
{
if (updateStats)
{
http://git-wip-us.apache.org/repos/asf/cassandra/blob/2dc2bb80/src/java/org/apache/cassandra/io/util/FileUtils.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/util/FileUtils.java b/src/java/org/apache/cassandra/io/util/FileUtils.java
index abb39d3..662dd4e 100644
--- a/src/java/org/apache/cassandra/io/util/FileUtils.java
+++ b/src/java/org/apache/cassandra/io/util/FileUtils.java
@@ -176,7 +176,7 @@ public final class FileUtils
{
assert from.exists();
if (logger.isTraceEnabled())
- logger.trace((String.format("Renaming %s to %s", from.getPath(), to.getPath())));
+ logger.trace("Renaming {} to {}", from.getPath(), to.getPath());
// this is not FSWE because usually when we see it it's because we didn't close the file before renaming it,
// and Windows is picky about that.
try
http://git-wip-us.apache.org/repos/asf/cassandra/blob/2dc2bb80/src/java/org/apache/cassandra/locator/ReconnectableSnitchHelper.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/locator/ReconnectableSnitchHelper.java b/src/java/org/apache/cassandra/locator/ReconnectableSnitchHelper.java
index 6b6182f..a6bec0c 100644
--- a/src/java/org/apache/cassandra/locator/ReconnectableSnitchHelper.java
+++ b/src/java/org/apache/cassandra/locator/ReconnectableSnitchHelper.java
@@ -63,10 +63,10 @@ public class ReconnectableSnitchHelper implements IEndpointStateChangeSubscriber
&& !MessagingService.instance().getConnectionPool(publicAddress).endPoint().equals(localAddress))
{
MessagingService.instance().getConnectionPool(publicAddress).reset(localAddress);
- logger.debug(String.format("Intiated reconnect to an Internal IP %s for the %s", localAddress, publicAddress));
+ logger.debug("Initiated reconnect to an Internal IP {} for the {}", localAddress, publicAddress);
}
}
-
+
public void beforeChange(InetAddress endpoint, EndpointState currentState, ApplicationState newStateKey, VersionedValue newValue)
{
// no-op
http://git-wip-us.apache.org/repos/asf/cassandra/blob/2dc2bb80/src/java/org/apache/cassandra/repair/RepairJob.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/repair/RepairJob.java b/src/java/org/apache/cassandra/repair/RepairJob.java
index c768db6..7fc7816 100644
--- a/src/java/org/apache/cassandra/repair/RepairJob.java
+++ b/src/java/org/apache/cassandra/repair/RepairJob.java
@@ -140,7 +140,7 @@ public class RepairJob extends AbstractFuture<RepairResult> implements Runnable
{
public void onSuccess(List<SyncStat> stats)
{
- logger.info(String.format("[repair #%s] %s is fully synced", session.getId(), desc.columnFamily));
+ logger.info("[repair #{}] {} is fully synced", session.getId(), desc.columnFamily);
SystemDistributedKeyspace.successfulRepairJob(session.getId(), desc.keyspace, desc.columnFamily);
set(new RepairResult(desc, stats));
}
@@ -150,7 +150,7 @@ public class RepairJob extends AbstractFuture<RepairResult> implements Runnable
*/
public void onFailure(Throwable t)
{
- logger.warn(String.format("[repair #%s] %s sync failed", session.getId(), desc.columnFamily));
+ logger.warn("[repair #{}] {} sync failed", session.getId(), desc.columnFamily);
SystemDistributedKeyspace.failedRepairJob(session.getId(), desc.keyspace, desc.columnFamily, t);
setException(t);
}
http://git-wip-us.apache.org/repos/asf/cassandra/blob/2dc2bb80/src/java/org/apache/cassandra/repair/RepairSession.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/repair/RepairSession.java b/src/java/org/apache/cassandra/repair/RepairSession.java
index cad506d..528115a 100644
--- a/src/java/org/apache/cassandra/repair/RepairSession.java
+++ b/src/java/org/apache/cassandra/repair/RepairSession.java
@@ -211,7 +211,7 @@ public class RepairSession extends AbstractFuture<RepairSessionResult> implement
return;
}
- logger.debug(String.format("[repair #%s] Repair completed between %s and %s on %s", getId(), nodes.endpoint1, nodes.endpoint2, desc.columnFamily));
+ logger.debug("[repair #{}] Repair completed between {} and {} on {}", getId(), nodes.endpoint1, nodes.endpoint2, desc.columnFamily);
task.syncComplete(success);
}
@@ -238,7 +238,7 @@ public class RepairSession extends AbstractFuture<RepairSessionResult> implement
if (terminated)
return;
- logger.info(String.format("[repair #%s] new session: will sync %s on range %s for %s.%s", getId(), repairedNodes(), ranges, keyspace, Arrays.toString(cfnames)));
+ logger.info("[repair #{}] new session: will sync {} on range {} for {}.{}", getId(), repairedNodes(), ranges, keyspace, Arrays.toString(cfnames));
Tracing.traceRepair("Syncing range {}", ranges);
SystemDistributedKeyspace.startRepairs(getId(), parentRepairSession, keyspace, cfnames, ranges, endpoints);
http://git-wip-us.apache.org/repos/asf/cassandra/blob/2dc2bb80/src/java/org/apache/cassandra/repair/StreamingRepairTask.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/repair/StreamingRepairTask.java b/src/java/org/apache/cassandra/repair/StreamingRepairTask.java
index b6936b6..f5b2b1d 100644
--- a/src/java/org/apache/cassandra/repair/StreamingRepairTask.java
+++ b/src/java/org/apache/cassandra/repair/StreamingRepairTask.java
@@ -55,7 +55,7 @@ public class StreamingRepairTask implements Runnable, StreamEventHandler
{
InetAddress dest = request.dst;
InetAddress preferred = SystemKeyspace.getPreferredIP(dest);
- logger.info(String.format("[streaming task #%s] Performing streaming repair of %d ranges with %s", desc.sessionId, request.ranges.size(), request.dst));
+ logger.info("[streaming task #{}] Performing streaming repair of {} ranges with {}", desc.sessionId, request.ranges.size(), request.dst);
boolean isIncremental = false;
if (desc.parentSessionId != null)
{
@@ -82,7 +82,7 @@ public class StreamingRepairTask implements Runnable, StreamEventHandler
*/
public void onSuccess(StreamState state)
{
- logger.info(String.format("[repair #%s] streaming task succeed, returning response to %s", desc.sessionId, request.initiator));
+ logger.info("[repair #{}] streaming task succeed, returning response to {}", desc.sessionId, request.initiator);
MessagingService.instance().sendOneWay(new SyncComplete(desc, request.src, request.dst, true).createMessage(), request.initiator);
}
http://git-wip-us.apache.org/repos/asf/cassandra/blob/2dc2bb80/src/java/org/apache/cassandra/repair/Validator.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/repair/Validator.java b/src/java/org/apache/cassandra/repair/Validator.java
index e51dc0e..19858d5 100644
--- a/src/java/org/apache/cassandra/repair/Validator.java
+++ b/src/java/org/apache/cassandra/repair/Validator.java
@@ -92,7 +92,7 @@ public class Validator implements Runnable
{
List<DecoratedKey> keys = new ArrayList<>();
Random random = new Random();
-
+
for (Range<Token> range : tree.ranges())
{
for (DecoratedKey sample : cfs.keySamples(range))
@@ -271,7 +271,7 @@ public class Validator implements Runnable
// respond to the request that triggered this validation
if (!initiator.equals(FBUtilities.getBroadcastAddress()))
{
- logger.info(String.format("[repair #%s] Sending completed merkle tree to %s for %s.%s", desc.sessionId, initiator, desc.keyspace, desc.columnFamily));
+ logger.info("[repair #{}] Sending completed merkle tree to {} for {}.{}", desc.sessionId, initiator, desc.keyspace, desc.columnFamily);
Tracing.traceRepair("Sending completed merkle tree to {} for {}.{}", initiator, desc.keyspace, desc.columnFamily);
}
MessagingService.instance().sendOneWay(new ValidationComplete(desc, trees).createMessage(), initiator);
http://git-wip-us.apache.org/repos/asf/cassandra/blob/2dc2bb80/src/java/org/apache/cassandra/schema/CompressionParams.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/schema/CompressionParams.java b/src/java/org/apache/cassandra/schema/CompressionParams.java
index 01428d0..f48a688 100644
--- a/src/java/org/apache/cassandra/schema/CompressionParams.java
+++ b/src/java/org/apache/cassandra/schema/CompressionParams.java
@@ -331,9 +331,9 @@ public final class CompressionParams
if (!hasLoggedChunkLengthWarning)
{
hasLoggedChunkLengthWarning = true;
- logger.warn(format("The %s option has been deprecated. You should use %s instead",
+ logger.warn("The {} option has been deprecated. You should use {} instead",
CHUNK_LENGTH_KB,
- CHUNK_LENGTH_IN_KB));
+ CHUNK_LENGTH_IN_KB);
}
return parseChunkLength(options.remove(CHUNK_LENGTH_KB));
@@ -380,9 +380,9 @@ public final class CompressionParams
if (options.containsKey(SSTABLE_COMPRESSION) && !hasLoggedSsTableCompressionWarning)
{
hasLoggedSsTableCompressionWarning = true;
- logger.warn(format("The %s option has been deprecated. You should use %s instead",
+ logger.warn("The {} option has been deprecated. You should use {} instead",
SSTABLE_COMPRESSION,
- CLASS));
+ CLASS);
}
return options.remove(SSTABLE_COMPRESSION);
http://git-wip-us.apache.org/repos/asf/cassandra/blob/2dc2bb80/src/java/org/apache/cassandra/service/MigrationManager.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/service/MigrationManager.java b/src/java/org/apache/cassandra/service/MigrationManager.java
index ee803df..f9e4aff 100644
--- a/src/java/org/apache/cassandra/service/MigrationManager.java
+++ b/src/java/org/apache/cassandra/service/MigrationManager.java
@@ -300,7 +300,7 @@ public class MigrationManager
if (Schema.instance.getKSMetaData(ksm.name) != null)
throw new AlreadyExistsException(ksm.name);
- logger.info(String.format("Create new Keyspace: %s", ksm));
+ logger.info("Create new Keyspace: {}", ksm);
announce(SchemaKeyspace.makeCreateKeyspaceMutation(ksm, timestamp), announceLocally);
}
@@ -340,7 +340,7 @@ public class MigrationManager
else if (throwOnDuplicate && ksm.getTableOrViewNullable(cfm.cfName) != null)
throw new AlreadyExistsException(cfm.ksName, cfm.cfName);
- logger.info(String.format("Create new table: %s", cfm));
+ logger.info("Create new table: {}", cfm);
announce(SchemaKeyspace.makeCreateTableMutation(ksm, cfm, FBUtilities.timestampMicros()), announceLocally);
}
@@ -354,7 +354,7 @@ public class MigrationManager
else if (ksm.getTableOrViewNullable(view.viewName) != null)
throw new AlreadyExistsException(view.ksName, view.viewName);
- logger.info(String.format("Create new view: %s", view));
+ logger.info("Create new view: {}", view);
announce(SchemaKeyspace.makeCreateViewMutation(ksm, view, FBUtilities.timestampMicros()), announceLocally);
}
@@ -366,14 +366,14 @@ public class MigrationManager
public static void announceNewFunction(UDFunction udf, boolean announceLocally)
{
- logger.info(String.format("Create scalar function '%s'", udf.name()));
+ logger.info("Create scalar function '{}'", udf.name());
KeyspaceMetadata ksm = Schema.instance.getKSMetaData(udf.name().keyspace);
announce(SchemaKeyspace.makeCreateFunctionMutation(ksm, udf, FBUtilities.timestampMicros()), announceLocally);
}
public static void announceNewAggregate(UDAggregate udf, boolean announceLocally)
{
- logger.info(String.format("Create aggregate function '%s'", udf.name()));
+ logger.info("Create aggregate function '{}'", udf.name());
KeyspaceMetadata ksm = Schema.instance.getKSMetaData(udf.name().keyspace);
announce(SchemaKeyspace.makeCreateAggregateMutation(ksm, udf, FBUtilities.timestampMicros()), announceLocally);
}
@@ -391,7 +391,7 @@ public class MigrationManager
if (oldKsm == null)
throw new ConfigurationException(String.format("Cannot update non existing keyspace '%s'.", ksm.name));
- logger.info(String.format("Update Keyspace '%s' From %s To %s", ksm.name, oldKsm, ksm));
+ logger.info("Update Keyspace '{}' From {} To {}", ksm.name, oldKsm, ksm);
announce(SchemaKeyspace.makeCreateKeyspaceMutation(ksm.name, ksm.params, FBUtilities.timestampMicros()), announceLocally);
}
@@ -411,7 +411,7 @@ public class MigrationManager
oldCfm.validateCompatibility(cfm);
- logger.info(String.format("Update table '%s/%s' From %s To %s", cfm.ksName, cfm.cfName, oldCfm, cfm));
+ logger.info("Update table '{}/{}' From {} To {}", cfm.ksName, cfm.cfName, oldCfm, cfm);
announce(SchemaKeyspace.makeUpdateTableMutation(ksm, oldCfm, cfm, FBUtilities.timestampMicros()), announceLocally);
}
@@ -426,13 +426,13 @@ public class MigrationManager
oldView.metadata.validateCompatibility(view.metadata);
- logger.info(String.format("Update view '%s/%s' From %s To %s", view.ksName, view.viewName, oldView, view));
+ logger.info("Update view '{}/{}' From {} To {}", view.ksName, view.viewName, oldView, view);
announce(SchemaKeyspace.makeUpdateViewMutation(ksm, oldView, view, FBUtilities.timestampMicros()), announceLocally);
}
public static void announceTypeUpdate(UserType updatedType, boolean announceLocally)
{
- logger.info(String.format("Update type '%s.%s' to %s", updatedType.keyspace, updatedType.getNameAsString(), updatedType));
+ logger.info("Update type '{}.{}' to {}", updatedType.keyspace, updatedType.getNameAsString(), updatedType);
announceNewType(updatedType, announceLocally);
}
@@ -447,7 +447,7 @@ public class MigrationManager
if (oldKsm == null)
throw new ConfigurationException(String.format("Cannot drop non existing keyspace '%s'.", ksName));
- logger.info(String.format("Drop Keyspace '%s'", oldKsm.name));
+ logger.info("Drop Keyspace '{}'", oldKsm.name);
announce(SchemaKeyspace.makeDropKeyspaceMutation(oldKsm, FBUtilities.timestampMicros()), announceLocally);
}
@@ -463,7 +463,7 @@ public class MigrationManager
throw new ConfigurationException(String.format("Cannot drop non existing table '%s' in keyspace '%s'.", cfName, ksName));
KeyspaceMetadata ksm = Schema.instance.getKSMetaData(ksName);
- logger.info(String.format("Drop table '%s/%s'", oldCfm.ksName, oldCfm.cfName));
+ logger.info("Drop table '{}/{}'", oldCfm.ksName, oldCfm.cfName);
announce(SchemaKeyspace.makeDropTableMutation(ksm, oldCfm, FBUtilities.timestampMicros()), announceLocally);
}
@@ -474,7 +474,7 @@ public class MigrationManager
throw new ConfigurationException(String.format("Cannot drop non existing materialized view '%s' in keyspace '%s'.", viewName, ksName));
KeyspaceMetadata ksm = Schema.instance.getKSMetaData(ksName);
- logger.info(String.format("Drop table '%s/%s'", view.ksName, view.viewName));
+ logger.info("Drop table '{}/{}'", view.ksName, view.viewName);
announce(SchemaKeyspace.makeDropViewMutation(ksm, view, FBUtilities.timestampMicros()), announceLocally);
}
@@ -491,14 +491,14 @@ public class MigrationManager
public static void announceFunctionDrop(UDFunction udf, boolean announceLocally)
{
- logger.info(String.format("Drop scalar function overload '%s' args '%s'", udf.name(), udf.argTypes()));
+ logger.info("Drop scalar function overload '{}' args '{}'", udf.name(), udf.argTypes());
KeyspaceMetadata ksm = Schema.instance.getKSMetaData(udf.name().keyspace);
announce(SchemaKeyspace.makeDropFunctionMutation(ksm, udf, FBUtilities.timestampMicros()), announceLocally);
}
public static void announceAggregateDrop(UDAggregate udf, boolean announceLocally)
{
- logger.info(String.format("Drop aggregate function overload '%s' args '%s'", udf.name(), udf.argTypes()));
+ logger.info("Drop aggregate function overload '{}' args '{}'", udf.name(), udf.argTypes());
KeyspaceMetadata ksm = Schema.instance.getKSMetaData(udf.name().keyspace);
announce(SchemaKeyspace.makeDropAggregateMutation(ksm, udf, FBUtilities.timestampMicros()), announceLocally);
}
http://git-wip-us.apache.org/repos/asf/cassandra/blob/2dc2bb80/src/java/org/apache/cassandra/service/ReadCallback.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/service/ReadCallback.java b/src/java/org/apache/cassandra/service/ReadCallback.java
index ad80913..b5cb477 100644
--- a/src/java/org/apache/cassandra/service/ReadCallback.java
+++ b/src/java/org/apache/cassandra/service/ReadCallback.java
@@ -98,7 +98,7 @@ public class ReadCallback implements IAsyncCallbackWithFailure<ReadResponse>
assert !(command instanceof PartitionRangeReadCommand) || blockfor >= endpoints.size();
if (logger.isTraceEnabled())
- logger.trace(String.format("Blockfor is %s; setting up requests to %s", blockfor, StringUtils.join(this.endpoints, ",")));
+ logger.trace("Blockfor is {}; setting up requests to {}", blockfor, StringUtils.join(this.endpoints, ","));
}
public boolean await(long timePastStart, TimeUnit unit)
@@ -241,9 +241,9 @@ public class ReadCallback implements IAsyncCallbackWithFailure<ReadResponse>
traceState.trace("Digest mismatch: {}", e.toString());
if (logger.isDebugEnabled())
logger.debug("Digest mismatch:", e);
-
+
ReadRepairMetrics.repairedBackground.mark();
-
+
final DataResolver repairResolver = new DataResolver(keyspace, command, consistencyLevel, endpoints.size(), queryStartNanoTime);
AsyncRepairCallback repairHandler = new AsyncRepairCallback(repairResolver, endpoints.size());
http://git-wip-us.apache.org/repos/asf/cassandra/blob/2dc2bb80/src/java/org/apache/cassandra/service/StorageService.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/service/StorageService.java b/src/java/org/apache/cassandra/service/StorageService.java
index 2799db2..8cc7d99 100644
--- a/src/java/org/apache/cassandra/service/StorageService.java
+++ b/src/java/org/apache/cassandra/service/StorageService.java
@@ -1190,7 +1190,7 @@ public class StorageService extends NotificationBroadcasterSupport implements IE
MatchResult range = tokenScanner.match();
Token startToken = factory.fromString(range.group(1));
Token endToken = factory.fromString(range.group(2));
- logger.info(String.format("adding range: (%s,%s]", startToken, endToken));
+ logger.info("adding range: ({},{}]", startToken, endToken);
ranges.add(new Range<>(startToken, endToken));
}
if (tokenScanner.hasNext())
@@ -2275,19 +2275,19 @@ public class StorageService extends NotificationBroadcasterSupport implements IE
if (epToTokenCopy.get(currentOwner).size() < 1)
endpointsToRemove.add(currentOwner);
- logger.info(String.format("Nodes %s and %s have the same token %s. %s is the new owner",
- endpoint,
- currentOwner,
- token,
- endpoint));
+ logger.info("Nodes {} and {} have the same token {}. {} is the new owner",
+ endpoint,
+ currentOwner,
+ token,
+ endpoint);
}
else
{
- logger.info(String.format("Nodes %s and %s have the same token %s. Ignoring %s",
- endpoint,
- currentOwner,
- token,
- endpoint));
+ logger.info("Nodes {} and {} have the same token {}. Ignoring {}",
+ endpoint,
+ currentOwner,
+ token,
+ endpoint);
}
}
@@ -4977,7 +4977,7 @@ public class StorageService extends NotificationBroadcasterSupport implements IE
public void setHintedHandoffThrottleInKB(int throttleInKB)
{
DatabaseDescriptor.setHintedHandoffThrottleInKB(throttleInKB);
- logger.info(String.format("Updated hinted_handoff_throttle_in_kb to %d", throttleInKB));
+ logger.info("Updated hinted_handoff_throttle_in_kb to {}", throttleInKB);
}
public static List<PartitionPosition> getDiskBoundaries(ColumnFamilyStore cfs, Directories.DataDirectory[] directories)
http://git-wip-us.apache.org/repos/asf/cassandra/blob/2dc2bb80/src/java/org/apache/cassandra/thrift/TServerCustomFactory.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/thrift/TServerCustomFactory.java b/src/java/org/apache/cassandra/thrift/TServerCustomFactory.java
index e98e6f2..5a272dd 100644
--- a/src/java/org/apache/cassandra/thrift/TServerCustomFactory.java
+++ b/src/java/org/apache/cassandra/thrift/TServerCustomFactory.java
@@ -48,12 +48,12 @@ public class TServerCustomFactory implements TServerFactory
else if(ThriftServer.ThriftServerType.ASYNC.equalsIgnoreCase(serverType))
{
server = new CustomTNonBlockingServer.Factory().buildTServer(args);
- logger.info(String.format("Using non-blocking/asynchronous thrift server on %s : %s", args.addr.getHostName(), args.addr.getPort()));
+ logger.info("Using non-blocking/asynchronous thrift server on {} : {}", args.addr.getHostName(), args.addr.getPort());
}
else if(ThriftServer.ThriftServerType.HSHA.equalsIgnoreCase(serverType))
{
server = new THsHaDisruptorServer.Factory().buildTServer(args);
- logger.info(String.format("Using custom half-sync/half-async thrift server on %s : %s", args.addr.getHostName(), args.addr.getPort()));
+ logger.info("Using custom half-sync/half-async thrift server on {} : {}", args.addr.getHostName(), args.addr.getPort());
}
else
{
@@ -67,7 +67,7 @@ public class TServerCustomFactory implements TServerFactory
throw new RuntimeException("Failed to instantiate server factory:" + serverType, e);
}
server = serverFactory.buildTServer(args);
- logger.info(String.format("Using custom thrift server %s on %s : %s", server.getClass().getName(), args.addr.getHostName(), args.addr.getPort()));
+ logger.info("Using custom thrift server {} on {} : {}", server.getClass().getName(), args.addr.getHostName(), args.addr.getPort());
}
return server;
}
http://git-wip-us.apache.org/repos/asf/cassandra/blob/2dc2bb80/src/java/org/apache/cassandra/thrift/ThriftServer.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/thrift/ThriftServer.java b/src/java/org/apache/cassandra/thrift/ThriftServer.java
index 21ee475..4aa5736 100644
--- a/src/java/org/apache/cassandra/thrift/ThriftServer.java
+++ b/src/java/org/apache/cassandra/thrift/ThriftServer.java
@@ -113,7 +113,7 @@ public class ThriftServer implements CassandraDaemon.Server
TTransportFactory transportFactory)
{
// now we start listening for clients
- logger.info(String.format("Binding thrift service to %s:%s", listenAddr, listenPort));
+ logger.info("Binding thrift service to {}:{}", listenAddr, listenPort);
TServerFactory.Args args = new TServerFactory.Args();
args.tProtocolFactory = new TBinaryProtocol.Factory(true, true);
http://git-wip-us.apache.org/repos/asf/cassandra/blob/2dc2bb80/src/java/org/apache/cassandra/utils/CLibrary.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/utils/CLibrary.java b/src/java/org/apache/cassandra/utils/CLibrary.java
index 5cbc866..937d662 100644
--- a/src/java/org/apache/cassandra/utils/CLibrary.java
+++ b/src/java/org/apache/cassandra/utils/CLibrary.java
@@ -247,7 +247,7 @@ public final class CLibrary
if (!(e instanceof LastErrorException))
throw e;
- logger.warn(String.format("posix_fadvise(%d, %d) failed, errno (%d).", fd, offset, errno(e)));
+ logger.warn("posix_fadvise({}, {}) failed, errno ({}).", fd, offset, errno(e));
}
}
@@ -269,7 +269,7 @@ public final class CLibrary
if (!(e instanceof LastErrorException))
throw e;
- logger.warn(String.format("fcntl(%d, %d, %d) failed, errno (%d).", fd, command, flags, errno(e)));
+ logger.warn("fcntl({}, {}, {}) failed, errno ({}).", fd, command, flags, errno(e));
}
return result;
@@ -292,7 +292,7 @@ public final class CLibrary
if (!(e instanceof LastErrorException))
throw e;
- logger.warn(String.format("open(%s, O_RDONLY) failed, errno (%d).", path, errno(e)));
+ logger.warn("open({}, O_RDONLY) failed, errno ({}).", path, errno(e));
}
return fd;
@@ -339,7 +339,7 @@ public final class CLibrary
if (!(e instanceof LastErrorException))
throw e;
- logger.warn(String.format("close(%d) failed, errno (%d).", fd, errno(e)));
+ logger.warn("close({}) failed, errno ({}).", fd, errno(e));
}
}
http://git-wip-us.apache.org/repos/asf/cassandra/blob/2dc2bb80/src/java/org/apache/cassandra/utils/FilterFactory.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/utils/FilterFactory.java b/src/java/org/apache/cassandra/utils/FilterFactory.java
index 298e734..ddcf1bb 100644
--- a/src/java/org/apache/cassandra/utils/FilterFactory.java
+++ b/src/java/org/apache/cassandra/utils/FilterFactory.java
@@ -55,7 +55,7 @@ public class FilterFactory
int bucketsPerElement = Math.min(targetBucketsPerElem, maxBucketsPerElement);
if (bucketsPerElement < targetBucketsPerElem)
{
- logger.warn(String.format("Cannot provide an optimal BloomFilter for %d elements (%d/%d buckets per element).", numElements, bucketsPerElement, targetBucketsPerElem));
+ logger.warn("Cannot provide an optimal BloomFilter for {} elements ({}/{} buckets per element).", numElements, bucketsPerElement, targetBucketsPerElem);
}
BloomCalculations.BloomSpecification spec = BloomCalculations.computeBloomSpec(bucketsPerElement);
return createFilter(spec.K, numElements, spec.bucketsPerElement, offheap, oldBfHashOrder);