You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@cassandra.apache.org by bl...@apache.org on 2015/09/22 22:03:24 UTC
[2/3] cassandra git commit: Add a separate production debug log for
troubleshooting
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/db/compaction/LeveledCompactionStrategy.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/compaction/LeveledCompactionStrategy.java b/src/java/org/apache/cassandra/db/compaction/LeveledCompactionStrategy.java
index 9a88164..ddbc3a1 100644
--- a/src/java/org/apache/cassandra/db/compaction/LeveledCompactionStrategy.java
+++ b/src/java/org/apache/cassandra/db/compaction/LeveledCompactionStrategy.java
@@ -71,7 +71,7 @@ public class LeveledCompactionStrategy extends AbstractCompactionStrategy
maxSSTableSizeInMB = configuredMaxSSTableSize;
manifest = new LeveledManifest(cfs, this.maxSSTableSizeInMB, localOptions);
- logger.debug("Created {}", manifest);
+ logger.trace("Created {}", manifest);
}
public int getLevelSize(int i)
@@ -101,7 +101,7 @@ public class LeveledCompactionStrategy extends AbstractCompactionStrategy
SSTableReader sstable = findDroppableSSTable(gcBefore);
if (sstable == null)
{
- logger.debug("No compaction necessary for {}", this);
+ logger.trace("No compaction necessary for {}", this);
return null;
}
candidate = new LeveledManifest.CompactionCandidate(Collections.singleton(sstable),
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/db/compaction/LeveledManifest.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/compaction/LeveledManifest.java b/src/java/org/apache/cassandra/db/compaction/LeveledManifest.java
index 0d0928f..d90318f 100644
--- a/src/java/org/apache/cassandra/db/compaction/LeveledManifest.java
+++ b/src/java/org/apache/cassandra/db/compaction/LeveledManifest.java
@@ -115,7 +115,7 @@ public class LeveledManifest
if (canAddSSTable(reader))
{
// adding the sstable does not cause overlap in the level
- logger.debug("Adding {} to L{}", reader, level);
+ logger.trace("Adding {} to L{}", reader, level);
generations[level].add(reader);
}
else
@@ -146,8 +146,8 @@ public class LeveledManifest
{
assert !removed.isEmpty(); // use add() instead of promote when adding new sstables
logDistribution();
- if (logger.isDebugEnabled())
- logger.debug("Replacing [{}]", toString(removed));
+ if (logger.isTraceEnabled())
+ logger.trace("Replacing [{}]", toString(removed));
// the level for the added sstables is the max of the removed ones,
// plus one if the removed were all on the same level
@@ -163,8 +163,8 @@ public class LeveledManifest
if (added.isEmpty())
return;
- if (logger.isDebugEnabled())
- logger.debug("Adding [{}]", toString(added));
+ if (logger.isTraceEnabled())
+ logger.trace("Adding [{}]", toString(added));
for (SSTableReader ssTableReader : added)
add(ssTableReader);
@@ -317,7 +317,7 @@ public class LeveledManifest
Set<SSTableReader> sstablesInLevel = Sets.newHashSet(sstables);
Set<SSTableReader> remaining = Sets.difference(sstablesInLevel, cfs.getTracker().getCompacting());
double score = (double) SSTableReader.getTotalBytes(remaining) / (double)maxBytesForLevel(i, maxSSTableSizeInBytes);
- logger.debug("Compaction score for level {} is {}", i, score);
+ logger.trace("Compaction score for level {} is {}", i, score);
if (score > 1.001)
{
@@ -327,7 +327,7 @@ public class LeveledManifest
List<SSTableReader> mostInteresting = getSSTablesForSTCS(getLevel(0));
if (!mostInteresting.isEmpty())
{
- logger.debug("L0 is too far behind, performing size-tiering there first");
+ logger.trace("L0 is too far behind, performing size-tiering there first");
return new CompactionCandidate(mostInteresting, 0, Long.MAX_VALUE);
}
}
@@ -338,13 +338,13 @@ public class LeveledManifest
{
int nextLevel = getNextLevel(candidates);
candidates = getOverlappingStarvedSSTables(nextLevel, candidates);
- if (logger.isDebugEnabled())
- logger.debug("Compaction candidates for L{} are {}", i, toString(candidates));
+ if (logger.isTraceEnabled())
+ logger.trace("Compaction candidates for L{} are {}", i, toString(candidates));
return new CompactionCandidate(candidates, nextLevel, cfs.getCompactionStrategy().getMaxSSTableBytes());
}
else
{
- logger.debug("No compaction candidates for L{}", i);
+ logger.trace("No compaction candidates for L{}", i);
}
}
}
@@ -387,10 +387,10 @@ public class LeveledManifest
for (int i = generations.length - 1; i > 0; i--)
compactionCounter[i]++;
compactionCounter[targetLevel] = 0;
- if (logger.isDebugEnabled())
+ if (logger.isTraceEnabled())
{
for (int j = 0; j < compactionCounter.length; j++)
- logger.debug("CompactionCounter: {}: {}", j, compactionCounter[j]);
+ logger.trace("CompactionCounter: {}: {}", j, compactionCounter[j]);
}
for (int i = generations.length - 1; i > 0; i--)
@@ -451,13 +451,13 @@ public class LeveledManifest
private void logDistribution()
{
- if (logger.isDebugEnabled())
+ if (logger.isTraceEnabled())
{
for (int i = 0; i < generations.length; i++)
{
if (!getLevel(i).isEmpty())
{
- logger.debug("L{} contains {} SSTables ({} bytes) in {}",
+ logger.trace("L{} contains {} SSTables ({} bytes) in {}",
i, getLevel(i).size(), SSTableReader.getTotalBytes(getLevel(i)), this);
}
}
@@ -539,7 +539,7 @@ public class LeveledManifest
private Collection<SSTableReader> getCandidatesFor(int level)
{
assert !getLevel(level).isEmpty();
- logger.debug("Choosing candidates for L{}", level);
+ logger.trace("Choosing candidates for L{}", level);
final Set<SSTableReader> compacting = cfs.getTracker().getCompacting();
@@ -703,7 +703,7 @@ public class LeveledManifest
tasks += estimated[i];
}
- logger.debug("Estimating {} compactions to do for {}.{}",
+ logger.trace("Estimating {} compactions to do for {}.{}",
Arrays.toString(estimated), cfs.keyspace.getName(), cfs.name);
return Ints.checkedCast(tasks);
}
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/db/compaction/SizeTieredCompactionStrategy.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/compaction/SizeTieredCompactionStrategy.java b/src/java/org/apache/cassandra/db/compaction/SizeTieredCompactionStrategy.java
index 4ba2378..b4125bb 100644
--- a/src/java/org/apache/cassandra/db/compaction/SizeTieredCompactionStrategy.java
+++ b/src/java/org/apache/cassandra/db/compaction/SizeTieredCompactionStrategy.java
@@ -82,7 +82,7 @@ public class SizeTieredCompactionStrategy extends AbstractCompactionStrategy
Iterable<SSTableReader> candidates = filterSuspectSSTables(Sets.intersection(cfs.getUncompactingSSTables(), sstables));
List<List<SSTableReader>> buckets = getBuckets(createSSTableAndLengthPairs(candidates), sizeTieredOptions.bucketHigh, sizeTieredOptions.bucketLow, sizeTieredOptions.minSSTableSize);
- logger.debug("Compaction buckets are {}", buckets);
+ logger.trace("Compaction buckets are {}", buckets);
updateEstimatedCompactionsByTasks(buckets);
List<SSTableReader> mostInteresting = mostInterestingBucket(buckets, minThreshold, maxThreshold);
if (!mostInteresting.isEmpty())
@@ -210,7 +210,7 @@ public class SizeTieredCompactionStrategy extends AbstractCompactionStrategy
LifecycleTransaction transaction = cfs.getTracker().tryModify(sstables, OperationType.COMPACTION);
if (transaction == null)
{
- logger.debug("Unable to mark {} for compaction; probably a background compaction got to it first. You can disable background compactions temporarily if this is a problem", sstables);
+ logger.trace("Unable to mark {} for compaction; probably a background compaction got to it first. You can disable background compactions temporarily if this is a problem", sstables);
return null;
}
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/db/compaction/WrappingCompactionStrategy.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/compaction/WrappingCompactionStrategy.java b/src/java/org/apache/cassandra/db/compaction/WrappingCompactionStrategy.java
index 1e73daf..9daa0c5 100644
--- a/src/java/org/apache/cassandra/db/compaction/WrappingCompactionStrategy.java
+++ b/src/java/org/apache/cassandra/db/compaction/WrappingCompactionStrategy.java
@@ -63,7 +63,7 @@ public final class WrappingCompactionStrategy extends AbstractCompactionStrategy
super(cfs, cfs.metadata.compactionStrategyOptions);
reloadCompactionStrategy(cfs.metadata);
cfs.getTracker().subscribe(this);
- logger.debug("{} subscribed to the data tracker.", this);
+ logger.trace("{} subscribed to the data tracker.", this);
}
@Override
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/db/compaction/writers/DefaultCompactionWriter.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/compaction/writers/DefaultCompactionWriter.java b/src/java/org/apache/cassandra/db/compaction/writers/DefaultCompactionWriter.java
index 7d88458..6611b3f 100644
--- a/src/java/org/apache/cassandra/db/compaction/writers/DefaultCompactionWriter.java
+++ b/src/java/org/apache/cassandra/db/compaction/writers/DefaultCompactionWriter.java
@@ -45,7 +45,7 @@ public class DefaultCompactionWriter extends CompactionAwareWriter
public DefaultCompactionWriter(ColumnFamilyStore cfs, LifecycleTransaction txn, Set<SSTableReader> nonExpiredSSTables, boolean offline, OperationType compactionType)
{
super(cfs, txn, nonExpiredSSTables, offline);
- logger.debug("Expected bloom filter size : {}", estimatedTotalKeys);
+ logger.trace("Expected bloom filter size : {}", estimatedTotalKeys);
long expectedWriteSize = cfs.getExpectedCompactedFileSize(nonExpiredSSTables, compactionType);
File sstableDirectory = cfs.directories.getLocationForDisk(getWriteDirectory(expectedWriteSize));
@SuppressWarnings("resource")
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/db/compaction/writers/SplittingSizeTieredCompactionWriter.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/compaction/writers/SplittingSizeTieredCompactionWriter.java b/src/java/org/apache/cassandra/db/compaction/writers/SplittingSizeTieredCompactionWriter.java
index 9ff1325..ed07df9 100644
--- a/src/java/org/apache/cassandra/db/compaction/writers/SplittingSizeTieredCompactionWriter.java
+++ b/src/java/org/apache/cassandra/db/compaction/writers/SplittingSizeTieredCompactionWriter.java
@@ -93,7 +93,7 @@ public class SplittingSizeTieredCompactionWriter extends CompactionAwareWriter
new MetadataCollector(allSSTables, cfs.metadata.comparator, 0));
sstableWriter.switchWriter(writer);
- logger.debug("Ratios={}, expectedKeys = {}, totalSize = {}, currentPartitionsToWrite = {}, currentBytesToWrite = {}", ratios, estimatedTotalKeys, totalSize, currentPartitionsToWrite, currentBytesToWrite);
+ logger.trace("Ratios={}, expectedKeys = {}, totalSize = {}, currentPartitionsToWrite = {}, currentBytesToWrite = {}", ratios, estimatedTotalKeys, totalSize, currentPartitionsToWrite, currentBytesToWrite);
}
@Override
@@ -114,7 +114,7 @@ public class SplittingSizeTieredCompactionWriter extends CompactionAwareWriter
cfs.partitioner,
new MetadataCollector(allSSTables, cfs.metadata.comparator, 0));
sstableWriter.switchWriter(writer);
- logger.debug("Switching writer, currentPartitionsToWrite = {}", currentPartitionsToWrite);
+ logger.trace("Switching writer, currentPartitionsToWrite = {}", currentPartitionsToWrite);
}
return rie != null;
}
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/db/filter/ExtendedFilter.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/filter/ExtendedFilter.java b/src/java/org/apache/cassandra/db/filter/ExtendedFilter.java
index 50ab57d..c6d93fe 100644
--- a/src/java/org/apache/cassandra/db/filter/ExtendedFilter.java
+++ b/src/java/org/apache/cassandra/db/filter/ExtendedFilter.java
@@ -261,7 +261,7 @@ public abstract class ExtendedFilter
{
if (data.getColumn(data.getComparator().cellFromByteBuffer(expr.column)) == null)
{
- logger.debug("adding extraFilter to cover additional expressions");
+ logger.trace("adding extraFilter to cover additional expressions");
return true;
}
}
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/db/index/AbstractSimplePerColumnSecondaryIndex.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/index/AbstractSimplePerColumnSecondaryIndex.java b/src/java/org/apache/cassandra/db/index/AbstractSimplePerColumnSecondaryIndex.java
index ba48350..854d688 100644
--- a/src/java/org/apache/cassandra/db/index/AbstractSimplePerColumnSecondaryIndex.java
+++ b/src/java/org/apache/cassandra/db/index/AbstractSimplePerColumnSecondaryIndex.java
@@ -101,8 +101,8 @@ public abstract class AbstractSimplePerColumnSecondaryIndex extends PerColumnSec
ColumnFamily cfi = ArrayBackedSortedColumns.factory.create(indexCfs.metadata, false, 1);
cfi.addTombstone(makeIndexColumnName(rowKey, cell), localDeletionTime, cell.timestamp());
indexCfs.apply(valueKey, cfi, SecondaryIndexManager.nullUpdater, opGroup, null);
- if (logger.isDebugEnabled())
- logger.debug("removed index entry for cleaned-up value {}:{}", valueKey, cfi);
+ if (logger.isTraceEnabled())
+ logger.trace("removed index entry for cleaned-up value {}:{}", valueKey, cfi);
}
public void insert(ByteBuffer rowKey, Cell cell, OpOrder.Group opGroup)
@@ -119,8 +119,8 @@ public abstract class AbstractSimplePerColumnSecondaryIndex extends PerColumnSec
{
cfi.addColumn(new BufferCell(name, ByteBufferUtil.EMPTY_BYTE_BUFFER, cell.timestamp()));
}
- if (logger.isDebugEnabled())
- logger.debug("applying index row {} in {}", indexCfs.metadata.getKeyValidator().getString(valueKey.getKey()), cfi);
+ if (logger.isTraceEnabled())
+ logger.trace("applying index row {} in {}", indexCfs.metadata.getKeyValidator().getString(valueKey.getKey()), cfi);
indexCfs.apply(valueKey, cfi, SecondaryIndexManager.nullUpdater, opGroup, null);
}
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/db/index/composites/CompositesIndex.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/index/composites/CompositesIndex.java b/src/java/org/apache/cassandra/db/index/composites/CompositesIndex.java
index e88d456..0be78cc 100644
--- a/src/java/org/apache/cassandra/db/index/composites/CompositesIndex.java
+++ b/src/java/org/apache/cassandra/db/index/composites/CompositesIndex.java
@@ -142,8 +142,8 @@ public abstract class CompositesIndex extends AbstractSimplePerColumnSecondaryIn
ColumnFamily cfi = ArrayBackedSortedColumns.factory.create(indexCfs.metadata);
cfi.addTombstone(entry.indexEntry, localDeletionTime, entry.timestamp);
indexCfs.apply(entry.indexValue, cfi, SecondaryIndexManager.nullUpdater, opGroup, null);
- if (logger.isDebugEnabled())
- logger.debug("removed index entry for cleaned-up value {}:{}", entry.indexValue, cfi);
+ if (logger.isTraceEnabled())
+ logger.trace("removed index entry for cleaned-up value {}:{}", entry.indexValue, cfi);
}
protected AbstractType<?> getExpressionComparator()
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/db/index/composites/CompositesSearcher.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/index/composites/CompositesSearcher.java b/src/java/org/apache/cassandra/db/index/composites/CompositesSearcher.java
index 88453df..2f85e35 100644
--- a/src/java/org/apache/cassandra/db/index/composites/CompositesSearcher.java
+++ b/src/java/org/apache/cassandra/db/index/composites/CompositesSearcher.java
@@ -98,8 +98,8 @@ public class CompositesSearcher extends SecondaryIndexSearcher
assert index.getIndexCfs() != null;
final DecoratedKey indexKey = index.getIndexKeyFor(primary.value);
- if (logger.isDebugEnabled())
- logger.debug("Most-selective indexed predicate is {}", index.expressionString(primary));
+ if (logger.isTraceEnabled())
+ logger.trace("Most-selective indexed predicate is {}", index.expressionString(primary));
/*
* XXX: If the range requested is a token range, we'll have to start at the beginning (and stop at the end) of
@@ -240,7 +240,7 @@ public class CompositesSearcher extends SecondaryIndexSearcher
}
else
{
- logger.debug("Skipping entry {} before assigned scan range", dk.getToken());
+ logger.trace("Skipping entry {} before assigned scan range", dk.getToken());
continue;
}
}
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/db/index/keys/KeysSearcher.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/index/keys/KeysSearcher.java b/src/java/org/apache/cassandra/db/index/keys/KeysSearcher.java
index b4fd0ba..2f0420f 100644
--- a/src/java/org/apache/cassandra/db/index/keys/KeysSearcher.java
+++ b/src/java/org/apache/cassandra/db/index/keys/KeysSearcher.java
@@ -73,8 +73,8 @@ public class KeysSearcher extends SecondaryIndexSearcher
assert index.getIndexCfs() != null;
final DecoratedKey indexKey = index.getIndexKeyFor(primary.value);
- if (logger.isDebugEnabled())
- logger.debug("Most-selective indexed predicate is {}",
+ if (logger.isTraceEnabled())
+ logger.trace("Most-selective indexed predicate is {}",
((AbstractSimplePerColumnSecondaryIndex) index).expressionString(primary));
/*
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/db/lifecycle/LifecycleTransaction.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/lifecycle/LifecycleTransaction.java b/src/java/org/apache/cassandra/db/lifecycle/LifecycleTransaction.java
index dfa30c7..9b52269 100644
--- a/src/java/org/apache/cassandra/db/lifecycle/LifecycleTransaction.java
+++ b/src/java/org/apache/cassandra/db/lifecycle/LifecycleTransaction.java
@@ -150,7 +150,7 @@ public class LifecycleTransaction extends Transactional.AbstractTransactional
{
assert staged.isEmpty() : "must be no actions introduced between prepareToCommit and a commit";
- logger.debug("Committing update:{}, obsolete:{}", staged.update, staged.obsolete);
+ logger.trace("Committing update:{}, obsolete:{}", staged.update, staged.obsolete);
// this is now the point of no return; we cannot safely rollback, so we ignore exceptions until we're done
// we restore state by obsoleting our obsolete files, releasing our references to them, and updating our size
@@ -167,15 +167,15 @@ public class LifecycleTransaction extends Transactional.AbstractTransactional
*/
public Throwable doAbort(Throwable accumulate)
{
- if (logger.isDebugEnabled())
- logger.debug("Aborting transaction over {}, with ({},{}) logged and ({},{}) staged", originals, logged.update, logged.obsolete, staged.update, staged.obsolete);
+ if (logger.isTraceEnabled())
+ logger.trace("Aborting transaction over {}, with ({},{}) logged and ({},{}) staged", originals, logged.update, logged.obsolete, staged.update, staged.obsolete);
if (logged.isEmpty() && staged.isEmpty())
return accumulate;
// mark obsolete all readers that are not versions of those present in the original set
Iterable<SSTableReader> obsolete = filterOut(concatUniq(staged.update, logged.update), originals);
- logger.debug("Obsoleting {}", obsolete);
+ logger.trace("Obsoleting {}", obsolete);
// we don't pass the tracker in for the obsoletion, since these readers have never been notified externally
// nor had their size accounting affected
accumulate = markObsolete(null, obsolete, accumulate);
@@ -221,8 +221,8 @@ public class LifecycleTransaction extends Transactional.AbstractTransactional
}
private Throwable checkpoint(Throwable accumulate)
{
- if (logger.isDebugEnabled())
- logger.debug("Checkpointing update:{}, obsolete:{}", staged.update, staged.obsolete);
+ if (logger.isTraceEnabled())
+ logger.trace("Checkpointing update:{}, obsolete:{}", staged.update, staged.obsolete);
if (staged.isEmpty())
return accumulate;
@@ -275,7 +275,7 @@ public class LifecycleTransaction extends Transactional.AbstractTransactional
*/
public void obsolete(SSTableReader reader)
{
- logger.debug("Staging for obsolescence {}", reader);
+ logger.trace("Staging for obsolescence {}", reader);
// check this is: a reader guarded by the transaction, an instance we have already worked with
// and that we haven't already obsoleted it, nor do we have other changes staged for it
assert identities.contains(reader.instanceId) : "only reader instances that have previously been provided may be obsoleted: " + reader;
@@ -291,7 +291,7 @@ public class LifecycleTransaction extends Transactional.AbstractTransactional
*/
public void obsoleteOriginals()
{
- logger.debug("Staging for obsolescence {}", originals);
+ logger.trace("Staging for obsolescence {}", originals);
// if we're obsoleting, we should have no staged updates for the original files
assert Iterables.isEmpty(filterIn(staged.update, originals)) : staged.update;
@@ -381,7 +381,7 @@ public class LifecycleTransaction extends Transactional.AbstractTransactional
*/
public void cancel(SSTableReader cancel)
{
- logger.debug("Cancelling {} from transaction", cancel);
+ logger.trace("Cancelling {} from transaction", cancel);
assert originals.contains(cancel) : "may only cancel a reader in the 'original' set: " + cancel + " vs " + originals;
assert !(staged.contains(cancel) || logged.contains(cancel)) : "may only cancel a reader that has not been updated or obsoleted in this transaction: " + cancel;
originals.remove(cancel);
@@ -405,7 +405,7 @@ public class LifecycleTransaction extends Transactional.AbstractTransactional
*/
public LifecycleTransaction split(Collection<SSTableReader> readers)
{
- logger.debug("Splitting {} into new transaction", readers);
+ logger.trace("Splitting {} into new transaction", readers);
checkUnused();
for (SSTableReader reader : readers)
assert identities.contains(reader.instanceId) : "may only split the same reader instance the transaction was opened with: " + reader;
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/db/lifecycle/Tracker.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/lifecycle/Tracker.java b/src/java/org/apache/cassandra/db/lifecycle/Tracker.java
index f1c4685..a074216 100644
--- a/src/java/org/apache/cassandra/db/lifecycle/Tracker.java
+++ b/src/java/org/apache/cassandra/db/lifecycle/Tracker.java
@@ -137,8 +137,8 @@ public class Tracker
long add = 0;
for (SSTableReader sstable : newSSTables)
{
- if (logger.isDebugEnabled())
- logger.debug("adding {} to list of files tracked for {}.{}", sstable.descriptor, cfstore.keyspace.getName(), cfstore.name);
+ if (logger.isTraceEnabled())
+ logger.trace("adding {} to list of files tracked for {}.{}", sstable.descriptor, cfstore.keyspace.getName(), cfstore.name);
try
{
add += sstable.bytesOnDisk();
@@ -151,8 +151,8 @@ public class Tracker
long subtract = 0;
for (SSTableReader sstable : oldSSTables)
{
- if (logger.isDebugEnabled())
- logger.debug("removing {} from list of files tracked for {}.{}", sstable.descriptor, cfstore.keyspace.getName(), cfstore.name);
+ if (logger.isTraceEnabled())
+ logger.trace("removing {} from list of files tracked for {}.{}", sstable.descriptor, cfstore.keyspace.getName(), cfstore.name);
try
{
subtract += sstable.bytesOnDisk();
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/dht/BootStrapper.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/dht/BootStrapper.java b/src/java/org/apache/cassandra/dht/BootStrapper.java
index aca05f0..a6b1ad7 100644
--- a/src/java/org/apache/cassandra/dht/BootStrapper.java
+++ b/src/java/org/apache/cassandra/dht/BootStrapper.java
@@ -65,7 +65,7 @@ public class BootStrapper extends ProgressEventNotifierSupport
public ListenableFuture<StreamState> bootstrap(StreamStateStore stateStore, boolean useStrictConsistency)
{
- logger.debug("Beginning bootstrap process");
+ logger.trace("Beginning bootstrap process");
RangeStreamer streamer = new RangeStreamer(tokenMetadata,
tokens,
@@ -159,7 +159,7 @@ public class BootStrapper extends ProgressEventNotifierSupport
// if user specified tokens, use those
if (initialTokens.size() > 0)
{
- logger.debug("tokens manually specified as {}", initialTokens);
+ logger.trace("tokens manually specified as {}", initialTokens);
List<Token> tokens = new ArrayList<>(initialTokens.size());
for (String tokenString : initialTokens)
{
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/dht/RangeStreamer.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/dht/RangeStreamer.java b/src/java/org/apache/cassandra/dht/RangeStreamer.java
index 68c8a11..8f2dc12 100644
--- a/src/java/org/apache/cassandra/dht/RangeStreamer.java
+++ b/src/java/org/apache/cassandra/dht/RangeStreamer.java
@@ -146,18 +146,18 @@ public class RangeStreamer
Multimap<Range<Token>, InetAddress> rangesForKeyspace = useStrictSourcesForRanges(keyspaceName)
? getAllRangesWithStrictSourcesFor(keyspaceName, ranges) : getAllRangesWithSourcesFor(keyspaceName, ranges);
- if (logger.isDebugEnabled())
+ if (logger.isTraceEnabled())
{
for (Map.Entry<Range<Token>, InetAddress> entry : rangesForKeyspace.entries())
- logger.debug(String.format("%s: range %s exists on %s", description, entry.getKey(), entry.getValue()));
+ logger.trace(String.format("%s: range %s exists on %s", description, entry.getKey(), entry.getValue()));
}
for (Map.Entry<InetAddress, Collection<Range<Token>>> entry : getRangeFetchMap(rangesForKeyspace, sourceFilters, keyspaceName).asMap().entrySet())
{
- if (logger.isDebugEnabled())
+ if (logger.isTraceEnabled())
{
for (Range<Token> r : entry.getValue())
- logger.debug(String.format("%s: range %s from source %s for keyspace %s", description, r, entry.getKey(), keyspaceName));
+ logger.trace(String.format("%s: range %s from source %s for keyspace %s", description, r, entry.getKey(), keyspaceName));
}
toFetch.put(keyspaceName, entry);
}
@@ -339,8 +339,8 @@ public class RangeStreamer
logger.info("Some ranges of {} are already available. Skipping streaming those ranges.", availableRanges);
}
- if (logger.isDebugEnabled())
- logger.debug("{}ing from {} ranges {}", description, source, StringUtils.join(ranges, ", "));
+ if (logger.isTraceEnabled())
+ logger.trace("{}ing from {} ranges {}", description, source, StringUtils.join(ranges, ", "));
/* Send messages to respective folks to stream data over to me */
streamPlan.requestRanges(source, preferred, keyspace, ranges);
}
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/hadoop/AbstractColumnFamilyInputFormat.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/hadoop/AbstractColumnFamilyInputFormat.java b/src/java/org/apache/cassandra/hadoop/AbstractColumnFamilyInputFormat.java
index 9c45bfe..103d478 100644
--- a/src/java/org/apache/cassandra/hadoop/AbstractColumnFamilyInputFormat.java
+++ b/src/java/org/apache/cassandra/hadoop/AbstractColumnFamilyInputFormat.java
@@ -81,7 +81,7 @@ public abstract class AbstractColumnFamilyInputFormat<K, Y> extends InputFormat<
keyspace = ConfigHelper.getInputKeyspace(conf);
cfName = ConfigHelper.getInputColumnFamily(conf);
partitioner = ConfigHelper.getInputPartitioner(conf);
- logger.debug("partitioner is {}", partitioner);
+ logger.trace("partitioner is {}", partitioner);
// canonical ranges and nodes holding replicas
Map<TokenRange, Set<Host>> masterRangeNodes = getRangeMap(conf, keyspace);
@@ -219,7 +219,7 @@ public abstract class AbstractColumnFamilyInputFormat<K, Y> extends InputFormat<
subSplits.get(subSplit),
endpoints);
- logger.debug("adding {}", split);
+ logger.trace("adding {}", split);
splits.add(split);
}
}
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/hadoop/ColumnFamilyInputFormat.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/hadoop/ColumnFamilyInputFormat.java b/src/java/org/apache/cassandra/hadoop/ColumnFamilyInputFormat.java
index 4662fa5..87cb791 100644
--- a/src/java/org/apache/cassandra/hadoop/ColumnFamilyInputFormat.java
+++ b/src/java/org/apache/cassandra/hadoop/ColumnFamilyInputFormat.java
@@ -63,7 +63,7 @@ public class ColumnFamilyInputFormat extends AbstractColumnFamilyInputFormat<Byt
@SuppressWarnings("resource")
public static Cassandra.Client createAuthenticatedClient(String location, int port, Configuration conf) throws Exception
{
- logger.debug("Creating authenticated client for CF input format");
+ logger.trace("Creating authenticated client for CF input format");
TTransport transport;
try
{
@@ -86,7 +86,7 @@ public class ColumnFamilyInputFormat extends AbstractColumnFamilyInputFormat<Byt
AuthenticationRequest authRequest = new AuthenticationRequest(creds);
client.login(authRequest);
}
- logger.debug("Authenticated client for CF input format created successfully");
+ logger.trace("Authenticated client for CF input format created successfully");
return client;
}
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/hadoop/ColumnFamilyOutputFormat.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/hadoop/ColumnFamilyOutputFormat.java b/src/java/org/apache/cassandra/hadoop/ColumnFamilyOutputFormat.java
index 92e3829..edc988b 100644
--- a/src/java/org/apache/cassandra/hadoop/ColumnFamilyOutputFormat.java
+++ b/src/java/org/apache/cassandra/hadoop/ColumnFamilyOutputFormat.java
@@ -117,7 +117,7 @@ public class ColumnFamilyOutputFormat extends OutputFormat<ByteBuffer,List<Mutat
@SuppressWarnings("resource")
public static Cassandra.Client createAuthenticatedClient(String host, int port, Configuration conf) throws Exception
{
- logger.debug("Creating authenticated client for CF output format");
+ logger.trace("Creating authenticated client for CF output format");
TTransport transport = ConfigHelper.getClientTransportFactory(conf).openTransport(host, port);
TProtocol binaryProtocol = new TBinaryProtocol(transport, true, true);
Cassandra.Client client = new Cassandra.Client(binaryProtocol);
@@ -127,7 +127,7 @@ public class ColumnFamilyOutputFormat extends OutputFormat<ByteBuffer,List<Mutat
if ((user != null) && (password != null))
login(user, password, client);
- logger.debug("Authenticated client for CF output format created successfully");
+ logger.trace("Authenticated client for CF output format created successfully");
return client;
}
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/hadoop/ColumnFamilyRecordReader.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/hadoop/ColumnFamilyRecordReader.java b/src/java/org/apache/cassandra/hadoop/ColumnFamilyRecordReader.java
index aee730d..97dc497 100644
--- a/src/java/org/apache/cassandra/hadoop/ColumnFamilyRecordReader.java
+++ b/src/java/org/apache/cassandra/hadoop/ColumnFamilyRecordReader.java
@@ -170,14 +170,14 @@ public class ColumnFamilyRecordReader extends RecordReader<ByteBuffer, SortedMap
}
iter = widerows ? new WideRowIterator() : new StaticRowIterator();
- logger.debug("created {}", iter);
+ logger.trace("created {}", iter);
}
public boolean nextKeyValue() throws IOException
{
if (!iter.hasNext())
{
- logger.debug("Finished scanning {} rows (estimate was: {})", iter.rowsRead(), totalRowCount);
+ logger.trace("Finished scanning {} rows (estimate was: {})", iter.rowsRead(), totalRowCount);
return false;
}
@@ -443,7 +443,7 @@ public class ColumnFamilyRecordReader extends RecordReader<ByteBuffer, SortedMap
else
{
KeySlice lastRow = Iterables.getLast(rows);
- logger.debug("Starting with last-seen row {}", lastRow.key);
+ logger.trace("Starting with last-seen row {}", lastRow.key);
keyRange = new KeyRange(batchSize)
.setStart_key(lastRow.key)
.setEnd_token(split.getEndToken())
@@ -456,7 +456,7 @@ public class ColumnFamilyRecordReader extends RecordReader<ByteBuffer, SortedMap
int n = 0;
for (KeySlice row : rows)
n += row.columns.size();
- logger.debug("read {} columns in {} rows for {} starting with {}",
+ logger.trace("read {} columns in {} rows for {} starting with {}",
new Object[]{ n, rows.size(), keyRange, lastColumn });
wideColumns = Iterators.peekingIterator(new WideColumnIterator(rows));
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/hadoop/cql3/CqlRecordReader.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/hadoop/cql3/CqlRecordReader.java b/src/java/org/apache/cassandra/hadoop/cql3/CqlRecordReader.java
index 6db851d..b3e440d 100644
--- a/src/java/org/apache/cassandra/hadoop/cql3/CqlRecordReader.java
+++ b/src/java/org/apache/cassandra/hadoop/cql3/CqlRecordReader.java
@@ -156,10 +156,10 @@ public class CqlRecordReader extends RecordReader<Long, Row>
if (StringUtils.isEmpty(cqlQuery))
cqlQuery = buildQuery();
- logger.debug("cqlQuery {}", cqlQuery);
+ logger.trace("cqlQuery {}", cqlQuery);
rowIterator = new RowIterator();
- logger.debug("created {}", rowIterator);
+ logger.trace("created {}", rowIterator);
}
public void close()
@@ -194,7 +194,7 @@ public class CqlRecordReader extends RecordReader<Long, Row>
{
if (!rowIterator.hasNext())
{
- logger.debug("Finished scanning {} rows (estimate was: {})", rowIterator.totalRead, totalRowCount);
+ logger.trace("Finished scanning {} rows (estimate was: {})", rowIterator.totalRead, totalRowCount);
return false;
}
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/hadoop/cql3/LimitedLocalNodeFirstLocalBalancingPolicy.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/hadoop/cql3/LimitedLocalNodeFirstLocalBalancingPolicy.java b/src/java/org/apache/cassandra/hadoop/cql3/LimitedLocalNodeFirstLocalBalancingPolicy.java
index da0822a..5c8d3c5 100644
--- a/src/java/org/apache/cassandra/hadoop/cql3/LimitedLocalNodeFirstLocalBalancingPolicy.java
+++ b/src/java/org/apache/cassandra/hadoop/cql3/LimitedLocalNodeFirstLocalBalancingPolicy.java
@@ -71,7 +71,7 @@ class LimitedLocalNodeFirstLocalBalancingPolicy implements LoadBalancingPolicy
logger.warn("Invalid replica host name: {}, skipping it", replica);
}
}
- logger.debug("Created instance with the following replicas: {}", Arrays.asList(replicas));
+ logger.trace("Created instance with the following replicas: {}", Arrays.asList(replicas));
}
@Override
@@ -86,7 +86,7 @@ class LimitedLocalNodeFirstLocalBalancingPolicy implements LoadBalancingPolicy
}
}
liveReplicaHosts.addAll(replicaHosts);
- logger.debug("Initialized with replica hosts: {}", replicaHosts);
+ logger.trace("Initialized with replica hosts: {}", replicaHosts);
}
@Override
@@ -127,7 +127,7 @@ class LimitedLocalNodeFirstLocalBalancingPolicy implements LoadBalancingPolicy
Collections.shuffle(remote);
- logger.debug("Using the following hosts order for the new query plan: {} | {}", local, remote);
+ logger.trace("Using the following hosts order for the new query plan: {} | {}", local, remote);
return Iterators.concat(local.iterator(), remote.iterator());
}
@@ -138,7 +138,7 @@ class LimitedLocalNodeFirstLocalBalancingPolicy implements LoadBalancingPolicy
if (replicaAddresses.contains(host.getAddress()))
{
liveReplicaHosts.add(host);
- logger.debug("Added a new host {}", host);
+ logger.trace("Added a new host {}", host);
}
}
@@ -148,7 +148,7 @@ class LimitedLocalNodeFirstLocalBalancingPolicy implements LoadBalancingPolicy
if (replicaAddresses.contains(host.getAddress()))
{
liveReplicaHosts.add(host);
- logger.debug("The host {} is now up", host);
+ logger.trace("The host {} is now up", host);
}
}
@@ -157,7 +157,7 @@ class LimitedLocalNodeFirstLocalBalancingPolicy implements LoadBalancingPolicy
{
if (liveReplicaHosts.remove(host))
{
- logger.debug("The host {} is now down", host);
+ logger.trace("The host {} is now down", host);
}
}
@@ -167,7 +167,7 @@ class LimitedLocalNodeFirstLocalBalancingPolicy implements LoadBalancingPolicy
{
if (liveReplicaHosts.remove(host))
{
- logger.debug("Removed the host {}", host);
+ logger.trace("Removed the host {}", host);
}
}
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/hadoop/pig/CassandraStorage.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/hadoop/pig/CassandraStorage.java b/src/java/org/apache/cassandra/hadoop/pig/CassandraStorage.java
index 5d354a7..71fe037 100644
--- a/src/java/org/apache/cassandra/hadoop/pig/CassandraStorage.java
+++ b/src/java/org/apache/cassandra/hadoop/pig/CassandraStorage.java
@@ -1028,7 +1028,7 @@ public class CassandraStorage extends LoadFunc implements StoreFuncInterface, Lo
ColumnDef cDef = new ColumnDef();
String columnName = def.name.toString();
String type = def.type.toString();
- logger.debug("name: {}, type: {} ", columnName, type);
+ logger.trace("name: {}, type: {} ", columnName, type);
cDef.name = ByteBufferUtil.bytes(columnName);
cDef.validation_class = type;
columnDefs.add(cDef);
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/hadoop/pig/CqlNativeStorage.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/hadoop/pig/CqlNativeStorage.java b/src/java/org/apache/cassandra/hadoop/pig/CqlNativeStorage.java
index 223a848..bdf99a0 100644
--- a/src/java/org/apache/cassandra/hadoop/pig/CqlNativeStorage.java
+++ b/src/java/org/apache/cassandra/hadoop/pig/CqlNativeStorage.java
@@ -626,7 +626,7 @@ public class CqlNativeStorage extends LoadFunc implements StoreFuncInterface, Lo
if (wc != null)
{
- logger.debug("where clause: {}", wc);
+ logger.trace("where clause: {}", wc);
CqlConfigHelper.setInputWhereClauses(conf, wc);
}
if (System.getenv(StorageHelper.PIG_INPUT_SPLIT_SIZE) != null)
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/io/sstable/IndexSummaryManager.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/IndexSummaryManager.java b/src/java/org/apache/cassandra/io/sstable/IndexSummaryManager.java
index b07b2f5..1dd3a4e 100644
--- a/src/java/org/apache/cassandra/io/sstable/IndexSummaryManager.java
+++ b/src/java/org/apache/cassandra/io/sstable/IndexSummaryManager.java
@@ -276,7 +276,7 @@ public class IndexSummaryManager implements IndexSummaryManagerMBean
for (SSTableReader sstable : Iterables.concat(compacting, redistribute))
total += sstable.getIndexSummaryOffHeapSize();
- logger.debug("Beginning redistribution of index summaries for {} sstables with memory pool size {} MB; current spaced used is {} MB",
+ logger.trace("Beginning redistribution of index summaries for {} sstables with memory pool size {} MB; current space used is {} MB",
redistribute.size(), memoryPoolBytes / 1024L / 1024L, total / 1024.0 / 1024.0);
final Map<SSTableReader, Double> readRates = new HashMap<>(redistribute.size());
@@ -310,7 +310,7 @@ public class IndexSummaryManager implements IndexSummaryManagerMBean
total = 0;
for (SSTableReader sstable : Iterables.concat(compacting, oldFormatSSTables, newSSTables))
total += sstable.getIndexSummaryOffHeapSize();
- logger.debug("Completed resizing of index summaries; current approximate memory used: {} MB",
+ logger.trace("Completed resizing of index summaries; current approximate memory used: {} MB",
total / 1024.0 / 1024.0);
return newSSTables;
@@ -368,7 +368,7 @@ public class IndexSummaryManager implements IndexSummaryManagerMBean
if (effectiveIndexInterval < minIndexInterval)
{
// The min_index_interval was changed; re-sample to match it.
- logger.debug("Forcing resample of {} because the current index interval ({}) is below min_index_interval ({})",
+ logger.trace("Forcing resample of {} because the current index interval ({}) is below min_index_interval ({})",
sstable, effectiveIndexInterval, minIndexInterval);
long spaceUsed = (long) Math.ceil(avgEntrySize * numEntriesAtNewSamplingLevel);
forceResample.add(new ResampleEntry(sstable, spaceUsed, newSamplingLevel));
@@ -377,7 +377,7 @@ public class IndexSummaryManager implements IndexSummaryManagerMBean
else if (effectiveIndexInterval > maxIndexInterval)
{
// The max_index_interval was lowered; force an upsample to the effective minimum sampling level
- logger.debug("Forcing upsample of {} because the current index interval ({}) is above max_index_interval ({})",
+ logger.trace("Forcing upsample of {} because the current index interval ({}) is above max_index_interval ({})",
sstable, effectiveIndexInterval, maxIndexInterval);
newSamplingLevel = Math.max(1, (BASE_SAMPLING_LEVEL * minIndexInterval) / maxIndexInterval);
numEntriesAtNewSamplingLevel = IndexSummaryBuilder.entriesAtSamplingLevel(newSamplingLevel, sstable.getMaxIndexSummarySize());
@@ -424,7 +424,7 @@ public class IndexSummaryManager implements IndexSummaryManagerMBean
for (ResampleEntry entry : toDownsample)
{
SSTableReader sstable = entry.sstable;
- logger.debug("Re-sampling index summary for {} from {}/{} to {}/{} of the original number of entries",
+ logger.trace("Re-sampling index summary for {} from {}/{} to {}/{} of the original number of entries",
sstable, sstable.getIndexSummarySamplingLevel(), Downsampling.BASE_SAMPLING_LEVEL,
entry.newSamplingLevel, Downsampling.BASE_SAMPLING_LEVEL);
ColumnFamilyStore cfs = Keyspace.open(sstable.metadata.ksName).getColumnFamilyStore(sstable.metadata.cfId);
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/io/sstable/SSTable.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/SSTable.java b/src/java/org/apache/cassandra/io/sstable/SSTable.java
index 2077152..b0aa89e 100644
--- a/src/java/org/apache/cassandra/io/sstable/SSTable.java
+++ b/src/java/org/apache/cassandra/io/sstable/SSTable.java
@@ -115,7 +115,7 @@ public abstract class SSTable
}
FileUtils.delete(desc.filenameFor(Component.SUMMARY));
- logger.debug("Deleted {}", desc);
+ logger.trace("Deleted {}", desc);
return true;
}
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java b/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java
index ce12206..87891ae 100644
--- a/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java
+++ b/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java
@@ -296,7 +296,7 @@ public abstract class SSTableReader extends SSTable implements SelfRefCounted<SS
if (cardinality != null)
cardinalities.add(cardinality);
else
- logger.debug("Got a null cardinality estimator in: {}", sstable.getFilename());
+ logger.trace("Got a null cardinality estimator in: {}", sstable.getFilename());
}
catch (IOException e)
{
@@ -312,7 +312,7 @@ public abstract class SSTableReader extends SSTable implements SelfRefCounted<SS
return 1;
long totalKeyCountAfter = mergeCardinalities(cardinalities).cardinality();
- logger.debug("Estimated compaction gain: {}/{}={}", totalKeyCountAfter, totalKeyCountBefore, ((double)totalKeyCountAfter)/totalKeyCountBefore);
+ logger.trace("Estimated compaction gain: {}/{}={}", totalKeyCountAfter, totalKeyCountBefore, ((double)totalKeyCountAfter)/totalKeyCountBefore);
return ((double)totalKeyCountAfter)/totalKeyCountBefore;
}
@@ -399,7 +399,7 @@ public abstract class SSTableReader extends SSTable implements SelfRefCounted<SS
System.exit(1);
}
- logger.info("Opening {} ({} bytes)", descriptor, new File(descriptor.filenameFor(Component.DATA)).length());
+ logger.debug("Opening {} ({} bytes)", descriptor, new File(descriptor.filenameFor(Component.DATA)).length());
SSTableReader sstable = internalOpen(descriptor, components, metadata, partitioner, System.currentTimeMillis(),
statsMetadata, OpenReason.NORMAL);
@@ -446,7 +446,7 @@ public abstract class SSTableReader extends SSTable implements SelfRefCounted<SS
System.exit(1);
}
- logger.info("Opening {} ({} bytes)", descriptor, new File(descriptor.filenameFor(Component.DATA)).length());
+ logger.debug("Opening {} ({} bytes)", descriptor, new File(descriptor.filenameFor(Component.DATA)).length());
SSTableReader sstable = internalOpen(descriptor, components, metadata, partitioner, System.currentTimeMillis(),
statsMetadata, OpenReason.NORMAL);
try
@@ -454,14 +454,14 @@ public abstract class SSTableReader extends SSTable implements SelfRefCounted<SS
// load index and filter
long start = System.nanoTime();
sstable.load(validationMetadata);
- logger.debug("INDEX LOAD TIME for {}: {} ms.", descriptor, TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start));
+ logger.trace("INDEX LOAD TIME for {}: {} ms.", descriptor, TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start));
sstable.setup(trackHotness);
if (validate)
sstable.validate();
if (sstable.getKeyCache() != null)
- logger.debug("key cache contains {}/{} keys", sstable.getKeyCache().size(), sstable.getKeyCache().getCapacity());
+ logger.trace("key cache contains {}/{} keys", sstable.getKeyCache().size(), sstable.getKeyCache().getCapacity());
return sstable;
}
@@ -843,7 +843,7 @@ public abstract class SSTableReader extends SSTable implements SelfRefCounted<SS
{
if (indexSummary != null)
indexSummary.close();
- logger.debug("Cannot deserialize SSTable Summary File {}: {}", summariesFile.getPath(), e.getMessage());
+ logger.trace("Cannot deserialize SSTable Summary File {}: {}", summariesFile.getPath(), e.getMessage());
// corrupted; delete it and fall back to creating a new summary
FileUtils.closeQuietly(iStream);
// delete it and fall back to creating a new summary
@@ -945,7 +945,7 @@ public abstract class SSTableReader extends SSTable implements SelfRefCounted<SS
}
catch (IOException e)
{
- logger.debug("Cannot save SSTable Summary: ", e);
+ logger.trace("Cannot save SSTable Summary: ", e);
// corrupted hence delete it and let it load it now.
if (summariesFile.exists())
@@ -1633,8 +1633,8 @@ public abstract class SSTableReader extends SSTable implements SelfRefCounted<SS
*/
public boolean markObsolete(Tracker tracker)
{
- if (logger.isDebugEnabled())
- logger.debug("Marking {} compacted", getFilename());
+ if (logger.isTraceEnabled())
+ logger.trace("Marking {} compacted", getFilename());
synchronized (tidy.global)
{
@@ -1655,8 +1655,8 @@ public abstract class SSTableReader extends SSTable implements SelfRefCounted<SS
public void markSuspect()
{
- if (logger.isDebugEnabled())
- logger.debug("Marking {} as a suspect for blacklisting.", getFilename());
+ if (logger.isTraceEnabled())
+ logger.trace("Marking {} as a suspect for blacklisting.", getFilename());
isSuspect.getAndSet(true);
}
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/io/sstable/metadata/MetadataSerializer.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/metadata/MetadataSerializer.java b/src/java/org/apache/cassandra/io/sstable/metadata/MetadataSerializer.java
index 8a65d8d..30ed85b 100644
--- a/src/java/org/apache/cassandra/io/sstable/metadata/MetadataSerializer.java
+++ b/src/java/org/apache/cassandra/io/sstable/metadata/MetadataSerializer.java
@@ -78,11 +78,11 @@ public class MetadataSerializer implements IMetadataSerializer
public Map<MetadataType, MetadataComponent> deserialize(Descriptor descriptor, EnumSet<MetadataType> types) throws IOException
{
Map<MetadataType, MetadataComponent> components;
- logger.debug("Load metadata for {}", descriptor);
+ logger.trace("Load metadata for {}", descriptor);
File statsFile = new File(descriptor.filenameFor(Component.STATS));
if (!statsFile.exists())
{
- logger.debug("No sstable stats for {}", descriptor);
+ logger.trace("No sstable stats for {}", descriptor);
components = Maps.newHashMap();
components.put(MetadataType.STATS, MetadataCollector.defaultStatsMetadata());
}
@@ -129,7 +129,7 @@ public class MetadataSerializer implements IMetadataSerializer
public void mutateLevel(Descriptor descriptor, int newLevel) throws IOException
{
- logger.debug("Mutating {} to level {}", descriptor.filenameFor(Component.STATS), newLevel);
+ logger.trace("Mutating {} to level {}", descriptor.filenameFor(Component.STATS), newLevel);
Map<MetadataType, MetadataComponent> currentComponents = deserialize(descriptor, EnumSet.allOf(MetadataType.class));
StatsMetadata stats = (StatsMetadata) currentComponents.remove(MetadataType.STATS);
// mutate level
@@ -139,7 +139,7 @@ public class MetadataSerializer implements IMetadataSerializer
public void mutateRepairedAt(Descriptor descriptor, long newRepairedAt) throws IOException
{
- logger.debug("Mutating {} to repairedAt time {}", descriptor.filenameFor(Component.STATS), newRepairedAt);
+ logger.trace("Mutating {} to repairedAt time {}", descriptor.filenameFor(Component.STATS), newRepairedAt);
Map<MetadataType, MetadataComponent> currentComponents = deserialize(descriptor, EnumSet.allOf(MetadataType.class));
StatsMetadata stats = (StatsMetadata) currentComponents.remove(MetadataType.STATS);
// mutate level
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/io/util/FileUtils.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/util/FileUtils.java b/src/java/org/apache/cassandra/io/util/FileUtils.java
index 9f5193b..ce45370 100644
--- a/src/java/org/apache/cassandra/io/util/FileUtils.java
+++ b/src/java/org/apache/cassandra/io/util/FileUtils.java
@@ -170,8 +170,8 @@ public class FileUtils
public static void renameWithConfirm(File from, File to)
{
assert from.exists();
- if (logger.isDebugEnabled())
- logger.debug((String.format("Renaming %s to %s", from.getPath(), to.getPath())));
+ if (logger.isTraceEnabled())
+ logger.trace((String.format("Renaming %s to %s", from.getPath(), to.getPath())));
// this is not FSWE because usually when we see it it's because we didn't close the file before renaming it,
// and Windows is picky about that.
try
@@ -198,7 +198,7 @@ public class FileUtils
}
catch (AtomicMoveNotSupportedException e)
{
- logger.debug("Could not do an atomic move", e);
+ logger.trace("Could not do an atomic move", e);
Files.move(from, to, StandardCopyOption.REPLACE_EXISTING);
}
@@ -393,7 +393,7 @@ public class FileUtils
deleteRecursiveOnExit(new File(dir, child));
}
- logger.debug("Scheduling deferred deletion of file: " + dir);
+ logger.trace("Scheduling deferred deletion of file: " + dir);
dir.deleteOnExit();
}
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/io/util/MmappedSegmentedFile.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/util/MmappedSegmentedFile.java b/src/java/org/apache/cassandra/io/util/MmappedSegmentedFile.java
index 91908c9..a8fae9f 100644
--- a/src/java/org/apache/cassandra/io/util/MmappedSegmentedFile.java
+++ b/src/java/org/apache/cassandra/io/util/MmappedSegmentedFile.java
@@ -124,7 +124,7 @@ public class MmappedSegmentedFile extends SegmentedFile
continue;
FileUtils.clean(segment.right);
}
- logger.debug("All segments have been unmapped successfully");
+ logger.trace("All segments have been unmapped successfully");
}
catch (Exception e)
{
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/locator/AbstractReplicationStrategy.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/locator/AbstractReplicationStrategy.java b/src/java/org/apache/cassandra/locator/AbstractReplicationStrategy.java
index 461265c..daa4aa0 100644
--- a/src/java/org/apache/cassandra/locator/AbstractReplicationStrategy.java
+++ b/src/java/org/apache/cassandra/locator/AbstractReplicationStrategy.java
@@ -84,7 +84,7 @@ public abstract class AbstractReplicationStrategy
{
if (lastVersion > lastInvalidatedVersion)
{
- logger.debug("clearing cached endpoints");
+ logger.trace("clearing cached endpoints");
cachedEndpoints.clear();
lastInvalidatedVersion = lastVersion;
}
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/locator/NetworkTopologyStrategy.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/locator/NetworkTopologyStrategy.java b/src/java/org/apache/cassandra/locator/NetworkTopologyStrategy.java
index 6959f29..307a07f 100644
--- a/src/java/org/apache/cassandra/locator/NetworkTopologyStrategy.java
+++ b/src/java/org/apache/cassandra/locator/NetworkTopologyStrategy.java
@@ -71,7 +71,7 @@ public class NetworkTopologyStrategy extends AbstractReplicationStrategy
}
datacenters = Collections.unmodifiableMap(newDatacenters);
- logger.debug("Configured datacenter replicas are {}", FBUtilities.toString(datacenters));
+ logger.trace("Configured datacenter replicas are {}", FBUtilities.toString(datacenters));
}
/**
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/locator/PropertyFileSnitch.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/locator/PropertyFileSnitch.java b/src/java/org/apache/cassandra/locator/PropertyFileSnitch.java
index 4bcdcc5..590117d 100644
--- a/src/java/org/apache/cassandra/locator/PropertyFileSnitch.java
+++ b/src/java/org/apache/cassandra/locator/PropertyFileSnitch.java
@@ -99,7 +99,7 @@ public class PropertyFileSnitch extends AbstractNetworkTopologySnitch
String[] value = endpointMap.get(endpoint);
if (value == null)
{
- logger.debug("Could not find end point information for {}, will use default", endpoint);
+ logger.trace("Could not find end point information for {}, will use default", endpoint);
return defaultDCRack;
}
return value;
@@ -182,12 +182,12 @@ public class PropertyFileSnitch extends AbstractNetworkTopologySnitch
throw new ConfigurationException(String.format("Snitch definitions at %s do not define a location for this node's broadcast address %s, nor does it provides a default",
SNITCH_PROPERTIES_FILENAME, FBUtilities.getBroadcastAddress()));
- if (logger.isDebugEnabled())
+ if (logger.isTraceEnabled())
{
StringBuilder sb = new StringBuilder();
for (Map.Entry<InetAddress, String[]> entry : reloadedMap.entrySet())
sb.append(entry.getKey()).append(":").append(Arrays.toString(entry.getValue())).append(", ");
- logger.debug("Loaded network topology from property file: {}", StringUtils.removeEnd(sb.toString(), ", "));
+ logger.trace("Loaded network topology from property file: {}", StringUtils.removeEnd(sb.toString(), ", "));
}
endpointMap = reloadedMap;
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/locator/ReconnectableSnitchHelper.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/locator/ReconnectableSnitchHelper.java b/src/java/org/apache/cassandra/locator/ReconnectableSnitchHelper.java
index 3277af7..8b34fc0 100644
--- a/src/java/org/apache/cassandra/locator/ReconnectableSnitchHelper.java
+++ b/src/java/org/apache/cassandra/locator/ReconnectableSnitchHelper.java
@@ -63,7 +63,7 @@ public class ReconnectableSnitchHelper implements IEndpointStateChangeSubscriber
&& !MessagingService.instance().getConnectionPool(publicAddress).endPoint().equals(localAddress))
{
MessagingService.instance().getConnectionPool(publicAddress).reset(localAddress);
- logger.debug(String.format("Intiated reconnect to an Internal IP %s for the %s", localAddress, publicAddress));
+ logger.trace(String.format("Initiated reconnect to an Internal IP %s for the %s", localAddress, publicAddress));
}
}
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/locator/TokenMetadata.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/locator/TokenMetadata.java b/src/java/org/apache/cassandra/locator/TokenMetadata.java
index fc65dd8..db0b609 100644
--- a/src/java/org/apache/cassandra/locator/TokenMetadata.java
+++ b/src/java/org/apache/cassandra/locator/TokenMetadata.java
@@ -737,8 +737,8 @@ public class TokenMetadata
if (bootstrapTokens.isEmpty() && leavingEndpoints.isEmpty() && movingEndpoints.isEmpty())
{
- if (logger.isDebugEnabled())
- logger.debug("No bootstrapping, leaving or moving nodes -> empty pending ranges for {}", keyspaceName);
+ if (logger.isTraceEnabled())
+ logger.trace("No bootstrapping, leaving or moving nodes -> empty pending ranges for {}", keyspaceName);
pendingRanges.put(keyspaceName, newPendingRanges);
return;
@@ -802,8 +802,8 @@ public class TokenMetadata
pendingRanges.put(keyspaceName, newPendingRanges);
- if (logger.isDebugEnabled())
- logger.debug("Pending ranges:\n{}", (pendingRanges.isEmpty() ? "<empty>" : printPendingRanges()));
+ if (logger.isTraceEnabled())
+ logger.trace("Pending ranges:\n{}", (pendingRanges.isEmpty() ? "<empty>" : printPendingRanges()));
}
finally
{
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/net/IncomingStreamingConnection.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/net/IncomingStreamingConnection.java b/src/java/org/apache/cassandra/net/IncomingStreamingConnection.java
index 274e47b..b69ce88 100644
--- a/src/java/org/apache/cassandra/net/IncomingStreamingConnection.java
+++ b/src/java/org/apache/cassandra/net/IncomingStreamingConnection.java
@@ -70,7 +70,7 @@ public class IncomingStreamingConnection extends Thread implements Closeable
}
catch (IOException e)
{
- logger.debug("IOException reading from socket; closing", e);
+ logger.trace("IOException reading from socket; closing", e);
close();
}
}
@@ -87,7 +87,7 @@ public class IncomingStreamingConnection extends Thread implements Closeable
}
catch (IOException e)
{
- logger.debug("Error closing socket", e);
+ logger.trace("Error closing socket", e);
}
finally
{
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/net/IncomingTcpConnection.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/net/IncomingTcpConnection.java b/src/java/org/apache/cassandra/net/IncomingTcpConnection.java
index cfdb854..f6652b0 100644
--- a/src/java/org/apache/cassandra/net/IncomingTcpConnection.java
+++ b/src/java/org/apache/cassandra/net/IncomingTcpConnection.java
@@ -101,7 +101,7 @@ public class IncomingTcpConnection extends Thread implements Closeable
}
catch (IOException e)
{
- logger.debug("IOException reading from socket; closing", e);
+ logger.trace("IOException reading from socket; closing", e);
}
finally
{
@@ -121,7 +121,7 @@ public class IncomingTcpConnection extends Thread implements Closeable
}
catch (IOException e)
{
- logger.debug("Error closing socket", e);
+ logger.trace("Error closing socket", e);
}
finally
{
@@ -144,11 +144,11 @@ public class IncomingTcpConnection extends Thread implements Closeable
from = CompactEndpointSerializationHelper.deserialize(in);
// record the (true) version of the endpoint
MessagingService.instance().setVersion(from, maxVersion);
- logger.debug("Set version for {} to {} (will use {})", from, maxVersion, MessagingService.instance().getVersion(from));
+ logger.trace("Set version for {} to {} (will use {})", from, maxVersion, MessagingService.instance().getVersion(from));
if (compressed)
{
- logger.debug("Upgrading incoming connection to be compressed");
+ logger.trace("Upgrading incoming connection to be compressed");
if (version < MessagingService.VERSION_21)
{
in = new DataInputStream(new SnappyInputStream(socket.getInputStream()));
@@ -206,7 +206,7 @@ public class IncomingTcpConnection extends Thread implements Closeable
}
else
{
- logger.debug("Received connection from newer protocol version {}. Ignoring message", version);
+ logger.trace("Received connection from newer protocol version {}. Ignoring message", version);
}
return message.from;
}
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/net/MessageDeliveryTask.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/net/MessageDeliveryTask.java b/src/java/org/apache/cassandra/net/MessageDeliveryTask.java
index 0ff0319..a46366c 100644
--- a/src/java/org/apache/cassandra/net/MessageDeliveryTask.java
+++ b/src/java/org/apache/cassandra/net/MessageDeliveryTask.java
@@ -57,7 +57,7 @@ public class MessageDeliveryTask implements Runnable
IVerbHandler verbHandler = MessagingService.instance().getVerbHandler(verb);
if (verbHandler == null)
{
- logger.debug("Unknown verb {}", verb);
+ logger.trace("Unknown verb {}", verb);
return;
}
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/net/MessagingService.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/net/MessagingService.java b/src/java/org/apache/cassandra/net/MessagingService.java
index 1f3240d..2a63553 100644
--- a/src/java/org/apache/cassandra/net/MessagingService.java
+++ b/src/java/org/apache/cassandra/net/MessagingService.java
@@ -443,7 +443,7 @@ public final class MessagingService implements MessagingServiceMBean
*/
public void convict(InetAddress ep)
{
- logger.debug("Resetting pool for {}", ep);
+ logger.trace("Resetting pool for {}", ep);
getConnectionPool(ep).reset();
}
@@ -538,7 +538,7 @@ public final class MessagingService implements MessagingServiceMBean
}
catch (InterruptedException ie)
{
- logger.debug("await interrupted");
+ logger.trace("await interrupted");
}
}
@@ -831,7 +831,7 @@ public final class MessagingService implements MessagingServiceMBean
*/
public int setVersion(InetAddress endpoint, int version)
{
- logger.debug("Setting version {} for {}", version, endpoint);
+ logger.trace("Setting version {} for {}", version, endpoint);
if (version < VERSION_22)
allNodesAtLeast22 = false;
@@ -847,7 +847,7 @@ public final class MessagingService implements MessagingServiceMBean
public void resetVersion(InetAddress endpoint)
{
- logger.debug("Resetting version for {}", endpoint);
+ logger.trace("Resetting version for {}", endpoint);
Integer removed = versions.remove(endpoint);
if (removed != null && removed <= VERSION_22)
refreshAllNodesAtLeast22();
@@ -972,7 +972,7 @@ public final class MessagingService implements MessagingServiceMBean
socket = server.accept();
if (!authenticate(socket))
{
- logger.debug("remote failed to authenticate");
+ logger.trace("remote failed to authenticate");
socket.close();
continue;
}
@@ -985,7 +985,7 @@ public final class MessagingService implements MessagingServiceMBean
int header = in.readInt();
boolean isStream = MessagingService.getBits(header, 3, 1) == 1;
int version = MessagingService.getBits(header, 15, 8);
- logger.debug("Connection version {} from {}", version, socket.getInetAddress());
+ logger.trace("Connection version {} from {}", version, socket.getInetAddress());
socket.setSoTimeout(0);
Thread thread = isStream
@@ -997,17 +997,17 @@ public final class MessagingService implements MessagingServiceMBean
catch (AsynchronousCloseException e)
{
// this happens when another thread calls close().
- logger.debug("Asynchronous close seen by server thread");
+ logger.trace("Asynchronous close seen by server thread");
break;
}
catch (ClosedChannelException e)
{
- logger.debug("MessagingService server thread already closed");
+ logger.trace("MessagingService server thread already closed");
break;
}
catch (IOException e)
{
- logger.debug("Error reading the socket " + socket, e);
+ logger.trace("Error reading the socket " + socket, e);
FileUtils.closeQuietly(socket);
}
}
@@ -1016,7 +1016,7 @@ public final class MessagingService implements MessagingServiceMBean
void close() throws IOException
{
- logger.debug("Closing accept() thread");
+ logger.trace("Closing accept() thread");
try
{
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/net/OutboundTcpConnection.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/net/OutboundTcpConnection.java b/src/java/org/apache/cassandra/net/OutboundTcpConnection.java
index cb168bc..31a5269 100644
--- a/src/java/org/apache/cassandra/net/OutboundTcpConnection.java
+++ b/src/java/org/apache/cassandra/net/OutboundTcpConnection.java
@@ -295,8 +295,8 @@ public class OutboundTcpConnection extends Thread
disconnect();
if (e instanceof IOException || e.getCause() instanceof IOException)
{
- if (logger.isDebugEnabled())
- logger.debug("error writing to {}", poolReference.endPoint(), e);
+ if (logger.isTraceEnabled())
+ logger.trace("error writing to {}", poolReference.endPoint(), e);
// if the message was important, such as a repair acknowledgement, put it back on the queue
// to retry after re-connecting. See CASSANDRA-5393
@@ -371,8 +371,8 @@ public class OutboundTcpConnection extends Thread
@SuppressWarnings("resource")
private boolean connect()
{
- if (logger.isDebugEnabled())
- logger.debug("attempting to connect to {}", poolReference.endPoint());
+ if (logger.isTraceEnabled())
+ logger.trace("attempting to connect to {}", poolReference.endPoint());
long start = System.nanoTime();
long timeout = TimeUnit.MILLISECONDS.toNanos(DatabaseDescriptor.getRpcTimeout());
@@ -418,7 +418,7 @@ public class OutboundTcpConnection extends Thread
// no version is returned, so disconnect and try again: we will either get
// a different target version (targetVersion < MessagingService.VERSION_12)
// or if the same version the handshake will finally succeed
- logger.debug("Target max version is {}; no version information yet, will retry", maxTargetVersion);
+ logger.trace("Target max version is {}; no version information yet, will retry", maxTargetVersion);
if (DatabaseDescriptor.getSeeds().contains(poolReference.endPoint()))
logger.warn("Seed gossip version is {}; will not connect with that version", maxTargetVersion);
disconnect();
@@ -431,7 +431,7 @@ public class OutboundTcpConnection extends Thread
if (targetVersion > maxTargetVersion)
{
- logger.debug("Target max version is {}; will reconnect with that version", maxTargetVersion);
+ logger.trace("Target max version is {}; will reconnect with that version", maxTargetVersion);
disconnect();
return false;
}
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/net/ResponseVerbHandler.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/net/ResponseVerbHandler.java b/src/java/org/apache/cassandra/net/ResponseVerbHandler.java
index 1d9aa98..28ed365 100644
--- a/src/java/org/apache/cassandra/net/ResponseVerbHandler.java
+++ b/src/java/org/apache/cassandra/net/ResponseVerbHandler.java
@@ -35,7 +35,7 @@ public class ResponseVerbHandler implements IVerbHandler
if (callbackInfo == null)
{
String msg = "Callback already removed for {} (from {})";
- logger.debug(msg, id, message.from);
+ logger.trace(msg, id, message.from);
Tracing.trace(msg, id, message.from);
return;
}
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/schema/LegacySchemaTables.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/schema/LegacySchemaTables.java b/src/java/org/apache/cassandra/schema/LegacySchemaTables.java
index 30fe013..5eae60b 100644
--- a/src/java/org/apache/cassandra/schema/LegacySchemaTables.java
+++ b/src/java/org/apache/cassandra/schema/LegacySchemaTables.java
@@ -1357,7 +1357,7 @@ public class LegacySchemaTables
udf.body().equals(body) &&
udf.isCalledOnNullInput() == calledOnNullInput)
{
- logger.debug("Skipping duplicate compilation of already existing UDF {}", name);
+ logger.trace("Skipping duplicate compilation of already existing UDF {}", name);
return udf;
}
}
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/service/FileCacheService.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/service/FileCacheService.java b/src/java/org/apache/cassandra/service/FileCacheService.java
index 1e12310..19d6a70 100644
--- a/src/java/org/apache/cassandra/service/FileCacheService.java
+++ b/src/java/org/apache/cassandra/service/FileCacheService.java
@@ -147,8 +147,8 @@ public class FileCacheService
public void put(CacheKey cacheKey, RandomAccessReader instance)
{
int memoryUsed = memoryUsage.get();
- if (logger.isDebugEnabled())
- logger.debug("Estimated memory usage is {} compared to actual usage {}", memoryUsed, sizeInBytes());
+ if (logger.isTraceEnabled())
+ logger.trace("Estimated memory usage is {} compared to actual usage {}", memoryUsed, sizeInBytes());
CacheBucket bucket = cache.getIfPresent(cacheKey);
if (memoryUsed >= MEMORY_USAGE_THRESHOLD || bucket == null)
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/service/GCInspector.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/service/GCInspector.java b/src/java/org/apache/cassandra/service/GCInspector.java
index 3a4ec22..89a4a7e 100644
--- a/src/java/org/apache/cassandra/service/GCInspector.java
+++ b/src/java/org/apache/cassandra/service/GCInspector.java
@@ -282,8 +282,8 @@ public class GCInspector implements NotificationListener, GCInspectorMXBean
logger.warn(st);
else if (duration > MIN_LOG_DURATION)
logger.info(st);
- else if (logger.isDebugEnabled())
- logger.debug(st);
+ else if (logger.isTraceEnabled())
+ logger.trace(st);
if (duration > STAT_THRESHOLD)
StatusLogger.log();
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/service/LoadBroadcaster.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/service/LoadBroadcaster.java b/src/java/org/apache/cassandra/service/LoadBroadcaster.java
index 238daae..69fa93d 100644
--- a/src/java/org/apache/cassandra/service/LoadBroadcaster.java
+++ b/src/java/org/apache/cassandra/service/LoadBroadcaster.java
@@ -87,8 +87,8 @@ public class LoadBroadcaster implements IEndpointStateChangeSubscriber
{
public void run()
{
- if (logger.isDebugEnabled())
- logger.debug("Disseminating load info ...");
+ if (logger.isTraceEnabled())
+ logger.trace("Disseminating load info ...");
Gossiper.instance.addLocalApplicationState(ApplicationState.LOAD,
StorageService.instance.valueFactory.load(StorageMetrics.load.getCount()));
}
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/service/ReadCallback.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/service/ReadCallback.java b/src/java/org/apache/cassandra/service/ReadCallback.java
index 57d8064..145679d 100644
--- a/src/java/org/apache/cassandra/service/ReadCallback.java
+++ b/src/java/org/apache/cassandra/service/ReadCallback.java
@@ -109,8 +109,8 @@ public class ReadCallback<TMessage, TResolved> implements IAsyncCallbackWithFail
// Same as for writes, see AbstractWriteResponseHandler
ReadTimeoutException ex = new ReadTimeoutException(consistencyLevel, received, blockfor, resolver.isDataPresent());
Tracing.trace("Read timeout: {}", ex.toString());
- if (logger.isDebugEnabled())
- logger.debug("Read timeout: {}", ex.toString());
+ if (logger.isTraceEnabled())
+ logger.trace("Read timeout: {}", ex.toString());
throw ex;
}
@@ -118,8 +118,8 @@ public class ReadCallback<TMessage, TResolved> implements IAsyncCallbackWithFail
{
ReadFailureException ex = new ReadFailureException(consistencyLevel, received, failures, blockfor, resolver.isDataPresent());
- if (logger.isDebugEnabled())
- logger.debug("Read failure: {}", ex.toString());
+ if (logger.isTraceEnabled())
+ logger.trace("Read failure: {}", ex.toString());
throw ex;
}
@@ -210,8 +210,8 @@ public class ReadCallback<TMessage, TResolved> implements IAsyncCallbackWithFail
if (traceState != null)
traceState.trace("Digest mismatch: {}", e.toString());
- if (logger.isDebugEnabled())
- logger.debug("Digest mismatch:", e);
+ if (logger.isTraceEnabled())
+ logger.trace("Digest mismatch:", e);
ReadRepairMetrics.repairedBackground.mark();
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/service/RowDataResolver.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/service/RowDataResolver.java b/src/java/org/apache/cassandra/service/RowDataResolver.java
index e935ce7..e15302b 100644
--- a/src/java/org/apache/cassandra/service/RowDataResolver.java
+++ b/src/java/org/apache/cassandra/service/RowDataResolver.java
@@ -59,8 +59,8 @@ public class RowDataResolver extends AbstractRowResolver
public Row resolve() throws DigestMismatchException
{
int replyCount = replies.size();
- if (logger.isDebugEnabled())
- logger.debug("resolving {} responses", replyCount);
+ if (logger.isTraceEnabled())
+ logger.trace("resolving {} responses", replyCount);
long start = System.nanoTime();
ColumnFamily resolved;
@@ -84,8 +84,8 @@ public class RowDataResolver extends AbstractRowResolver
}
resolved = resolveSuperset(versions, timestamp);
- if (logger.isDebugEnabled())
- logger.debug("versions merged");
+ if (logger.isTraceEnabled())
+ logger.trace("versions merged");
// send updates to any replica that was missing part of the full row
// (resolved can be null even if versions doesn't have all nulls because of the call to removeDeleted in resolveSuperSet)
@@ -97,8 +97,8 @@ public class RowDataResolver extends AbstractRowResolver
resolved = replies.get(0).payload.row().cf;
}
- if (logger.isDebugEnabled())
- logger.debug("resolve: {} ms.", TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start));
+ if (logger.isTraceEnabled())
+ logger.trace("resolve: {} ms.", TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start));
return new Row(key, resolved);
}
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/service/RowDigestResolver.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/service/RowDigestResolver.java b/src/java/org/apache/cassandra/service/RowDigestResolver.java
index 95b5b80..32b26e1 100644
--- a/src/java/org/apache/cassandra/service/RowDigestResolver.java
+++ b/src/java/org/apache/cassandra/service/RowDigestResolver.java
@@ -63,8 +63,8 @@ public class RowDigestResolver extends AbstractRowResolver
*/
public Row resolve() throws DigestMismatchException
{
- if (logger.isDebugEnabled())
- logger.debug("resolving {} responses", replies.size());
+ if (logger.isTraceEnabled())
+ logger.trace("resolving {} responses", replies.size());
long start = System.nanoTime();
@@ -98,8 +98,8 @@ public class RowDigestResolver extends AbstractRowResolver
throw new DigestMismatchException(key, digest, newDigest);
}
- if (logger.isDebugEnabled())
- logger.debug("resolve: {} ms.", TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start));
+ if (logger.isTraceEnabled())
+ logger.trace("resolve: {} ms.", TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start));
return new Row(key, data);
}