Posted to commits@kafka.apache.org by ij...@apache.org on 2017/07/22 09:35:19 UTC

kafka git commit: MINOR: Improve log warning to include the log name

Repository: kafka
Updated Branches:
  refs/heads/trunk c71e7fa3a -> 7727d7a9a


MINOR: Improve log warning to include the log name

Author: Ismael Juma <is...@juma.me.uk>

Reviewers: Rajini Sivaram <ra...@googlemail.com>

Closes #3562 from ijuma/tweak-log-warning
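
For illustration only (not part of the commit): a minimal, self-contained Scala sketch of how the
updated warning renders compared to the old one. The message strings are taken verbatim from the
diff below; the log name, offsets, and variable names here are hypothetical.

    // Sketch only: shows the before/after warning text with made-up values.
    object WarningFormatExample extends App {
      val logName = "my-topic-0"     // hypothetical log (partition) name
      val logStartOffset = 100L      // hypothetical log start offset
      val checkpointedOffset = 42L   // hypothetical invalid checkpointed offset

      // Before this commit: no indication of which log was affected.
      println(s"Resetting first dirty offset to log start offset $logStartOffset " +
        s"since the checkpointed offset $checkpointedOffset is invalid.")

      // After this commit: the log name identifies the affected partition.
      println(s"Resetting first dirty offset of $logName to log start offset $logStartOffset " +
        s"since the checkpointed offset $checkpointedOffset is invalid.")
    }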


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/7727d7a9
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/7727d7a9
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/7727d7a9

Branch: refs/heads/trunk
Commit: 7727d7a9a574ec4305bcf9f490b329f4ffcf4824
Parents: c71e7fa
Author: Ismael Juma <is...@juma.me.uk>
Authored: Sat Jul 22 10:35:02 2017 +0100
Committer: Ismael Juma <is...@juma.me.uk>
Committed: Sat Jul 22 10:35:02 2017 +0100

----------------------------------------------------------------------
 .../src/main/scala/kafka/log/LogCleanerManager.scala | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/kafka/blob/7727d7a9/core/src/main/scala/kafka/log/LogCleanerManager.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/kafka/log/LogCleanerManager.scala b/core/src/main/scala/kafka/log/LogCleanerManager.scala
index 4a4a59f..ed0cb69 100755
--- a/core/src/main/scala/kafka/log/LogCleanerManager.scala
+++ b/core/src/main/scala/kafka/log/LogCleanerManager.scala
@@ -287,7 +287,7 @@ private[log] object LogCleanerManager extends Logging {
       if (offset < logStartOffset) {
         // don't bother with the warning if compact and delete are enabled.
         if (!isCompactAndDelete(log))
-          warn(s"Resetting first dirty offset to log start offset $logStartOffset since the checkpointed offset $offset is invalid.")
+          warn(s"Resetting first dirty offset of ${log.name} to log start offset $logStartOffset since the checkpointed offset $offset is invalid.")
         logStartOffset
       } else {
         offset
@@ -302,7 +302,7 @@ private[log] object LogCleanerManager extends Logging {
     // find first segment that cannot be cleaned
     // neither the active segment, nor segments with any messages closer to the head of the log than the minimum compaction lag time
     // may be cleaned
-    val firstUncleanableDirtyOffset: Long = Seq (
+    val firstUncleanableDirtyOffset: Long = Seq(
 
       // we do not clean beyond the first unstable offset
       log.firstUnstableOffset.map(_.messageOffset),
@@ -312,12 +312,11 @@ private[log] object LogCleanerManager extends Logging {
 
       // the first segment whose largest message timestamp is within a minimum time lag from now
       if (compactionLagMs > 0) {
-        dirtyNonActiveSegments.find {
-          s =>
-            val isUncleanable = s.largestTimestamp > now - compactionLagMs
-            debug(s"Checking if log segment may be cleaned: log='${log.name}' segment.baseOffset=${s.baseOffset} segment.largestTimestamp=${s.largestTimestamp}; now - compactionLag=${now - compactionLagMs}; is uncleanable=$isUncleanable")
-            isUncleanable
-        } map(_.baseOffset)
+        dirtyNonActiveSegments.find { s =>
+          val isUncleanable = s.largestTimestamp > now - compactionLagMs
+          debug(s"Checking if log segment may be cleaned: log='${log.name}' segment.baseOffset=${s.baseOffset} segment.largestTimestamp=${s.largestTimestamp}; now - compactionLag=${now - compactionLagMs}; is uncleanable=$isUncleanable")
+          isUncleanable
+        }.map(_.baseOffset)
       } else None
     ).flatten.min
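
The last hunk is a purely stylistic cleanup: the multi-line closure passed to find is compacted and
the trailing map(_.baseOffset) gains an explicit dot. For illustration only, a standalone sketch of
the same find-then-map pattern, using a simplified stand-in for LogSegment; the class, values, and
names below are hypothetical and not part of the Kafka code.

    // Sketch only: first segment whose largest timestamp is within the compaction lag.
    final case class Segment(baseOffset: Long, largestTimestamp: Long)

    object FindMapExample extends App {
      val now = System.currentTimeMillis()
      val compactionLagMs = 60 * 60 * 1000L // hypothetical 1-hour compaction lag

      val dirtyNonActiveSegments = Seq(
        Segment(baseOffset = 0L, largestTimestamp = now - 3 * compactionLagMs),  // old enough to clean
        Segment(baseOffset = 50L, largestTimestamp = now - compactionLagMs / 2)  // still within the lag
      )

      // Same shape as the refactored code: find the first uncleanable segment,
      // then take its base offset (None if every dirty segment is cleanable).
      val firstUncleanable: Option[Long] =
        dirtyNonActiveSegments.find { s =>
          s.largestTimestamp > now - compactionLagMs
        }.map(_.baseOffset)

      println(firstUncleanable) // prints Some(50) with the hypothetical data above
    }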