You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@kafka.apache.org by ij...@apache.org on 2018/06/12 06:02:41 UTC
[kafka] branch trunk updated: MINOR: Use SLF4J string interpolation
instead of string concatenation (#5113)
This is an automated email from the ASF dual-hosted git repository.
ijuma pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git
The following commit(s) were added to refs/heads/trunk by this push:
new f0b1b46 MINOR: Use SLF4J string interpolation instead of string concatenation (#5113)
f0b1b46 is described below
commit f0b1b46486ca67a5d44b68ee076af0d6e986764a
Author: Dhruvil Shah <dh...@confluent.io>
AuthorDate: Mon Jun 11 23:02:19 2018 -0700
MINOR: Use SLF4J string interpolation instead of string concatenation (#5113)
Also tweak logging message slightly and use Records.LOG_OVERHEAD definition.
Reviewers: Ismael Juma <is...@juma.me.uk>
---
.../apache/kafka/common/record/LazyDownConversionRecordsSend.java | 8 +++-----
1 file changed, 3 insertions(+), 5 deletions(-)
diff --git a/clients/src/main/java/org/apache/kafka/common/record/LazyDownConversionRecordsSend.java b/clients/src/main/java/org/apache/kafka/common/record/LazyDownConversionRecordsSend.java
index b782114..e60e1ed 100644
--- a/clients/src/main/java/org/apache/kafka/common/record/LazyDownConversionRecordsSend.java
+++ b/clients/src/main/java/org/apache/kafka/common/record/LazyDownConversionRecordsSend.java
@@ -63,7 +63,7 @@ public final class LazyDownConversionRecordsSend extends RecordsSend<LazyDownCon
" converted_records_size: " + sizeOfFirstConvertedBatch);
recordConversionStats.add(recordsAndStats.recordConversionStats());
- log.debug("Got lazy converted records for {" + topicPartition() + "} with length=" + convertedRecords.sizeInBytes());
+ log.debug("Got lazy converted records for partition {} with length={}", topicPartition(), convertedRecords.sizeInBytes());
} else {
if (previouslyWritten == 0)
throw new EOFException("Unable to get the first batch of down-converted records");
@@ -75,10 +75,8 @@ public final class LazyDownConversionRecordsSend extends RecordsSend<LazyDownCon
// BaseOffset => Int64
// Length => Int32
// ...
- // TODO: check if there is a better way to encapsulate this logic, perhaps in DefaultRecordBatch
- log.debug("Constructing fake message batch for topic-partition {" + topicPartition() + "} for remaining length " + remaining);
- int minLength = (Long.SIZE / Byte.SIZE) + (Integer.SIZE / Byte.SIZE);
- ByteBuffer fakeMessageBatch = ByteBuffer.allocate(Math.max(minLength, Math.min(remaining + 1, MAX_READ_SIZE)));
+ log.debug("Constructing fake message batch for partition {} for remaining length={}", topicPartition(), remaining);
+ ByteBuffer fakeMessageBatch = ByteBuffer.allocate(Math.max(Records.LOG_OVERHEAD, Math.min(remaining + 1, MAX_READ_SIZE)));
fakeMessageBatch.putLong(-1L);
fakeMessageBatch.putInt(remaining + 1);
convertedRecords = MemoryRecords.readableRecords(fakeMessageBatch);
--
To stop receiving notification emails like this one, please contact
ijuma@apache.org.