You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@kafka.apache.org by ij...@apache.org on 2017/03/30 12:52:26 UTC
kafka git commit: MINOR: Fix typos in javadoc and code comments
Repository: kafka
Updated Branches:
refs/heads/trunk 81721f8c5 -> a3e13776e
MINOR: Fix typos in javadoc and code comments
Author: Vahid Hashemian <va...@us.ibm.com>
Reviewers: Ismael Juma <is...@juma.me.uk>
Closes #2595 from vahidhashemian/minor/fix_typos_1702
Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/a3e13776
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/a3e13776
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/a3e13776
Branch: refs/heads/trunk
Commit: a3e13776e6c0889131ddfdaa8b10cd2ef2498603
Parents: 81721f8
Author: Vahid Hashemian <va...@us.ibm.com>
Authored: Thu Mar 30 13:52:22 2017 +0100
Committer: Ismael Juma <is...@juma.me.uk>
Committed: Thu Mar 30 13:52:22 2017 +0100
----------------------------------------------------------------------
.../java/org/apache/kafka/common/requests/MetadataRequest.java | 2 +-
.../main/java/org/apache/kafka/common/security/JaasConfig.java | 2 +-
.../main/scala/kafka/consumer/ZookeeperConsumerConnector.scala | 4 ++--
core/src/main/scala/kafka/log/Log.scala | 2 +-
core/src/main/scala/kafka/log/TimeIndex.scala | 4 ++--
core/src/main/scala/kafka/utils/Annotations.scala | 2 +-
core/src/main/scala/kafka/utils/ZkUtils.scala | 2 +-
core/src/test/scala/integration/kafka/api/BaseConsumerTest.scala | 2 +-
.../main/java/org/apache/kafka/streams/kstream/TimeWindows.java | 4 ++--
.../kafka/streams/state/internals/CachingKeyValueStore.java | 2 +-
.../streams/processor/internals/ProcessorStateManagerTest.java | 4 ++--
.../streams/processor/internals/StreamPartitionAssignorTest.java | 2 +-
12 files changed, 16 insertions(+), 16 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/kafka/blob/a3e13776/clients/src/main/java/org/apache/kafka/common/requests/MetadataRequest.java
----------------------------------------------------------------------
diff --git a/clients/src/main/java/org/apache/kafka/common/requests/MetadataRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/MetadataRequest.java
index 98d57ed..97072d5 100644
--- a/clients/src/main/java/org/apache/kafka/common/requests/MetadataRequest.java
+++ b/clients/src/main/java/org/apache/kafka/common/requests/MetadataRequest.java
@@ -82,7 +82,7 @@ public class MetadataRequest extends AbstractRequest {
private final List<String> topics;
/**
- * In v0 null is not allowed and and empty list indicates requesting all topics.
+ * In v0 null is not allowed and an empty list indicates requesting all topics.
* Note: modern clients do not support sending v0 requests.
* In v1 null indicates requesting all topics, and an empty list indicates requesting no topics.
*/
http://git-wip-us.apache.org/repos/asf/kafka/blob/a3e13776/clients/src/main/java/org/apache/kafka/common/security/JaasConfig.java
----------------------------------------------------------------------
diff --git a/clients/src/main/java/org/apache/kafka/common/security/JaasConfig.java b/clients/src/main/java/org/apache/kafka/common/security/JaasConfig.java
index e0bcffe..24bdac2 100644
--- a/clients/src/main/java/org/apache/kafka/common/security/JaasConfig.java
+++ b/clients/src/main/java/org/apache/kafka/common/security/JaasConfig.java
@@ -34,7 +34,7 @@ import org.apache.kafka.common.config.SaslConfigs;
/**
* JAAS configuration parser that constructs a JAAS configuration object with a single
- * login context from the the Kafka configuration option {@link SaslConfigs#SASL_JAAS_CONFIG}.
+ * login context from the Kafka configuration option {@link SaslConfigs#SASL_JAAS_CONFIG}.
* <p/>
* JAAS configuration file format is described <a href="http://docs.oracle.com/javase/8/docs/technotes/guides/security/jgss/tutorials/LoginConfigFile.html">here</a>.
* The format of the property value is:
http://git-wip-us.apache.org/repos/asf/kafka/blob/a3e13776/core/src/main/scala/kafka/consumer/ZookeeperConsumerConnector.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/kafka/consumer/ZookeeperConsumerConnector.scala b/core/src/main/scala/kafka/consumer/ZookeeperConsumerConnector.scala
index fde4710..c5ad94a 100755
--- a/core/src/main/scala/kafka/consumer/ZookeeperConsumerConnector.scala
+++ b/core/src/main/scala/kafka/consumer/ZookeeperConsumerConnector.scala
@@ -217,7 +217,7 @@ private[kafka] class ZookeeperConsumerConnector(val config: ConsumerConfig,
rebalanceLock synchronized {
try {
if (config.autoCommitEnable)
- scheduler.shutdown()
+ scheduler.shutdown()
fetcher match {
case Some(f) => f.stopConnections
case None =>
@@ -677,7 +677,7 @@ private[kafka] class ZookeeperConsumerConnector(val config: ConsumerConfig,
val brokers = zkUtils.getAllBrokersInCluster()
if (brokers.size == 0) {
// This can happen in a rare case when there are no brokers available in the cluster when the consumer is started.
- // We log an warning and register for child changes on brokers/id so that rebalance can be triggered when the brokers
+ // We log a warning and register for child changes on brokers/id so that rebalance can be triggered when the brokers
// are up.
warn("no brokers found when trying to rebalance.")
zkUtils.zkClient.subscribeChildChanges(BrokerIdsPath, loadBalancerListener)
http://git-wip-us.apache.org/repos/asf/kafka/blob/a3e13776/core/src/main/scala/kafka/log/Log.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/kafka/log/Log.scala b/core/src/main/scala/kafka/log/Log.scala
index 96535b1..2a81f26 100644
--- a/core/src/main/scala/kafka/log/Log.scala
+++ b/core/src/main/scala/kafka/log/Log.scala
@@ -712,7 +712,7 @@ class Log(@volatile var dir: File,
}
/**
- * Find segments starting from the oldest until the the user-supplied predicate is false.
+ * Find segments starting from the oldest until the user-supplied predicate is false.
* A final segment that is empty will never be returned (since we would just end up re-creating it).
* @param predicate A function that takes in a single log segment and returns true iff it is deletable
* @return the segments ready to be deleted
http://git-wip-us.apache.org/repos/asf/kafka/blob/a3e13776/core/src/main/scala/kafka/log/TimeIndex.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/kafka/log/TimeIndex.scala b/core/src/main/scala/kafka/log/TimeIndex.scala
index 5f2b387..0cedbca 100644
--- a/core/src/main/scala/kafka/log/TimeIndex.scala
+++ b/core/src/main/scala/kafka/log/TimeIndex.scala
@@ -111,13 +111,13 @@ class TimeIndex(file: File,
// to insert the same time index entry as the last entry.
// If the timestamp index entry to be inserted is the same as the last entry, we simply ignore the insertion
// because that could happen in the following two scenarios:
- // 1. An log segment is closed.
+ // 1. A log segment is closed.
// 2. LogSegment.onBecomeInactiveSegment() is called when an active log segment is rolled.
if (_entries != 0 && offset < lastEntry.offset)
throw new InvalidOffsetException("Attempt to append an offset (%d) to slot %d no larger than the last offset appended (%d) to %s."
.format(offset, _entries, lastEntry.offset, file.getAbsolutePath))
if (_entries != 0 && timestamp < lastEntry.timestamp)
- throw new IllegalStateException("Attempt to append an timestamp (%d) to slot %d no larger than the last timestamp appended (%d) to %s."
+ throw new IllegalStateException("Attempt to append a timestamp (%d) to slot %d no larger than the last timestamp appended (%d) to %s."
.format(timestamp, _entries, lastEntry.timestamp, file.getAbsolutePath))
// We only append to the time index when the timestamp is greater than the last inserted timestamp.
// If all the messages are in message format v0, the timestamp will always be NoTimestamp. In that case, the time
http://git-wip-us.apache.org/repos/asf/kafka/blob/a3e13776/core/src/main/scala/kafka/utils/Annotations.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/kafka/utils/Annotations.scala b/core/src/main/scala/kafka/utils/Annotations.scala
index ab95ce1..da4a25c 100644
--- a/core/src/main/scala/kafka/utils/Annotations.scala
+++ b/core/src/main/scala/kafka/utils/Annotations.scala
@@ -22,7 +22,7 @@ import scala.annotation.StaticAnnotation
/* Some helpful annotations */
/**
- * Indicates that the annotated class is meant to be threadsafe. For an abstract class it is an part of the interface that an implementation
+ * Indicates that the annotated class is meant to be threadsafe. For an abstract class it is a part of the interface that an implementation
* must respect
*/
class threadsafe extends StaticAnnotation
http://git-wip-us.apache.org/repos/asf/kafka/blob/a3e13776/core/src/main/scala/kafka/utils/ZkUtils.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/kafka/utils/ZkUtils.scala b/core/src/main/scala/kafka/utils/ZkUtils.scala
index e67e264..aa55479 100644
--- a/core/src/main/scala/kafka/utils/ZkUtils.scala
+++ b/core/src/main/scala/kafka/utils/ZkUtils.scala
@@ -472,7 +472,7 @@ class ZkUtils(val zkClient: ZkClient,
}
/**
- * Create an persistent node with the given path and data. Create parents if necessary.
+ * Create a persistent node with the given path and data. Create parents if necessary.
*/
def createPersistentPath(path: String, data: String = "", acls: java.util.List[ACL] = DefaultAcls): Unit = {
try {
http://git-wip-us.apache.org/repos/asf/kafka/blob/a3e13776/core/src/test/scala/integration/kafka/api/BaseConsumerTest.scala
----------------------------------------------------------------------
diff --git a/core/src/test/scala/integration/kafka/api/BaseConsumerTest.scala b/core/src/test/scala/integration/kafka/api/BaseConsumerTest.scala
index 27b89d5..d3535c6 100644
--- a/core/src/test/scala/integration/kafka/api/BaseConsumerTest.scala
+++ b/core/src/test/scala/integration/kafka/api/BaseConsumerTest.scala
@@ -255,7 +255,7 @@ abstract class BaseConsumerTest extends IntegrationTestHarness {
*/
def subscribe(newTopicsToSubscribe: List[String]): Unit = {
if (subscriptionChanged) {
- throw new IllegalStateException("Do not call subscribe until the previous subsribe request is processed.")
+ throw new IllegalStateException("Do not call subscribe until the previous subscribe request is processed.")
}
topicsSubscription = newTopicsToSubscribe
subscriptionChanged = true
http://git-wip-us.apache.org/repos/asf/kafka/blob/a3e13776/streams/src/main/java/org/apache/kafka/streams/kstream/TimeWindows.java
----------------------------------------------------------------------
diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/TimeWindows.java b/streams/src/main/java/org/apache/kafka/streams/kstream/TimeWindows.java
index 3801691..7a2d00e 100644
--- a/streams/src/main/java/org/apache/kafka/streams/kstream/TimeWindows.java
+++ b/streams/src/main/java/org/apache/kafka/streams/kstream/TimeWindows.java
@@ -74,7 +74,7 @@ public final class TimeWindows extends Windows<TimeWindow> {
/**
* Return a window definition with the given window size, and with the advance interval being equal to the window
* size.
- * The time interval represented by the the N-th window is: {@code [N * size, N * size + size)}.
+ * The time interval represented by the N-th window is: {@code [N * size, N * size + size)}.
* <p>
* This provides the semantics of tumbling windows, which are fixed-sized, gap-less, non-overlapping windows.
* Tumbling windows are a special case of hopping windows with {@code advance == size}.
@@ -93,7 +93,7 @@ public final class TimeWindows extends Windows<TimeWindow> {
/**
* Return a window definition with the original size, but advance ("hop") the window by the given interval, which
* specifies by how much a window moves forward relative to the previous one.
- * The time interval represented by the the N-th window is: {@code [N * advance, N * advance + size)}.
+ * The time interval represented by the N-th window is: {@code [N * advance, N * advance + size)}.
* <p>
* This provides the semantics of hopping windows, which are fixed-sized, overlapping windows.
*
http://git-wip-us.apache.org/repos/asf/kafka/blob/a3e13776/streams/src/main/java/org/apache/kafka/streams/state/internals/CachingKeyValueStore.java
----------------------------------------------------------------------
diff --git a/streams/src/main/java/org/apache/kafka/streams/state/internals/CachingKeyValueStore.java b/streams/src/main/java/org/apache/kafka/streams/state/internals/CachingKeyValueStore.java
index a5a5618..d9ef688 100644
--- a/streams/src/main/java/org/apache/kafka/streams/state/internals/CachingKeyValueStore.java
+++ b/streams/src/main/java/org/apache/kafka/streams/state/internals/CachingKeyValueStore.java
@@ -62,7 +62,7 @@ class CachingKeyValueStore<K, V> extends WrappedStateStore.AbstractStateStore im
underlying.init(context, root);
initInternal(context);
// save the stream thread as we only ever want to trigger a flush
- // when the stream thread is the the current thread.
+ // when the stream thread is the current thread.
streamThread = Thread.currentThread();
}
http://git-wip-us.apache.org/repos/asf/kafka/blob/a3e13776/streams/src/test/java/org/apache/kafka/streams/processor/internals/ProcessorStateManagerTest.java
----------------------------------------------------------------------
diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/ProcessorStateManagerTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/ProcessorStateManagerTest.java
index fe8c186..df041cc 100644
--- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/ProcessorStateManagerTest.java
+++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/ProcessorStateManagerTest.java
@@ -156,7 +156,7 @@ public class ProcessorStateManagerTest {
MockStateStoreSupplier.MockStateStore store2 = new MockStateStoreSupplier.MockStateStore(storeName2, true);
MockStateStoreSupplier.MockStateStore store3 = new MockStateStoreSupplier.MockStateStore(storeName3, true);
- // if there is an source partition, inherit the partition id
+ // if there is a source partition, inherit the partition id
Set<TopicPartition> sourcePartitions = Utils.mkSet(new TopicPartition(storeTopicName3, 1));
ProcessorStateManager stateMgr = new ProcessorStateManager(taskId, sourcePartitions, true, stateDirectory, storeToChangelogTopic, changelogReader); // standby
@@ -389,7 +389,7 @@ public class ProcessorStateManagerTest {
} catch (final IllegalArgumentException e) {
// pass
}
-
+
}
@Test
http://git-wip-us.apache.org/repos/asf/kafka/blob/a3e13776/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamPartitionAssignorTest.java
----------------------------------------------------------------------
diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamPartitionAssignorTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamPartitionAssignorTest.java
index 97d8815..4cc7b92 100644
--- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamPartitionAssignorTest.java
+++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamPartitionAssignorTest.java
@@ -999,7 +999,7 @@ public class StreamPartitionAssignorTest {
private AssignmentInfo checkAssignment(Set<String> expectedTopics, PartitionAssignor.Assignment assignment) {
- // This assumed 1) DefaultPartitionGrouper is used, and 2) there is a only one topic group.
+ // This assumed 1) DefaultPartitionGrouper is used, and 2) there is only one topic group.
AssignmentInfo info = AssignmentInfo.decode(assignment.userData());