Posted to commits@kafka.apache.org by gw...@apache.org on 2015/12/15 22:47:03 UTC

kafka git commit: MINOR: Fix typos in code comments

Repository: kafka
Updated Branches:
  refs/heads/trunk 9545cc883 -> 3f3358b6d


MINOR: Fix typos in code comments

Author: Vahid Hashemian <va...@us.ibm.com>

Reviewers: Gwen Shapira

Closes #673 from vahidhashemian/typo02/fix_typos_in_code_comments


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/3f3358b6
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/3f3358b6
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/3f3358b6

Branch: refs/heads/trunk
Commit: 3f3358b6d4374662f5ca57c6e93e009b58a6b2a2
Parents: 9545cc8
Author: Vahid Hashemian <va...@us.ibm.com>
Authored: Tue Dec 15 13:46:57 2015 -0800
Committer: Gwen Shapira <cs...@gmail.com>
Committed: Tue Dec 15 13:46:57 2015 -0800

----------------------------------------------------------------------
 .../java/org/apache/kafka/clients/consumer/KafkaConsumer.java  | 4 ++--
 .../clients/consumer/internals/ConsumerNetworkClient.java      | 2 +-
 clients/src/main/java/org/apache/kafka/common/cache/Cache.java | 2 +-
 .../kafka/common/errors/GroupLoadInProgressException.java      | 2 +-
 .../src/main/java/org/apache/kafka/common/metrics/Stat.java    | 2 +-
 .../org/apache/kafka/common/metrics/stats/SampledStat.java     | 4 ++--
 .../main/java/org/apache/kafka/common/network/Selector.java    | 4 ++--
 .../java/org/apache/kafka/common/network/TransportLayer.java   | 2 +-
 clients/src/main/java/org/apache/kafka/common/utils/Utils.java | 2 +-
 .../org/apache/kafka/common/network/SslTransportLayerTest.java | 2 +-
 clients/src/test/java/org/apache/kafka/test/TestSslUtils.java  | 2 +-
 .../java/org/apache/kafka/connect/runtime/WorkerSinkTask.java  | 2 +-
 .../kafka/connect/runtime/distributed/ClusterConfigState.java  | 2 +-
 .../kafka/connect/runtime/distributed/DistributedHerder.java   | 2 +-
 .../org/apache/kafka/connect/runtime/WorkerSinkTaskTest.java   | 2 +-
 .../main/scala/kafka/common/MessageStreamsExistException.scala | 2 +-
 core/src/main/scala/kafka/log/OffsetMap.scala                  | 2 +-
 core/src/main/scala/kafka/message/MessageLengthException.scala | 2 +-
 core/src/main/scala/kafka/tools/ZooKeeperMainWrapper.scala     | 2 +-
 core/src/main/scala/kafka/utils/CoreUtils.scala                | 4 ++--
 core/src/main/scala/kafka/utils/ZkUtils.scala                  | 4 ++--
 .../scala/integration/kafka/api/PlaintextConsumerTest.scala    | 6 +++---
 core/src/test/scala/unit/kafka/log/LogSegmentTest.scala        | 2 +-
 .../scala/unit/kafka/message/BaseMessageSetTestCases.scala     | 2 +-
 core/src/test/scala/unit/kafka/utils/TestUtils.scala           | 6 +++---
 25 files changed, 34 insertions(+), 34 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/kafka/blob/3f3358b6/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java
----------------------------------------------------------------------
diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java
index 2c0db37..7ea293d 100644
--- a/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java
+++ b/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java
@@ -326,9 +326,9 @@ import java.util.regex.Pattern;
  *
  * <p>
  * One of such cases is stream processing, where processor fetches from two topics and performs the join on these two streams.
- * When one of the topic is long lagging behind the other, the processor would like to pause fetching from the ahead topic
+ * When one of the topics is long lagging behind the other, the processor would like to pause fetching from the ahead topic
  * in order to get the lagging stream to catch up. Another example is bootstraping upon consumer starting up where there are
- * a lot of history data to catch up, the applciations usually wants to get the latest data on some of the topics before consider
+ * a lot of history data to catch up, the applications usually want to get the latest data on some of the topics before consider
  * fetching other topics.
  *
  * <p>
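
For illustration, the pause/resume pattern this comment describes looks roughly like the sketch below; the topic names and the lag check are hypothetical placeholders, and only pause()/resume() come from the consumer API itself.

    // pause fetching from the topic that is ahead so the lagging
    // stream can catch up, then resume once the lag has closed
    TopicPartition ahead = new TopicPartition("clicks", 0);
    if (lagBehind("impressions") > maxAllowedLag)   // lagBehind() is a hypothetical helper
        consumer.pause(ahead);
    else
        consumer.resume(ahead);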

http://git-wip-us.apache.org/repos/asf/kafka/blob/3f3358b6/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkClient.java
----------------------------------------------------------------------
diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkClient.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkClient.java
index f707d6f..4492306 100644
--- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkClient.java
+++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkClient.java
@@ -213,7 +213,7 @@ public class ConsumerNetworkClient implements Closeable {
         clientPoll(timeout, now);
         now = time.milliseconds();
 
-        // handle any disconnects by failing the active requests. note that disconects must
+        // handle any disconnects by failing the active requests. note that disconnects must
         // be checked immediately following poll since any subsequent call to client.ready()
         // will reset the disconnect status
         checkDisconnects(now);

http://git-wip-us.apache.org/repos/asf/kafka/blob/3f3358b6/clients/src/main/java/org/apache/kafka/common/cache/Cache.java
----------------------------------------------------------------------
diff --git a/clients/src/main/java/org/apache/kafka/common/cache/Cache.java b/clients/src/main/java/org/apache/kafka/common/cache/Cache.java
index 6678e40..6c81faf 100644
--- a/clients/src/main/java/org/apache/kafka/common/cache/Cache.java
+++ b/clients/src/main/java/org/apache/kafka/common/cache/Cache.java
@@ -18,7 +18,7 @@
 package org.apache.kafka.common.cache;
 
 /**
- * Interface for caches, semi-peristent maps which store key-value mappings until either an eviction criteria is met
+ * Interface for caches, semi-persistent maps which store key-value mappings until either an eviction criteria is met
  * or the entries are manually invalidated. Caches are not required to be thread-safe, but some implementations may be.
  */
 public interface Cache<K, V> {
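
A minimal sketch of the kind of implementation the interface describes, an LRU cache built on the JDK's LinkedHashMap; the get/put/remove/size method set is assumed from context rather than copied from the interface.

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class SimpleLruCache<K, V> {
        private final LinkedHashMap<K, V> entries;

        public SimpleLruCache(final int maxEntries) {
            // accessOrder=true keeps entries in LRU order; removeEldestEntry
            // is the "eviction criteria" hook the javadoc above refers to
            this.entries = new LinkedHashMap<K, V>(16, 0.75f, true) {
                @Override
                protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
                    return size() > maxEntries;
                }
            };
        }

        public V get(K key) { return entries.get(key); }
        public void put(K key, V value) { entries.put(key, value); }
        public boolean remove(K key) { return entries.remove(key) != null; }
        public long size() { return entries.size(); }
    }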

http://git-wip-us.apache.org/repos/asf/kafka/blob/3f3358b6/clients/src/main/java/org/apache/kafka/common/errors/GroupLoadInProgressException.java
----------------------------------------------------------------------
diff --git a/clients/src/main/java/org/apache/kafka/common/errors/GroupLoadInProgressException.java b/clients/src/main/java/org/apache/kafka/common/errors/GroupLoadInProgressException.java
index 17e205f..e227ca2 100644
--- a/clients/src/main/java/org/apache/kafka/common/errors/GroupLoadInProgressException.java
+++ b/clients/src/main/java/org/apache/kafka/common/errors/GroupLoadInProgressException.java
@@ -14,7 +14,7 @@
 package org.apache.kafka.common.errors;
 
 /**
- * The broker returns this error code for any coordiantor request if it is still loading the metadata (after a leader change
+ * The broker returns this error code for any coordinator request if it is still loading the metadata (after a leader change
  * for that offsets topic partition) for this group.
  */
 public class GroupLoadInProgressException extends RetriableException {
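
Because the class extends RetriableException, a caller can simply back off and retry; a hedged sketch follows, where the 100 ms backoff and the commitWithRetry name are arbitrary choices.

    // retry a sync commit while the coordinator is still loading group metadata
    public static void commitWithRetry(KafkaConsumer<?, ?> consumer) throws InterruptedException {
        while (true) {
            try {
                consumer.commitSync();
                return;                        // commit went through
            } catch (RetriableException e) {   // e.g. GroupLoadInProgressException
                Thread.sleep(100);             // back off, then try again
            }
        }
    }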

http://git-wip-us.apache.org/repos/asf/kafka/blob/3f3358b6/clients/src/main/java/org/apache/kafka/common/metrics/Stat.java
----------------------------------------------------------------------
diff --git a/clients/src/main/java/org/apache/kafka/common/metrics/Stat.java b/clients/src/main/java/org/apache/kafka/common/metrics/Stat.java
index 0eb7ab2..d4cfa39 100644
--- a/clients/src/main/java/org/apache/kafka/common/metrics/Stat.java
+++ b/clients/src/main/java/org/apache/kafka/common/metrics/Stat.java
@@ -17,7 +17,7 @@
 package org.apache.kafka.common.metrics;
 
 /**
- * A Stat is a quanity such as average, max, etc that is computed off the stream of updates to a sensor
+ * A Stat is a quantity such as average, max, etc that is computed off the stream of updates to a sensor
  */
 public interface Stat {
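
A sketch of what a concrete Stat can look like, a running maximum; the record/measure signatures are assumed from the surrounding metrics package rather than shown in this diff.

    import org.apache.kafka.common.metrics.Measurable;
    import org.apache.kafka.common.metrics.MetricConfig;
    import org.apache.kafka.common.metrics.Stat;

    // fold every update from the sensor's stream into a running maximum
    public class SimpleMax implements Stat, Measurable {
        private double max = Double.NEGATIVE_INFINITY;

        @Override
        public void record(MetricConfig config, double value, long timeMs) {
            this.max = Math.max(this.max, value);
        }

        @Override
        public double measure(MetricConfig config, long now) {
            return this.max;   // the quantity read back off the stream of updates
        }
    }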
 

http://git-wip-us.apache.org/repos/asf/kafka/blob/3f3358b6/clients/src/main/java/org/apache/kafka/common/metrics/stats/SampledStat.java
----------------------------------------------------------------------
diff --git a/clients/src/main/java/org/apache/kafka/common/metrics/stats/SampledStat.java b/clients/src/main/java/org/apache/kafka/common/metrics/stats/SampledStat.java
index b341b7d..7d52ed3 100644
--- a/clients/src/main/java/org/apache/kafka/common/metrics/stats/SampledStat.java
+++ b/clients/src/main/java/org/apache/kafka/common/metrics/stats/SampledStat.java
@@ -20,8 +20,8 @@ import org.apache.kafka.common.metrics.MetricConfig;
 
 /**
  * A SampledStat records a single scalar value measured over one or more samples. Each sample is recorded over a
- * configurable window. The window can be defined by number of events or ellapsed time (or both, if both are given the
- * window is complete when <i>either</i> the event count or ellapsed time criterion is met).
+ * configurable window. The window can be defined by number of events or elapsed time (or both, if both are given the
+ * window is complete when <i>either</i> the event count or elapsed time criterion is met).
  * <p>
  * All the samples are combined to produce the measurement. When a window is complete the oldest sample is cleared and
  * recycled to begin recording the next sample.
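
The either/or completion rule reads as a one-method sketch like the following; the field and config accessor names are assumptions based on the surrounding metrics code.

    // a sample's window is complete when EITHER criterion is met
    boolean windowComplete(long now, MetricConfig config, Sample sample) {
        return now - sample.lastWindowMs >= config.timeWindowMs()   // elapsed time
            || sample.eventCount >= config.eventWindow();           // event count
    }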

http://git-wip-us.apache.org/repos/asf/kafka/blob/3f3358b6/clients/src/main/java/org/apache/kafka/common/network/Selector.java
----------------------------------------------------------------------
diff --git a/clients/src/main/java/org/apache/kafka/common/network/Selector.java b/clients/src/main/java/org/apache/kafka/common/network/Selector.java
index 387c063..3f29f15 100644
--- a/clients/src/main/java/org/apache/kafka/common/network/Selector.java
+++ b/clients/src/main/java/org/apache/kafka/common/network/Selector.java
@@ -228,7 +228,7 @@ public class Selector implements Selectable {
      *
      * In the "Plaintext" setting, we are using socketChannel to read & write to the network. But for the "SSL" setting,
      * we encrypt the data before we use socketChannel to write data to the network, and decrypt before we return the responses.
-     * This requires additional buffers to be maintained as we are reading from network, since the data on the wire is encrpyted
+     * This requires additional buffers to be maintained as we are reading from network, since the data on the wire is encrypted
      * we won't be able to read exact no.of bytes as kafka protocol requires. We read as many bytes as we can, up to SSLEngine's
      * application buffer size. This means we might be reading additional bytes than the requested size.
      * If there is no further data to read from socketChannel selector won't invoke that channel and we've have additional bytes
@@ -510,7 +510,7 @@ public class Selector implements Selectable {
 
 
     /**
-     * adds a receive to staged receieves
+     * adds a receive to staged receives
      */
     private void addToStagedReceives(KafkaChannel channel, NetworkReceive receive) {
         if (!stagedReceives.containsKey(channel))
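
Completed as it plausibly continues, the method queues completed receives per channel so a single poll pass cannot starve the other channels; the Deque-per-channel layout is inferred from the containsKey check, not shown in the diff.

    // inside Selector: one queue of completed receives per channel
    private final Map<KafkaChannel, Deque<NetworkReceive>> stagedReceives = new HashMap<>();

    private void addToStagedReceives(KafkaChannel channel, NetworkReceive receive) {
        if (!stagedReceives.containsKey(channel))
            stagedReceives.put(channel, new ArrayDeque<NetworkReceive>());
        stagedReceives.get(channel).addLast(receive);
    }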

http://git-wip-us.apache.org/repos/asf/kafka/blob/3f3358b6/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java
----------------------------------------------------------------------
diff --git a/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java b/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java
index 7459774..258d89d 100644
--- a/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java
+++ b/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java
@@ -19,7 +19,7 @@ package org.apache.kafka.common.network;
 
 /*
  * Transport layer for underlying communication.
- * At very basic level it is wrapper around SocketChannel and can be used as substitue for SocketChannel
+ * At very basic level it is wrapper around SocketChannel and can be used as substitute for SocketChannel
  * and other network Channel implementations.
  * As NetworkClient replaces BlockingChannel and other implementations we will be using KafkaChannel as
  * a network I/O channel.

http://git-wip-us.apache.org/repos/asf/kafka/blob/3f3358b6/clients/src/main/java/org/apache/kafka/common/utils/Utils.java
----------------------------------------------------------------------
diff --git a/clients/src/main/java/org/apache/kafka/common/utils/Utils.java b/clients/src/main/java/org/apache/kafka/common/utils/Utils.java
index 974cf1e..ac6e132 100755
--- a/clients/src/main/java/org/apache/kafka/common/utils/Utils.java
+++ b/clients/src/main/java/org/apache/kafka/common/utils/Utils.java
@@ -129,7 +129,7 @@ public class Utils {
 
     /**
      * Get the little-endian value of an integer as a byte array.
-     * @param val The value to convert to a litte-endian array
+     * @param val The value to convert to a little-endian array
      * @return The little-endian encoded array of bytes for the value
      */
     public static byte[] toArrayLE(int val) {
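
The method body is not part of this diff; a little-endian encoding of an int looks like the sketch below, emitting the least significant byte first.

    public static byte[] toArrayLE(int val) {
        return new byte[] {
            (byte) (val & 0xff),           // bits 0-7 first: little-endian
            (byte) ((val >> 8) & 0xff),    // bits 8-15
            (byte) ((val >> 16) & 0xff),   // bits 16-23
            (byte) ((val >> 24) & 0xff)    // bits 24-31
        };
    }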

http://git-wip-us.apache.org/repos/asf/kafka/blob/3f3358b6/clients/src/test/java/org/apache/kafka/common/network/SslTransportLayerTest.java
----------------------------------------------------------------------
diff --git a/clients/src/test/java/org/apache/kafka/common/network/SslTransportLayerTest.java b/clients/src/test/java/org/apache/kafka/common/network/SslTransportLayerTest.java
index 34ea136..d8a037c 100644
--- a/clients/src/test/java/org/apache/kafka/common/network/SslTransportLayerTest.java
+++ b/clients/src/test/java/org/apache/kafka/common/network/SslTransportLayerTest.java
@@ -172,7 +172,7 @@ public class SslTransportLayerTest {
     }
     
     /**
-     * Tests that server does not accept connections from clients which dont
+     * Tests that server does not accept connections from clients which don't
      * provide a certificate when client authentication is required.
      */
     @Test

http://git-wip-us.apache.org/repos/asf/kafka/blob/3f3358b6/clients/src/test/java/org/apache/kafka/test/TestSslUtils.java
----------------------------------------------------------------------
diff --git a/clients/src/test/java/org/apache/kafka/test/TestSslUtils.java b/clients/src/test/java/org/apache/kafka/test/TestSslUtils.java
index 5420b26..b92a06d 100644
--- a/clients/src/test/java/org/apache/kafka/test/TestSslUtils.java
+++ b/clients/src/test/java/org/apache/kafka/test/TestSslUtils.java
@@ -65,7 +65,7 @@ public class TestSslUtils {
      * @param days how many days from now the Certificate is valid for
      * @param algorithm the signing algorithm, eg "SHA1withRSA"
      * @return the self-signed certificate
-     * @throws CertificateException thrown if a security error or an IO error ocurred.
+     * @throws CertificateException thrown if a security error or an IO error occurred.
      */
     public static X509Certificate generateCertificate(String dn, KeyPair pair,
                                                       int days, String algorithm)
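
A usage sketch matching the signature shown above; the key size, DN, and validity period are arbitrary, and the call can throw CertificateException per the javadoc.

    KeyPairGenerator keyGen = KeyPairGenerator.getInstance("RSA");
    keyGen.initialize(2048);
    KeyPair pair = keyGen.generateKeyPair();
    // self-signed certificate valid for 30 days, signed with SHA1withRSA
    X509Certificate cert = TestSslUtils.generateCertificate("CN=localhost", pair, 30, "SHA1withRSA");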

http://git-wip-us.apache.org/repos/asf/kafka/blob/3f3358b6/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTask.java
----------------------------------------------------------------------
diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTask.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTask.java
index 686e564..a67d0af 100644
--- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTask.java
+++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTask.java
@@ -129,7 +129,7 @@ class WorkerSinkTask implements WorkerTask {
     }
 
     /**
-     * Preforms initial join process for consumer group, ensures we have an assignment, and initializes + starts the
+     * Performs initial join process for consumer group, ensures we have an assignment, and initializes + starts the
      * SinkTask.
      *
      * @returns true if successful, false if joining the consumer group was interrupted

http://git-wip-us.apache.org/repos/asf/kafka/blob/3f3358b6/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/ClusterConfigState.java
----------------------------------------------------------------------
diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/ClusterConfigState.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/ClusterConfigState.java
index 098872c..cc4a3c1 100644
--- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/ClusterConfigState.java
+++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/ClusterConfigState.java
@@ -86,7 +86,7 @@ public class ClusterConfigState {
     }
 
     /**
-     * Get the number of tasks assigned for the given conncetor.
+     * Get the number of tasks assigned for the given connector.
      * @param connectorName name of the connector to look up tasks for
      * @return the number of tasks
      */

http://git-wip-us.apache.org/repos/asf/kafka/blob/3f3358b6/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedHerder.java
----------------------------------------------------------------------
diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedHerder.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedHerder.java
index 85db168..7caaabb 100644
--- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedHerder.java
+++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedHerder.java
@@ -545,7 +545,7 @@ public class DistributedHerder implements Herder, Runnable {
         //      even attempting to. If we can't we should drop out of the group because we will block everyone from making
         //      progress. We can backoff and try rejoining later.
         //  1b. We are not the leader. We might need to catch up. If we're already caught up we can rejoin immediately,
-        //      otherwise, we just want to wait indefinitely to catch up and rejoin whenver we're finally ready.
+        //      otherwise, we just want to wait indefinitely to catch up and rejoin whenever we're finally ready.
         // 2. Assignment succeeded.
         //  2a. We are caught up on configs. Awesome! We can proceed to run our assigned work.
         //  2b. We need to try to catch up. We can do this potentially indefinitely because if it takes to long, we'll

http://git-wip-us.apache.org/repos/asf/kafka/blob/3f3358b6/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskTest.java
----------------------------------------------------------------------
diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskTest.java
index 62ec5a8..d5eaace 100644
--- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskTest.java
+++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskTest.java
@@ -135,7 +135,7 @@ public class WorkerSinkTaskTest {
         consumer.pause(TOPIC_PARTITION2);
         PowerMock.expectLastCall();
 
-        // Retry delivery should suceed
+        // Retry delivery should succeed
         expectConsumerPoll(0);
         sinkTask.put(EasyMock.capture(records));
         EasyMock.expectLastCall();

http://git-wip-us.apache.org/repos/asf/kafka/blob/3f3358b6/core/src/main/scala/kafka/common/MessageStreamsExistException.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/kafka/common/MessageStreamsExistException.scala b/core/src/main/scala/kafka/common/MessageStreamsExistException.scala
index 68a2e07..b904ed0 100644
--- a/core/src/main/scala/kafka/common/MessageStreamsExistException.scala
+++ b/core/src/main/scala/kafka/common/MessageStreamsExistException.scala
@@ -17,7 +17,7 @@
 package kafka.common
 
 /**
- * Indicates a createMessageStreams can't be called more thane once
+ * Indicates a createMessageStreams can't be called more than once
 */
 class MessageStreamsExistException(message: String, t: Throwable) extends RuntimeException(message, t) {
 }

http://git-wip-us.apache.org/repos/asf/kafka/blob/3f3358b6/core/src/main/scala/kafka/log/OffsetMap.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/kafka/log/OffsetMap.scala b/core/src/main/scala/kafka/log/OffsetMap.scala
index 303aad5..3893b2c 100755
--- a/core/src/main/scala/kafka/log/OffsetMap.scala
+++ b/core/src/main/scala/kafka/log/OffsetMap.scala
@@ -42,7 +42,7 @@ trait OffsetMap {
 class SkimpyOffsetMap(val memory: Int, val hashAlgorithm: String = "MD5") extends OffsetMap {
   private val bytes = ByteBuffer.allocate(memory)
   
-  /* the hash algorithm instance to use, defualt is MD5 */
+  /* the hash algorithm instance to use, default is MD5 */
   private val digest = MessageDigest.getInstance(hashAlgorithm)
   
   /* the number of bytes for this hash algorithm */

http://git-wip-us.apache.org/repos/asf/kafka/blob/3f3358b6/core/src/main/scala/kafka/message/MessageLengthException.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/kafka/message/MessageLengthException.scala b/core/src/main/scala/kafka/message/MessageLengthException.scala
index 752d1eb..45d32a5 100644
--- a/core/src/main/scala/kafka/message/MessageLengthException.scala
+++ b/core/src/main/scala/kafka/message/MessageLengthException.scala
@@ -18,7 +18,7 @@
 package kafka.message
 
 /**
- * Indicates the presense of a message that exceeds the maximum acceptable 
+ * Indicates the presence of a message that exceeds the maximum acceptable 
  * length (whatever that happens to be)
  */
 class MessageLengthException(message: String) extends RuntimeException(message)

http://git-wip-us.apache.org/repos/asf/kafka/blob/3f3358b6/core/src/main/scala/kafka/tools/ZooKeeperMainWrapper.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/kafka/tools/ZooKeeperMainWrapper.scala b/core/src/main/scala/kafka/tools/ZooKeeperMainWrapper.scala
index 4c51f31..0cbe62c 100644
--- a/core/src/main/scala/kafka/tools/ZooKeeperMainWrapper.scala
+++ b/core/src/main/scala/kafka/tools/ZooKeeperMainWrapper.scala
@@ -28,7 +28,7 @@ class ZooKeeperMainWrapper(args: Array[String]) extends ZooKeeperMain(args) {
 
 /**
  * ZooKeeper 3.4.6 broke being able to pass commands on command line.
- * See ZOOKEEPER-1897.  This class is a hack to restore this faclity.
+ * See ZOOKEEPER-1897.  This class is a hack to restore this facility.
  */
 object ZooKeeperMainWrapper {
 

http://git-wip-us.apache.org/repos/asf/kafka/blob/3f3358b6/core/src/main/scala/kafka/utils/CoreUtils.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/kafka/utils/CoreUtils.scala b/core/src/main/scala/kafka/utils/CoreUtils.scala
index b70a1e6..ecd3572 100755
--- a/core/src/main/scala/kafka/utils/CoreUtils.scala
+++ b/core/src/main/scala/kafka/utils/CoreUtils.scala
@@ -63,7 +63,7 @@ object CoreUtils extends Logging {
   /**
    * Create a daemon thread
    * @param name The name of the thread
-   * @param fun The runction to execute in the thread
+   * @param fun The function to execute in the thread
    * @return The unstarted thread
    */
   def daemonThread(name: String, fun: => Unit): Thread =
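
Sketched in Java for consistency with the client-side examples, the documented helper amounts to the following; only the name/fun/unstarted contract comes from the scaladoc above.

    // a named, unstarted daemon thread: the caller decides when to start it
    public static Thread daemonThread(String name, Runnable fun) {
        Thread thread = new Thread(fun, name);
        thread.setDaemon(true);   // daemon threads don't keep the JVM alive
        return thread;
    }
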
@@ -162,7 +162,7 @@ object CoreUtils extends Logging {
   def crc32(bytes: Array[Byte]): Long = crc32(bytes, 0, bytes.length)
 
   /**
-   * Compute the CRC32 of the segment of the byte array given by the specificed size and offset
+   * Compute the CRC32 of the segment of the byte array given by the specified size and offset
    * @param bytes The bytes to checksum
    * @param offset the offset at which to begin checksumming
    * @param size the number of bytes to checksum
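
The documented behavior, sketched in Java with the JDK's CRC32 for illustration; Kafka's own checksum class may differ in detail.

    public static long crc32(byte[] bytes, int offset, int size) {
        java.util.zip.CRC32 crc = new java.util.zip.CRC32();
        crc.update(bytes, offset, size);   // checksum only the given segment
        return crc.getValue();
    }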

http://git-wip-us.apache.org/repos/asf/kafka/blob/3f3358b6/core/src/main/scala/kafka/utils/ZkUtils.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/kafka/utils/ZkUtils.scala b/core/src/main/scala/kafka/utils/ZkUtils.scala
index 30d45a3..7061333 100644
--- a/core/src/main/scala/kafka/utils/ZkUtils.scala
+++ b/core/src/main/scala/kafka/utils/ZkUtils.scala
@@ -403,7 +403,7 @@ class ZkUtils(val zkClient: ZkClient,
 
   /**
    * Update the value of a persistent node with the given path and data.
-   * create parrent directory if necessary. Never throw NodeExistException.
+   * create parent directory if necessary. Never throw NodeExistException.
    * Return the updated path zkVersion
    */
   def updatePersistentPath(path: String, data: String, acls: java.util.List[ACL] = DefaultAcls) = {
@@ -476,7 +476,7 @@ class ZkUtils(val zkClient: ZkClient,
 
   /**
    * Update the value of a persistent node with the given path and data.
-   * create parrent directory if necessary. Never throw NodeExistException.
+   * create parent directory if necessary. Never throw NodeExistException.
    */
   def updateEphemeralPath(path: String, data: String, acls: java.util.List[ACL] = DefaultAcls): Unit = {
     try {
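
A hedged Java sketch of the write-or-create behavior both scaladocs describe; the ZkClient method and exception names are assumptions about that library, and createParentPath is a hypothetical helper.

    try {
        zkClient.writeData(path, data);          // node exists: just update it
    } catch (ZkNoNodeException e) {
        createParentPath(path);                  // create any missing parents first
        zkClient.createPersistent(path, data);   // then create the node itself
    }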

http://git-wip-us.apache.org/repos/asf/kafka/blob/3f3358b6/core/src/test/scala/integration/kafka/api/PlaintextConsumerTest.scala
----------------------------------------------------------------------
diff --git a/core/src/test/scala/integration/kafka/api/PlaintextConsumerTest.scala b/core/src/test/scala/integration/kafka/api/PlaintextConsumerTest.scala
index 90e9562..47b5d8f 100644
--- a/core/src/test/scala/integration/kafka/api/PlaintextConsumerTest.scala
+++ b/core/src/test/scala/integration/kafka/api/PlaintextConsumerTest.scala
@@ -615,12 +615,12 @@ class PlaintextConsumerTest extends BaseConsumerTest {
    * pollers for these consumers. Wait for partition re-assignment and validate.
    *
    * Currently, assignment validation requires that total number of partitions is greater or equal to
-   * number of consumers, so subscriptions.size must be greate or equal the resulting number of consumers in the group
+   * number of consumers, so subscriptions.size must be greater or equal the resulting number of consumers in the group
    *
    * @param numOfConsumersToAdd number of consumers to create and add to the consumer group
    * @param consumerGroup current consumer group
    * @param consumerPollers current consumer pollers
-   * @param topicsToSubscribe topics to which new consumers will subsribe to
+   * @param topicsToSubscribe topics to which new consumers will subscribe to
    * @param subscriptions set of all topic partitions
    */
   def addConsumersToGroupAndWaitForGroupAssignment(numOfConsumersToAdd: Int,
@@ -664,7 +664,7 @@ class PlaintextConsumerTest extends BaseConsumerTest {
     for (poller <- consumerPollers)
       poller.subscribe(topicsToSubscribe)
 
-    // since subscribe call to poller does not actually call consumer subsribe right away, wait
+    // since subscribe call to poller does not actually call consumer subscribe right away, wait
     // until subscribe is called on all consumers
     TestUtils.waitUntilTrue(() => {
       consumerPollers forall (poller => poller.isSubscribeRequestProcessed())

http://git-wip-us.apache.org/repos/asf/kafka/blob/3f3358b6/core/src/test/scala/unit/kafka/log/LogSegmentTest.scala
----------------------------------------------------------------------
diff --git a/core/src/test/scala/unit/kafka/log/LogSegmentTest.scala b/core/src/test/scala/unit/kafka/log/LogSegmentTest.scala
index fa982b1..7b80c27 100644
--- a/core/src/test/scala/unit/kafka/log/LogSegmentTest.scala
+++ b/core/src/test/scala/unit/kafka/log/LogSegmentTest.scala
@@ -260,7 +260,7 @@ class LogSegmentTest {
     val oldFileSize = seg.log.file.length
     assertEquals(512*1024*1024, oldFileSize)
     seg.close()
-    //After close, file should be trimed
+    //After close, file should be trimmed
     assertEquals(oldSize, seg.log.file.length)
 
     val segReopen = new LogSegment(tempDir, 40, 10, 1000, 0, SystemTime, true,  512*1024*1024, true)

http://git-wip-us.apache.org/repos/asf/kafka/blob/3f3358b6/core/src/test/scala/unit/kafka/message/BaseMessageSetTestCases.scala
----------------------------------------------------------------------
diff --git a/core/src/test/scala/unit/kafka/message/BaseMessageSetTestCases.scala b/core/src/test/scala/unit/kafka/message/BaseMessageSetTestCases.scala
index 208994b..10687d1 100644
--- a/core/src/test/scala/unit/kafka/message/BaseMessageSetTestCases.scala
+++ b/core/src/test/scala/unit/kafka/message/BaseMessageSetTestCases.scala
@@ -61,7 +61,7 @@ trait BaseMessageSetTestCases extends JUnitSuite {
   }
 
   def testWriteToWithMessageSet(set: MessageSet) {
-    // do the write twice to ensure the message set is restored to its orginal state
+    // do the write twice to ensure the message set is restored to its original state
     for(i <- List(0,1)) {
       val file = tempFile()
       val channel = new RandomAccessFile(file, "rw").getChannel()

http://git-wip-us.apache.org/repos/asf/kafka/blob/3f3358b6/core/src/test/scala/unit/kafka/utils/TestUtils.scala
----------------------------------------------------------------------
diff --git a/core/src/test/scala/unit/kafka/utils/TestUtils.scala b/core/src/test/scala/unit/kafka/utils/TestUtils.scala
index 3ef9714..c04b52c 100755
--- a/core/src/test/scala/unit/kafka/utils/TestUtils.scala
+++ b/core/src/test/scala/unit/kafka/utils/TestUtils.scala
@@ -401,12 +401,12 @@ object TestUtils extends Logging {
   }
 
   /**
-   * Create a hexidecimal string for the given bytes
+   * Create a hexadecimal string for the given bytes
    */
   def hexString(bytes: Array[Byte]): String = hexString(ByteBuffer.wrap(bytes))
 
   /**
-   * Create a hexidecimal string for the given bytes
+   * Create a hexadecimal string for the given bytes
    */
   def hexString(buffer: ByteBuffer): String = {
     val builder = new StringBuilder("0x")
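
In Java, the helper shown above reduces to roughly the following; the two-lowercase-hex-digits-per-byte rendering is an assumption about the loop that follows the StringBuilder line.

    public static String hexString(byte[] bytes) {
        StringBuilder builder = new StringBuilder("0x");
        for (byte b : bytes)
            builder.append(String.format("%02x", b & 0xff));
        return builder.toString();
    }
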
@@ -711,7 +711,7 @@ object TestUtils extends Logging {
 
   /**
    * Execute the given block. If it throws an assert error, retry. Repeat
-   * until no error is thrown or the time limit ellapses
+   * until no error is thrown or the time limit elapses
    */
   def retry(maxWaitMs: Long)(block: => Unit) {
     var wait = 1L
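
A Java sketch of the retry loop whose first line is shown above; the doubling backoff mirrors the wait = 1L seed, while the cap is an assumption.

    public static void retry(long maxWaitMs, Runnable block) throws InterruptedException {
        long wait = 1L;
        long started = System.currentTimeMillis();
        while (true) {
            try {
                block.run();
                return;                                    // no error thrown: done
            } catch (AssertionError e) {
                if (System.currentTimeMillis() - started > maxWaitMs)
                    throw e;                               // time limit elapsed
                Thread.sleep(wait);
                wait = Math.min(wait * 2, maxWaitMs);      // double the wait each retry
            }
        }
    }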