Posted to commits@spark.apache.org by ya...@apache.org on 2020/11/17 06:29:47 UTC

[spark] branch branch-3.0 updated: [MINOR][GRAPHX][3.0] Correct typos in the sub-modules: graphx, external, and examples

This is an automated email from the ASF dual-hosted git repository.

yamamuro pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/branch-3.0 by this push:
     new 26c0404  [MINOR][GRAPHX][3.0] Correct typos in the sub-modules: graphx, external, and examples
26c0404 is described below

commit 26c0404214563bb558662e68ea73357c4f4021ed
Author: Josh Soref <js...@users.noreply.github.com>
AuthorDate: Tue Nov 17 15:25:42 2020 +0900

    [MINOR][GRAPHX][3.0] Correct typos in the sub-modules: graphx, external, and examples
    
    ### What changes were proposed in this pull request?
    
    This PR intends to fix typos in the sub-modules: graphx, external, and examples.
    Split out per holdenk's suggestion at https://github.com/apache/spark/pull/30323#issuecomment-725159710
    
    NOTE: The misspellings have been reported at https://github.com/jsoref/spark/commit/706a726f87a0bbf5e31467fae9015218773db85b#commitcomment-44064356
    
    Backport of #30326
    
    ### Why are the changes needed?
    
    Misspelled words make the content harder to read and understand.
    
    ### Does this PR introduce _any_ user-facing change?
    
    No
    
    ### How was this patch tested?
    
    No testing was performed
    
    Closes #30342 from jsoref/branch-3.0-30326.
    
    Authored-by: Josh Soref <js...@users.noreply.github.com>
    Signed-off-by: Takeshi Yamamuro <ya...@apache.org>
---
 .../apache/spark/examples/streaming/JavaCustomReceiver.java  |  2 +-
 .../spark/examples/streaming/JavaNetworkWordCount.java       |  2 +-
 .../examples/streaming/JavaRecoverableNetworkWordCount.java  |  2 +-
 .../spark/examples/streaming/JavaSqlNetworkWordCount.java    |  2 +-
 examples/src/main/python/ml/train_validation_split.py        |  2 +-
 examples/src/main/python/sql/arrow.py                        |  4 ++--
 .../main/python/streaming/recoverable_network_wordcount.py   |  2 +-
 examples/src/main/python/streaming/sql_network_wordcount.py  |  2 +-
 .../org/apache/spark/examples/streaming/CustomReceiver.scala |  2 +-
 .../apache/spark/examples/streaming/NetworkWordCount.scala   |  2 +-
 .../examples/streaming/RecoverableNetworkWordCount.scala     |  2 +-
 .../spark/examples/streaming/SqlNetworkWordCount.scala       |  2 +-
 .../spark/examples/streaming/StatefulNetworkWordCount.scala  |  2 +-
 .../apache/spark/sql/jdbc/DockerJDBCIntegrationSuite.scala   |  2 +-
 .../spark/sql/kafka010/KafkaContinuousSourceSuite.scala      |  4 ++--
 .../spark/sql/kafka010/KafkaMicroBatchSourceSuite.scala      | 12 ++++++------
 .../org/apache/spark/sql/kafka010/KafkaRelationSuite.scala   |  4 ++--
 .../scala/org/apache/spark/sql/kafka010/KafkaTestUtils.scala |  4 ++--
 .../org/apache/spark/streaming/kafka010/KafkaRDDSuite.scala  |  2 +-
 .../spark/examples/streaming/JavaKinesisWordCountASL.java    |  2 +-
 .../main/python/examples/streaming/kinesis_wordcount_asl.py  |  2 +-
 .../spark/examples/streaming/KinesisWordCountASL.scala       |  6 +++---
 .../spark/streaming/kinesis/KinesisUtilsPythonHelper.scala   |  2 +-
 .../scala/org/apache/spark/graphx/lib/PageRankSuite.scala    |  6 +++---
 24 files changed, 37 insertions(+), 37 deletions(-)

diff --git a/examples/src/main/java/org/apache/spark/examples/streaming/JavaCustomReceiver.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaCustomReceiver.java
index 47692ec..f84a197 100644
--- a/examples/src/main/java/org/apache/spark/examples/streaming/JavaCustomReceiver.java
+++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaCustomReceiver.java
@@ -67,7 +67,7 @@ public class JavaCustomReceiver extends Receiver<String> {
     JavaStreamingContext ssc = new JavaStreamingContext(sparkConf, new Duration(1000));
 
     // Create an input stream with the custom receiver on target ip:port and count the
-    // words in input stream of \n delimited text (eg. generated by 'nc')
+    // words in input stream of \n delimited text (e.g. generated by 'nc')
     JavaReceiverInputDStream<String> lines = ssc.receiverStream(
       new JavaCustomReceiver(args[0], Integer.parseInt(args[1])));
     JavaDStream<String> words = lines.flatMap(x -> Arrays.asList(SPACE.split(x)).iterator());
diff --git a/examples/src/main/java/org/apache/spark/examples/streaming/JavaNetworkWordCount.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaNetworkWordCount.java
index b217672..d56134b 100644
--- a/examples/src/main/java/org/apache/spark/examples/streaming/JavaNetworkWordCount.java
+++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaNetworkWordCount.java
@@ -57,7 +57,7 @@ public final class JavaNetworkWordCount {
     JavaStreamingContext ssc = new JavaStreamingContext(sparkConf, Durations.seconds(1));
 
     // Create a JavaReceiverInputDStream on target ip:port and count the
-    // words in input stream of \n delimited text (eg. generated by 'nc')
+    // words in input stream of \n delimited text (e.g. generated by 'nc')
     // Note that no duplication in storage level only for running locally.
     // Replication necessary in distributed scenario for fault tolerance.
     JavaReceiverInputDStream<String> lines = ssc.socketTextStream(
diff --git a/examples/src/main/java/org/apache/spark/examples/streaming/JavaRecoverableNetworkWordCount.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaRecoverableNetworkWordCount.java
index 45a876d..c354323 100644
--- a/examples/src/main/java/org/apache/spark/examples/streaming/JavaRecoverableNetworkWordCount.java
+++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaRecoverableNetworkWordCount.java
@@ -126,7 +126,7 @@ public final class JavaRecoverableNetworkWordCount {
     ssc.checkpoint(checkpointDirectory);
 
     // Create a socket stream on target ip:port and count the
-    // words in input stream of \n delimited text (eg. generated by 'nc')
+    // words in input stream of \n delimited text (e.g. generated by 'nc')
     JavaReceiverInputDStream<String> lines = ssc.socketTextStream(ip, port);
     JavaDStream<String> words = lines.flatMap(x -> Arrays.asList(SPACE.split(x)).iterator());
     JavaPairDStream<String, Integer> wordCounts = words.mapToPair(s -> new Tuple2<>(s, 1))
diff --git a/examples/src/main/java/org/apache/spark/examples/streaming/JavaSqlNetworkWordCount.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaSqlNetworkWordCount.java
index 948d1a2..5d30698 100644
--- a/examples/src/main/java/org/apache/spark/examples/streaming/JavaSqlNetworkWordCount.java
+++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaSqlNetworkWordCount.java
@@ -59,7 +59,7 @@ public final class JavaSqlNetworkWordCount {
     JavaStreamingContext ssc = new JavaStreamingContext(sparkConf, Durations.seconds(1));
 
     // Create a JavaReceiverInputDStream on target ip:port and count the
-    // words in input stream of \n delimited text (eg. generated by 'nc')
+    // words in input stream of \n delimited text (e.g. generated by 'nc')
     // Note that no duplication in storage level only for running locally.
     // Replication necessary in distributed scenario for fault tolerance.
     JavaReceiverInputDStream<String> lines = ssc.socketTextStream(
diff --git a/examples/src/main/python/ml/train_validation_split.py b/examples/src/main/python/ml/train_validation_split.py
index d4f9184..5e3dc7b 100644
--- a/examples/src/main/python/ml/train_validation_split.py
+++ b/examples/src/main/python/ml/train_validation_split.py
@@ -17,7 +17,7 @@
 
 """
 This example demonstrates applying TrainValidationSplit to split data
-and preform model selection.
+and perform model selection.
 Run with:
 
   bin/spark-submit examples/src/main/python/ml/train_validation_split.py
diff --git a/examples/src/main/python/sql/arrow.py b/examples/src/main/python/sql/arrow.py
index b7d8467..b9fb29f 100644
--- a/examples/src/main/python/sql/arrow.py
+++ b/examples/src/main/python/sql/arrow.py
@@ -68,7 +68,7 @@ def ser_to_frame_pandas_udf_example(spark):
         s3['col2'] = s1 + s2.str.len()
         return s3
 
-    # Create a Spark DataFrame that has three columns including a sturct column.
+    # Create a Spark DataFrame that has three columns including a struct column.
     df = spark.createDataFrame(
         [[1, "a string", ("a nested string",)]],
         "long_col long, string_col string, struct_col struct<col1:string>")
@@ -308,7 +308,7 @@ if __name__ == "__main__":
     ser_to_frame_pandas_udf_example(spark)
     print("Running pandas_udf example: Series to Series")
     ser_to_ser_pandas_udf_example(spark)
-    print("Running pandas_udf example: Iterator of Series to Iterator of Seires")
+    print("Running pandas_udf example: Iterator of Series to Iterator of Series")
     iter_ser_to_iter_ser_pandas_udf_example(spark)
     print("Running pandas_udf example: Iterator of Multiple Series to Iterator of Series")
     iter_sers_to_iter_ser_pandas_udf_example(spark)
diff --git a/examples/src/main/python/streaming/recoverable_network_wordcount.py b/examples/src/main/python/streaming/recoverable_network_wordcount.py
index a39c4d0..e9f2db9 100644
--- a/examples/src/main/python/streaming/recoverable_network_wordcount.py
+++ b/examples/src/main/python/streaming/recoverable_network_wordcount.py
@@ -68,7 +68,7 @@ def createContext(host, port, outputPath):
     ssc = StreamingContext(sc, 1)
 
     # Create a socket stream on target ip:port and count the
-    # words in input stream of \n delimited text (eg. generated by 'nc')
+    # words in input stream of \n delimited text (e.g. generated by 'nc')
     lines = ssc.socketTextStream(host, port)
     words = lines.flatMap(lambda line: line.split(" "))
     wordCounts = words.map(lambda x: (x, 1)).reduceByKey(lambda x, y: x + y)
diff --git a/examples/src/main/python/streaming/sql_network_wordcount.py b/examples/src/main/python/streaming/sql_network_wordcount.py
index ab3cfc0..22fe260 100644
--- a/examples/src/main/python/streaming/sql_network_wordcount.py
+++ b/examples/src/main/python/streaming/sql_network_wordcount.py
@@ -54,7 +54,7 @@ if __name__ == "__main__":
     ssc = StreamingContext(sc, 1)
 
     # Create a socket stream on target ip:port and count the
-    # words in input stream of \n delimited text (eg. generated by 'nc')
+    # words in input stream of \n delimited text (e.g. generated by 'nc')
     lines = ssc.socketTextStream(host, int(port))
     words = lines.flatMap(lambda line: line.split(" "))
 
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/CustomReceiver.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/CustomReceiver.scala
index 0f47dea..626f4b4 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/CustomReceiver.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/CustomReceiver.scala
@@ -50,7 +50,7 @@ object CustomReceiver {
     val ssc = new StreamingContext(sparkConf, Seconds(1))
 
     // Create an input stream with the custom receiver on target ip:port and count the
-    // words in input stream of \n delimited text (eg. generated by 'nc')
+    // words in input stream of \n delimited text (e.g. generated by 'nc')
     val lines = ssc.receiverStream(new CustomReceiver(args(0), args(1).toInt))
     val words = lines.flatMap(_.split(" "))
     val wordCounts = words.map(x => (x, 1)).reduceByKey(_ + _)
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/NetworkWordCount.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/NetworkWordCount.scala
index 26bb51d..7d981df 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/NetworkWordCount.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/NetworkWordCount.scala
@@ -47,7 +47,7 @@ object NetworkWordCount {
     val ssc = new StreamingContext(sparkConf, Seconds(1))
 
     // Create a socket stream on target ip:port and count the
-    // words in input stream of \n delimited text (eg. generated by 'nc')
+    // words in input stream of \n delimited text (e.g. generated by 'nc')
     // Note that no duplication in storage level only for running locally.
     // Replication necessary in distributed scenario for fault tolerance.
     val lines = ssc.socketTextStream(args(0), args(1).toInt, StorageLevel.MEMORY_AND_DISK_SER)
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/RecoverableNetworkWordCount.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/RecoverableNetworkWordCount.scala
index 243c22e..98c1807 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/RecoverableNetworkWordCount.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/RecoverableNetworkWordCount.scala
@@ -112,7 +112,7 @@ object RecoverableNetworkWordCount {
     ssc.checkpoint(checkpointDirectory)
 
     // Create a socket stream on target ip:port and count the
-    // words in input stream of \n delimited text (eg. generated by 'nc')
+    // words in input stream of \n delimited text (e.g. generated by 'nc')
     val lines = ssc.socketTextStream(ip, port)
     val words = lines.flatMap(_.split(" "))
     val wordCounts = words.map((_, 1)).reduceByKey(_ + _)
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala
index 778be7b..7daa001 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala
@@ -51,7 +51,7 @@ object SqlNetworkWordCount {
     val ssc = new StreamingContext(sparkConf, Seconds(2))
 
     // Create a socket stream on target ip:port and count the
-    // words in input stream of \n delimited text (eg. generated by 'nc')
+    // words in input stream of \n delimited text (e.g. generated by 'nc')
     // Note that no duplication in storage level only for running locally.
     // Replication necessary in distributed scenario for fault tolerance.
     val lines = ssc.socketTextStream(args(0), args(1).toInt, StorageLevel.MEMORY_AND_DISK_SER)
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/StatefulNetworkWordCount.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/StatefulNetworkWordCount.scala
index 46f01ed..8a5fcda 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/StatefulNetworkWordCount.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/StatefulNetworkWordCount.scala
@@ -52,7 +52,7 @@ object StatefulNetworkWordCount {
     val initialRDD = ssc.sparkContext.parallelize(List(("hello", 1), ("world", 1)))
 
     // Create a ReceiverInputDStream on target ip:port and count the
-    // words in input stream of \n delimited test (eg. generated by 'nc')
+    // words in input stream of \n delimited test (e.g. generated by 'nc')
     val lines = ssc.socketTextStream(args(0), args(1).toInt)
     val words = lines.flatMap(_.split(" "))
     val wordDstream = words.map(x => (x, 1))
diff --git a/external/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/DockerJDBCIntegrationSuite.scala b/external/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/DockerJDBCIntegrationSuite.scala
index 519a5cd..996e697 100644
--- a/external/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/DockerJDBCIntegrationSuite.scala
+++ b/external/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/DockerJDBCIntegrationSuite.scala
@@ -44,7 +44,7 @@ abstract class DatabaseOnDocker {
   val env: Map[String, String]
 
   /**
-   * Wheather or not to use ipc mode for shared memory when starting docker image
+   * Whether or not to use ipc mode for shared memory when starting docker image
    */
   val usesIpc: Boolean
 
diff --git a/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaContinuousSourceSuite.scala b/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaContinuousSourceSuite.scala
index 76c2598..43cbe28 100644
--- a/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaContinuousSourceSuite.scala
+++ b/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaContinuousSourceSuite.scala
@@ -33,7 +33,7 @@ class KafkaContinuousSourceSuite extends KafkaSourceSuiteBase with KafkaContinuo
     withTable(table) {
       val topic = newTopic()
       testUtils.createTopic(topic)
-      testUtils.withTranscationalProducer { producer =>
+      testUtils.withTransactionalProducer { producer =>
         val df = spark
           .readStream
           .format("kafka")
@@ -99,7 +99,7 @@ class KafkaContinuousSourceSuite extends KafkaSourceSuiteBase with KafkaContinuo
     withTable(table) {
       val topic = newTopic()
       testUtils.createTopic(topic)
-      testUtils.withTranscationalProducer { producer =>
+      testUtils.withTransactionalProducer { producer =>
         val df = spark
           .readStream
           .format("kafka")
diff --git a/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaMicroBatchSourceSuite.scala b/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaMicroBatchSourceSuite.scala
index bdad214..2a3e135 100644
--- a/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaMicroBatchSourceSuite.scala
+++ b/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaMicroBatchSourceSuite.scala
@@ -567,7 +567,7 @@ abstract class KafkaMicroBatchSourceSuiteBase extends KafkaSourceSuiteBase {
     val rows = spark.table("kafkaWatermark").collect()
     assert(rows.length === 1, s"Unexpected results: ${rows.toList}")
     val row = rows(0)
-    // We cannot check the exact window start time as it depands on the time that messages were
+    // We cannot check the exact window start time as it depends on the time that messages were
     // inserted by the producer. So here we just use a low bound to make sure the internal
     // conversion works.
     assert(
@@ -833,7 +833,7 @@ abstract class KafkaMicroBatchSourceSuiteBase extends KafkaSourceSuiteBase {
 
     val topicPartition = new TopicPartition(topic, 0)
     // The message values are the same as their offsets to make the test easy to follow
-    testUtils.withTranscationalProducer { producer =>
+    testUtils.withTransactionalProducer { producer =>
       testStream(mapped)(
         StartStream(Trigger.ProcessingTime(100), clock),
         waitUntilBatchProcessed,
@@ -956,7 +956,7 @@ abstract class KafkaMicroBatchSourceSuiteBase extends KafkaSourceSuiteBase {
 
     val topicPartition = new TopicPartition(topic, 0)
     // The message values are the same as their offsets to make the test easy to follow
-    testUtils.withTranscationalProducer { producer =>
+    testUtils.withTransactionalProducer { producer =>
       testStream(mapped)(
         StartStream(Trigger.ProcessingTime(100), clock),
         waitUntilBatchProcessed,
@@ -1047,7 +1047,7 @@ abstract class KafkaMicroBatchSourceSuiteBase extends KafkaSourceSuiteBase {
       .load()
       .select($"value".as[String])
 
-    testUtils.withTranscationalProducer { producer =>
+    testUtils.withTransactionalProducer { producer =>
       producer.beginTransaction()
       (0 to 3).foreach { i =>
         producer.send(new ProducerRecord[String, String](topic, i.toString)).get()
@@ -1063,7 +1063,7 @@ abstract class KafkaMicroBatchSourceSuiteBase extends KafkaSourceSuiteBase {
         // this case, if we forget to reset `FetchedData._nextOffsetInFetchedData` or
         // `FetchedData._offsetAfterPoll` (See SPARK-25495), the next batch will see incorrect
         // values and return wrong results hence fail the test.
-        testUtils.withTranscationalProducer { producer =>
+        testUtils.withTransactionalProducer { producer =>
           producer.beginTransaction()
           (4 to 7).foreach { i =>
             producer.send(new ProducerRecord[String, String](topic, i.toString)).get()
@@ -1776,7 +1776,7 @@ abstract class KafkaSourceSuiteBase extends KafkaSourceTest {
     withTable(table) {
       val topic = newTopic()
       testUtils.createTopic(topic)
-      testUtils.withTranscationalProducer { producer =>
+      testUtils.withTransactionalProducer { producer =>
         val df = spark
           .readStream
           .format("kafka")
diff --git a/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaRelationSuite.scala b/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaRelationSuite.scala
index e5f3a22..6f5dc0b 100644
--- a/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaRelationSuite.scala
+++ b/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaRelationSuite.scala
@@ -503,7 +503,7 @@ abstract class KafkaRelationSuiteBase extends QueryTest with SharedSparkSession
   test("read Kafka transactional messages: read_committed") {
     val topic = newTopic()
     testUtils.createTopic(topic)
-    testUtils.withTranscationalProducer { producer =>
+    testUtils.withTransactionalProducer { producer =>
       val df = spark
         .read
         .format("kafka")
@@ -552,7 +552,7 @@ abstract class KafkaRelationSuiteBase extends QueryTest with SharedSparkSession
   test("read Kafka transactional messages: read_uncommitted") {
     val topic = newTopic()
     testUtils.createTopic(topic)
-    testUtils.withTranscationalProducer { producer =>
+    testUtils.withTransactionalProducer { producer =>
       val df = spark
         .read
         .format("kafka")
diff --git a/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaTestUtils.scala b/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaTestUtils.scala
index a0a113c..942e2ad 100644
--- a/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaTestUtils.scala
+++ b/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaTestUtils.scala
@@ -565,7 +565,7 @@ class KafkaTestUtils(
   }
 
   /** Call `f` with a `KafkaProducer` that has initialized transactions. */
-  def withTranscationalProducer(f: KafkaProducer[String, String] => Unit): Unit = {
+  def withTransactionalProducer(f: KafkaProducer[String, String] => Unit): Unit = {
     val props = producerConfiguration
     props.put("transactional.id", UUID.randomUUID().toString)
     val producer = new KafkaProducer[String, String](props)
@@ -614,7 +614,7 @@ class KafkaTestUtils(
     // ensure that logs from all replicas are deleted if delete topic is marked successful
     assert(servers.forall(server => topicAndPartitions.forall(tp =>
       server.getLogManager().getLog(tp).isEmpty)),
-      s"topic $topic still exists in log mananger")
+      s"topic $topic still exists in log manager")
     // ensure that topic is removed from all cleaner offsets
     assert(servers.forall(server => topicAndPartitions.forall { tp =>
       val checkpoints = server.getLogManager().liveLogDirs.map { logDir =>
diff --git a/external/kafka-0-10/src/test/scala/org/apache/spark/streaming/kafka010/KafkaRDDSuite.scala b/external/kafka-0-10/src/test/scala/org/apache/spark/streaming/kafka010/KafkaRDDSuite.scala
index d6123e1..2053d36 100644
--- a/external/kafka-0-10/src/test/scala/org/apache/spark/streaming/kafka010/KafkaRDDSuite.scala
+++ b/external/kafka-0-10/src/test/scala/org/apache/spark/streaming/kafka010/KafkaRDDSuite.scala
@@ -42,7 +42,7 @@ class KafkaRDDSuite extends SparkFunSuite with BeforeAndAfterAll {
   private val sparkConf = new SparkConf().setMaster("local[4]")
     .setAppName(this.getClass.getSimpleName)
     // Set a timeout of 10 seconds that's going to be used to fetch topics/partitions from kafka.
-    // Othewise the poll timeout defaults to 2 minutes and causes test cases to run longer.
+    // Otherwise the poll timeout defaults to 2 minutes and causes test cases to run longer.
     .set("spark.streaming.kafka.consumer.poll.ms", "10000")
 
   private var sc: SparkContext = _
diff --git a/external/kinesis-asl/src/main/java/org/apache/spark/examples/streaming/JavaKinesisWordCountASL.java b/external/kinesis-asl/src/main/java/org/apache/spark/examples/streaming/JavaKinesisWordCountASL.java
index d704aeb..244873a 100644
--- a/external/kinesis-asl/src/main/java/org/apache/spark/examples/streaming/JavaKinesisWordCountASL.java
+++ b/external/kinesis-asl/src/main/java/org/apache/spark/examples/streaming/JavaKinesisWordCountASL.java
@@ -49,7 +49,7 @@ import com.amazonaws.services.kinesis.AmazonKinesisClient;
  *
  * Usage: JavaKinesisWordCountASL [app-name] [stream-name] [endpoint-url] [region-name]
  *   [app-name] is the name of the consumer app, used to track the read data in DynamoDB
- *   [stream-name] name of the Kinesis stream (ie. mySparkStream)
+ *   [stream-name] name of the Kinesis stream (i.e. mySparkStream)
  *   [endpoint-url] endpoint of the Kinesis service
  *     (e.g. https://kinesis.us-east-1.amazonaws.com)
  *
diff --git a/external/kinesis-asl/src/main/python/examples/streaming/kinesis_wordcount_asl.py b/external/kinesis-asl/src/main/python/examples/streaming/kinesis_wordcount_asl.py
index 5370b79..66c9ed8 100644
--- a/external/kinesis-asl/src/main/python/examples/streaming/kinesis_wordcount_asl.py
+++ b/external/kinesis-asl/src/main/python/examples/streaming/kinesis_wordcount_asl.py
@@ -23,7 +23,7 @@
 
   Usage: kinesis_wordcount_asl.py <app-name> <stream-name> <endpoint-url> <region-name>
     <app-name> is the name of the consumer app, used to track the read data in DynamoDB
-    <stream-name> name of the Kinesis stream (ie. mySparkStream)
+    <stream-name> name of the Kinesis stream (i.e. mySparkStream)
     <endpoint-url> endpoint of the Kinesis service
       (e.g. https://kinesis.us-east-1.amazonaws.com)
     <region-name> region name of the Kinesis endpoint (e.g. us-east-1)
diff --git a/external/kinesis-asl/src/main/scala/org/apache/spark/examples/streaming/KinesisWordCountASL.scala b/external/kinesis-asl/src/main/scala/org/apache/spark/examples/streaming/KinesisWordCountASL.scala
index bbb6008..d6a9160 100644
--- a/external/kinesis-asl/src/main/scala/org/apache/spark/examples/streaming/KinesisWordCountASL.scala
+++ b/external/kinesis-asl/src/main/scala/org/apache/spark/examples/streaming/KinesisWordCountASL.scala
@@ -43,7 +43,7 @@ import org.apache.spark.streaming.kinesis.KinesisInputDStream
  *
  * Usage: KinesisWordCountASL <app-name> <stream-name> <endpoint-url> <region-name>
  *   <app-name> is the name of the consumer app, used to track the read data in DynamoDB
- *   <stream-name> name of the Kinesis stream (ie. mySparkStream)
+ *   <stream-name> name of the Kinesis stream (i.e. mySparkStream)
  *   <endpoint-url> endpoint of the Kinesis service
  *     (e.g. https://kinesis.us-east-1.amazonaws.com)
  *
@@ -167,9 +167,9 @@ object KinesisWordCountASL extends Logging {
  * Usage: KinesisWordProducerASL <stream-name> <endpoint-url> \
  *   <records-per-sec> <words-per-record>
  *
- *   <stream-name> is the name of the Kinesis stream (ie. mySparkStream)
+ *   <stream-name> is the name of the Kinesis stream (i.e. mySparkStream)
  *   <endpoint-url> is the endpoint of the Kinesis service
- *     (ie. https://kinesis.us-east-1.amazonaws.com)
+ *     (i.e. https://kinesis.us-east-1.amazonaws.com)
  *   <records-per-sec> is the rate of records per second to put onto the stream
  *   <words-per-record> is the number of words per record
  *
diff --git a/external/kinesis-asl/src/main/scala/org/apache/spark/streaming/kinesis/KinesisUtilsPythonHelper.scala b/external/kinesis-asl/src/main/scala/org/apache/spark/streaming/kinesis/KinesisUtilsPythonHelper.scala
index c89dedd..0056438 100644
--- a/external/kinesis-asl/src/main/scala/org/apache/spark/streaming/kinesis/KinesisUtilsPythonHelper.scala
+++ b/external/kinesis-asl/src/main/scala/org/apache/spark/streaming/kinesis/KinesisUtilsPythonHelper.scala
@@ -46,7 +46,7 @@ private class KinesisUtilsPythonHelper {
     // scalastyle:on
     if (!(stsAssumeRoleArn != null && stsSessionName != null && stsExternalId != null)
         && !(stsAssumeRoleArn == null && stsSessionName == null && stsExternalId == null)) {
-      throw new IllegalArgumentException("stsAssumeRoleArn, stsSessionName, and stsExtenalId " +
+      throw new IllegalArgumentException("stsAssumeRoleArn, stsSessionName, and stsExternalId " +
         "must all be defined or all be null")
     }
     if (awsAccessKeyId == null && awsSecretKey != null) {
diff --git a/graphx/src/test/scala/org/apache/spark/graphx/lib/PageRankSuite.scala b/graphx/src/test/scala/org/apache/spark/graphx/lib/PageRankSuite.scala
index a5e2fc5..8008a89 100644
--- a/graphx/src/test/scala/org/apache/spark/graphx/lib/PageRankSuite.scala
+++ b/graphx/src/test/scala/org/apache/spark/graphx/lib/PageRankSuite.scala
@@ -274,8 +274,8 @@ class PageRankSuite extends SparkFunSuite with LocalSparkContext {
     withSpark { sc =>
       // Check that implementation can handle large vertexIds, SPARK-25149
       val vertexIdOffset = Int.MaxValue.toLong + 1
-      val sourceOffest = 4
-      val source = vertexIdOffset + sourceOffest
+      val sourceOffset = 4
+      val source = vertexIdOffset + sourceOffset
       val numIter = 10
       val vertices = vertexIdOffset until vertexIdOffset + numIter
       val chain1 = vertices.zip(vertices.tail)
@@ -285,7 +285,7 @@ class PageRankSuite extends SparkFunSuite with LocalSparkContext {
       val tol = 0.0001
       val errorTol = 1.0e-1
 
-      val a = resetProb / (1 - Math.pow(1 - resetProb, numIter - sourceOffest))
+      val a = resetProb / (1 - Math.pow(1 - resetProb, numIter - sourceOffset))
       // We expect the rank to decay as (1 - resetProb) ^ distance
       val expectedRanks = sc.parallelize(vertices).map { vid =>
         val rank = if (vid < source) {


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@spark.apache.org
For additional commands, e-mail: commits-help@spark.apache.org