Posted to commits@spark.apache.org by ya...@apache.org on 2020/11/12 12:05:20 UTC
[spark] branch branch-2.4 updated: [MINOR][GRAPHX][2.4] Correct typos in the sub-modules: graphx, external, and examples
This is an automated email from the ASF dual-hosted git repository.
yamamuro pushed a commit to branch branch-2.4
in repository https://gitbox.apache.org/repos/asf/spark.git
The following commit(s) were added to refs/heads/branch-2.4 by this push:
new 1e177c7 [MINOR][GRAPHX][2.4] Correct typos in the sub-modules: graphx, external, and examples
1e177c7 is described below
commit 1e177c73a26967b1effc1c8ba59c2fd57b52951f
Author: Josh Soref <js...@users.noreply.github.com>
AuthorDate: Thu Nov 12 21:02:27 2020 +0900
[MINOR][GRAPHX][2.4] Correct typos in the sub-modules: graphx, external, and examples
### What changes were proposed in this pull request?
This PR intends to fix typos in the sub-modules: graphx, external, and examples.
Split per holdenk https://github.com/apache/spark/pull/30323#issuecomment-725159710
NOTE: The misspellings have been reported at https://github.com/jsoref/spark/commit/706a726f87a0bbf5e31467fae9015218773db85b#commitcomment-44064356
Backport of #30326
### Why are the changes needed?
Misspelled words make it harder to read / understand content.
### Does this PR introduce _any_ user-facing change?
No
### How was this patch tested?
No testing was performed
Closes #30343 from jsoref/branch-2.4-30326.
Authored-by: Josh Soref <js...@users.noreply.github.com>
Signed-off-by: Takeshi Yamamuro <ya...@apache.org>
---
.../apache/spark/examples/streaming/JavaCustomReceiver.java | 2 +-
.../spark/examples/streaming/JavaNetworkWordCount.java | 2 +-
.../examples/streaming/JavaRecoverableNetworkWordCount.java | 2 +-
.../spark/examples/streaming/JavaSqlNetworkWordCount.java | 2 +-
examples/src/main/python/ml/train_validation_split.py | 2 +-
.../main/python/streaming/recoverable_network_wordcount.py | 2 +-
examples/src/main/python/streaming/sql_network_wordcount.py | 2 +-
.../org/apache/spark/examples/streaming/CustomReceiver.scala | 2 +-
.../apache/spark/examples/streaming/NetworkWordCount.scala | 2 +-
.../examples/streaming/RecoverableNetworkWordCount.scala | 2 +-
.../spark/examples/streaming/SqlNetworkWordCount.scala | 2 +-
.../spark/examples/streaming/StatefulNetworkWordCount.scala | 2 +-
.../apache/spark/sql/jdbc/DockerJDBCIntegrationSuite.scala | 2 +-
.../spark/sql/kafka010/KafkaContinuousSourceSuite.scala | 4 ++--
.../spark/sql/kafka010/KafkaMicroBatchSourceSuite.scala | 12 ++++++------
.../org/apache/spark/sql/kafka010/KafkaRelationSuite.scala | 4 ++--
.../scala/org/apache/spark/sql/kafka010/KafkaTestUtils.scala | 4 ++--
.../spark/examples/streaming/JavaKinesisWordCountASL.java | 2 +-
.../main/python/examples/streaming/kinesis_wordcount_asl.py | 2 +-
.../spark/examples/streaming/KinesisWordCountASL.scala | 6 +++---
.../org/apache/spark/streaming/kinesis/KinesisUtils.scala | 2 +-
.../scala/org/apache/spark/graphx/lib/PageRankSuite.scala | 6 +++---
22 files changed, 34 insertions(+), 34 deletions(-)
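For reference, each hunk below replaces a single misspelled token (for example "Transcational" -> "Transactional", "sourceOffest" -> "sourceOffset"). The following is a minimal Python sketch, not part of this commit, that illustrates how such tokens could be located in a checked-out source tree; the typo list is drawn from the hunks below and the scan itself is purely hypothetical.

import pathlib

# Misspellings corrected in the hunks below; this list and the scan
# are illustrative only and not part of the actual change.
TYPOS = ["Transcational", "depands", "mananger", "Wheather",
         "preform", "sourceOffest", "stsExtenalId"]

# Scan Java/Scala/Python sources under the current directory.
for path in pathlib.Path(".").rglob("*"):
    if not path.is_file() or path.suffix not in {".java", ".scala", ".py"}:
        continue
    text = path.read_text(encoding="utf-8", errors="ignore")
    for typo in TYPOS:
        if typo in text:
            print(f"{path}: contains '{typo}'")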
diff --git a/examples/src/main/java/org/apache/spark/examples/streaming/JavaCustomReceiver.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaCustomReceiver.java
index 47692ec..f84a197 100644
--- a/examples/src/main/java/org/apache/spark/examples/streaming/JavaCustomReceiver.java
+++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaCustomReceiver.java
@@ -67,7 +67,7 @@ public class JavaCustomReceiver extends Receiver<String> {
JavaStreamingContext ssc = new JavaStreamingContext(sparkConf, new Duration(1000));
// Create an input stream with the custom receiver on target ip:port and count the
- // words in input stream of \n delimited text (eg. generated by 'nc')
+ // words in input stream of \n delimited text (e.g. generated by 'nc')
JavaReceiverInputDStream<String> lines = ssc.receiverStream(
new JavaCustomReceiver(args[0], Integer.parseInt(args[1])));
JavaDStream<String> words = lines.flatMap(x -> Arrays.asList(SPACE.split(x)).iterator());
diff --git a/examples/src/main/java/org/apache/spark/examples/streaming/JavaNetworkWordCount.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaNetworkWordCount.java
index b217672..d56134b 100644
--- a/examples/src/main/java/org/apache/spark/examples/streaming/JavaNetworkWordCount.java
+++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaNetworkWordCount.java
@@ -57,7 +57,7 @@ public final class JavaNetworkWordCount {
JavaStreamingContext ssc = new JavaStreamingContext(sparkConf, Durations.seconds(1));
// Create a JavaReceiverInputDStream on target ip:port and count the
- // words in input stream of \n delimited text (eg. generated by 'nc')
+ // words in input stream of \n delimited text (e.g. generated by 'nc')
// Note that no duplication in storage level only for running locally.
// Replication necessary in distributed scenario for fault tolerance.
JavaReceiverInputDStream<String> lines = ssc.socketTextStream(
diff --git a/examples/src/main/java/org/apache/spark/examples/streaming/JavaRecoverableNetworkWordCount.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaRecoverableNetworkWordCount.java
index 45a876d..c354323 100644
--- a/examples/src/main/java/org/apache/spark/examples/streaming/JavaRecoverableNetworkWordCount.java
+++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaRecoverableNetworkWordCount.java
@@ -126,7 +126,7 @@ public final class JavaRecoverableNetworkWordCount {
ssc.checkpoint(checkpointDirectory);
// Create a socket stream on target ip:port and count the
- // words in input stream of \n delimited text (eg. generated by 'nc')
+ // words in input stream of \n delimited text (e.g. generated by 'nc')
JavaReceiverInputDStream<String> lines = ssc.socketTextStream(ip, port);
JavaDStream<String> words = lines.flatMap(x -> Arrays.asList(SPACE.split(x)).iterator());
JavaPairDStream<String, Integer> wordCounts = words.mapToPair(s -> new Tuple2<>(s, 1))
diff --git a/examples/src/main/java/org/apache/spark/examples/streaming/JavaSqlNetworkWordCount.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaSqlNetworkWordCount.java
index 948d1a2..5d30698 100644
--- a/examples/src/main/java/org/apache/spark/examples/streaming/JavaSqlNetworkWordCount.java
+++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaSqlNetworkWordCount.java
@@ -59,7 +59,7 @@ public final class JavaSqlNetworkWordCount {
JavaStreamingContext ssc = new JavaStreamingContext(sparkConf, Durations.seconds(1));
// Create a JavaReceiverInputDStream on target ip:port and count the
- // words in input stream of \n delimited text (eg. generated by 'nc')
+ // words in input stream of \n delimited text (e.g. generated by 'nc')
// Note that no duplication in storage level only for running locally.
// Replication necessary in distributed scenario for fault tolerance.
JavaReceiverInputDStream<String> lines = ssc.socketTextStream(
diff --git a/examples/src/main/python/ml/train_validation_split.py b/examples/src/main/python/ml/train_validation_split.py
index d4f9184..5e3dc7b 100644
--- a/examples/src/main/python/ml/train_validation_split.py
+++ b/examples/src/main/python/ml/train_validation_split.py
@@ -17,7 +17,7 @@
"""
This example demonstrates applying TrainValidationSplit to split data
-and preform model selection.
+and perform model selection.
Run with:
bin/spark-submit examples/src/main/python/ml/train_validation_split.py
diff --git a/examples/src/main/python/streaming/recoverable_network_wordcount.py b/examples/src/main/python/streaming/recoverable_network_wordcount.py
index 60167dc..0ac7d48 100644
--- a/examples/src/main/python/streaming/recoverable_network_wordcount.py
+++ b/examples/src/main/python/streaming/recoverable_network_wordcount.py
@@ -68,7 +68,7 @@ def createContext(host, port, outputPath):
ssc = StreamingContext(sc, 1)
# Create a socket stream on target ip:port and count the
- # words in input stream of \n delimited text (eg. generated by 'nc')
+ # words in input stream of \n delimited text (e.g. generated by 'nc')
lines = ssc.socketTextStream(host, port)
words = lines.flatMap(lambda line: line.split(" "))
wordCounts = words.map(lambda x: (x, 1)).reduceByKey(lambda x, y: x + y)
diff --git a/examples/src/main/python/streaming/sql_network_wordcount.py b/examples/src/main/python/streaming/sql_network_wordcount.py
index ab3cfc0..22fe260 100644
--- a/examples/src/main/python/streaming/sql_network_wordcount.py
+++ b/examples/src/main/python/streaming/sql_network_wordcount.py
@@ -54,7 +54,7 @@ if __name__ == "__main__":
ssc = StreamingContext(sc, 1)
# Create a socket stream on target ip:port and count the
- # words in input stream of \n delimited text (eg. generated by 'nc')
+ # words in input stream of \n delimited text (e.g. generated by 'nc')
lines = ssc.socketTextStream(host, int(port))
words = lines.flatMap(lambda line: line.split(" "))
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/CustomReceiver.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/CustomReceiver.scala
index fc3f8fa..357f019 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/CustomReceiver.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/CustomReceiver.scala
@@ -50,7 +50,7 @@ object CustomReceiver {
val ssc = new StreamingContext(sparkConf, Seconds(1))
// Create an input stream with the custom receiver on target ip:port and count the
- // words in input stream of \n delimited text (eg. generated by 'nc')
+ // words in input stream of \n delimited text (e.g. generated by 'nc')
val lines = ssc.receiverStream(new CustomReceiver(args(0), args(1).toInt))
val words = lines.flatMap(_.split(" "))
val wordCounts = words.map(x => (x, 1)).reduceByKey(_ + _)
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/NetworkWordCount.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/NetworkWordCount.scala
index 15b57fc..b8cb1bd 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/NetworkWordCount.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/NetworkWordCount.scala
@@ -47,7 +47,7 @@ object NetworkWordCount {
val ssc = new StreamingContext(sparkConf, Seconds(1))
// Create a socket stream on target ip:port and count the
- // words in input stream of \n delimited text (eg. generated by 'nc')
+ // words in input stream of \n delimited text (e.g. generated by 'nc')
// Note that no duplication in storage level only for running locally.
// Replication necessary in distributed scenario for fault tolerance.
val lines = ssc.socketTextStream(args(0), args(1).toInt, StorageLevel.MEMORY_AND_DISK_SER)
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/RecoverableNetworkWordCount.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/RecoverableNetworkWordCount.scala
index f018f3a..f7fe9b0 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/RecoverableNetworkWordCount.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/RecoverableNetworkWordCount.scala
@@ -112,7 +112,7 @@ object RecoverableNetworkWordCount {
ssc.checkpoint(checkpointDirectory)
// Create a socket stream on target ip:port and count the
- // words in input stream of \n delimited text (eg. generated by 'nc')
+ // words in input stream of \n delimited text (e.g. generated by 'nc')
val lines = ssc.socketTextStream(ip, port)
val words = lines.flatMap(_.split(" "))
val wordCounts = words.map((_, 1)).reduceByKey(_ + _)
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala
index 787bbec..a4a7852 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala
@@ -51,7 +51,7 @@ object SqlNetworkWordCount {
val ssc = new StreamingContext(sparkConf, Seconds(2))
// Create a socket stream on target ip:port and count the
- // words in input stream of \n delimited text (eg. generated by 'nc')
+ // words in input stream of \n delimited text (e.g. generated by 'nc')
// Note that no duplication in storage level only for running locally.
// Replication necessary in distributed scenario for fault tolerance.
val lines = ssc.socketTextStream(args(0), args(1).toInt, StorageLevel.MEMORY_AND_DISK_SER)
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/StatefulNetworkWordCount.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/StatefulNetworkWordCount.scala
index 2811e67..40f7441 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/StatefulNetworkWordCount.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/StatefulNetworkWordCount.scala
@@ -52,7 +52,7 @@ object StatefulNetworkWordCount {
val initialRDD = ssc.sparkContext.parallelize(List(("hello", 1), ("world", 1)))
// Create a ReceiverInputDStream on target ip:port and count the
- // words in input stream of \n delimited test (eg. generated by 'nc')
+ // words in input stream of \n delimited test (e.g. generated by 'nc')
val lines = ssc.socketTextStream(args(0), args(1).toInt)
val words = lines.flatMap(_.split(" "))
val wordDstream = words.map(x => (x, 1))
diff --git a/external/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/DockerJDBCIntegrationSuite.scala b/external/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/DockerJDBCIntegrationSuite.scala
index 609696b..3b01f21 100644
--- a/external/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/DockerJDBCIntegrationSuite.scala
+++ b/external/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/DockerJDBCIntegrationSuite.scala
@@ -46,7 +46,7 @@ abstract class DatabaseOnDocker {
val env: Map[String, String]
/**
- * Wheather or not to use ipc mode for shared memory when starting docker image
+ * Whether or not to use ipc mode for shared memory when starting docker image
*/
val usesIpc: Boolean
diff --git a/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaContinuousSourceSuite.scala b/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaContinuousSourceSuite.scala
index 649cb72..4b1be50 100644
--- a/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaContinuousSourceSuite.scala
+++ b/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaContinuousSourceSuite.scala
@@ -33,7 +33,7 @@ class KafkaContinuousSourceSuite extends KafkaSourceSuiteBase with KafkaContinuo
withTable(table) {
val topic = newTopic()
testUtils.createTopic(topic)
- testUtils.withTranscationalProducer { producer =>
+ testUtils.withTransactionalProducer { producer =>
val df = spark
.readStream
.format("kafka")
@@ -99,7 +99,7 @@ class KafkaContinuousSourceSuite extends KafkaSourceSuiteBase with KafkaContinuo
withTable(table) {
val topic = newTopic()
testUtils.createTopic(topic)
- testUtils.withTranscationalProducer { producer =>
+ testUtils.withTransactionalProducer { producer =>
val df = spark
.readStream
.format("kafka")
diff --git a/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaMicroBatchSourceSuite.scala b/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaMicroBatchSourceSuite.scala
index 36da2f1..fd61f3d 100644
--- a/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaMicroBatchSourceSuite.scala
+++ b/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaMicroBatchSourceSuite.scala
@@ -544,7 +544,7 @@ abstract class KafkaMicroBatchSourceSuiteBase extends KafkaSourceSuiteBase {
val rows = spark.table("kafkaWatermark").collect()
assert(rows.length === 1, s"Unexpected results: ${rows.toList}")
val row = rows(0)
- // We cannot check the exact window start time as it depands on the time that messages were
+ // We cannot check the exact window start time as it depends on the time that messages were
// inserted by the producer. So here we just use a low bound to make sure the internal
// conversion works.
assert(
@@ -740,7 +740,7 @@ abstract class KafkaMicroBatchSourceSuiteBase extends KafkaSourceSuiteBase {
val topicPartition = new TopicPartition(topic, 0)
// The message values are the same as their offsets to make the test easy to follow
- testUtils.withTranscationalProducer { producer =>
+ testUtils.withTransactionalProducer { producer =>
testStream(mapped)(
StartStream(ProcessingTime(100), clock),
waitUntilBatchProcessed,
@@ -863,7 +863,7 @@ abstract class KafkaMicroBatchSourceSuiteBase extends KafkaSourceSuiteBase {
val topicPartition = new TopicPartition(topic, 0)
// The message values are the same as their offsets to make the test easy to follow
- testUtils.withTranscationalProducer { producer =>
+ testUtils.withTransactionalProducer { producer =>
testStream(mapped)(
StartStream(ProcessingTime(100), clock),
waitUntilBatchProcessed,
@@ -954,7 +954,7 @@ abstract class KafkaMicroBatchSourceSuiteBase extends KafkaSourceSuiteBase {
.load()
.select($"value".as[String])
- testUtils.withTranscationalProducer { producer =>
+ testUtils.withTransactionalProducer { producer =>
producer.beginTransaction()
(0 to 3).foreach { i =>
producer.send(new ProducerRecord[String, String](topic, i.toString)).get()
@@ -970,7 +970,7 @@ abstract class KafkaMicroBatchSourceSuiteBase extends KafkaSourceSuiteBase {
// this case, if we forget to reset `FetchedData._nextOffsetInFetchedData` or
// `FetchedData._offsetAfterPoll` (See SPARK-25495), the next batch will see incorrect
// values and return wrong results hence fail the test.
- testUtils.withTranscationalProducer { producer =>
+ testUtils.withTransactionalProducer { producer =>
producer.beginTransaction()
(4 to 7).foreach { i =>
producer.send(new ProducerRecord[String, String](topic, i.toString)).get()
@@ -1472,7 +1472,7 @@ abstract class KafkaSourceSuiteBase extends KafkaSourceTest {
withTable(table) {
val topic = newTopic()
testUtils.createTopic(topic)
- testUtils.withTranscationalProducer { producer =>
+ testUtils.withTransactionalProducer { producer =>
val df = spark
.readStream
.format("kafka")
diff --git a/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaRelationSuite.scala b/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaRelationSuite.scala
index eb18697..4dc652b 100644
--- a/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaRelationSuite.scala
+++ b/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaRelationSuite.scala
@@ -239,7 +239,7 @@ class KafkaRelationSuite extends QueryTest with SharedSQLContext with KafkaTest
test("read Kafka transactional messages: read_committed") {
val topic = newTopic()
testUtils.createTopic(topic)
- testUtils.withTranscationalProducer { producer =>
+ testUtils.withTransactionalProducer { producer =>
val df = spark
.read
.format("kafka")
@@ -288,7 +288,7 @@ class KafkaRelationSuite extends QueryTest with SharedSQLContext with KafkaTest
test("read Kafka transactional messages: read_uncommitted") {
val topic = newTopic()
testUtils.createTopic(topic)
- testUtils.withTranscationalProducer { producer =>
+ testUtils.withTransactionalProducer { producer =>
val df = spark
.read
.format("kafka")
diff --git a/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaTestUtils.scala b/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaTestUtils.scala
index bf6934b..c67188a 100644
--- a/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaTestUtils.scala
+++ b/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaTestUtils.scala
@@ -348,7 +348,7 @@ class KafkaTestUtils(withBrokerProps: Map[String, Object] = Map.empty) extends L
}
/** Call `f` with a `KafkaProducer` that has initialized transactions. */
- def withTranscationalProducer(f: KafkaProducer[String, String] => Unit): Unit = {
+ def withTransactionalProducer(f: KafkaProducer[String, String] => Unit): Unit = {
val props = producerConfiguration
props.put("transactional.id", UUID.randomUUID().toString)
val producer = new KafkaProducer[String, String](props)
@@ -390,7 +390,7 @@ class KafkaTestUtils(withBrokerProps: Map[String, Object] = Map.empty) extends L
// ensure that logs from all replicas are deleted if delete topic is marked successful
assert(servers.forall(server => topicAndPartitions.forall(tp =>
server.getLogManager().getLog(tp).isEmpty)),
- s"topic $topic still exists in log mananger")
+ s"topic $topic still exists in log manager")
// ensure that topic is removed from all cleaner offsets
assert(servers.forall(server => topicAndPartitions.forall { tp =>
val checkpoints = server.getLogManager().liveLogDirs.map { logDir =>
diff --git a/external/kinesis-asl/src/main/java/org/apache/spark/examples/streaming/JavaKinesisWordCountASL.java b/external/kinesis-asl/src/main/java/org/apache/spark/examples/streaming/JavaKinesisWordCountASL.java
index 5a21253..6f58288 100644
--- a/external/kinesis-asl/src/main/java/org/apache/spark/examples/streaming/JavaKinesisWordCountASL.java
+++ b/external/kinesis-asl/src/main/java/org/apache/spark/examples/streaming/JavaKinesisWordCountASL.java
@@ -48,7 +48,7 @@ import com.amazonaws.services.kinesis.clientlibrary.lib.worker.InitialPositionIn
*
* Usage: JavaKinesisWordCountASL [app-name] [stream-name] [endpoint-url] [region-name]
* [app-name] is the name of the consumer app, used to track the read data in DynamoDB
- * [stream-name] name of the Kinesis stream (ie. mySparkStream)
+ * [stream-name] name of the Kinesis stream (i.e. mySparkStream)
* [endpoint-url] endpoint of the Kinesis service
* (e.g. https://kinesis.us-east-1.amazonaws.com)
*
diff --git a/external/kinesis-asl/src/main/python/examples/streaming/kinesis_wordcount_asl.py b/external/kinesis-asl/src/main/python/examples/streaming/kinesis_wordcount_asl.py
index 5370b79..66c9ed8 100644
--- a/external/kinesis-asl/src/main/python/examples/streaming/kinesis_wordcount_asl.py
+++ b/external/kinesis-asl/src/main/python/examples/streaming/kinesis_wordcount_asl.py
@@ -23,7 +23,7 @@
Usage: kinesis_wordcount_asl.py <app-name> <stream-name> <endpoint-url> <region-name>
<app-name> is the name of the consumer app, used to track the read data in DynamoDB
- <stream-name> name of the Kinesis stream (ie. mySparkStream)
+ <stream-name> name of the Kinesis stream (i.e. mySparkStream)
<endpoint-url> endpoint of the Kinesis service
(e.g. https://kinesis.us-east-1.amazonaws.com)
<region-name> region name of the Kinesis endpoint (e.g. us-east-1)
diff --git a/external/kinesis-asl/src/main/scala/org/apache/spark/examples/streaming/KinesisWordCountASL.scala b/external/kinesis-asl/src/main/scala/org/apache/spark/examples/streaming/KinesisWordCountASL.scala
index c78737d..755d5e9 100644
--- a/external/kinesis-asl/src/main/scala/org/apache/spark/examples/streaming/KinesisWordCountASL.scala
+++ b/external/kinesis-asl/src/main/scala/org/apache/spark/examples/streaming/KinesisWordCountASL.scala
@@ -43,7 +43,7 @@ import org.apache.spark.streaming.kinesis.KinesisInputDStream
*
* Usage: KinesisWordCountASL <app-name> <stream-name> <endpoint-url> <region-name>
* <app-name> is the name of the consumer app, used to track the read data in DynamoDB
- * <stream-name> name of the Kinesis stream (ie. mySparkStream)
+ * <stream-name> name of the Kinesis stream (i.e. mySparkStream)
* <endpoint-url> endpoint of the Kinesis service
* (e.g. https://kinesis.us-east-1.amazonaws.com)
*
@@ -167,9 +167,9 @@ object KinesisWordCountASL extends Logging {
* Usage: KinesisWordProducerASL <stream-name> <endpoint-url> \
* <records-per-sec> <words-per-record>
*
- * <stream-name> is the name of the Kinesis stream (ie. mySparkStream)
+ * <stream-name> is the name of the Kinesis stream (i.e. mySparkStream)
* <endpoint-url> is the endpoint of the Kinesis service
- * (ie. https://kinesis.us-east-1.amazonaws.com)
+ * (i.e. https://kinesis.us-east-1.amazonaws.com)
* <records-per-sec> is the rate of records per second to put onto the stream
* <words-per-record> is the number of words per record
*
diff --git a/external/kinesis-asl/src/main/scala/org/apache/spark/streaming/kinesis/KinesisUtils.scala b/external/kinesis-asl/src/main/scala/org/apache/spark/streaming/kinesis/KinesisUtils.scala
index c60b989..8127060 100644
--- a/external/kinesis-asl/src/main/scala/org/apache/spark/streaming/kinesis/KinesisUtils.scala
+++ b/external/kinesis-asl/src/main/scala/org/apache/spark/streaming/kinesis/KinesisUtils.scala
@@ -597,7 +597,7 @@ private class KinesisUtilsPythonHelper {
// scalastyle:on
if (!(stsAssumeRoleArn != null && stsSessionName != null && stsExternalId != null)
&& !(stsAssumeRoleArn == null && stsSessionName == null && stsExternalId == null)) {
- throw new IllegalArgumentException("stsAssumeRoleArn, stsSessionName, and stsExtenalId " +
+ throw new IllegalArgumentException("stsAssumeRoleArn, stsSessionName, and stsExternalId " +
"must all be defined or all be null")
}
diff --git a/graphx/src/test/scala/org/apache/spark/graphx/lib/PageRankSuite.scala b/graphx/src/test/scala/org/apache/spark/graphx/lib/PageRankSuite.scala
index 1e4c6c7..364c6f1 100644
--- a/graphx/src/test/scala/org/apache/spark/graphx/lib/PageRankSuite.scala
+++ b/graphx/src/test/scala/org/apache/spark/graphx/lib/PageRankSuite.scala
@@ -205,8 +205,8 @@ class PageRankSuite extends SparkFunSuite with LocalSparkContext {
withSpark { sc =>
// Check that implementation can handle large vertexIds, SPARK-25149
val vertexIdOffset = Int.MaxValue.toLong + 1
- val sourceOffest = 4
- val source = vertexIdOffset + sourceOffest
+ val sourceOffset = 4
+ val source = vertexIdOffset + sourceOffset
val numIter = 10
val vertices = vertexIdOffset until vertexIdOffset + numIter
val chain1 = vertices.zip(vertices.tail)
@@ -216,7 +216,7 @@ class PageRankSuite extends SparkFunSuite with LocalSparkContext {
val tol = 0.0001
val errorTol = 1.0e-1
- val a = resetProb / (1 - Math.pow(1 - resetProb, numIter - sourceOffest))
+ val a = resetProb / (1 - Math.pow(1 - resetProb, numIter - sourceOffset))
// We expect the rank to decay as (1 - resetProb) ^ distance
val expectedRanks = sc.parallelize(vertices).map { vid =>
val rank = if (vid < source) {