Posted to commits@spark.apache.org by gu...@apache.org on 2021/02/02 00:28:07 UTC

[spark] branch branch-3.0 updated: [SPARK-34310][CORE][SQL][3.0] Replaces map and flatten with flatMap

This is an automated email from the ASF dual-hosted git repository.

gurwls223 pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/branch-3.0 by this push:
     new a8e8ff6  [SPARK-34310][CORE][SQL][3.0] Replaces map and flatten with flatMap
a8e8ff6 is described below

commit a8e8ff60e902a503e3a2143c65fc6bac45cf52ab
Author: yangjie01 <ya...@baidu.com>
AuthorDate: Tue Feb 2 09:27:45 2021 +0900

    [SPARK-34310][CORE][SQL][3.0] Replaces map and flatten with flatMap
    
    ### What changes were proposed in this pull request?
    Replaces `collection.map(f1).flatten` with `collection.flatMap(f1)` where possible. The two forms are semantically equivalent, but `flatMap` is simpler and avoids materializing the intermediate collection of collections.
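
    As a minimal illustration of the equivalence (the names `xs` and `f1` are hypothetical, not taken from the patch):

    ```scala
    val xs = Seq(Seq(1, 2), Seq(3), Seq.empty[Int])
    val f1 = (s: Seq[Int]) => s.map(_ * 10)

    // Before: map produces a Seq[Seq[Int]], which flatten then collapses.
    val before = xs.map(f1).flatten

    // After: flatMap fuses the two steps into one pass.
    val after = xs.flatMap(f1)

    assert(before == after)  // Seq(10, 20, 30)
    ```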
    
    ### Why are the changes needed?
    Code simplification.
    
    ### Does this PR introduce _any_ user-facing change?
    No
    
    ### How was this patch tested?
    Pass the existing Jenkins or GitHub Actions builds.
    
    Closes #31418 from LuciferYang/SPARK-34310-30.
    
    Authored-by: yangjie01 <ya...@baidu.com>
    Signed-off-by: HyukjinKwon <gu...@apache.org>
---
 core/src/main/scala/org/apache/spark/rdd/SubtractedRDD.scala          | 2 +-
 .../spark/sql/execution/datasources/HadoopFileLinesReaderSuite.scala  | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/core/src/main/scala/org/apache/spark/rdd/SubtractedRDD.scala b/core/src/main/scala/org/apache/spark/rdd/SubtractedRDD.scala
index d5a811d..1dec977 100644
--- a/core/src/main/scala/org/apache/spark/rdd/SubtractedRDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/SubtractedRDD.scala
@@ -124,7 +124,7 @@ private[spark] class SubtractedRDD[K: ClassTag, V: ClassTag, W: ClassTag](
     integrate(0, t => getSeq(t._1) += t._2)
     // the second dep is rdd2; remove all of its keys
     integrate(1, t => map.remove(t._1))
-    map.asScala.iterator.map(t => t._2.iterator.map((t._1, _))).flatten
+    map.asScala.iterator.flatMap(t => t._2.iterator.map((t._1, _)))
   }
 
   override def clearDependencies(): Unit = {
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/HadoopFileLinesReaderSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/HadoopFileLinesReaderSuite.scala
index 3c83388..771ddbd 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/HadoopFileLinesReaderSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/HadoopFileLinesReaderSuite.scala
@@ -36,13 +36,13 @@ class HadoopFileLinesReaderSuite extends SharedSparkSession {
     val delimOpt = delimiter.map(_.getBytes(StandardCharsets.UTF_8))
     Files.write(path.toPath, text.getBytes(StandardCharsets.UTF_8))
 
-    val lines = ranges.map { case (start, length) =>
+    val lines = ranges.flatMap { case (start, length) =>
       val file = PartitionedFile(InternalRow.empty, path.getCanonicalPath, start, length)
       val hadoopConf = conf.getOrElse(spark.sessionState.newHadoopConf())
       val reader = new HadoopFileLinesReader(file, delimOpt, hadoopConf)
 
       reader.map(_.toString)
-    }.flatten
+    }
 
     lines
   }
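
For reference, the iterator pattern changed in SubtractedRDD.scala can be reproduced in a small standalone sketch (the map contents below are hypothetical stand-ins for the RDD's internal key -> values table):

```scala
import scala.collection.mutable

// A toy key -> values table, standing in for SubtractedRDD's internal map.
val map = mutable.Map("a" -> mutable.ArrayBuffer(1, 2), "b" -> mutable.ArrayBuffer(3))

// Old shape: an iterator of iterators, flattened afterwards.
val nested = map.iterator.map(t => t._2.iterator.map((t._1, _))).flatten

// New shape: flatMap yields the (key, value) pairs directly.
val fused = map.iterator.flatMap(t => t._2.iterator.map((t._1, _)))

// Compare as sets, since mutable.Map iteration order is unspecified.
assert(nested.toSet == fused.toSet)  // Set(("a", 1), ("a", 2), ("b", 3))
```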


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@spark.apache.org
For additional commands, e-mail: commits-help@spark.apache.org