Posted to commits@spark.apache.org by gu...@apache.org on 2021/01/15 11:19:47 UTC

[spark] branch branch-2.4 updated: [SPARK-34118][CORE][SQL][2.4] Replaces filter and check for emptiness with exists or forall

This is an automated email from the ASF dual-hosted git repository.

gurwls223 pushed a commit to branch branch-2.4
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/branch-2.4 by this push:
     new 7ae6c8d  [SPARK-34118][CORE][SQL][2.4] Replaces filter and check for emptiness with exists or forall
7ae6c8d is described below

commit 7ae6c8d985b8f512555a9f373b7b445216006c53
Author: yangjie01 <ya...@baidu.com>
AuthorDate: Fri Jan 15 20:18:38 2021 +0900

    [SPARK-34118][CORE][SQL][2.4] Replaces filter and check for emptiness with exists or forall
    
    ### What changes were proposed in this pull request?
    This PR uses `exists` or `forall` to simplify `filter` + emptiness checks; the result is semantically equivalent but simpler. The rewrite rules are as follows (a small illustrative sketch appears after the list):
    
    - `seq.filter(p).size == 0` -> `!seq.exists(p)`
    - `seq.filter(p).length > 0` -> `seq.exists(p)`
    - `seq.filterNot(p).isEmpty` -> `seq.forall(p)`
    - `seq.filterNot(p).nonEmpty` -> `!seq.forall(p)`
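    
    For illustration, here is a minimal, self-contained Scala sketch (an editorial example, not part of the commit) that checks each rewrite above is behavior-preserving on a sample sequence and predicate:
    
    ```scala
    object FilterRewriteSketch {
      def main(args: Array[String]): Unit = {
        val seq = Seq(1, 2, 3, 4, 5)         // sample data
        val p: Int => Boolean = _ % 2 == 0   // sample predicate
    
        // seq.filter(p).size == 0  ->  !seq.exists(p)
        assert((seq.filter(p).size == 0) == !seq.exists(p))
        // seq.filter(p).length > 0  ->  seq.exists(p)
        assert((seq.filter(p).length > 0) == seq.exists(p))
        // seq.filterNot(p).isEmpty  ->  seq.forall(p)
        assert(seq.filterNot(p).isEmpty == seq.forall(p))
        // seq.filterNot(p).nonEmpty  ->  !seq.forall(p)
        assert(seq.filterNot(p).nonEmpty == !seq.forall(p))
      }
    }
    ```
    
    Beyond reading more clearly, the `exists`/`forall` forms short-circuit and avoid materializing the intermediate filtered collection.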
    
    ### Why are the changes needed?
    Code simplification.
    
    ### Does this PR introduce _any_ user-facing change?
    No
    
    ### How was this patch tested?
    Pass the Jenkins or GitHub Actions checks.
    
    Closes #31192 from LuciferYang/SPARK-34118-24.
    
    Authored-by: yangjie01 <ya...@baidu.com>
    Signed-off-by: HyukjinKwon <gu...@apache.org>
---
 core/src/main/scala/org/apache/spark/api/r/RUtils.scala               | 4 ++--
 core/src/test/scala/org/apache/spark/util/FileAppenderSuite.scala     | 2 +-
 sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala     | 2 +-
 .../org/apache/spark/sql/sources/v2/DataSourceV2UtilsSuite.scala      | 4 ++--
 4 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/core/src/main/scala/org/apache/spark/api/r/RUtils.scala b/core/src/main/scala/org/apache/spark/api/r/RUtils.scala
index 9bf35af..dec6caa 100644
--- a/core/src/main/scala/org/apache/spark/api/r/RUtils.scala
+++ b/core/src/main/scala/org/apache/spark/api/r/RUtils.scala
@@ -43,9 +43,9 @@ private[spark] object RUtils {
    * Check if SparkR is installed before running tests that use SparkR.
    */
   def isSparkRInstalled: Boolean = {
-    localSparkRPackagePath.filter { pkgDir =>
+    localSparkRPackagePath.exists { pkgDir =>
       new File(Seq(pkgDir, "SparkR").mkString(File.separator)).exists
-    }.isDefined
+    }
   }
 
   /**
diff --git a/core/src/test/scala/org/apache/spark/util/FileAppenderSuite.scala b/core/src/test/scala/org/apache/spark/util/FileAppenderSuite.scala
index 1a3e880..d412462 100644
--- a/core/src/test/scala/org/apache/spark/util/FileAppenderSuite.scala
+++ b/core/src/test/scala/org/apache/spark/util/FileAppenderSuite.scala
@@ -338,7 +338,7 @@ class FileAppenderSuite extends SparkFunSuite with BeforeAndAfter with Logging {
     assert(generatedFiles.size > 1)
     if (isCompressed) {
       assert(
-        generatedFiles.filter(_.getName.endsWith(RollingFileAppender.GZIP_LOG_SUFFIX)).size > 0)
+        generatedFiles.exists(_.getName.endsWith(RollingFileAppender.GZIP_LOG_SUFFIX)))
     }
     val allText = generatedFiles.map { file =>
       if (file.getName.endsWith(RollingFileAppender.GZIP_LOG_SUFFIX)) {
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
index dc61f72..022542b 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
@@ -694,7 +694,7 @@ class JDBCSuite extends QueryTest
   test("Remap types via JdbcDialects") {
     JdbcDialects.registerDialect(testH2Dialect)
     val df = spark.read.jdbc(urlWithUserAndPass, "TEST.PEOPLE", new Properties())
-    assert(df.schema.filter(_.dataType != org.apache.spark.sql.types.StringType).isEmpty)
+    assert(!df.schema.exists(_.dataType != org.apache.spark.sql.types.StringType))
     val rows = df.collect()
     assert(rows(0).get(0).isInstanceOf[String])
     assert(rows(0).get(1).isInstanceOf[String])
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/sources/v2/DataSourceV2UtilsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/sources/v2/DataSourceV2UtilsSuite.scala
index 4911e32..ffaa798 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/sources/v2/DataSourceV2UtilsSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/sources/v2/DataSourceV2UtilsSuite.scala
@@ -36,8 +36,8 @@ class DataSourceV2UtilsSuite extends SparkFunSuite {
     val cs = classOf[DataSourceV2WithSessionConfig].newInstance()
     val confs = DataSourceV2Utils.extractSessionConfigs(cs.asInstanceOf[DataSourceV2], conf)
     assert(confs.size == 2)
-    assert(confs.keySet.filter(_.startsWith("spark.datasource")).size == 0)
-    assert(confs.keySet.filter(_.startsWith("not.exist.prefix")).size == 0)
+    assert(!confs.keySet.exists(_.startsWith("spark.datasource")))
+    assert(!confs.keySet.exists(_.startsWith("not.exist.prefix")))
     assert(confs.keySet.contains("foo.bar"))
     assert(confs.keySet.contains("whateverConfigName"))
   }

