Posted to commits@spark.apache.org by ru...@apache.org on 2024/03/12 07:53:01 UTC

(spark) branch master updated: [MINOR] Minor English fixes

This is an automated email from the ASF dual-hosted git repository.

ruifengz pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new e1865811cac9 [MINOR] Minor English fixes
e1865811cac9 is described below

commit e1865811cac9c809c89b4b1512843e4b716c3e46
Author: Nicholas Chammas <ni...@gmail.com>
AuthorDate: Tue Mar 12 15:52:43 2024 +0800

    [MINOR] Minor English fixes
    
    ### What changes were proposed in this pull request?
    
    Minor English grammar and wording fixes.
    
    ### Why are the changes needed?
    
    They're not strictly needed, but they give the project a tiny bit more polish.
    
    ### Does this PR introduce _any_ user-facing change?
    
    Yes, the wording of some user-facing error messages has been tweaked.
    
    ### How was this patch tested?
    
    No testing beyond CI.
    
    ### Was this patch authored or co-authored using generative AI tooling?
    
    No.
    
    Closes #45461 from nchammas/minor-wording-tweaks.
    
    Authored-by: Nicholas Chammas <ni...@gmail.com>
    Signed-off-by: Ruifeng Zheng <ru...@apache.org>
---
 common/utils/src/main/resources/error/error-classes.json          | 8 ++++----
 .../org/apache/spark/storage/BlockManagerMasterEndpoint.scala     | 6 +++---
 docs/sql-error-conditions.md                                      | 2 +-
 .../scala/org/apache/spark/sql/execution/ui/UISeleniumSuite.scala | 2 +-
 4 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/common/utils/src/main/resources/error/error-classes.json b/common/utils/src/main/resources/error/error-classes.json
index 99fbc585f981..93846e66df52 100644
--- a/common/utils/src/main/resources/error/error-classes.json
+++ b/common/utils/src/main/resources/error/error-classes.json
@@ -40,7 +40,7 @@
   "AMBIGUOUS_COLUMN_REFERENCE" : {
     "message" : [
       "Column <name> is ambiguous. It's because you joined several DataFrame together, and some of these DataFrames are the same.",
-      "This column points to one of the DataFrame but Spark is unable to figure out which one.",
+      "This column points to one of the DataFrames but Spark is unable to figure out which one.",
       "Please alias the DataFrames with different names via `DataFrame.alias` before joining them,",
       "and specify the column using qualified name, e.g. `df.alias(\"a\").join(df.alias(\"b\"), col(\"a.id\") > col(\"b.id\"))`."
     ],
@@ -6184,17 +6184,17 @@
   },
   "_LEGACY_ERROR_TEMP_2109" : {
     "message" : [
-      "Cannot build HashedRelation with more than 1/3 billions unique keys."
+      "Cannot build HashedRelation with more than 1/3 billion unique keys."
     ]
   },
   "_LEGACY_ERROR_TEMP_2110" : {
     "message" : [
-      "Can not build a HashedRelation that is larger than 8G."
+      "Cannot build a HashedRelation that is larger than 8G."
     ]
   },
   "_LEGACY_ERROR_TEMP_2111" : {
     "message" : [
-      "failed to push a row into <rowQueue>."
+      "Failed to push a row into <rowQueue>."
     ]
   },
   "_LEGACY_ERROR_TEMP_2112" : {
diff --git a/core/src/main/scala/org/apache/spark/storage/BlockManagerMasterEndpoint.scala b/core/src/main/scala/org/apache/spark/storage/BlockManagerMasterEndpoint.scala
index b4920c7cb841..5dd536eeb304 100644
--- a/core/src/main/scala/org/apache/spark/storage/BlockManagerMasterEndpoint.scala
+++ b/core/src/main/scala/org/apache/spark/storage/BlockManagerMasterEndpoint.scala
@@ -323,11 +323,11 @@ class BlockManagerMasterEndpoint(
       val isAlive = try {
         driverEndpoint.askSync[Boolean](CoarseGrainedClusterMessages.IsExecutorAlive(executorId))
       } catch {
-        // ignore the non-fatal error from driverEndpoint since the caller doesn't really
-        // care about the return result of removing blocks. And so we could avoid breaking
+        // Ignore the non-fatal error from driverEndpoint since the caller doesn't really
+        // care about the return result of removing blocks. That way we avoid breaking
         // down the whole application.
         case NonFatal(e) =>
-          logError(s"Fail to know the executor $executorId is alive or not.", e)
+          logError(s"Cannot determine whether executor $executorId is alive or not.", e)
           false
       }
       if (!isAlive) {
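
For background, the hunk above relies on `scala.util.control.NonFatal`, which matches ordinary exceptions while letting fatal throwables (`OutOfMemoryError`, `InterruptedException`, etc.) propagate. A minimal standalone sketch of the same guard, with a hypothetical `ping` standing in for `driverEndpoint.askSync`:

    import scala.util.control.NonFatal

    // Hypothetical stand-in for driverEndpoint.askSync: any call that may throw.
    def ping(executorId: String): Boolean =
      throw new RuntimeException(s"endpoint unreachable for $executorId")

    val isAlive = try {
      ping("exec-1")
    } catch {
      // Ordinary failures are swallowed and treated as "not alive";
      // fatal errors still propagate and take the application down.
      case NonFatal(e) =>
        println(s"Cannot determine whether executor exec-1 is alive or not: $e")
        false
    }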
diff --git a/docs/sql-error-conditions.md b/docs/sql-error-conditions.md
index b6b159f277c0..165f0a1d94af 100644
--- a/docs/sql-error-conditions.md
+++ b/docs/sql-error-conditions.md
@@ -71,7 +71,7 @@ Column or field `<name>` is ambiguous and has `<n>` matches.
 [SQLSTATE: 42702](sql-error-conditions-sqlstates.html#class-42-syntax-error-or-access-rule-violation)
 
 Column `<name>` is ambiguous. It's because you joined several DataFrame together, and some of these DataFrames are the same.
-This column points to one of the DataFrame but Spark is unable to figure out which one.
+This column points to one of the DataFrames but Spark is unable to figure out which one.
 Please alias the DataFrames with different names via `DataFrame.alias` before joining them,
 and specify the column using qualified name, e.g. `df.alias("a").join(df.alias("b"), col("a.id") > col("b.id"))`.
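
For context, here is a minimal sketch of the self-join ambiguity this message describes and the aliasing fix it recommends, assuming a local SparkSession named `spark` and a toy DataFrame (the exact error raised can vary by Spark version and API):

    import org.apache.spark.sql.functions.col

    val df = spark.range(5)

    // Ambiguous: both sides of this self-join expose a column named "id",
    // so Spark may be unable to tell which DataFrame col("id") refers to.
    // df.join(df, col("id") > col("id"))

    // The fix the message recommends: alias each side, then qualify the column.
    val joined = df.alias("a").join(df.alias("b"), col("a.id") > col("b.id"))
    joined.show()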
 
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/ui/UISeleniumSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/ui/UISeleniumSuite.scala
index dc617046c430..111e233c04e3 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/ui/UISeleniumSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/ui/UISeleniumSuite.scala
@@ -102,7 +102,7 @@ class UISeleniumSuite extends SparkFunSuite with WebBrowser {
   test("SPARK-44801: Analyzer failure shall show the query in failed table") {
     spark = creatSparkSessionWithUI
 
-    intercept[Exception](spark.sql("SELECT * FROM I_AM_A_INVISIBLE_TABLE").isEmpty)
+    intercept[Exception](spark.sql("SELECT * FROM I_AM_AN_INVISIBLE_TABLE").isEmpty)
     eventually(timeout(10.seconds), interval(100.milliseconds)) {
       val sd = findErrorMessageOnSQLUI()
       assert(sd.size === 1, "Analyze fail shall show the query in failed table")
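
For readers unfamiliar with the idiom in this hunk: ScalaTest's `intercept` runs a block, fails the test unless the block throws the given exception type, and returns the caught exception. A minimal sketch outside of Spark:

    import org.scalatest.funsuite.AnyFunSuite

    class InterceptSketchSuite extends AnyFunSuite {
      test("intercept returns the thrown exception") {
        // Fails the test if the block does NOT throw IllegalArgumentException.
        val e = intercept[IllegalArgumentException] {
          require(false, "boom")
        }
        assert(e.getMessage.contains("boom"))
      }
    }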

