Posted to commits@spark.apache.org by gu...@apache.org on 2023/02/08 01:44:54 UTC

[spark] branch master updated: [SPARK-42244][PYTHON] Refine error classes and messages

This is an automated email from the ASF dual-hosted git repository.

gurwls223 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 839c56a8d96 [SPARK-42244][PYTHON] Refine error classes and messages
839c56a8d96 is described below

commit 839c56a8d96e0885821c492407dbe2801398f2b8
Author: itholic <ha...@databricks.com>
AuthorDate: Wed Feb 8 10:44:40 2023 +0900

    [SPARK-42244][PYTHON] Refine error classes and messages
    
    ### What changes were proposed in this pull request?
    
    This PR proposes to refine the error classes and messages by using the Python type object names (e.g. `str`, `int`) in both the error class names and their messages.
    
    nit: also add missing periods at the end of error messages.
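
    As a minimal sketch of the pattern at call sites (`_check_col_name` is a hypothetical wrapper; its body is adapted from `DataFrame.colRegex` in the diff below), only the `error_class` string changes:

    ```python
    from pyspark.errors import PySparkTypeError

    def _check_col_name(colName: str) -> None:
        # Only the error_class value changed in this PR: "NOT_A_STRING" -> "NOT_STR".
        if not isinstance(colName, str):
            raise PySparkTypeError(
                error_class="NOT_STR",
                message_parameters={"arg_name": "colName", "arg_type": type(colName).__name__},
            )
    ```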
    
    ### Why are the changes needed?
    
    The type names were inconsistent in the previous error messages (e.g. "string" vs. "str", "integer" vs. "int"), and some error class names were too long to read.
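
    For instance, a sketch of the user-facing difference (assumes a local `SparkSession`; both messages are quoted from `error_classes.py` in this diff and rendered roughly as PySpark formats them):

    ```python
    from pyspark.sql import SparkSession

    spark = SparkSession.builder.getOrCreate()
    spark.range(1).show(n=True)  # bool is rejected explicitly, even though bool subclasses int
    # Before: PySparkTypeError: [NOT_AN_INTEGER] Argument `n` should be a integer, got bool.
    # After:  PySparkTypeError: [NOT_INT] Argument `n` should be an int, got bool.
    ```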
    
    ### Does this PR introduce _any_ user-facing change?
    
    No.
    
    ### How was this patch tested?
    
    The existing CI should pass.
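
    The assertion pattern those suites use (a fragment adapted from `test_dataframe.py` in this diff; it assumes the surrounding test class with its `check_error` helper and a `df` fixture):

    ```python
    with self.assertRaises(PySparkTypeError) as pe:
        df.show(vertical="foo")  # not a bool

    self.check_error(
        exception=pe.exception,
        error_class="NOT_BOOL",
        message_parameters={"arg_name": "vertical", "arg_type": "str"},
    )
    ```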
    
    Closes #39815 from itholic/SPARK-42244.
    
    Authored-by: itholic <ha...@databricks.com>
    Signed-off-by: Hyukjin Kwon <gu...@apache.org>
---
 python/pyspark/errors/error_classes.py             | 82 +++++++++++-----------
 python/pyspark/sql/connect/dataframe.py            | 28 ++++----
 python/pyspark/sql/connect/functions.py            | 34 ++++-----
 python/pyspark/sql/dataframe.py                    | 66 ++++++++---------
 python/pyspark/sql/functions.py                    | 22 +++---
 .../sql/tests/connect/test_connect_function.py     | 14 ++--
 python/pyspark/sql/tests/test_dataframe.py         | 24 +++----
 python/pyspark/sql/tests/test_functions.py         | 30 ++++----
 8 files changed, 150 insertions(+), 150 deletions(-)

diff --git a/python/pyspark/errors/error_classes.py b/python/pyspark/errors/error_classes.py
index ab65600eb31..9d0f1c6e769 100644
--- a/python/pyspark/errors/error_classes.py
+++ b/python/pyspark/errors/error_classes.py
@@ -39,99 +39,99 @@ ERROR_CLASSES_JSON = """
       "Function `<func_name>` should return Column, got <return_type>."
     ]
   },
-  "NOT_AN_INTEGER" : {
+  "NOT_BOOL" : {
     "message" : [
-      "Argument `<arg_name>` should be a integer, got <arg_type>."
+      "Argument `<arg_name>` should be a bool, got <arg_type>."
     ]
   },
-  "NOT_A_BOOLEAN" : {
+  "NOT_BOOL_OR_DICT_OR_FLOAT_OR_INT_OR_LIST_OR_STR_OR_TUPLE" : {
     "message" : [
-      "Argument `<arg_name>` should be a boolean, got <arg_type>."
+      "Argument `<arg_name>` should be a bool, dict, float, int, str or tuple, got <arg_type>."
     ]
   },
-  "NOT_A_COLUMN" : {
+  "NOT_BOOL_OR_DICT_OR_FLOAT_OR_INT_OR_STR" : {
     "message" : [
-      "Argument `<arg_name>` should be a Column, got <arg_type>."
+      "Argument `<arg_name>` should be a bool, dict, float, int or str, got <arg_type>."
     ]
   },
-  "NOT_A_DATAFRAME" : {
+  "NOT_BOOL_OR_LIST" : {
     "message" : [
-      "Argument `<arg_name>` must be a DataFrame, got <arg_type>."
+      "Argument `<arg_name>` should be a bool or list, got <arg_type>."
     ]
   },
-  "NOT_A_DICT" : {
+  "NOT_BOOL_OR_STR" : {
     "message" : [
-      "Argument `<arg_name>` should be a dict, got <arg_type>."
+      "Argument `<arg_name>` should be a bool or str, got <arg_type>."
     ]
   },
-  "NOT_A_STRING" : {
+  "NOT_COLUMN" : {
     "message" : [
-      "Argument `<arg_name>` should be a str, got <arg_type>."
+      "Argument `<arg_name>` should be a Column, got <arg_type>."
     ]
   },
-  "NOT_BOOL_OR_DICT_OR_FLOAT_OR_INTEGER_OR_LIST_OR_STRING_OR_TUPLE" : {
+  "NOT_COLUMN_OR_DATATYPE_OR_STR" : {
     "message" : [
-      "Argument `<arg_name>` should be a boolean, dict, float, integer, string or tuple, got <arg_type>."
+      "Argument `<arg_name>` should be a Column, str or DataType, but got <arg_type>."
     ]
   },
-  "NOT_BOOL_OR_DICT_OR_FLOAT_OR_INTEGER_OR_STRING" : {
+  "NOT_COLUMN_OR_FLOAT_OR_INT_OR_LIST_OR_STR" : {
     "message" : [
-      "Argument `<arg_name>` should be a boolean, dict, float, integer or string, got <arg_type>."
+      "Argument `<arg_name>` should be a column, float, integer, list or string, got <arg_type>."
     ]
   },
-  "NOT_BOOL_OR_LIST" : {
+  "NOT_COLUMN_OR_INT" : {
     "message" : [
-      "Argument `<arg_name>` should be a boolean or list, got <arg_type>."
+      "Argument `<arg_name>` should be a Column or int, got <arg_type>."
     ]
   },
-  "NOT_BOOL_OR_STRING" : {
+  "NOT_COLUMN_OR_INT_OR_STR" : {
     "message" : [
-      "Argument `<arg_name>` should be a boolean or string, got <arg_type>."
+      "Argument `<arg_name>` should be a Column, int or str, got <arg_type>."
     ]
   },
-  "NOT_COLUMN_OR_DATATYPE_OR_STRING" : {
+  "NOT_COLUMN_OR_STR" : {
     "message" : [
-      "Argument `<arg_name>` should be a Column or str or DataType, but got <arg_type>."
+      "Argument `<arg_name>` should be a Column or str, got <arg_type>."
     ]
   },
-  "NOT_COLUMN_OR_FLOAT_OR_INTEGER_OR_LIST_OR_STRING" : {
+  "NOT_DATAFRAME" : {
     "message" : [
-      "Argument `<arg_name>` should be a column, float, integer, list or string, got <arg_type>."
+      "Argument `<arg_name>` must be a DataFrame, got <arg_type>."
     ]
   },
-  "NOT_COLUMN_OR_INTEGER" : {
+  "NOT_DICT" : {
     "message" : [
-      "Argument `<arg_name>` should be a Column or int, got <arg_type>."
+      "Argument `<arg_name>` should be a dict, got <arg_type>."
     ]
   },
-  "NOT_COLUMN_OR_INTEGER_OR_STRING" : {
+  "NOT_FLOAT_OR_INT" : {
     "message" : [
-      "Argument `<arg_name>` should be a Column, int or str, got <arg_type>."
+      "Argument `<arg_name>` should be a float or int, got <arg_type>."
     ]
   },
-  "NOT_COLUMN_OR_STRING" : {
+  "NOT_FLOAT_OR_INT_OR_LIST_OR_STR" : {
     "message" : [
-      "Argument `<arg_name>` should be a Column or str, got <arg_type>."
+      "Argument `<arg_name>` should be a float, int, list or str, got <arg_type>."
     ]
   },
-  "NOT_FLOAT_OR_INTEGER" : {
+  "NOT_INT" : {
     "message" : [
-      "Argument `<arg_name>` should be a float or integer, got <arg_type>."
+      "Argument `<arg_name>` should be an int, got <arg_type>."
     ]
   },
-  "NOT_FLOAT_OR_INTEGER_OR_LIST_OR_STRING" : {
+  "NOT_LIST_OR_STR_OR_TUPLE" : {
     "message" : [
-      "Argument `<arg_name>` should be a float, integer, list or string, got <arg_type>."
+      "Argument `<arg_name>` should be a list, str or tuple, got <arg_type>."
     ]
   },
-  "NOT_LIST_OR_STRING_OR_TUPLE" : {
+  "NOT_LIST_OR_TUPLE" : {
     "message" : [
-      "Argument `<arg_name>` should be a list, string or tuple, got <arg_type>."
+      "Argument `<arg_name>` should be a list or tuple, got <arg_type>."
     ]
   },
-  "NOT_LIST_OR_TUPLE" : {
+  "NOT_STR" : {
     "message" : [
-      "Argument `<arg_name>` should be a list or tuple, got <arg_type>."
+      "Argument `<arg_name>` should be a str, got <arg_type>."
     ]
   },
   "UNSUPPORTED_NUMPY_ARRAY_SCALAR" : {
@@ -141,17 +141,17 @@ ERROR_CLASSES_JSON = """
   },
   "UNSUPPORTED_PARAM_TYPE_FOR_HIGHER_ORDER_FUNCTION" : {
     "message" : [
-      "Function `<func_name>` should use only POSITIONAL or POSITIONAL OR KEYWORD arguments"
+      "Function `<func_name>` should use only POSITIONAL or POSITIONAL OR KEYWORD arguments."
     ]
   },
   "WRONG_NUM_ARGS_FOR_HIGHER_ORDER_FUNCTION" : {
     "message" : [
-      "Function `<func_name>` should take between 1 and 3 arguments, but provided function takes <num_args>"
+      "Function `<func_name>` should take between 1 and 3 arguments, but provided function takes <num_args>."
     ]
   },
   "WRONG_NUM_COLUMNS" : {
     "message" : [
-      "Function `<func_name>` should take at least <num_cols> columns"
+      "Function `<func_name>` should take at least <num_cols> columns."
     ]
   }
 }
diff --git a/python/pyspark/sql/connect/dataframe.py b/python/pyspark/sql/connect/dataframe.py
index 9b0c911c10f..0c3eb5b8934 100644
--- a/python/pyspark/sql/connect/dataframe.py
+++ b/python/pyspark/sql/connect/dataframe.py
@@ -146,7 +146,7 @@ class DataFrame:
     def colRegex(self, colName: str) -> Column:
         if not isinstance(colName, str):
             raise PySparkTypeError(
-                error_class="NOT_A_STRING",
+                error_class="NOT_STR",
                 message_parameters={"arg_name": "colName", "arg_type": type(colName).__name__},
             )
         return Column(UnresolvedRegex(colName))
@@ -235,7 +235,7 @@ class DataFrame:
             )
         else:
             raise PySparkTypeError(
-                error_class="NOT_COLUMN_OR_STRING",
+                error_class="NOT_COLUMN_OR_STR",
                 message_parameters={
                     "arg_name": "numPartitions",
                     "arg_type": type(numPartitions).__name__,
@@ -577,7 +577,7 @@ class DataFrame:
     def withColumnsRenamed(self, colsMap: Dict[str, str]) -> "DataFrame":
         if not isinstance(colsMap, dict):
             raise PySparkTypeError(
-                error_class="NOT_A_DICT",
+                error_class="NOT_DICT",
                 message_parameters={"arg_name": "colsMap", "arg_type": type(colsMap).__name__},
             )
 
@@ -590,12 +590,12 @@ class DataFrame:
     ) -> str:
         if not isinstance(n, int) or isinstance(n, bool):
             raise PySparkTypeError(
-                error_class="NOT_AN_INTEGER",
+                error_class="NOT_INT",
                 message_parameters={"arg_name": "n", "arg_type": type(n).__name__},
             )
         if not isinstance(vertical, bool):
             raise PySparkTypeError(
-                error_class="NOT_A_BOOLEAN",
+                error_class="NOT_BOOL",
                 message_parameters={"arg_name": "vertical", "arg_type": type(vertical).__name__},
             )
 
@@ -607,7 +607,7 @@ class DataFrame:
                 _truncate = int(truncate)
             except ValueError:
                 raise PySparkTypeError(
-                    error_class="NOT_A_BOOLEAN",
+                    error_class="NOT_BOOL",
                     message_parameters={
                         "arg_name": "truncate",
                         "arg_type": type(truncate).__name__,
@@ -839,7 +839,7 @@ class DataFrame:
     def where(self, condition: Union[Column, str]) -> "DataFrame":
         if not isinstance(condition, (str, Column)):
             raise PySparkTypeError(
-                error_class="NOT_COLUMN_OR_STRING",
+                error_class="NOT_COLUMN_OR_STR",
                 message_parameters={"arg_name": "condition", "arg_type": type(condition).__name__},
             )
         return self.filter(condition)
@@ -859,7 +859,7 @@ class DataFrame:
     ) -> "DataFrame":
         if not isinstance(value, (float, int, str, bool, dict)):
             raise PySparkTypeError(
-                error_class="NOT_BOOL_OR_DICT_OR_FLOAT_OR_INTEGER_OR_STRING",
+                error_class="NOT_BOOL_OR_DICT_OR_FLOAT_OR_INT_OR_STR",
                 message_parameters={"arg_name": "value", "arg_type": type(value).__name__},
             )
         if isinstance(value, dict):
@@ -946,7 +946,7 @@ class DataFrame:
                 _cols = list(subset)
             else:
                 raise PySparkTypeError(
-                    error_class="NOT_LIST_OR_STRING_OR_TUPLE",
+                    error_class="NOT_LIST_OR_STR_OR_TUPLE",
                     message_parameters={"arg_name": "subset", "arg_type": type(subset).__name__},
                 )
 
@@ -1000,7 +1000,7 @@ class DataFrame:
         valid_types = (bool, float, int, str, list, tuple)
         if not isinstance(to_replace, valid_types + (dict,)):
             raise PySparkTypeError(
-                error_class="NOT_BOOL_OR_DICT_OR_FLOAT_OR_INTEGER_OR_LIST_OR_STRING_OR_TUPLE",
+                error_class="NOT_BOOL_OR_DICT_OR_FLOAT_OR_INT_OR_LIST_OR_STR_OR_TUPLE",
                 message_parameters={
                     "arg_name": "to_replace",
                     "arg_type": type(to_replace).__name__,
@@ -1100,12 +1100,12 @@ class DataFrame:
     def cov(self, col1: str, col2: str) -> float:
         if not isinstance(col1, str):
             raise PySparkTypeError(
-                error_class="NOT_A_STRING",
+                error_class="NOT_STR",
                 message_parameters={"arg_name": "col1", "arg_type": type(col1).__name__},
             )
         if not isinstance(col2, str):
             raise PySparkTypeError(
-                error_class="NOT_A_STRING",
+                error_class="NOT_STR",
                 message_parameters={"arg_name": "col2", "arg_type": type(col2).__name__},
             )
         pdf = DataFrame.withPlan(
@@ -1227,12 +1227,12 @@ class DataFrame:
             col = Column(ColumnReference(col))
         elif not isinstance(col, Column):
             raise PySparkTypeError(
-                error_class="NOT_COLUMN_OR_STRING",
+                error_class="NOT_COLUMN_OR_STR",
                 message_parameters={"arg_name": "col", "arg_type": type(col).__name__},
             )
         if not isinstance(fractions, dict):
             raise PySparkTypeError(
-                error_class="NOT_A_DICT",
+                error_class="NOT_DICT",
                 message_parameters={"arg_name": "fractions", "arg_type": type(fractions).__name__},
             )
         for k, v in fractions.items():
diff --git a/python/pyspark/sql/connect/functions.py b/python/pyspark/sql/connect/functions.py
index 509dd61bfdf..1c9e740474b 100644
--- a/python/pyspark/sql/connect/functions.py
+++ b/python/pyspark/sql/connect/functions.py
@@ -265,7 +265,7 @@ def broadcast(df: "DataFrame") -> "DataFrame":
 
     if not isinstance(df, DataFrame):
         raise PySparkTypeError(
-            error_class="NOT_A_DATAFRAME",
+            error_class="NOT_DATAFRAME",
             message_parameters={"arg_name": "df", "arg_type": type(df).__name__},
         )
     return df.hint("broadcast")
@@ -378,7 +378,7 @@ def when(condition: Column, value: Any) -> Column:
     # Explicitly not using ColumnOrName type here to make reading condition less opaque
     if not isinstance(condition, Column):
         raise PySparkTypeError(
-            error_class="NOT_A_COLUMN",
+            error_class="NOT_COLUMN",
             message_parameters={"arg_name": "condition", "arg_type": type(condition).__name__},
         )
 
@@ -1360,7 +1360,7 @@ def from_csv(
         _schema = lit(schema)
     else:
         raise PySparkTypeError(
-            error_class="NOT_COLUMN_OR_STRING",
+            error_class="NOT_COLUMN_OR_STR",
             message_parameters={"arg_name": "schema", "arg_type": type(schema).__name__},
         )
 
@@ -1386,7 +1386,7 @@ def from_json(
         _schema = lit(schema)
     else:
         raise PySparkTypeError(
-            error_class="NOT_COLUMN_OR_DATATYPE_OR_STRING",
+            error_class="NOT_COLUMN_OR_DATATYPE_OR_STR",
             message_parameters={"arg_name": "schema", "arg_type": type(schema).__name__},
         )
 
@@ -1547,7 +1547,7 @@ def schema_of_csv(csv: "ColumnOrName", options: Optional[Dict[str, str]] = None)
         _csv = lit(csv)
     else:
         raise PySparkTypeError(
-            error_class="NOT_COLUMN_OR_STRING",
+            error_class="NOT_COLUMN_OR_STR",
             message_parameters={"arg_name": "csv", "arg_type": type(csv).__name__},
         )
 
@@ -1567,7 +1567,7 @@ def schema_of_json(json: "ColumnOrName", options: Optional[Dict[str, str]] = Non
         _json = lit(json)
     else:
         raise PySparkTypeError(
-            error_class="NOT_COLUMN_OR_STRING",
+            error_class="NOT_COLUMN_OR_STR",
             message_parameters={"arg_name": "json", "arg_type": type(json).__name__},
         )
 
@@ -1603,7 +1603,7 @@ def slice(
         _start = lit(start)
     else:
         raise PySparkTypeError(
-            error_class="NOT_COLUMN_OR_INTEGER_OR_STRING",
+            error_class="NOT_COLUMN_OR_INT_OR_STR",
             message_parameters={"arg_name": "start", "arg_type": type(start).__name__},
         )
 
@@ -1613,7 +1613,7 @@ def slice(
         _length = lit(length)
     else:
         raise PySparkTypeError(
-            error_class="NOT_COLUMN_OR_INTEGER_OR_STRING",
+            error_class="NOT_COLUMN_OR_INT_OR_STR",
             message_parameters={"arg_name": "length", "arg_type": type(length).__name__},
         )
 
@@ -1805,12 +1805,12 @@ def overlay(
 ) -> Column:
     if not isinstance(pos, (int, str, Column)):
         raise PySparkTypeError(
-            error_class="NOT_COLUMN_OR_INTEGER_OR_STRING",
+            error_class="NOT_COLUMN_OR_INT_OR_STR",
             message_parameters={"arg_name": "pos", "arg_type": type(pos).__name__},
         )
     if len is not None and not isinstance(len, (int, str, Column)):
         raise PySparkTypeError(
-            error_class="NOT_COLUMN_OR_INTEGER_OR_STRING",
+            error_class="NOT_COLUMN_OR_INT_OR_STR",
             message_parameters={"arg_name": "len", "arg_type": type(len).__name__},
         )
 
@@ -2227,7 +2227,7 @@ def window(
 ) -> Column:
     if windowDuration is None or not isinstance(windowDuration, str):
         raise PySparkTypeError(
-            error_class="NOT_A_STRING",
+            error_class="NOT_STR",
             message_parameters={
                 "arg_name": "windowDuration",
                 "arg_type": type(windowDuration).__name__,
@@ -2235,7 +2235,7 @@ def window(
         )
     if slideDuration is not None and not isinstance(slideDuration, str):
         raise PySparkTypeError(
-            error_class="NOT_A_STRING",
+            error_class="NOT_STR",
             message_parameters={
                 "arg_name": "slideDuration",
                 "arg_type": type(slideDuration).__name__,
@@ -2243,7 +2243,7 @@ def window(
         )
     if startTime is not None and not isinstance(startTime, str):
         raise PySparkTypeError(
-            error_class="NOT_A_STRING",
+            error_class="NOT_STR",
             message_parameters={"arg_name": "startTime", "arg_type": type(startTime).__name__},
         )
 
@@ -2278,7 +2278,7 @@ window_time.__doc__ = pysparkfuncs.window_time.__doc__
 def session_window(timeColumn: "ColumnOrName", gapDuration: Union[Column, str]) -> Column:
     if gapDuration is None or not isinstance(gapDuration, (Column, str)):
         raise PySparkTypeError(
-            error_class="NOT_COLUMN_OR_STRING",
+            error_class="NOT_COLUMN_OR_STR",
             message_parameters={"arg_name": "gapDuration", "arg_type": type(gapDuration).__name__},
         )
 
@@ -2303,7 +2303,7 @@ def bucket(numBuckets: Union[Column, int], col: "ColumnOrName") -> Column:
         _numBuckets = numBuckets
     else:
         raise PySparkTypeError(
-            error_class="NOT_COLUMN_OR_INTEGER",
+            error_class="NOT_COLUMN_OR_INT",
             message_parameters={
                 "arg_name": "numBuckets",
                 "arg_type": type(numBuckets).__name__,
@@ -2351,7 +2351,7 @@ def assert_true(col: "ColumnOrName", errMsg: Optional[Union[Column, str]] = None
         return _invoke_function_over_columns("assert_true", col)
     if not isinstance(errMsg, (str, Column)):
         raise PySparkTypeError(
-            error_class="NOT_COLUMN_OR_STRING",
+            error_class="NOT_COLUMN_OR_STR",
             message_parameters={"arg_name": "errMsg", "arg_type": type(errMsg).__name__},
         )
     _err_msg = lit(errMsg) if isinstance(errMsg, str) else _to_col(errMsg)
@@ -2364,7 +2364,7 @@ assert_true.__doc__ = pysparkfuncs.assert_true.__doc__
 def raise_error(errMsg: Union[Column, str]) -> Column:
     if not isinstance(errMsg, (str, Column)):
         raise PySparkTypeError(
-            error_class="NOT_COLUMN_OR_STRING",
+            error_class="NOT_COLUMN_OR_STR",
             message_parameters={"arg_name": "errMsg", "arg_type": type(errMsg).__name__},
         )
     _err_msg = lit(errMsg) if isinstance(errMsg, str) else _to_col(errMsg)
diff --git a/python/pyspark/sql/dataframe.py b/python/pyspark/sql/dataframe.py
index 77a35ca8240..e794bb94e75 100644
--- a/python/pyspark/sql/dataframe.py
+++ b/python/pyspark/sql/dataframe.py
@@ -695,7 +695,7 @@ class DataFrame(PandasMapOpsMixin, PandasConversionMixin):
         if not (is_no_argument or is_extended_case or is_extended_as_mode or is_mode_case):
             if (extended is not None) and (not isinstance(extended, (bool, str))):
                 raise PySparkTypeError(
-                    error_class="NOT_BOOL_OR_STRING",
+                    error_class="NOT_BOOL_OR_STR",
                     message_parameters={
                         "arg_name": "extended",
                         "arg_type": type(extended).__name__,
@@ -703,7 +703,7 @@ class DataFrame(PandasMapOpsMixin, PandasConversionMixin):
                 )
             if (mode is not None) and (not isinstance(mode, str)):
                 raise PySparkTypeError(
-                    error_class="NOT_A_STRING",
+                    error_class="NOT_STR",
                     message_parameters={"arg_name": "mode", "arg_type": type(mode).__name__},
                 )
 
@@ -901,13 +901,13 @@ class DataFrame(PandasMapOpsMixin, PandasConversionMixin):
 
         if not isinstance(n, int) or isinstance(n, bool):
             raise PySparkTypeError(
-                error_class="NOT_AN_INTEGER",
+                error_class="NOT_INT",
                 message_parameters={"arg_name": "n", "arg_type": type(n).__name__},
             )
 
         if not isinstance(vertical, bool):
             raise PySparkTypeError(
-                error_class="NOT_A_BOOLEAN",
+                error_class="NOT_BOOL",
                 message_parameters={"arg_name": "vertical", "arg_type": type(vertical).__name__},
             )
 
@@ -918,7 +918,7 @@ class DataFrame(PandasMapOpsMixin, PandasConversionMixin):
                 int_truncate = int(truncate)
             except ValueError:
                 raise PySparkTypeError(
-                    error_class="NOT_A_BOOLEAN",
+                    error_class="NOT_BOOL",
                     message_parameters={
                         "arg_name": "truncate",
                         "arg_type": type(truncate).__name__,
@@ -1104,12 +1104,12 @@ class DataFrame(PandasMapOpsMixin, PandasConversionMixin):
         """
         if not eventTime or type(eventTime) is not str:
             raise PySparkTypeError(
-                error_class="NOT_A_STRING",
+                error_class="NOT_STR",
                 message_parameters={"arg_name": "eventTime", "arg_type": type(eventTime).__name__},
             )
         if not delayThreshold or type(delayThreshold) is not str:
             raise PySparkTypeError(
-                error_class="NOT_A_STRING",
+                error_class="NOT_STR",
                 message_parameters={
                     "arg_name": "delayThreshold",
                     "arg_type": type(delayThreshold).__name__,
@@ -1163,7 +1163,7 @@ class DataFrame(PandasMapOpsMixin, PandasConversionMixin):
 
         if not isinstance(name, str):
             raise PySparkTypeError(
-                error_class="NOT_A_STRING",
+                error_class="NOT_STR",
                 message_parameters={"arg_name": "name", "arg_type": type(name).__name__},
             )
 
@@ -1657,7 +1657,7 @@ class DataFrame(PandasMapOpsMixin, PandasConversionMixin):
             return DataFrame(self._jdf.repartition(self._jcols(*cols)), self.sparkSession)
         else:
             raise PySparkTypeError(
-                error_class="NOT_COLUMN_OR_STRING",
+                error_class="NOT_COLUMN_OR_STR",
                 message_parameters={
                     "arg_name": "numPartitions",
                     "arg_type": type(numPartitions).__name__,
@@ -1730,7 +1730,7 @@ class DataFrame(PandasMapOpsMixin, PandasConversionMixin):
             return DataFrame(self._jdf.repartitionByRange(self._jcols(*cols)), self.sparkSession)
         else:
             raise PySparkTypeError(
-                error_class="NOT_COLUMN_OR_INTEGER_OR_STRING",
+                error_class="NOT_COLUMN_OR_INT_OR_STR",
                 message_parameters={
                     "arg_name": "numPartitions",
                     "arg_type": type(numPartitions).__name__,
@@ -1910,12 +1910,12 @@ class DataFrame(PandasMapOpsMixin, PandasConversionMixin):
             col = Column(col)
         elif not isinstance(col, Column):
             raise PySparkTypeError(
-                error_class="NOT_COLUMN_OR_STRING",
+                error_class="NOT_COLUMN_OR_STR",
                 message_parameters={"arg_name": "col", "arg_type": type(col).__name__},
             )
         if not isinstance(fractions, dict):
             raise PySparkTypeError(
-                error_class="NOT_A_DICT",
+                error_class="NOT_DICT",
                 message_parameters={"arg_name": "fractions", "arg_type": type(fractions).__name__},
             )
         for k, v in fractions.items():
@@ -2061,7 +2061,7 @@ class DataFrame(PandasMapOpsMixin, PandasConversionMixin):
         """
         if not isinstance(colName, str):
             raise PySparkTypeError(
-                error_class="NOT_A_STRING",
+                error_class="NOT_STR",
                 message_parameters={"arg_name": "colName", "arg_type": type(colName).__name__},
             )
         jc = self._jdf.colRegex(colName)
@@ -2890,7 +2890,7 @@ class DataFrame(PandasMapOpsMixin, PandasConversionMixin):
             return Column(jc)
         else:
             raise PySparkTypeError(
-                error_class="NOT_COLUMN_OR_FLOAT_OR_INTEGER_OR_LIST_OR_STRING",
+                error_class="NOT_COLUMN_OR_FLOAT_OR_INT_OR_LIST_OR_STR",
                 message_parameters={"arg_name": "item", "arg_type": type(item).__name__},
             )
 
@@ -3089,7 +3089,7 @@ class DataFrame(PandasMapOpsMixin, PandasConversionMixin):
             jdf = self._jdf.filter(condition._jc)
         else:
             raise PySparkTypeError(
-                error_class="NOT_COLUMN_OR_STRING",
+                error_class="NOT_COLUMN_OR_STR",
                 message_parameters={"arg_name": "condition", "arg_type": type(condition).__name__},
             )
         return DataFrame(jdf, self.sparkSession)
@@ -3937,7 +3937,7 @@ class DataFrame(PandasMapOpsMixin, PandasConversionMixin):
             subset = [subset]
         elif not isinstance(subset, (list, tuple)):
             raise PySparkTypeError(
-                error_class="NOT_LIST_OR_STRING_OR_TUPLE",
+                error_class="NOT_LIST_OR_STR_OR_TUPLE",
                 message_parameters={"arg_name": "subset", "arg_type": type(subset).__name__},
             )
 
@@ -4036,7 +4036,7 @@ class DataFrame(PandasMapOpsMixin, PandasConversionMixin):
         """
         if not isinstance(value, (float, int, str, bool, dict)):
             raise PySparkTypeError(
-                error_class="NOT_BOOL_OR_DICT_OR_FLOAT_OR_INTEGER_OR_STRING",
+                error_class="NOT_BOOL_OR_DICT_OR_FLOAT_OR_INT_OR_STR",
                 message_parameters={"arg_name": "value", "arg_type": type(value).__name__},
             )
 
@@ -4221,7 +4221,7 @@ class DataFrame(PandasMapOpsMixin, PandasConversionMixin):
         valid_types = (bool, float, int, str, list, tuple)
         if not isinstance(to_replace, valid_types + (dict,)):
             raise PySparkTypeError(
-                error_class="NOT_BOOL_OR_DICT_OR_FLOAT_OR_INTEGER_OR_LIST_OR_STRING_OR_TUPLE",
+                error_class="NOT_BOOL_OR_DICT_OR_FLOAT_OR_INT_OR_LIST_OR_STR_OR_TUPLE",
                 message_parameters={
                     "arg_name": "to_replace",
                     "arg_type": type(to_replace).__name__,
@@ -4248,7 +4248,7 @@ class DataFrame(PandasMapOpsMixin, PandasConversionMixin):
 
         if not (subset is None or isinstance(subset, (list, tuple, str))):
             raise PySparkTypeError(
-                error_class="NOT_LIST_OR_STRING_OR_TUPLE",
+                error_class="NOT_LIST_OR_STR_OR_TUPLE",
                 message_parameters={"arg_name": "subset", "arg_type": type(subset).__name__},
             )
 
@@ -4364,7 +4364,7 @@ class DataFrame(PandasMapOpsMixin, PandasConversionMixin):
 
         if not isinstance(col, (str, list, tuple)):
             raise PySparkTypeError(
-                error_class="NOT_LIST_OR_STRING_OR_TUPLE",
+                error_class="NOT_LIST_OR_STR_OR_TUPLE",
                 message_parameters={"arg_name": "col", "arg_type": type(col).__name__},
             )
 
@@ -4405,7 +4405,7 @@ class DataFrame(PandasMapOpsMixin, PandasConversionMixin):
 
         if not isinstance(relativeError, (float, int)):
             raise PySparkTypeError(
-                error_class="NOT_FLOAT_OR_INTEGER",
+                error_class="NOT_FLOAT_OR_INT",
                 message_parameters={
                     "arg_name": "relativeError",
                     "arg_type": type(relativeError).__name__,
@@ -4453,12 +4453,12 @@ class DataFrame(PandasMapOpsMixin, PandasConversionMixin):
         """
         if not isinstance(col1, str):
             raise PySparkTypeError(
-                error_class="NOT_A_STRING",
+                error_class="NOT_STR",
                 message_parameters={"arg_name": "col1", "arg_type": type(col1).__name__},
             )
         if not isinstance(col2, str):
             raise PySparkTypeError(
-                error_class="NOT_A_STRING",
+                error_class="NOT_STR",
                 message_parameters={"arg_name": "col2", "arg_type": type(col2).__name__},
             )
         if not method:
@@ -4501,12 +4501,12 @@ class DataFrame(PandasMapOpsMixin, PandasConversionMixin):
         """
         if not isinstance(col1, str):
             raise PySparkTypeError(
-                error_class="NOT_A_STRING",
+                error_class="NOT_STR",
                 message_parameters={"arg_name": "col1", "arg_type": type(col1).__name__},
             )
         if not isinstance(col2, str):
             raise PySparkTypeError(
-                error_class="NOT_A_STRING",
+                error_class="NOT_STR",
                 message_parameters={"arg_name": "col2", "arg_type": type(col2).__name__},
             )
         return self._jdf.stat().cov(col1, col2)
@@ -4554,12 +4554,12 @@ class DataFrame(PandasMapOpsMixin, PandasConversionMixin):
         """
         if not isinstance(col1, str):
             raise PySparkTypeError(
-                error_class="NOT_A_STRING",
+                error_class="NOT_STR",
                 message_parameters={"arg_name": "col1", "arg_type": type(col1).__name__},
             )
         if not isinstance(col2, str):
             raise PySparkTypeError(
-                error_class="NOT_A_STRING",
+                error_class="NOT_STR",
                 message_parameters={"arg_name": "col2", "arg_type": type(col2).__name__},
             )
         return DataFrame(self._jdf.stat().crosstab(col1, col2), self.sparkSession)
@@ -4659,7 +4659,7 @@ class DataFrame(PandasMapOpsMixin, PandasConversionMixin):
 
         if not isinstance(colsMap, dict):
             raise PySparkTypeError(
-                error_class="NOT_A_DICT",
+                error_class="NOT_DICT",
                 message_parameters={"arg_name": "colsMap", "arg_type": type(colsMap).__name__},
             )
 
@@ -4716,7 +4716,7 @@ class DataFrame(PandasMapOpsMixin, PandasConversionMixin):
         """
         if not isinstance(col, Column):
             raise PySparkTypeError(
-                error_class="NOT_A_COLUMN",
+                error_class="NOT_COLUMN",
                 message_parameters={"arg_name": "col", "arg_type": type(col).__name__},
             )
         return DataFrame(self._jdf.withColumn(colName, col._jc), self.sparkSession)
@@ -4795,7 +4795,7 @@ class DataFrame(PandasMapOpsMixin, PandasConversionMixin):
         """
         if not isinstance(colsMap, dict):
             raise PySparkTypeError(
-                error_class="NOT_A_DICT",
+                error_class="NOT_DICT",
                 message_parameters={"arg_name": "colsMap", "arg_type": type(colsMap).__name__},
             )
 
@@ -4827,7 +4827,7 @@ class DataFrame(PandasMapOpsMixin, PandasConversionMixin):
         """
         if not isinstance(metadata, dict):
             raise PySparkTypeError(
-                error_class="NOT_A_DICT",
+                error_class="NOT_DICT",
                 message_parameters={"arg_name": "metadata", "arg_type": type(metadata).__name__},
             )
         sc = SparkContext._active_spark_context
@@ -4904,7 +4904,7 @@ class DataFrame(PandasMapOpsMixin, PandasConversionMixin):
                 jdf = self._jdf.drop(col._jc)
             else:
                 raise PySparkTypeError(
-                    error_class="NOT_COLUMN_OR_STRING",
+                    error_class="NOT_COLUMN_OR_STR",
                     message_parameters={"arg_name": "col", "arg_type": type(col).__name__},
                 )
         else:
@@ -5048,7 +5048,7 @@ class DataFrame(PandasMapOpsMixin, PandasConversionMixin):
         """
         if not isinstance(other, DataFrame):
             raise PySparkTypeError(
-                error_class="NOT_A_STRING",
+                error_class="NOT_STR",
                 message_parameters={"arg_name": "other", "arg_type": type(other).__name__},
             )
         return self._jdf.sameSemantics(other._jdf)
diff --git a/python/pyspark/sql/functions.py b/python/pyspark/sql/functions.py
index 157432e07b0..8bee517de6a 100644
--- a/python/pyspark/sql/functions.py
+++ b/python/pyspark/sql/functions.py
@@ -3832,7 +3832,7 @@ def when(condition: Column, value: Any) -> Column:
     # Explicitly not using ColumnOrName type here to make reading condition less opaque
     if not isinstance(condition, Column):
         raise PySparkTypeError(
-            error_class="NOT_A_COLUMN",
+            error_class="NOT_COLUMN",
             message_parameters={"arg_name": "condition", "arg_type": type(condition).__name__},
         )
     v = value._jc if isinstance(value, Column) else value
@@ -5470,7 +5470,7 @@ def window(
     def check_string_field(field, fieldName):  # type: ignore[no-untyped-def]
         if not field or type(field) is not str:
             raise PySparkTypeError(
-                error_class="NOT_A_STRING",
+                error_class="NOT_STR",
                 message_parameters={"arg_name": fieldName, "arg_type": type(field).__name__},
             )
 
@@ -5589,7 +5589,7 @@ def session_window(timeColumn: "ColumnOrName", gapDuration: Union[Column, str])
     def check_field(field: Union[Column, str], fieldName: str) -> None:
         if field is None or not isinstance(field, (str, Column)):
             raise PySparkTypeError(
-                error_class="NOT_COLUMN_OR_STRING",
+                error_class="NOT_COLUMN_OR_STR",
                 message_parameters={"arg_name": fieldName, "arg_type": type(field).__name__},
             )
 
@@ -5853,7 +5853,7 @@ def assert_true(col: "ColumnOrName", errMsg: Optional[Union[Column, str]] = None
         return _invoke_function_over_columns("assert_true", col)
     if not isinstance(errMsg, (str, Column)):
         raise PySparkTypeError(
-            error_class="NOT_COLUMN_OR_STRING",
+            error_class="NOT_COLUMN_OR_STR",
             message_parameters={"arg_name": "errMsg", "arg_type": type(errMsg).__name__},
         )
 
@@ -5893,7 +5893,7 @@ def raise_error(errMsg: Union[Column, str]) -> Column:
     """
     if not isinstance(errMsg, (str, Column)):
         raise PySparkTypeError(
-            error_class="NOT_COLUMN_OR_STRING",
+            error_class="NOT_COLUMN_OR_STR",
             message_parameters={"arg_name": "errMsg", "arg_type": type(errMsg).__name__},
         )
 
@@ -6435,12 +6435,12 @@ def overlay(
     """
     if not isinstance(pos, (int, str, Column)):
         raise PySparkTypeError(
-            error_class="NOT_COLUMN_OR_INTEGER_OR_STRING",
+            error_class="NOT_COLUMN_OR_INT_OR_STR",
             message_parameters={"arg_name": "pos", "arg_type": type(pos).__name__},
         )
     if len is not None and not isinstance(len, (int, str, Column)):
         raise PySparkTypeError(
-            error_class="NOT_COLUMN_OR_INTEGER_OR_STRING",
+            error_class="NOT_COLUMN_OR_INT_OR_STR",
             message_parameters={"arg_name": "len", "arg_type": type(len).__name__},
         )
 
@@ -8381,7 +8381,7 @@ def schema_of_json(json: "ColumnOrName", options: Optional[Dict[str, str]] = Non
         col = _to_java_column(json)
     else:
         raise PySparkTypeError(
-            error_class="NOT_COLUMN_OR_STRING",
+            error_class="NOT_COLUMN_OR_STR",
             message_parameters={"arg_name": "json", "arg_type": type(json).__name__},
         )
 
@@ -8428,7 +8428,7 @@ def schema_of_csv(csv: "ColumnOrName", options: Optional[Dict[str, str]] = None)
         col = _to_java_column(csv)
     else:
         raise PySparkTypeError(
-            error_class="NOT_COLUMN_OR_STRING",
+            error_class="NOT_COLUMN_OR_STR",
             message_parameters={"arg_name": "csv", "arg_type": type(csv).__name__},
         )
 
@@ -9163,7 +9163,7 @@ def from_csv(
         schema = _to_java_column(schema)
     else:
         raise PySparkTypeError(
-            error_class="NOT_COLUMN_OR_STRING",
+            error_class="NOT_COLUMN_OR_STR",
             message_parameters={"arg_name": "schema", "arg_type": type(schema).__name__},
         )
 
@@ -9974,7 +9974,7 @@ def bucket(numBuckets: Union[Column, int], col: "ColumnOrName") -> Column:
     """
     if not isinstance(numBuckets, (int, Column)):
         raise PySparkTypeError(
-            error_class="NOT_COLUMN_OR_INTEGER",
+            error_class="NOT_COLUMN_OR_INT",
             message_parameters={"arg_name": "numBuckets", "arg_type": type(numBuckets).__name__},
         )
 
diff --git a/python/pyspark/sql/tests/connect/test_connect_function.py b/python/pyspark/sql/tests/connect/test_connect_function.py
index 0832c9739d6..e3e668eb835 100644
--- a/python/pyspark/sql/tests/connect/test_connect_function.py
+++ b/python/pyspark/sql/tests/connect/test_connect_function.py
@@ -169,7 +169,7 @@ class SparkConnectFunctionTests(ReusedConnectTestCase, PandasOnSparkTestUtils, S
 
         self.check_error(
             exception=pe.exception,
-            error_class="NOT_A_DATAFRAME",
+            error_class="NOT_DATAFRAME",
             message_parameters={"arg_name": "df", "arg_type": "Column"},
         )
 
@@ -370,7 +370,7 @@ class SparkConnectFunctionTests(ReusedConnectTestCase, PandasOnSparkTestUtils, S
 
         self.check_error(
             exception=pe.exception,
-            error_class="NOT_A_COLUMN",
+            error_class="NOT_COLUMN",
             message_parameters={"arg_name": "condition", "arg_type": "bool"},
         )
 
@@ -1182,7 +1182,7 @@ class SparkConnectFunctionTests(ReusedConnectTestCase, PandasOnSparkTestUtils, S
 
         self.check_error(
             exception=pe.exception,
-            error_class="NOT_COLUMN_OR_INTEGER_OR_STRING",
+            error_class="NOT_COLUMN_OR_INT_OR_STR",
             message_parameters={"arg_name": "start", "arg_type": "float"},
         )
 
@@ -1191,7 +1191,7 @@ class SparkConnectFunctionTests(ReusedConnectTestCase, PandasOnSparkTestUtils, S
 
         self.check_error(
             exception=pe.exception,
-            error_class="NOT_COLUMN_OR_INTEGER_OR_STRING",
+            error_class="NOT_COLUMN_OR_INT_OR_STR",
             message_parameters={"arg_name": "length", "arg_type": "float"},
         )
 
@@ -1791,7 +1791,7 @@ class SparkConnectFunctionTests(ReusedConnectTestCase, PandasOnSparkTestUtils, S
 
         self.check_error(
             exception=pe.exception,
-            error_class="NOT_COLUMN_OR_DATATYPE_OR_STRING",
+            error_class="NOT_COLUMN_OR_DATATYPE_OR_STR",
             message_parameters={"arg_name": "schema", "arg_type": "list"},
         )
 
@@ -2196,7 +2196,7 @@ class SparkConnectFunctionTests(ReusedConnectTestCase, PandasOnSparkTestUtils, S
 
         self.check_error(
             exception=pe.exception,
-            error_class="NOT_A_STRING",
+            error_class="NOT_STR",
             message_parameters={"arg_name": "slideDuration", "arg_type": "int"},
         )
 
@@ -2205,7 +2205,7 @@ class SparkConnectFunctionTests(ReusedConnectTestCase, PandasOnSparkTestUtils, S
 
         self.check_error(
             exception=pe.exception,
-            error_class="NOT_A_STRING",
+            error_class="NOT_STR",
             message_parameters={"arg_name": "startTime", "arg_type": "int"},
         )
 
diff --git a/python/pyspark/sql/tests/test_dataframe.py b/python/pyspark/sql/tests/test_dataframe.py
index dfd2f305bf8..033878470e1 100644
--- a/python/pyspark/sql/tests/test_dataframe.py
+++ b/python/pyspark/sql/tests/test_dataframe.py
@@ -122,7 +122,7 @@ class DataFrameTestsMixin:
 
         self.check_error(
             exception=pe.exception,
-            error_class="NOT_A_DICT",
+            error_class="NOT_DICT",
             message_parameters={"arg_name": "colsMap", "arg_type": "tuple"},
         )
 
@@ -225,7 +225,7 @@ class DataFrameTestsMixin:
 
         self.check_error(
             exception=pe.exception,
-            error_class="NOT_LIST_OR_STRING_OR_TUPLE",
+            error_class="NOT_LIST_OR_STR_OR_TUPLE",
             message_parameters={"arg_name": "subset", "arg_type": "int"},
         )
 
@@ -305,7 +305,7 @@ class DataFrameTestsMixin:
 
         self.check_error(
             exception=pe.exception,
-            error_class="NOT_BOOL_OR_DICT_OR_FLOAT_OR_INTEGER_OR_STRING",
+            error_class="NOT_BOOL_OR_DICT_OR_FLOAT_OR_INT_OR_STR",
             message_parameters={"arg_name": "value", "arg_type": "list"},
         )
 
@@ -356,7 +356,7 @@ class DataFrameTestsMixin:
 
         self.check_error(
             exception=pe.exception,
-            error_class="NOT_COLUMN_OR_INTEGER_OR_STRING",
+            error_class="NOT_COLUMN_OR_INT_OR_STR",
             message_parameters={"arg_name": "numPartitions", "arg_type": "list"},
         )
 
@@ -555,7 +555,7 @@ class DataFrameTestsMixin:
 
         self.check_error(
             exception=pe.exception,
-            error_class="NOT_BOOL_OR_DICT_OR_FLOAT_OR_INTEGER_OR_LIST_OR_STRING_OR_TUPLE",
+            error_class="NOT_BOOL_OR_DICT_OR_FLOAT_OR_INT_OR_LIST_OR_STR_OR_TUPLE",
             message_parameters={"arg_name": "to_replace", "arg_type": "function"},
         )
 
@@ -1489,7 +1489,7 @@ class DataFrameTestsMixin:
 
             self.check_error(
                 exception=pe.exception,
-                error_class="NOT_A_STRING",
+                error_class="NOT_STR",
                 message_parameters={"arg_name": "other", "arg_type": "int"},
             )
 
@@ -1525,7 +1525,7 @@ class DataFrameTestsMixin:
 
         self.check_error(
             exception=pe.exception,
-            error_class="NOT_AN_INTEGER",
+            error_class="NOT_INT",
             message_parameters={"arg_name": "n", "arg_type": "bool"},
         )
 
@@ -1534,7 +1534,7 @@ class DataFrameTestsMixin:
 
         self.check_error(
             exception=pe.exception,
-            error_class="NOT_A_BOOLEAN",
+            error_class="NOT_BOOL",
             message_parameters={"arg_name": "vertical", "arg_type": "str"},
         )
 
@@ -1543,7 +1543,7 @@ class DataFrameTestsMixin:
 
         self.check_error(
             exception=pe.exception,
-            error_class="NOT_A_BOOLEAN",
+            error_class="NOT_BOOL",
             message_parameters={"arg_name": "truncate", "arg_type": "str"},
         )
 
@@ -1615,7 +1615,7 @@ class DataFrameTestsMixin:
 
         self.check_error(
             exception=pe.exception,
-            error_class="NOT_COLUMN_OR_STRING",
+            error_class="NOT_COLUMN_OR_STR",
             message_parameters={"arg_name": "numPartitions", "arg_type": "list"},
         )
 
@@ -1625,7 +1625,7 @@ class DataFrameTestsMixin:
 
         self.check_error(
             exception=pe.exception,
-            error_class="NOT_A_STRING",
+            error_class="NOT_STR",
             message_parameters={"arg_name": "colName", "arg_type": "int"},
         )
 
@@ -1635,7 +1635,7 @@ class DataFrameTestsMixin:
 
         self.check_error(
             exception=pe.exception,
-            error_class="NOT_COLUMN_OR_STRING",
+            error_class="NOT_COLUMN_OR_STR",
             message_parameters={"arg_name": "condition", "arg_type": "int"},
         )
 
diff --git a/python/pyspark/sql/tests/test_functions.py b/python/pyspark/sql/tests/test_functions.py
index f8098d185af..05492347755 100644
--- a/python/pyspark/sql/tests/test_functions.py
+++ b/python/pyspark/sql/tests/test_functions.py
@@ -218,7 +218,7 @@ class FunctionsTestsMixin:
 
         self.check_error(
             exception=pe.exception,
-            error_class="NOT_COLUMN_OR_STRING",
+            error_class="NOT_COLUMN_OR_STR",
             message_parameters={"arg_name": "col", "arg_type": "int"},
         )
 
@@ -227,7 +227,7 @@ class FunctionsTestsMixin:
 
         self.check_error(
             exception=pe.exception,
-            error_class="NOT_A_DICT",
+            error_class="NOT_DICT",
             message_parameters={"arg_name": "fractions", "arg_type": "list"},
         )
 
@@ -255,7 +255,7 @@ class FunctionsTestsMixin:
 
         self.check_error(
             exception=pe.exception,
-            error_class="NOT_A_STRING",
+            error_class="NOT_STR",
             message_parameters={"arg_name": "col1", "arg_type": "int"},
         )
 
@@ -264,7 +264,7 @@ class FunctionsTestsMixin:
 
         self.check_error(
             exception=pe.exception,
-            error_class="NOT_A_STRING",
+            error_class="NOT_STR",
             message_parameters={"arg_name": "col2", "arg_type": "bool"},
         )
 
@@ -764,7 +764,7 @@ class FunctionsTestsMixin:
 
         self.check_error(
             exception=pe.exception,
-            error_class="NOT_COLUMN_OR_INTEGER_OR_STRING",
+            error_class="NOT_COLUMN_OR_INT_OR_STR",
             message_parameters={"arg_name": "pos", "arg_type": "float"},
         )
 
@@ -773,7 +773,7 @@ class FunctionsTestsMixin:
 
         self.check_error(
             exception=pe.exception,
-            error_class="NOT_COLUMN_OR_INTEGER_OR_STRING",
+            error_class="NOT_COLUMN_OR_INT_OR_STR",
             message_parameters={"arg_name": "len", "arg_type": "float"},
         )
 
@@ -1076,7 +1076,7 @@ class FunctionsTestsMixin:
 
         self.check_error(
             exception=pe.exception,
-            error_class="NOT_COLUMN_OR_STRING",
+            error_class="NOT_COLUMN_OR_STR",
             message_parameters={"arg_name": "errMsg", "arg_type": "int"},
         )
 
@@ -1096,7 +1096,7 @@ class FunctionsTestsMixin:
 
         self.check_error(
             exception=pe.exception,
-            error_class="NOT_COLUMN_OR_STRING",
+            error_class="NOT_COLUMN_OR_STR",
             message_parameters={"arg_name": "errMsg", "arg_type": "NoneType"},
         )
 
@@ -1280,7 +1280,7 @@ class FunctionsTestsMixin:
 
         self.check_error(
             exception=pe.exception,
-            error_class="NOT_COLUMN_OR_STRING",
+            error_class="NOT_COLUMN_OR_STR",
             message_parameters={"arg_name": "json", "arg_type": "int"},
         )
 
@@ -1290,7 +1290,7 @@ class FunctionsTestsMixin:
 
         self.check_error(
             exception=pe.exception,
-            error_class="NOT_COLUMN_OR_STRING",
+            error_class="NOT_COLUMN_OR_STR",
             message_parameters={"arg_name": "csv", "arg_type": "int"},
         )
 
@@ -1301,7 +1301,7 @@ class FunctionsTestsMixin:
 
         self.check_error(
             exception=pe.exception,
-            error_class="NOT_COLUMN_OR_STRING",
+            error_class="NOT_COLUMN_OR_STR",
             message_parameters={"arg_name": "schema", "arg_type": "int"},
         )
 
@@ -1322,7 +1322,7 @@ class FunctionsTestsMixin:
 
         self.check_error(
             exception=pe.exception,
-            error_class="NOT_A_COLUMN",
+            error_class="NOT_COLUMN",
             message_parameters={"arg_name": "condition", "arg_type": "str"},
         )
 
@@ -1332,7 +1332,7 @@ class FunctionsTestsMixin:
 
         self.check_error(
             exception=pe.exception,
-            error_class="NOT_A_STRING",
+            error_class="NOT_STR",
             message_parameters={"arg_name": "windowDuration", "arg_type": "int"},
         )
 
@@ -1342,7 +1342,7 @@ class FunctionsTestsMixin:
 
         self.check_error(
             exception=pe.exception,
-            error_class="NOT_COLUMN_OR_STRING",
+            error_class="NOT_COLUMN_OR_STR",
             message_parameters={"arg_name": "gapDuration", "arg_type": "int"},
         )
 
@@ -1352,7 +1352,7 @@ class FunctionsTestsMixin:
 
         self.check_error(
             exception=pe.exception,
-            error_class="NOT_COLUMN_OR_INTEGER",
+            error_class="NOT_COLUMN_OR_INT",
             message_parameters={"arg_name": "numBuckets", "arg_type": "str"},
         )
 


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@spark.apache.org
For additional commands, e-mail: commits-help@spark.apache.org