Posted to commits@spark.apache.org by gu...@apache.org on 2021/04/13 02:22:13 UTC

[spark] branch branch-3.1 updated: [SPARK-35019][PYTHON][SQL] Fix type hints mismatches in pyspark.sql.*

This is an automated email from the ASF dual-hosted git repository.

gurwls223 pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
     new edb1abf  [SPARK-35019][PYTHON][SQL] Fix type hints mismatches in pyspark.sql.*
edb1abf is described below

commit edb1abf260a47af7460f1f239fc44ef2ad433400
Author: Yikun Jiang <yi...@gmail.com>
AuthorDate: Tue Apr 13 11:21:13 2021 +0900

    [SPARK-35019][PYTHON][SQL] Fix type hints mismatches in pyspark.sql.*
    
    ### What changes were proposed in this pull request?
    Fix type hints mismatches in pyspark.sql.*
    
    ### Why are the changes needed?
    Several annotations in pyspark.sql.* did not match runtime behavior:
    `registerFunction` (on both Catalog and SQLContext) returns the registered
    UDF rather than None, `RuntimeConfig.set` returns None rather than str,
    `DataFrame.withWatermark` accepts only a column name string for eventTime,
    and `percentile_approx` also accepts a tuple of percentages.
    
    ### Does this PR introduce _any_ user-facing change?
    No
    
    ### How was this patch tested?
    dev/lint-python passed.
    
    Closes #32122 from Yikun/SPARK-35019.
    
    Authored-by: Yikun Jiang <yi...@gmail.com>
    Signed-off-by: HyukjinKwon <gu...@apache.org>
    (cherry picked from commit b43f7e6a974cac5aae401224c14b870c18fbded8)
    Signed-off-by: HyukjinKwon <gu...@apache.org>
---
 python/pyspark/sql/catalog.pyi   | 3 ++-
 python/pyspark/sql/conf.pyi      | 2 +-
 python/pyspark/sql/context.pyi   | 3 ++-
 python/pyspark/sql/dataframe.py  | 2 +-
 python/pyspark/sql/dataframe.pyi | 2 +-
 python/pyspark/sql/functions.pyi | 2 +-
 6 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/python/pyspark/sql/catalog.pyi b/python/pyspark/sql/catalog.pyi
index 86263ff..cb5436d 100644
--- a/python/pyspark/sql/catalog.pyi
+++ b/python/pyspark/sql/catalog.pyi
@@ -17,6 +17,7 @@
 # under the License.
 
 from typing import Any, Callable, List, Optional
+from pyspark.sql._typing import UserDefinedFunctionLike
 from pyspark.sql.dataframe import DataFrame
 from pyspark.sql.session import SparkSession
 from pyspark.sql.types import DataType, StructType
@@ -53,7 +54,7 @@ class Catalog:
     def dropGlobalTempView(self, viewName: str) -> None: ...
     def registerFunction(
         self, name: str, f: Callable[..., Any], returnType: DataType = ...
-    ) -> None: ...
+    ) -> UserDefinedFunctionLike: ...
     def isCached(self, tableName: str) -> bool: ...
     def cacheTable(self, tableName: str) -> None: ...
     def uncacheTable(self, tableName: str) -> None: ...
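
For context, a minimal sketch (not part of this commit; names and the local
master are illustrative) of the behavior the new annotation captures: at
runtime registerFunction returns the registered UDF rather than None, so the
result can be used directly in DataFrame expressions. The identical fix is
applied to SQLContext.registerFunction further below.

    from pyspark.sql import SparkSession
    from pyspark.sql.types import IntegerType

    spark = SparkSession.builder.master("local[1]").getOrCreate()

    # registerFunction returns the wrapped UDF (a UserDefinedFunctionLike),
    # not None as the old stub claimed.
    plus_one = spark.catalog.registerFunction(
        "plus_one", lambda x: x + 1, IntegerType()
    )

    # Usable both through the DataFrame API and by name from SQL.
    spark.range(3).select(plus_one("id")).show()
    spark.sql("SELECT plus_one(id) FROM range(3)").show()
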
diff --git a/python/pyspark/sql/conf.pyi b/python/pyspark/sql/conf.pyi
index 304dfcb..3e88f84 100644
--- a/python/pyspark/sql/conf.pyi
+++ b/python/pyspark/sql/conf.pyi
@@ -21,7 +21,7 @@ from py4j.java_gateway import JavaObject  # type: ignore[import]
 
 class RuntimeConfig:
     def __init__(self, jconf: JavaObject) -> None: ...
-    def set(self, key: str, value: str) -> str: ...
+    def set(self, key: str, value: str) -> None: ...
     def get(self, key: str, default: Optional[str] = ...) -> str: ...
     def unset(self, key: str) -> None: ...
     def isModifiable(self, key: str) -> bool: ...
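
A small sketch (not part of this commit) of the corrected contract: set() is
a pure side effect at runtime and returns nothing, so the old str return
annotation was wrong.

    from pyspark.sql import SparkSession

    spark = SparkSession.builder.master("local[1]").getOrCreate()

    # set() mutates the session configuration and returns None.
    result = spark.conf.set("spark.sql.shuffle.partitions", "8")
    assert result is None
    assert spark.conf.get("spark.sql.shuffle.partitions") == "8"
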
diff --git a/python/pyspark/sql/context.pyi b/python/pyspark/sql/context.pyi
index 915a0fe..e8b61c8 100644
--- a/python/pyspark/sql/context.pyi
+++ b/python/pyspark/sql/context.pyi
@@ -15,6 +15,7 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
+from pyspark.sql._typing import UserDefinedFunctionLike
 
 from typing import overload
 from typing import Any, Callable, Iterable, List, Optional, Tuple, TypeVar, Union
@@ -63,7 +64,7 @@ class SQLContext:
     ) -> DataFrame: ...
     def registerFunction(
         self, name: str, f: Callable[..., Any], returnType: DataType = ...
-    ) -> None: ...
+    ) -> UserDefinedFunctionLike: ...
     def registerJavaFunction(
         self, name: str, javaClassName: str, returnType: Optional[DataType] = ...
     ) -> None: ...
diff --git a/python/pyspark/sql/dataframe.py b/python/pyspark/sql/dataframe.py
index 6313474..3085092 100644
--- a/python/pyspark/sql/dataframe.py
+++ b/python/pyspark/sql/dataframe.py
@@ -586,7 +586,7 @@ class DataFrame(PandasMapOpsMixin, PandasConversionMixin):
 
         Parameters
         ----------
-        eventTime : str or :class:`Column`
+        eventTime : str
             the name of the column that contains the event time of the row.
         delayThreshold : str
             the minimum delay to wait to data to arrive late, relative to the
diff --git a/python/pyspark/sql/dataframe.pyi b/python/pyspark/sql/dataframe.pyi
index 1351c59..af1bac6 100644
--- a/python/pyspark/sql/dataframe.pyi
+++ b/python/pyspark/sql/dataframe.pyi
@@ -85,7 +85,7 @@ class DataFrame(PandasMapOpsMixin, PandasConversionMixin):
     def checkpoint(self, eager: bool = ...) -> DataFrame: ...
     def localCheckpoint(self, eager: bool = ...) -> DataFrame: ...
     def withWatermark(
-        self, eventTime: ColumnOrName, delayThreshold: str
+        self, eventTime: str, delayThreshold: str
     ) -> DataFrame: ...
     def hint(self, name: str, *parameters: Union[PrimitiveType, List[PrimitiveType]]) -> DataFrame: ...
     def count(self) -> int: ...
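
To illustrate the narrowed parameter (a sketch, not part of this commit): at
runtime eventTime must be a column name string, and passing a Column raises a
TypeError, which is why both the docstring above and the stub now say str only.

    from pyspark.sql import SparkSession
    from pyspark.sql.functions import col

    spark = SparkSession.builder.master("local[1]").getOrCreate()

    # The built-in rate source provides a 'timestamp' event-time column.
    sdf = spark.readStream.format("rate").load()

    sdf.withWatermark("timestamp", "10 minutes")         # str: accepted
    try:
        sdf.withWatermark(col("timestamp"), "10 minutes")
    except TypeError as e:
        print(e)  # eventTime should be provided as a string
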
diff --git a/python/pyspark/sql/functions.pyi b/python/pyspark/sql/functions.pyi
index f1043a0..5fec6fd 100644
--- a/python/pyspark/sql/functions.pyi
+++ b/python/pyspark/sql/functions.pyi
@@ -56,7 +56,7 @@ def monotonically_increasing_id() -> Column: ...
 def nanvl(col1: ColumnOrName, col2: ColumnOrName) -> Column: ...
 def percentile_approx(
     col: ColumnOrName,
-    percentage: Union[Column, float, List[float]],
+    percentage: Union[Column, float, List[float], Tuple[float]],
     accuracy: Union[Column, float] = ...,
 ) -> Column: ...
 def rand(seed: Optional[int] = ...) -> Column: ...
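
Finally, a sketch (not part of this commit) of the widened percentage
parameter: the runtime implementation already accepts any list or tuple of
percentages, which the old annotation rejected for tuples.

    from pyspark.sql import SparkSession
    from pyspark.sql.functions import percentile_approx

    spark = SparkSession.builder.master("local[1]").getOrCreate()

    df = spark.range(100).selectExpr("id AS value")
    df.select(
        percentile_approx("value", [0.25, 0.5, 0.75]),  # list: already allowed
        percentile_approx("value", (0.25, 0.5, 0.75)),  # tuple: now type-checks
    ).show(truncate=False)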
