Posted to reviews@spark.apache.org by "rehevkor5 (via GitHub)" <gi...@apache.org> on 2023/02/17 19:07:14 UTC

[GitHub] [spark] rehevkor5 commented on a diff in pull request #29591: [SPARK-32714][PYTHON] Initial pyspark-stubs port.

rehevkor5 commented on code in PR #29591:
URL: https://github.com/apache/spark/pull/29591#discussion_r1110230025


##########
python/pyspark/sql/dataframe.pyi:
##########
@@ -0,0 +1,324 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+from typing import overload
+from typing import (
+    Any,
+    Callable,
+    Dict,
+    Iterator,
+    List,
+    Optional,
+    Tuple,
+    Union,
+)
+
+from py4j.java_gateway import JavaObject  # type: ignore[import]
+
+from pyspark.sql._typing import ColumnOrName, LiteralType, OptionalPrimitiveType
+from pyspark.sql.types import (  # noqa: F401
+    StructType,
+    StructField,
+    StringType,
+    IntegerType,
+    Row,
+)  # noqa: F401
+from pyspark.sql.context import SQLContext
+from pyspark.sql.group import GroupedData
+from pyspark.sql.readwriter import DataFrameWriter, DataFrameWriterV2
+from pyspark.sql.streaming import DataStreamWriter
+from pyspark.sql.column import Column
+from pyspark.rdd import RDD
+from pyspark.storagelevel import StorageLevel
+
+from pyspark.sql.pandas.conversion import PandasConversionMixin
+from pyspark.sql.pandas.map_ops import PandasMapOpsMixin
+
+class DataFrame(PandasMapOpsMixin, PandasConversionMixin):
+    sql_ctx: SQLContext
+    is_cached: bool
+    def __init__(self, jdf: JavaObject, sql_ctx: SQLContext) -> None: ...
+    @property
+    def rdd(self) -> RDD[Row]: ...
+    @property
+    def na(self) -> DataFrameNaFunctions: ...
+    @property
+    def stat(self) -> DataFrameStatFunctions: ...
+    def toJSON(self, use_unicode: bool = ...) -> RDD[str]: ...
+    def registerTempTable(self, name: str) -> None: ...
+    def createTempView(self, name: str) -> None: ...
+    def createOrReplaceTempView(self, name: str) -> None: ...
+    def createGlobalTempView(self, name: str) -> None: ...
+    @property
+    def write(self) -> DataFrameWriter: ...
+    @property
+    def writeStream(self) -> DataStreamWriter: ...
+    @property
+    def schema(self) -> StructType: ...
+    def printSchema(self) -> None: ...
+    def explain(
+        self, extended: Optional[Union[bool, str]] = ..., mode: Optional[str] = ...
+    ) -> None: ...
+    def exceptAll(self, other: DataFrame) -> DataFrame: ...
+    def isLocal(self) -> bool: ...
+    @property
+    def isStreaming(self) -> bool: ...
+    def show(
+        self, n: int = ..., truncate: Union[bool, int] = ..., vertical: bool = ...
+    ) -> None: ...
+    def checkpoint(self, eager: bool = ...) -> DataFrame: ...
+    def localCheckpoint(self, eager: bool = ...) -> DataFrame: ...
+    def withWatermark(
+        self, eventTime: ColumnOrName, delayThreshold: str
+    ) -> DataFrame: ...
+    def hint(self, name: str, *parameters: Any) -> DataFrame: ...
+    def count(self) -> int: ...
+    def collect(self) -> List[Row]: ...
+    def toLocalIterator(self, prefetchPartitions: bool = ...) -> Iterator[Row]: ...
+    def limit(self, num: int) -> DataFrame: ...
+    def take(self, num: int) -> List[Row]: ...
+    def tail(self, num: int) -> List[Row]: ...
+    def foreach(self, f: Callable[[Row], None]) -> None: ...
+    def foreachPartition(self, f: Callable[[Iterator[Row]], None]) -> None: ...

Review Comment:
   Shouldn't this be `Iterable[Row]` instead of `Iterator[Row]`, to match https://github.com/apache/spark/pull/29591/files#diff-6349afe05d41878cc15995c96a14b011d6aef04b779e136f711eab989b71da6cR215 ?
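   For context on what each spelling means for callers, here is a minimal standalone sketch. It does not import pyspark; `str` stands in for `Row`, and the two wrapper functions are hypothetical stand-ins for the stubbed `foreachPartition` signature and for the `Iterable[Row]` spelling the linked line uses. The only point is the variance rule mypy applies to `Callable` argument types.

   ```python
   from typing import Callable, Iterable, Iterator


   def with_iterator_param(f: Callable[[Iterator[str]], None]) -> None:
       # Stand-in for the stub as written: promises to call f with an iterator.
       f(iter(["a", "b"]))


   def with_iterable_param(f: Callable[[Iterable[str]], None]) -> None:
       # Stand-in for the Iterable[...] spelling: promises only some iterable.
       f(iter(["a", "b"]))


   def takes_iterable(rows: Iterable[str]) -> None:
       for row in rows:
           print(row)


   def takes_iterator(rows: Iterator[str]) -> None:
       print(next(rows, None))


   # Callable is contravariant in its argument types, so:
   with_iterator_param(takes_iterator)    # accepted
   with_iterator_param(takes_iterable)    # accepted: Iterable is wider than Iterator
   with_iterable_param(takes_iterable)    # accepted
   # with_iterable_param(takes_iterator)  # rejected by mypy: Iterator is narrower than Iterable
   ```

   So the choice mainly affects which user callback annotations a type checker accepts; either way, keeping the stub consistent with `dataframe.py` seems preferable.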



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: reviews-unsubscribe@spark.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org

