Posted to reviews@spark.apache.org by GitBox <gi...@apache.org> on 2022/12/26 23:25:02 UTC

[GitHub] [spark] HyukjinKwon commented on a diff in pull request #39180: [SPARK-41649][CONNECT] Deduplicate docstrings in pyspark.sql.connect.window

HyukjinKwon commented on code in PR #39180:
URL: https://github.com/apache/spark/pull/39180#discussion_r1057361102


##########
python/pyspark/sql/connect/window.py:
##########
@@ -306,263 +217,27 @@ class Window:
 
     @staticmethod
     def partitionBy(*cols: Union["ColumnOrName", List["ColumnOrName"]]) -> "WindowSpec":
-        """
-        Creates a :class:`WindowSpec` with the partitioning defined.
-
-        .. versionadded:: 3.4.0
-
-        Parameters
-        ----------
-        cols : str, :class:`Column` or list
-            names of columns or expressions
-
-        Returns
-        -------
-        :class: `WindowSpec`
-            A :class:`WindowSpec` with the partitioning defined.
-
-        Examples
-        --------
-        >>> from pyspark.sql import Window
-        >>> from pyspark.sql.functions import row_number
-        >>> df = spark.createDataFrame(
-        ...      [(1, "a"), (1, "a"), (2, "a"), (1, "b"), (2, "b"), (3, "b")], ["id", "category"])
-        >>> df.show()
-        +---+--------+
-        | id|category|
-        +---+--------+
-        |  1|       a|
-        |  1|       a|
-        |  2|       a|
-        |  1|       b|
-        |  2|       b|
-        |  3|       b|
-        +---+--------+
-
-        Show row number order by ``id`` in partition ``category``.
-
-        >>> window = Window.partitionBy("category").orderBy("id")
-        >>> df.withColumn("row_number", row_number().over(window)).show()
-        +---+--------+----------+
-        | id|category|row_number|
-        +---+--------+----------+
-        |  1|       a|         1|
-        |  1|       a|         2|
-        |  2|       a|         3|
-        |  1|       b|         1|
-        |  2|       b|         2|
-        |  3|       b|         3|
-        +---+--------+----------+
-        """
-
         return Window._spec.partitionBy(*cols)
 
+    partitionBy.__doc__ = PySparkWindow.partitionBy.__doc__
+
     @staticmethod
     def orderBy(*cols: Union["ColumnOrName", List["ColumnOrName"]]) -> "WindowSpec":
-        """
-        Creates a :class:`WindowSpec` with the ordering defined.
-
-        .. versionadded:: 3.4.0
-
-        Parameters
-        ----------
-        cols : str, :class:`Column` or list
-            names of columns or expressions
-
-        Returns
-        -------
-        :class: `WindowSpec`
-            A :class:`WindowSpec` with the ordering defined.
-
-        Examples
-        --------
-        >>> from pyspark.sql import Window
-        >>> from pyspark.sql.functions import row_number
-        >>> df = spark.createDataFrame(
-        ...      [(1, "a"), (1, "a"), (2, "a"), (1, "b"), (2, "b"), (3, "b")], ["id", "category"])
-        >>> df.show()
-        +---+--------+
-        | id|category|
-        +---+--------+
-        |  1|       a|
-        |  1|       a|
-        |  2|       a|
-        |  1|       b|
-        |  2|       b|
-        |  3|       b|
-        +---+--------+
-
-        Show row number order by ``category`` in partition ``id``.
-
-        >>> window = Window.partitionBy("id").orderBy("category")
-        >>> df.withColumn("row_number", row_number().over(window)).show()
-        +---+--------+----------+
-        | id|category|row_number|
-        +---+--------+----------+
-        |  1|       a|         1|
-        |  1|       a|         2|
-        |  1|       b|         3|
-        |  2|       a|         1|
-        |  2|       b|         2|
-        |  3|       b|         1|
-        +---+--------+----------+
-        """
-
         return Window._spec.orderBy(*cols)
 
+    orderBy.__doc__ = PySparkWindow.orderBy.__doc__
+
     @staticmethod
     def rowsBetween(start: int, end: int) -> "WindowSpec":
-        """
-        Creates a :class:`WindowSpec` with the frame boundaries defined,
-        from `start` (inclusive) to `end` (inclusive).
-
-        Both `start` and `end` are relative positions from the current row.
-        For example, "0" means "current row", while "-1" means the row before
-        the current row, and "5" means the fifth row after the current row.
-
-        We recommend users use ``Window.unboundedPreceding``, ``Window.unboundedFollowing``,
-        and ``Window.currentRow`` to specify special boundary values, rather than using integral
-        values directly.
-
-        A row based boundary is based on the position of the row within the partition.
-        An offset indicates the number of rows above or below the current row, the frame for the
-        current row starts or ends. For instance, given a row based sliding frame with a lower bound
-        offset of -1 and a upper bound offset of +2. The frame for row with index 5 would range from
-        index 4 to index 7.
-
-        .. versionadded:: 3.4.0
-
-        Parameters
-        ----------
-        start : int
-            boundary start, inclusive.
-            The frame is unbounded if this is ``Window.unboundedPreceding``, or
-            any value less than or equal to -9223372036854775808.
-        end : int
-            boundary end, inclusive.
-            The frame is unbounded if this is ``Window.unboundedFollowing``, or
-            any value greater than or equal to 9223372036854775807.
-
-        Returns
-        -------
-        :class: `WindowSpec`
-            A :class:`WindowSpec` with the frame boundaries defined,
-            from `start` (inclusive) to `end` (inclusive).
-
-        Examples
-        --------
-        >>> from pyspark.sql import Window
-        >>> from pyspark.sql import functions as func
-        >>> df = spark.createDataFrame(
-        ...      [(1, "a"), (1, "a"), (2, "a"), (1, "b"), (2, "b"), (3, "b")], ["id", "category"])
-        >>> df.show()
-        +---+--------+
-        | id|category|
-        +---+--------+
-        |  1|       a|
-        |  1|       a|
-        |  2|       a|
-        |  1|       b|
-        |  2|       b|
-        |  3|       b|
-        +---+--------+
-
-        Calculate sum of ``id`` in the range from currentRow to currentRow + 1
-        in partition ``category``
-
-        >>> window = Window.partitionBy("category").orderBy("id").rowsBetween(Window.currentRow, 1)
-        >>> df.withColumn("sum", func.sum("id").over(window)).sort("id", "category", "sum").show()
-        +---+--------+---+
-        | id|category|sum|
-        +---+--------+---+
-        |  1|       a|  2|
-        |  1|       a|  3|
-        |  1|       b|  3|
-        |  2|       a|  2|
-        |  2|       b|  5|
-        |  3|       b|  3|
-        +---+--------+---+
-
-        """
-
         return Window._spec.rowsBetween(start, end)
 
+    rowsBetween.__doc__ = PySparkWindow.rowsBetween.__doc__
+
     @staticmethod
     def rangeBetween(start: int, end: int) -> "WindowSpec":
-        """
-        Creates a :class:`WindowSpec` with the frame boundaries defined,
-        from `start` (inclusive) to `end` (inclusive).
-
-        Both `start` and `end` are relative from the current row. For example,
-        "0" means "current row", while "-1" means one off before the current row,
-        and "5" means the five off after the current row.
-
-        We recommend users use ``Window.unboundedPreceding``, ``Window.unboundedFollowing``,
-        and ``Window.currentRow`` to specify special boundary values, rather than using integral
-        values directly.
-
-        A range-based boundary is based on the actual value of the ORDER BY
-        expression(s). An offset is used to alter the value of the ORDER BY expression, for
-        instance if the current ORDER BY expression has a value of 10 and the lower bound offset
-        is -3, the resulting lower bound for the current row will be 10 - 3 = 7. This however puts a
-        number of constraints on the ORDER BY expressions: there can be only one expression and this
-        expression must have a numerical data type. An exception can be made when the offset is
-        unbounded, because no value modification is needed, in this case multiple and non-numeric
-        ORDER BY expression are allowed.
-
-        .. versionadded:: 3.4.0
-
-        Parameters
-        ----------
-        start : int
-            boundary start, inclusive.
-            The frame is unbounded if this is ``Window.unboundedPreceding``, or
-            any value less than or equal to max(-sys.maxsize, -9223372036854775808).
-        end : int
-            boundary end, inclusive.
-            The frame is unbounded if this is ``Window.unboundedFollowing``, or
-            any value greater than or equal to min(sys.maxsize, 9223372036854775807).
-
-        Returns
-        -------
-        :class: `WindowSpec`
-            A :class:`WindowSpec` with the frame boundaries defined,
-            from `start` (inclusive) to `end` (inclusive).
-
-        Examples
-        --------
-        >>> from pyspark.sql import Window
-        >>> from pyspark.sql import functions as func
-        >>> df = spark.createDataFrame(
-        ...      [(1, "a"), (1, "a"), (2, "a"), (1, "b"), (2, "b"), (3, "b")], ["id", "category"])
-        >>> df.show()
-        +---+--------+
-        | id|category|
-        +---+--------+
-        |  1|       a|
-        |  1|       a|
-        |  2|       a|
-        |  1|       b|
-        |  2|       b|
-        |  3|       b|
-        +---+--------+
-
-        Calculate sum of ``id`` in the range from ``id`` of currentRow to ``id`` of currentRow + 1
-        in partition ``category``
-
-        >>> window = Window.partitionBy("category").orderBy("id").rangeBetween(Window.currentRow, 1)
-        >>> df.withColumn("sum", func.sum("id").over(window)).sort("id", "category").show()
-        +---+--------+---+
-        | id|category|sum|
-        +---+--------+---+
-        |  1|       a|  4|
-        |  1|       a|  4|
-        |  1|       b|  3|
-        |  2|       a|  2|
-        |  2|       b|  5|
-        |  3|       b|  3|
-        +---+--------+---+
-
-        """
-
         return Window._spec.rangeBetween(start, end)
+
+    rangeBetween.__doc__ = PySparkWindow.rangeBetween.__doc__
+
+
+Window.__doc__ = Window.__doc__

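For context, the added lines above replace each copied docstring with an assignment that reuses the docstring of the classic pyspark.sql.window.Window. A minimal sketch of the idea, assuming only that a PySpark installation is available; partition_by below is a stand-in, not the actual Connect implementation:

    from pyspark.sql.window import Window as PySparkWindow

    def partition_by(*cols):
        # Stand-in for the Connect-side implementation, which in the real
        # module delegates to Window._spec.partitionBy(*cols).
        ...

    # Reuse the classic docstring instead of keeping a duplicated copy, so
    # help(partition_by) shows the same text as help(PySparkWindow.partitionBy).
    partition_by.__doc__ = PySparkWindow.partitionBy.__doc__

    assert partition_by.__doc__ == PySparkWindow.partitionBy.__doc__

In the PR itself the same assignment is applied to the staticmethods inside the Connect Window class body, e.g. partitionBy.__doc__ = PySparkWindow.partitionBy.__doc__.
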
Review Comment:
   Yes :-). Mind creating a followup?
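
  For reference, the last added line of the hunk above, Window.__doc__ = Window.__doc__, reads as a self-assignment. Assuming that is what the follow-up refers to (the earlier comment is not quoted in this message), the change would presumably mirror the per-method assignments and copy the class-level docstring from the classic class instead, roughly:

    # Hypothetical follow-up sketch; assumes PySpark 3.4+ with the Spark
    # Connect dependencies installed.
    from pyspark.sql.window import Window as PySparkWindow
    from pyspark.sql.connect.window import Window

    # Copy the class-level docstring from the classic Window class
    # rather than assigning it to itself.
    Window.__doc__ = PySparkWindow.__doc__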



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: reviews-unsubscribe@spark.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org

