Posted to issues@spark.apache.org by "Sandeep Singh (Jira)" <ji...@apache.org> on 2023/01/05 17:57:00 UTC

[jira] [Updated] (SPARK-41906) Handle Function `rand()`

     [ https://issues.apache.org/jira/browse/SPARK-41906?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

Sandeep Singh updated SPARK-41906:
----------------------------------
    Description: 
{code:python}
# Excerpt from test_rand_functions in python/pyspark/sql/tests/test_functions.py;
# self.df is the test class fixture and self.assertEqual comes from unittest.
from pyspark.sql import functions

df = self.df

rnd = df.select("key", functions.rand()).collect()
for row in rnd:
    assert row[1] >= 0.0 and row[1] <= 1.0, "got: %s" % row[1]
rndn = df.select("key", functions.randn(5)).collect()
for row in rndn:
    assert row[1] >= -4.0 and row[1] <= 4.0, "got: %s" % row[1]

# If the specified seed is 0, we should use it.
# https://issues.apache.org/jira/browse/SPARK-9691
rnd1 = df.select("key", functions.rand(0)).collect()
rnd2 = df.select("key", functions.rand(0)).collect()
self.assertEqual(sorted(rnd1), sorted(rnd2))

rndn1 = df.select("key", functions.randn(0)).collect()
rndn2 = df.select("key", functions.randn(0)).collect()
self.assertEqual(sorted(rndn1), sorted(rndn2)){code}
{code:python}
Traceback (most recent call last):
  File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/tests/test_functions.py", line 299, in test_rand_functions
    rnd = df.select("key", functions.rand()).collect()
  File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/dataframe.py", line 2917, in select
    jdf = self._jdf.select(self._jcols(*cols))
  File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/dataframe.py", line 2537, in _jcols
    return self._jseq(cols, _to_java_column)
  File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/dataframe.py", line 2524, in _jseq
    return _to_seq(self.sparkSession._sc, cols, converter)
  File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/column.py", line 86, in _to_seq
    cols = [converter(c) for c in cols]
  File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/column.py", line 86, in <listcomp>
    cols = [converter(c) for c in cols]
  File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/column.py", line 65, in _to_java_column
    raise TypeError(
TypeError: Invalid argument, not a string or column: Column<'rand()'> of type <class 'pyspark.sql.connect.column.Column'>. For column literals, use 'lit', 'array', 'struct' or 'create_map' function.
{code}
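
For context on the failure: under the Spark Connect test harness, functions.rand() produces a pyspark.sql.connect.column.Column, but df here is a classic JVM-backed DataFrame whose _to_java_column converter only accepts strings or classic Columns, hence the TypeError. Below is a minimal sketch of the same assertions run against a pure Spark Connect session; the sc://localhost connection string and the range-based stand-in for self.df are illustrative assumptions, not the test's actual fixtures.
{code:python}
# Sketch only: assumes a reachable Spark Connect server at sc://localhost.
from pyspark.sql import SparkSession
from pyspark.sql import functions as F

spark = SparkSession.builder.remote("sc://localhost").getOrCreate()
df = spark.range(10).withColumnRenamed("id", "key")  # stand-in for self.df

# With a Connect session end to end, F.rand() resolves to a Connect Column,
# so select() accepts it; rand() draws uniformly from [0.0, 1.0).
for row in df.select("key", F.rand(0)).collect():
    assert 0.0 <= row[1] < 1.0, "got: %s" % row[1]
{code}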

  was:
{code:python}
# slice/lit/size come from pyspark.sql.functions; Row is needed for the
# expected rows below.
from pyspark.sql import Row
from pyspark.sql.functions import lit, size, slice

df = self.spark.createDataFrame(
    [
        (
            [1, 2, 3],
            2,
            2,
        ),
        (
            [4, 5],
            2,
            2,
        ),
    ],
    ["x", "index", "len"],
)

expected = [Row(sliced=[2, 3]), Row(sliced=[5])]
self.assertTrue(
    all(
        [
            df.select(slice(df.x, 2, 2).alias("sliced")).collect() == expected,
            df.select(slice(df.x, lit(2), lit(2)).alias("sliced")).collect() == expected,
            df.select(slice("x", "index", "len").alias("sliced")).collect() == expected,
        ]
    )
)

self.assertEqual(
    df.select(slice(df.x, size(df.x) - 1, lit(1)).alias("sliced")).collect(),
    [Row(sliced=[2]), Row(sliced=[4])],
)
self.assertEqual(
    df.select(slice(df.x, lit(1), size(df.x) - 1).alias("sliced")).collect(),
    [Row(sliced=[1, 2]), Row(sliced=[4])],
){code}
{code:python}
Traceback (most recent call last):
  File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/tests/test_functions.py", line 596, in test_slice
    df.select(slice("x", "index", "len").alias("sliced")).collect() == expected,
  File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/utils.py", line 332, in wrapped
    return getattr(functions, f.__name__)(*args, **kwargs)
  File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/functions.py", line 1525, in slice
    raise TypeError(f"start should be a Column or int, but got {type(start).__name__}")
TypeError: start should be a Column or int, but got str{code}
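
Note that the Connect version of slice() rejects plain column-name strings for start/length, which the classic API accepts. As a hedged workaround sketch (not a fix), passing explicit Column objects satisfies the type check; col comes from the standard functions module, and df/expected are the fixtures built in the snippet above:
{code:python}
from pyspark.sql.functions import col, slice

# Columns instead of name strings sidestep the
# "start should be a Column or int" type check.
result = df.select(slice(col("x"), col("index"), col("len")).alias("sliced")).collect()
assert result == expected
{code}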


> Handle Function `rand()`
> ------------------------
>
>                 Key: SPARK-41906
>                 URL: https://issues.apache.org/jira/browse/SPARK-41906
>             Project: Spark
>          Issue Type: Sub-task
>          Components: Connect
>    Affects Versions: 3.4.0
>            Reporter: Sandeep Singh
>            Priority: Major
>



--
This message was sent by Atlassian Jira
(v8.20.10#820010)
