Posted to issues@spark.apache.org by "Sandeep Singh (Jira)" <ji...@apache.org> on 2023/01/05 17:36:00 UTC

[jira] [Updated] (SPARK-41899) DataFrame.createDataFrame converting int to bigint

     [ https://issues.apache.org/jira/browse/SPARK-41899?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

Sandeep Singh updated SPARK-41899:
----------------------------------
    Description: 
{code:python}
import datetime

from pyspark.sql import Row
from pyspark.sql.functions import date_add

dt = datetime.date(2021, 12, 27)

# Note: a plain Python int is inferred as a LongType column, which date_add
# does not accept, so the schema string casts the column to integer explicitly
df = self.spark.createDataFrame([Row(date=dt, add=2)], "date date, add integer")

self.assertTrue(
    all(
        df.select(
            date_add(df.date, df.add) == datetime.date(2021, 12, 29),
            date_add(df.date, "add") == datetime.date(2021, 12, 29),
            date_add(df.date, 3) == datetime.date(2021, 12, 30),
        ).first()
    )
){code}
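The schema string above declares "add" as integer, so the column should come back as IntegerType; the failure below suggests that Spark Connect's createDataFrame keeps the inferred LongType instead. For reference, a minimal schema check (a sketch against classic PySpark, assuming a plain SparkSession named spark rather than the test harness):
{code:python}
import datetime

from pyspark.sql import Row, SparkSession

spark = SparkSession.builder.getOrCreate()

df = spark.createDataFrame(
    [Row(date=datetime.date(2021, 12, 27), add=2)], "date date, add integer"
)

# Classic PySpark honours the DDL schema string:
df.printSchema()
# root
#  |-- date: date (nullable = true)
#  |-- add: integer (nullable = true)
#
# The plan in the traceback below instead resolves add#754L, i.e. BIGINT.
{code}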
{code:python}
Traceback (most recent call last):
  File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/tests/test_functions.py", line 391, in test_date_add_function
    ).first()
  File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/dataframe.py", line 246, in first
    return self.head()
  File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/dataframe.py", line 310, in head
    rs = self.head(1)
  File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/dataframe.py", line 312, in head
    return self.take(n)
  File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/dataframe.py", line 317, in take
    return self.limit(num).collect()
  File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/dataframe.py", line 1076, in collect
    table = self._session.client.to_table(query)
  File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/client.py", line 414, in to_table
    table, _ = self._execute_and_fetch(req)
  File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/client.py", line 586, in _execute_and_fetch
    self._handle_error(rpc_error)
  File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/client.py", line 625, in _handle_error
    raise SparkConnectAnalysisException(
pyspark.sql.connect.client.SparkConnectAnalysisException: [DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE] Cannot resolve "date_add(date, add)" due to data type mismatch: Parameter 2 requires the ("INT" or "SMALLINT" or "TINYINT") type, however "add" has the type "BIGINT".
Plan: 'GlobalLimit 1
+- 'LocalLimit 1
   +- 'Project [unresolvedalias('`==`(date_add(date#753, add#754L), 2021-12-29), None), unresolvedalias('`==`(date_add(date#753, add#754L), 2021-12-29), None), (date_add(date#753, 3) = 2021-12-30) AS (date_add(date, 3) = DATE '2021-12-30')#759]
      +- Project [date#753, add#754L]
         +- Project [date#749 AS date#753, add#750L AS add#754L]
            +- LocalRelation [date#749, add#750L]{code}
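Until the conversion is fixed, one workaround (an untested sketch, reusing the test DataFrame from above) is to cast the mis-typed column back to INT before handing it to date_add:
{code:python}
from pyspark.sql.functions import date_add

# Cast the (incorrectly) BIGINT "add" column down to INT so that
# date_add's second parameter type-checks; this sidesteps the bug
# rather than fixing it.
df.select(date_add(df.date, df.add.cast("int"))).first()
{code}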

  was:
{code:python}
from py4j.protocol import Py4JJavaError

from pyspark.sql import Row
from pyspark.sql.functions import assert_true

df = self.spark.range(3)

self.assertEqual(
    df.select(assert_true(df.id < 3)).toDF("val").collect(),
    [Row(val=None), Row(val=None), Row(val=None)],
)

with self.assertRaises(Py4JJavaError) as cm:
    df.select(assert_true(df.id < 2, "too big")).toDF("val").collect(){code}
{code:python}
from pyspark.sql import functions as F
from pyspark.sql.window import Window

df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
w = Window.partitionBy("value").orderBy("key")

sel = df.select(
    df.value,
    df.key,
    F.max("key").over(w.rowsBetween(0, 1)),
    F.min("key").over(w.rowsBetween(0, 1)),
    F.count("key").over(w.rowsBetween(float("-inf"), float("inf"))),
    F.row_number().over(w),
    F.rank().over(w),
    F.dense_rank().over(w),
    F.ntile(2).over(w),
)
rs = sorted(sel.collect()){code}


> DataFrame.createDataFrame converting int to bigint
> --------------------------------------------------
>
>                 Key: SPARK-41899
>                 URL: https://issues.apache.org/jira/browse/SPARK-41899
>             Project: Spark
>          Issue Type: Sub-task
>          Components: Connect
>    Affects Versions: 3.4.0
>            Reporter: Sandeep Singh
>            Priority: Major
>


