Posted to issues@spark.apache.org by "Sandeep Singh (Jira)" <ji...@apache.org> on 2023/01/04 02:53:00 UTC

[jira] [Updated] (SPARK-41869) DataFrame dropDuplicates should throw error on non list argument

     [ https://issues.apache.org/jira/browse/SPARK-41869?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

Sandeep Singh updated SPARK-41869:
----------------------------------
    Description: 
{code:java}
df = self.spark.createDataFrame([("Alice", 50), ("Alice", 60)], ["name", "age"])

# the two rows differ in age, so neither should be dropped
self.assertEqual(df.dropDuplicates().count(), 2)

self.assertEqual(df.dropDuplicates(["name"]).count(), 1)

self.assertEqual(df.dropDuplicates(["name", "age"]).count(), 2)

type_error_msg = "Parameter 'subset' must be a list of columns"
with self.assertRaisesRegex(TypeError, type_error_msg):
    df.dropDuplicates("name"){code}
{code:java}
Traceback (most recent call last):
  File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/tests/test_dataframe.py", line 128, in test_drop_duplicates
    with self.assertRaisesRegex(TypeError, type_error_msg):
AssertionError: TypeError not raised{code}
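
For comparison, classic PySpark rejects a bare string here before building any plan, which is exactly what the test above expects. A minimal sketch of the missing guard on the Connect side, mirroring that check (the standalone helper name _validate_subset is hypothetical, not the actual API):
{code:java}
from collections.abc import Iterable
from typing import List, Optional


def _validate_subset(subset: Optional[List[str]]) -> None:
    # A str is itself Iterable, so it has to be excluded explicitly; without
    # this, dropDuplicates("name") is silently accepted and treated as an
    # iterable of single-character column names instead of raising.
    if subset is not None and (not isinstance(subset, Iterable) or isinstance(subset, str)):
        raise TypeError("Parameter 'subset' must be a list of columns")
{code}
With a guard like this wired into the Connect DataFrame.dropDuplicates, df.dropDuplicates("name") raises the TypeError the assertion looks for.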

  was:
{code:java}
File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/functions.py", line 1270, in pyspark.sql.connect.functions.explode
Failed example:
    eDF.select(explode(eDF.mapfield).alias("key", "value")).show()
Exception raised:
    Traceback (most recent call last):
      File "/usr/local/Cellar/python@3.10/3.10.8/Frameworks/Python.framework/Versions/3.10/lib/python3.10/doctest.py", line 1350, in __run
        exec(compile(example.source, filename, "single",
      File "<doctest pyspark.sql.connect.functions.explode[3]>", line 1, in <module>
        eDF.select(explode(eDF.mapfield).alias("key", "value")).show()
      File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/dataframe.py", line 534, in show
        print(self._show_string(n, truncate, vertical))
      File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/dataframe.py", line 423, in _show_string
        ).toPandas()
      File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/dataframe.py", line 1031, in toPandas
        return self._session.client.to_pandas(query)
      File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/client.py", line 413, in to_pandas
        return self._execute_and_fetch(req)
      File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/client.py", line 573, in _execute_and_fetch
        self._handle_error(rpc_error)
      File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/client.py", line 619, in _handle_error
        raise SparkConnectAnalysisException(
    pyspark.sql.connect.client.SparkConnectAnalysisException: [INVALID_COLUMN_OR_FIELD_DATA_TYPE] Column or field `mapfield` is of type "STRUCT<a: STRING>" while it's required to be "MAP<STRING, STRING>".
    Plan:  {code}
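
The explode call itself is not at fault here: the doctest builds eDF from a Python dict, and the Connect client's schema inference turns that dict into a struct. A reproduction sketch, assuming the standard doctest setup from pyspark.sql.functions.explode and an illustrative Connect endpoint:
{code:java}
from pyspark.sql import Row, SparkSession

# Spark Connect session; the endpoint URL is illustrative.
spark = SparkSession.builder.remote("sc://localhost").getOrCreate()

# Same data as the explode doctest: `mapfield` should be MAP<STRING, STRING>,
# but the error above shows it arriving as STRUCT<a: STRING>.
eDF = spark.createDataFrame([Row(a=1, intlist=[1, 2, 3], mapfield={"a": "b"})])
eDF.printSchema()  # compare the type of `mapfield` against classic PySpark
{code}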
{code:java}
File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/functions.py", line 1364, in pyspark.sql.connect.functions.inline
Failed example:
    df.select(inline(df.structlist)).show()
Exception raised:
    Traceback (most recent call last):
      File "/usr/local/Cellar/python@3.10/3.10.8/Frameworks/Python.framework/Versions/3.10/lib/python3.10/doctest.py", line 1350, in __run
        exec(compile(example.source, filename, "single",
      File "<doctest pyspark.sql.connect.functions.inline[2]>", line 1, in <module>
        df.select(inline(df.structlist)).show()
      File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/dataframe.py", line 534, in show
        print(self._show_string(n, truncate, vertical))
      File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/dataframe.py", line 423, in _show_string
        ).toPandas()
      File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/dataframe.py", line 1031, in toPandas
        return self._session.client.to_pandas(query)
      File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/client.py", line 413, in to_pandas
        return self._execute_and_fetch(req)
      File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/client.py", line 573, in _execute_and_fetch
        self._handle_error(rpc_error)
      File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/client.py", line 619, in _handle_error
        raise SparkConnectAnalysisException(
    pyspark.sql.connect.client.SparkConnectAnalysisException: [INVALID_COLUMN_OR_FIELD_DATA_TYPE] Column or field `structlist`.`element` is of type "ARRAY<BIGINT>" while it's required to be "STRUCT<a: BIGINT, b: BIGINT>".
    Plan:  {code}
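
The inline failure follows the same pattern: the nested Rows in the doctest's list column lose their struct type during schema inference, so the element type arrives as ARRAY<BIGINT> rather than STRUCT<a: BIGINT, b: BIGINT>. A sketch under the same assumptions as above:
{code:java}
from pyspark.sql import Row, SparkSession

spark = SparkSession.builder.remote("sc://localhost").getOrCreate()  # illustrative endpoint

# Doctest data for inline; printing the schema shows where the element
# type diverges from what classic PySpark infers.
df = spark.createDataFrame([Row(structlist=[Row(a=1, b=2), Row(a=3, b=4)])])
df.printSchema()
{code}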
{code:java}
File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/functions.py", line 1411, in pyspark.sql.connect.functions.map_filter
Failed example:
    df.select(map_filter(
        "data", lambda _, v: v > 30.0).alias("data_filtered")
    ).show(truncate=False)
Exception raised:
    Traceback (most recent call last):
      File "/usr/local/Cellar/python@3.10/3.10.8/Frameworks/Python.framework/Versions/3.10/lib/python3.10/doctest.py", line 1350, in __run
        exec(compile(example.source, filename, "single",
      File "<doctest pyspark.sql.connect.functions.map_filter[1]>", line 1, in <module>
        df.select(map_filter(
      File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/dataframe.py", line 534, in show
        print(self._show_string(n, truncate, vertical))
      File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/dataframe.py", line 423, in _show_string
        ).toPandas()
      File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/dataframe.py", line 1031, in toPandas
        return self._session.client.to_pandas(query)
      File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/client.py", line 413, in to_pandas
        return self._execute_and_fetch(req)
      File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/client.py", line 573, in _execute_and_fetch
        self._handle_error(rpc_error)
      File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/client.py", line 619, in _handle_error
        raise SparkConnectAnalysisException(
    pyspark.sql.connect.client.SparkConnectAnalysisException: [DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE] Cannot resolve "map_filter(data, lambdafunction(`>`(y_9, 30.0), x_8, y_9))" due to data type mismatch: Parameter 1 requires the "MAP" type, however "data" has the type "STRUCT<bar: DOUBLE, baz: DOUBLE, foo: DOUBLE>".
    Plan: 'Project [map_filter(data#3499, lambdafunction('`>`(lambda 'y_9, 30.0), lambda 'x_8, lambda 'y_9, false)) AS data_filtered#3502]
    +- Project [0#3494L AS id#3498L, 1#3495 AS data#3499]
       +- LocalRelation [0#3494L, 1#3495]


**********************************************************************
File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/functions.py", line 1453, in pyspark.sql.connect.functions.map_zip_with
Failed example:
    df.select(map_zip_with(
        "base", "ratio", lambda k, v1, v2: round(v1 * v2, 2)).alias("updated_data")
    ).show(truncate=False)
Exception raised:
    Traceback (most recent call last):
      File "/usr/local/Cellar/python@3.10/3.10.8/Frameworks/Python.framework/Versions/3.10/lib/python3.10/doctest.py", line 1350, in __run
        exec(compile(example.source, filename, "single",
      File "<doctest pyspark.sql.connect.functions.map_zip_with[1]>", line 1, in <module>
        df.select(map_zip_with(
      File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/dataframe.py", line 534, in show
        print(self._show_string(n, truncate, vertical))
      File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/dataframe.py", line 423, in _show_string
        ).toPandas()
      File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/dataframe.py", line 1031, in toPandas
        return self._session.client.to_pandas(query)
      File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/client.py", line 413, in to_pandas
        return self._execute_and_fetch(req)
      File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/client.py", line 573, in _execute_and_fetch
        self._handle_error(rpc_error)
      File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/client.py", line 619, in _handle_error
        raise SparkConnectAnalysisException(
    pyspark.sql.connect.client.SparkConnectAnalysisException: [DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE] Cannot resolve "map_zip_with(base, ratio, lambdafunction(round(`*`(y_11, z_12), 2), x_10, y_11, z_12))" due to data type mismatch: Parameter 1 requires the "MAP" type, however "base" has the type "STRUCT<IT: DOUBLE, SALES: DOUBLE>".
    Plan: 'Project [map_zip_with(base#3573, ratio#3574, lambdafunction('round('`*`(lambda 'y_11, lambda 'z_12), 2), lambda 'x_10, lambda 'y_11, lambda 'z_12, false)) AS updated_data#3578]
    +- Project [0#3566L AS id#3572L, 1#3567 AS base#3573, 2#3568 AS ratio#3574]
       +- LocalRelation [0#3566L, 1#3567, 2#3568] {code}
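
Both of these failures reduce to the same root cause: the dict-typed columns in the doctest data are inferred as structs, and map_filter / map_zip_with require MAP inputs. A reproduction sketch with the doctest data, again assuming an illustrative Connect endpoint:
{code:java}
from pyspark.sql import SparkSession

spark = SparkSession.builder.remote("sc://localhost").getOrCreate()  # illustrative endpoint

# map_filter doctest data: `data` should be MAP<STRING, DOUBLE> but shows up
# as STRUCT<bar: DOUBLE, baz: DOUBLE, foo: DOUBLE> in the error above.
df1 = spark.createDataFrame([(1, {"foo": 42.0, "bar": 1.0, "baz": 32.0})], ("id", "data"))
df1.printSchema()

# map_zip_with doctest data: `base` and `ratio` hit the same struct inference.
df2 = spark.createDataFrame(
    [(1, {"IT": 24.0, "SALES": 12.00}, {"IT": 2.0, "SALES": 1.4})],
    ("id", "base", "ratio"),
)
df2.printSchema()
{code}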
{code:java}
File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/functions.py", line 1263, in pyspark.sql.connect.functions.element_at
Failed example:
    df.select(element_at(df.data, lit("a"))).collect()
Exception raised:
    Traceback (most recent call last):
      File "/usr/local/Cellar/python@3.10/3.10.8/Frameworks/Python.framework/Versions/3.10/lib/python3.10/doctest.py", line 1350, in __run
        exec(compile(example.source, filename, "single",
      File "<doctest pyspark.sql.connect.functions.element_at[4]>", line 1, in <module>
        df.select(element_at(df.data, lit("a"))).collect()
      File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/dataframe.py", line 1017, in collect
        pdf = self.toPandas()
      File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/dataframe.py", line 1031, in toPandas
        return self._session.client.to_pandas(query)
      File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/client.py", line 413, in to_pandas
        return self._execute_and_fetch(req)
      File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/client.py", line 573, in _execute_and_fetch
        self._handle_error(rpc_error)
      File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/client.py", line 619, in _handle_error
        raise SparkConnectAnalysisException(
    pyspark.sql.connect.client.SparkConnectAnalysisException: [DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE] Cannot resolve "element_at(data, a)" due to data type mismatch: Parameter 1 requires the ("ARRAY" or "MAP") type, however "data" has the type "STRUCT<a: DOUBLE, b: DOUBLE>".
    Plan: 'Project [unresolvedalias(element_at(data#2393, a, None, false), None)]
    +- Project [0#2391 AS data#2393]
       +- LocalRelation [0#2391] {code}
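
The element_at case is the same inference problem in one-column form: the dict is inferred as STRUCT<a: DOUBLE, b: DOUBLE>, which satisfies neither the ARRAY nor the MAP overload. A sketch with the doctest data:
{code:java}
from pyspark.sql import SparkSession

spark = SparkSession.builder.remote("sc://localhost").getOrCreate()  # illustrative endpoint

# element_at doctest data: `data` should be MAP<STRING, DOUBLE>.
df = spark.createDataFrame([({"a": 1.0, "b": 2.0},)], ["data"])
df.printSchema()
{code}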


> DataFrame dropDuplicates should throw error on non list argument
> ----------------------------------------------------------------
>
>                 Key: SPARK-41869
>                 URL: https://issues.apache.org/jira/browse/SPARK-41869
>             Project: Spark
>          Issue Type: Sub-task
>          Components: Connect
>    Affects Versions: 3.4.0
>            Reporter: Sandeep Singh
>            Priority: Major
>
> {code:java}
> df = self.spark.createDataFrame([("Alice", 50), ("Alice", 60)], ["name", "age"])
> # the two rows differ in age, so neither should be dropped
> self.assertEqual(df.dropDuplicates().count(), 2)
> self.assertEqual(df.dropDuplicates(["name"]).count(), 1)
> self.assertEqual(df.dropDuplicates(["name", "age"]).count(), 2)
> type_error_msg = "Parameter 'subset' must be a list of columns"
> with self.assertRaisesRegex(TypeError, type_error_msg):
>     df.dropDuplicates("name"){code}
> {code:java}
> Traceback (most recent call last):
>   File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/tests/test_dataframe.py", line 128, in test_drop_duplicates
>     with self.assertRaisesRegex(TypeError, type_error_msg):
> AssertionError: TypeError not raised{code}


