You are viewing a plain text version of this content. The canonical link for it is here.
Posted to issues@spark.apache.org by "Sandeep Singh (Jira)" <ji...@apache.org> on 2023/01/04 15:07:00 UTC
[jira] [Updated] (SPARK-41884) DataFrame `toPandas` parity in return types
[ https://issues.apache.org/jira/browse/SPARK-41884?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]
Sandeep Singh updated SPARK-41884:
----------------------------------
Description:
{code:java}
import numpy as np
import pandas as pd
df = self.spark.createDataFrame(
[[[("a", 2, 3.0), ("a", 2, 3.0)]], [[("b", 5, 6.0), ("b", 5, 6.0)]]],
"array_struct_col Array<struct<col1:string, col2:long, col3:double>>",
)
for is_arrow_enabled in [True, False]:
with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": is_arrow_enabled}):
pdf = df.toPandas()
self.assertEqual(type(pdf), pd.DataFrame)
self.assertEqual(type(pdf["array_struct_col"]), pd.Series)
if is_arrow_enabled:
self.assertEqual(type(pdf["array_struct_col"][0]), np.ndarray)
else:
self.assertEqual(type(pdf["array_struct_col"][0]), list){code}
{code:java}
Traceback (most recent call last):
1415 File "/__w/spark/spark/python/pyspark/sql/tests/test_dataframe.py", line 1202, in test_to_pandas_for_array_of_struct
1416 df = self.spark.createDataFrame(
1417 File "/__w/spark/spark/python/pyspark/sql/connect/session.py", line 264, in createDataFrame
1418 table = pa.Table.from_pylist([dict(zip(_cols, list(item))) for item in _data])
1419 File "pyarrow/table.pxi", line 3700, in pyarrow.lib.Table.from_pylist
1420 File "pyarrow/table.pxi", line 5221, in pyarrow.lib._from_pylist
1421 File "pyarrow/table.pxi", line 3575, in pyarrow.lib.Table.from_arrays
1422 File "pyarrow/table.pxi", line 1383, in pyarrow.lib._sanitize_arrays
1423 File "pyarrow/table.pxi", line 1364, in pyarrow.lib._schema_from_arrays
1424 File "pyarrow/array.pxi", line 320, in pyarrow.lib.array
1425 File "pyarrow/array.pxi", line 39, in pyarrow.lib._sequence_to_array
1426 File "pyarrow/error.pxi", line 144, in pyarrow.lib.pyarrow_internal_check_status
1427 File "pyarrow/error.pxi", line 123, in pyarrow.lib.check_status
1428 pyarrow.lib.ArrowTypeError: Expected bytes, got a 'int' object{code}
{code:java}
import numpy as np
pdf = self._to_pandas()
types = pdf.dtypes
self.assertEqual(types[0], np.int32)
self.assertEqual(types[1], np.object)
self.assertEqual(types[2], np.bool)
self.assertEqual(types[3], np.float32)
self.assertEqual(types[4], np.object) # datetime.date
self.assertEqual(types[5], "datetime64[ns]")
self.assertEqual(types[6], "datetime64[ns]")
self.assertEqual(types[7], "timedelta64[ns]") {code}
{code:java}
Traceback (most recent call last):
1434 File "/__w/spark/spark/python/pyspark/sql/tests/test_dataframe.py", line 1039, in test_to_pandas
1435 self.assertEqual(types[5], "datetime64[ns]")
1436 AssertionError: datetime64[ns, Etc/UTC] != 'datetime64[ns]'
1437
{code}
was:
{code:java}
schema = StructType(
[StructField("i", StringType(), True), StructField("j", IntegerType(), True)]
)
df = self.spark.createDataFrame([("a", 1)], schema)
schema1 = StructType([StructField("j", StringType()), StructField("i", StringType())])
df1 = df.to(schema1)
self.assertEqual(schema1, df1.schema)
self.assertEqual(df.count(), df1.count())
schema2 = StructType([StructField("j", LongType())])
df2 = df.to(schema2)
self.assertEqual(schema2, df2.schema)
self.assertEqual(df.count(), df2.count())
schema3 = StructType([StructField("struct", schema1, False)])
df3 = df.select(struct("i", "j").alias("struct")).to(schema3)
self.assertEqual(schema3, df3.schema)
self.assertEqual(df.count(), df3.count())
# incompatible field nullability
schema4 = StructType([StructField("j", LongType(), False)])
self.assertRaisesRegex(
AnalysisException, "NULLABLE_COLUMN_OR_FIELD", lambda: df.to(schema4)
){code}
{code:java}
Traceback (most recent call last):
File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/tests/test_dataframe.py", line 1486, in test_to
self.assertRaisesRegex(
AssertionError: AnalysisException not raised by <lambda> {code}
> DataFrame `toPandas` parity in return types
> -------------------------------------------
>
> Key: SPARK-41884
> URL: https://issues.apache.org/jira/browse/SPARK-41884
> Project: Spark
> Issue Type: Sub-task
> Components: Connect
> Affects Versions: 3.4.0
> Reporter: Sandeep Singh
> Priority: Major
>
> {code:java}
> import numpy as np
> import pandas as pd
> df = self.spark.createDataFrame(
> [[[("a", 2, 3.0), ("a", 2, 3.0)]], [[("b", 5, 6.0), ("b", 5, 6.0)]]],
> "array_struct_col Array<struct<col1:string, col2:long, col3:double>>",
> )
> for is_arrow_enabled in [True, False]:
> with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": is_arrow_enabled}):
> pdf = df.toPandas()
> self.assertEqual(type(pdf), pd.DataFrame)
> self.assertEqual(type(pdf["array_struct_col"]), pd.Series)
> if is_arrow_enabled:
> self.assertEqual(type(pdf["array_struct_col"][0]), np.ndarray)
> else:
> self.assertEqual(type(pdf["array_struct_col"][0]), list){code}
> {code:java}
> Traceback (most recent call last):
> 1415 File "/__w/spark/spark/python/pyspark/sql/tests/test_dataframe.py", line 1202, in test_to_pandas_for_array_of_struct
> 1416 df = self.spark.createDataFrame(
> 1417 File "/__w/spark/spark/python/pyspark/sql/connect/session.py", line 264, in createDataFrame
> 1418 table = pa.Table.from_pylist([dict(zip(_cols, list(item))) for item in _data])
> 1419 File "pyarrow/table.pxi", line 3700, in pyarrow.lib.Table.from_pylist
> 1420 File "pyarrow/table.pxi", line 5221, in pyarrow.lib._from_pylist
> 1421 File "pyarrow/table.pxi", line 3575, in pyarrow.lib.Table.from_arrays
> 1422 File "pyarrow/table.pxi", line 1383, in pyarrow.lib._sanitize_arrays
> 1423 File "pyarrow/table.pxi", line 1364, in pyarrow.lib._schema_from_arrays
> 1424 File "pyarrow/array.pxi", line 320, in pyarrow.lib.array
> 1425 File "pyarrow/array.pxi", line 39, in pyarrow.lib._sequence_to_array
> 1426 File "pyarrow/error.pxi", line 144, in pyarrow.lib.pyarrow_internal_check_status
> 1427 File "pyarrow/error.pxi", line 123, in pyarrow.lib.check_status
> 1428 pyarrow.lib.ArrowTypeError: Expected bytes, got a 'int' object{code}
>
> {code:java}
> import numpy as np
> pdf = self._to_pandas()
> types = pdf.dtypes
> self.assertEqual(types[0], np.int32)
> self.assertEqual(types[1], np.object)
> self.assertEqual(types[2], np.bool)
> self.assertEqual(types[3], np.float32)
> self.assertEqual(types[4], np.object) # datetime.date
> self.assertEqual(types[5], "datetime64[ns]")
> self.assertEqual(types[6], "datetime64[ns]")
> self.assertEqual(types[7], "timedelta64[ns]") {code}
> {code:java}
> Traceback (most recent call last):
> 1434 File "/__w/spark/spark/python/pyspark/sql/tests/test_dataframe.py", line 1039, in test_to_pandas
> 1435 self.assertEqual(types[5], "datetime64[ns]")
> 1436 AssertionError: datetime64[ns, Etc/UTC] != 'datetime64[ns]'
> 1437
> {code}
--
This message was sent by Atlassian Jira
(v8.20.10#820010)
---------------------------------------------------------------------
To unsubscribe, e-mail: issues-unsubscribe@spark.apache.org
For additional commands, e-mail: issues-help@spark.apache.org