Posted to reviews@spark.apache.org by GitBox <gi...@apache.org> on 2021/03/12 12:39:25 UTC

[GitHub] [spark] maropu commented on a change in pull request #31735: [SPARK-34600][PYTHON][SQL] Return User-defined types from Pandas UDF

maropu commented on a change in pull request #31735:
URL: https://github.com/apache/spark/pull/31735#discussion_r593134358



##########
File path: sql/core/src/main/scala/org/apache/spark/sql/execution/python/ArrowEvalPythonExec.scala
##########
@@ -89,9 +89,35 @@ case class ArrowEvalPythonExec(udfs: Seq[PythonUDF], resultAttrs: Seq[Attribute]
 
     columnarBatchIter.flatMap { batch =>
       val actualDataTypes = (0 until batch.numCols()).map(i => batch.column(i).dataType())
-      assert(outputTypes == actualDataTypes, "Invalid schema from pandas_udf: " +
-        s"expected ${outputTypes.mkString(", ")}, got ${actualDataTypes.mkString(", ")}")
+      assert(plainSchema(outputTypes) == actualDataTypes,
+        "Incompatible schema from pandas_udf: " +
+          s"expected ${outputTypes.mkString(", ")}, got ${actualDataTypes.mkString(", ")}")
       batch.rowIterator.asScala
     }
   }
+
+  private def plainSchema(schema: Seq[DataType]): Seq[DataType] =
+    schema.map(v => plainSchema(v, false)).toList
+
+  /** Erase User-Defined Types and returns the plain Spark StructType instead.
+   *

Review comment:
       nit:
   ```
     /** 
      * Erase User-Defined Types and returns the plain Spark StructType instead.
   ```

##########
File path: python/pyspark/sql/pandas/serializers.py
##########
@@ -155,12 +157,18 @@ def _create_batch(self, series):
         from pandas.api.types import is_categorical_dtype
         # Make input conform to [(series1, type1), (series2, type2), ...]
         if not isinstance(series, (list, tuple)) or \
-                (len(series) == 2 and isinstance(series[1], pa.DataType)):
+                (len(series) == 2 and isinstance(series[1], (pa.DataType, DataType))):

Review comment:
       I feel that handling the Arrow and Spark types together makes `_create_batch` more complicated. Couldn't we pull the logic for handling `UserDefinedType`s out of this method?
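       For illustration, one possible shape for such a refactoring (the helper name `_serialize_udt` is made up here, not the PR's code):
    ```python
    import pandas as pd
    from pyspark.sql.types import DataType, UserDefinedType

    def _serialize_udt(s: pd.Series, dt: DataType):
        """Map a Series of UDT instances to their underlying sqlType values."""
        if isinstance(dt, UserDefinedType):
            return s.apply(dt.serialize), dt.sqlType()
        return s, dt
    ```
       `create_array` could then start with `s, dt = _serialize_udt(s, dt)` and stay unaware of UDTs.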

##########
File path: sql/core/src/main/scala/org/apache/spark/sql/execution/python/ArrowEvalPythonExec.scala
##########
@@ -89,9 +89,35 @@ case class ArrowEvalPythonExec(udfs: Seq[PythonUDF], resultAttrs: Seq[Attribute]
 
     columnarBatchIter.flatMap { batch =>
       val actualDataTypes = (0 until batch.numCols()).map(i => batch.column(i).dataType())
-      assert(outputTypes == actualDataTypes, "Invalid schema from pandas_udf: " +
-        s"expected ${outputTypes.mkString(", ")}, got ${actualDataTypes.mkString(", ")}")
+      assert(plainSchema(outputTypes) == actualDataTypes,
+        "Incompatible schema from pandas_udf: " +
+          s"expected ${outputTypes.mkString(", ")}, got ${actualDataTypes.mkString(", ")}")
       batch.rowIterator.asScala
     }
   }
+
+  private def plainSchema(schema: Seq[DataType]): Seq[DataType] =
+    schema.map(v => plainSchema(v, false)).toList
+
+  /** Erase User-Defined Types and returns the plain Spark StructType instead.
+   *
+   * @note
+   * PyArrow returns `ArrayType` with `containsNull=true`

Review comment:
       What does this mean? What happens if the return type is `@pandas_udf(ArrayType(xxx, False))`?
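       For reference, a minimal check of what PyArrow infers for list data (not the PR's code); the element field seems to come back nullable by default:
    ```python
    import pandas as pd
    import pyarrow as pa

    # PyArrow infers the element field of a list array as nullable,
    # even when the input contains no nulls.
    arr = pa.Array.from_pandas(pd.Series([[1, 2], [3, 4]]))
    print(arr.type)                       # list<item: int64>
    print(arr.type.value_field.nullable)  # True
    ```
       If that's the case, a strict equality check against a declared `ArrayType(xxx, False)` could never pass, which may be what this note is getting at; it would be good to spell that out in the doc.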

##########
File path: python/pyspark/sql/pandas/serializers.py
##########
@@ -155,12 +157,18 @@ def _create_batch(self, series):
         from pandas.api.types import is_categorical_dtype
         # Make input conform to [(series1, type1), (series2, type2), ...]
         if not isinstance(series, (list, tuple)) or \
-                (len(series) == 2 and isinstance(series[1], pa.DataType)):
+                (len(series) == 2 and isinstance(series[1], (pa.DataType, DataType))):
             series = [series]
         series = ((s, None) if not isinstance(s, (list, tuple)) else s for s in series)
 
-        def create_array(s, t):
+        def create_array(s, dt: DataType, t: pa.DataType):
             mask = s.isnull()
+            if isinstance(dt, UserDefinedType):
+                s = s.apply(dt.serialize)

Review comment:
       Does this work correctly when `UserDefinedType.sqlType` contains a timestamp or dict (map) type? Could you add some tests for those types, too?
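       For example, such a test could use a UDT along these lines (the classes here are hypothetical, just to sketch the shape):
    ```python
    from pyspark.sql.types import (MapType, StringType, TimestampType,
                                   UserDefinedType)

    class DictUDT(UserDefinedType):
        """Hypothetical UDT whose sqlType is a map of string -> timestamp."""

        @classmethod
        def sqlType(cls):
            return MapType(StringType(), TimestampType())

        @classmethod
        def module(cls):
            return "pyspark.sql.tests"  # placeholder module path

        def serialize(self, obj):
            return obj.as_dict()  # assumes the wrapped object exposes a dict

        def deserialize(self, datum):
            return DictValue(datum)  # DictValue is a made-up wrapper class
    ```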

##########
File path: python/pyspark/sql/tests/test_pandas_udf_scalar.py
##########
@@ -1109,6 +1109,102 @@ def f3i(it):
 
             self.assertEqual(expected, df1.collect())
 
+    # SPARK-34600
+    def test_user_defined_types_with_udf(self):
+        """PandasUDF returns single UDT out.
+        """
+
+        # ExamplePointUDT uses ArrayType to present its sqlType.
+        @pandas_udf(ExamplePointUDT())
+        def create_vector(series: pd.Series) -> pd.Series:
+            vectors = []
+            for _, item in series.items():
+                vectors.append(ExamplePoint(item, item + 1))
+            return pd.Series(vectors)
+
+        # ExampleBoxUDT uses StructType to present its sqlType.
+        @pandas_udf(ExampleBoxUDT())
+        def create_boxes(series: pd.Series) -> pd.Series:
+            boxes = []
+            for _, item in series.items():
+                boxes.append(ExampleBox(item, item + 1, item + 2, item + 3))
+            return pd.Series(boxes)
+
+        df = self.spark.range(2)
+        df = (
+            df
+            .withColumn("vector", create_vector(col("id")))
+            .withColumn("box", create_boxes(col("id")))
+        )
+        df.show()
+        self.assertEqual([
+            Row(id=0, vector=ExamplePoint(0, 1), box=ExampleBox(0, 1, 2, 3)),
+            Row(id=1, vector=ExamplePoint(1, 2), box=ExampleBox(1, 2, 3, 4))
+        ], df.collect())
+
+    # SPARK-34600
+    def test_user_defined_types_in_struct(self):
+        @pandas_udf(StructType([
+            StructField("vec", ArrayType(ExamplePointUDT())),
+            StructField("box", ArrayType(ExampleBoxUDT()))
+        ]))
+        def array_of_udt_structs(series: pd.Series) -> pd.DataFrame:
+            vectors = []
+            for _, i in series.items():
+                vectors.append({
+                    "vec": [ExamplePoint(i, i), ExamplePoint(i + 1, i + 1)],
+                    "box": [ExampleBox(*([i] * 4)), ExampleBox(*([i+1] * 4))],
+                })
+            return pd.DataFrame(vectors)
+
+        df = self.spark.range(1, 3)
+        df = df.withColumn("nested", array_of_udt_structs(df.id))
+        df.show()
+        self.assertEqual([
+            Row(id=1, nested=Row(
+                vec=[ExamplePoint(1, 1), ExamplePoint(2, 2)],
+                box=[ExampleBox(1, 1, 1, 1), ExampleBox(2, 2, 2, 2)])),
+            Row(id=2, nested=Row(
+                vec=[ExamplePoint(2, 2), ExamplePoint(3, 3)],
+                box=[ExampleBox(2, 2, 2, 2), ExampleBox(3, 3, 3, 3)]))
+        ], df.collect())
+
+    # SPARK-34600
+    def test_user_defined_types_in_array(self):

Review comment:
       Could you add tests for negative cases, e.g., where `UserDefinedType.sqlType` contains unsupported types?
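       For instance, something along these lines (`BrokenUDT` is a hypothetical UDT whose `sqlType` is not Arrow-convertible):
    ```python
        # SPARK-34600
        def test_udt_with_unsupported_sql_type(self):
            # BrokenUDT stands in for a UDT whose sqlType cannot be converted
            # to Arrow; the error may surface at definition or execution time.
            with self.assertRaises(Exception):
                @pandas_udf(BrokenUDT())
                def to_udt(series: pd.Series) -> pd.Series:
                    return series

                self.spark.range(2).select(to_udt(col("id"))).collect()
    ```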



