You are viewing a plain text version of this content. The canonical link for it is here.
Posted to reviews@spark.apache.org by "allisonwang-db (via GitHub)" <gi...@apache.org> on 2023/07/25 16:53:00 UTC

[GitHub] [spark] allisonwang-db commented on a diff in pull request #42135: [SPARK-44533][PYTHON] Add support for accumulator, broadcast, and Spark files in Python UDTF's analyze

allisonwang-db commented on code in PR #42135:
URL: https://github.com/apache/spark/pull/42135#discussion_r1273829387


##########
python/pyspark/sql/tests/test_udtf.py:
##########
@@ -1214,6 +1217,153 @@ def eval(self, **kwargs):
         ):
             self.spark.sql("SELECT * FROM test_udtf(1, 'x')").collect()
 
+    def test_udtf_with_analyze_using_broadcast(self):
+        colname = self.sc.broadcast("col1")
+
+        @udtf
+        class TestUDTF:
+            @staticmethod
+            def analyze(a: AnalyzeArgument) -> AnalyzeResult:
+                return AnalyzeResult(StructType().add(colname.value, a.data_type))
+
+            def eval(self, a):
+                yield a,
+
+        df = TestUDTF(lit(10))
+        assertSchemaEqual(df.schema, StructType().add("col1", IntegerType()))
+        assertDataFrameEqual(df, [Row(col1=10)])
+
+    def test_udtf_with_analyze_using_accumulator(self):
+        test_accum = self.sc.accumulator(0)
+
+        @udtf
+        class TestUDTF:
+            @staticmethod
+            def analyze(a: AnalyzeArgument) -> AnalyzeResult:
+                test_accum.add(1)
+                return AnalyzeResult(StructType().add("col1", a.data_type))
+
+            def eval(self, a):
+                test_accum.add(1)
+                yield a,
+
+        df = TestUDTF(lit(10))
+        assertSchemaEqual(df.schema, StructType().add("col1", IntegerType()))
+        assertDataFrameEqual(df, [Row(col1=10)])
+        self.assertEqual(test_accum.value, 2)
+
+    def _add_pyfile(self, path):
+        self.sc.addPyFile(path)
+
+    def test_udtf_with_analyze_using_pyfile(self):

Review Comment:
   This is really cool. Can we add or modify this test case to use the pyfile in 1) `eval` and 2) `terminate`?



##########
python/pyspark/sql/tests/test_udtf.py:
##########
@@ -1214,6 +1217,153 @@ def eval(self, **kwargs):
         ):
             self.spark.sql("SELECT * FROM test_udtf(1, 'x')").collect()
 
+    def test_udtf_with_analyze_using_broadcast(self):
+        colname = self.sc.broadcast("col1")
+
+        @udtf
+        class TestUDTF:
+            @staticmethod
+            def analyze(a: AnalyzeArgument) -> AnalyzeResult:
+                return AnalyzeResult(StructType().add(colname.value, a.data_type))
+
+            def eval(self, a):
+                yield a,
+
+        df = TestUDTF(lit(10))
+        assertSchemaEqual(df.schema, StructType().add("col1", IntegerType()))
+        assertDataFrameEqual(df, [Row(col1=10)])
+
+    def test_udtf_with_analyze_using_accumulator(self):
+        test_accum = self.sc.accumulator(0)
+
+        @udtf
+        class TestUDTF:
+            @staticmethod
+            def analyze(a: AnalyzeArgument) -> AnalyzeResult:
+                test_accum.add(1)
+                return AnalyzeResult(StructType().add("col1", a.data_type))
+
+            def eval(self, a):
+                test_accum.add(1)
+                yield a,
+
+        df = TestUDTF(lit(10))

Review Comment:
   Just curious, does the accumulator work if we use the UDTF in a SQL query?



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: reviews-unsubscribe@spark.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


---------------------------------------------------------------------
To unsubscribe, e-mail: reviews-unsubscribe@spark.apache.org
For additional commands, e-mail: reviews-help@spark.apache.org