You are viewing a plain text version of this content. The canonical link for it is here.
Posted to reviews@spark.apache.org by GitBox <gi...@apache.org> on 2022/12/29 09:39:23 UTC

[GitHub] [spark] zhengruifeng commented on a diff in pull request #39262: [SPARK-41069][CONNECT][PYTHON] Implement `DataFrame.approxQuantile` and `DataFrame.stat.approxQuantile`

zhengruifeng commented on code in PR #39262:
URL: https://github.com/apache/spark/pull/39262#discussion_r1058843822


##########
connector/connect/common/src/main/protobuf/spark/connect/relations.proto:
##########
@@ -70,6 +70,7 @@ message Relation {
     StatCrosstab crosstab = 101;
     StatDescribe describe = 102;
     StatCov cov = 103;
+    StatApproxQuantile approx_quantile = 104;

Review Comment:
   let's merge https://github.com/apache/spark/pull/39236 first



##########
python/pyspark/sql/tests/connect/test_connect_basic.py:
##########
@@ -1013,6 +1013,42 @@ def test_stat_cov(self):
             self.spark.read.table(self.tbl_name2).stat.cov("col1", "col3"),
         )
 
+    def test_stat_approx_quantile(self):
+        # SPARK-41069: Test the stat.approxQuantile method
+        result = self.connect.read.table(self.tbl_name2).stat.approxQuantile(
+            ["col1", "col3"], [0.1, 0.5, 0.9], 0.1

Review Comment:
   please also add a case with a single column



##########
python/pyspark/sql/tests/connect/test_connect_basic.py:
##########
@@ -1013,6 +1013,42 @@ def test_stat_cov(self):
             self.spark.read.table(self.tbl_name2).stat.cov("col1", "col3"),
         )
 
+    def test_stat_approx_quantile(self):
+        # SPARK-41069: Test the stat.approxQuantile method
+        result = self.connect.read.table(self.tbl_name2).stat.approxQuantile(
+            ["col1", "col3"], [0.1, 0.5, 0.9], 0.1
+        )
+        self.assertEqual(len(result), 2)
+        self.assertEqual(len(result[0]), 3)
+        self.assertEqual(len(result[1]), 3)
+
+        with self.assertRaisesRegex(
+            TypeError, "col should be a string, list or tuple, but got <class 'int'>"
+        ):
+            self.connect.read.table(self.tbl_name2).stat.approxQuantile(1, [0.1, 0.5, 0.9], 0.1)
+        with self.assertRaisesRegex(TypeError, "columns should be strings, but got <class 'int'>"):
+            self.connect.read.table(self.tbl_name2).stat.approxQuantile([1], [0.1, 0.5, 0.9], 0.1)
+        with self.assertRaisesRegex(TypeError, "probabilities should be a list or tuple"):
+            self.connect.read.table(self.tbl_name2).stat.approxQuantile(["col1", "col3"], 0.1, 0.1)
+        with self.assertRaises(ValueError) as context:

Review Comment:
   why not use `with self.assertRaisesRegex(ValueError, "probabilities should be numerical")`?



##########
python/pyspark/sql/tests/connect/test_connect_basic.py:
##########
@@ -1013,6 +1013,42 @@ def test_stat_cov(self):
             self.spark.read.table(self.tbl_name2).stat.cov("col1", "col3"),
         )
 
+    def test_stat_approx_quantile(self):
+        # SPARK-41069: Test the stat.approxQuantile method
+        result = self.connect.read.table(self.tbl_name2).stat.approxQuantile(
+            ["col1", "col3"], [0.1, 0.5, 0.9], 0.1
+        )
+        self.assertEqual(len(result), 2)
+        self.assertEqual(len(result[0]), 3)
+        self.assertEqual(len(result[1]), 3)
+
+        with self.assertRaisesRegex(
+            TypeError, "col should be a string, list or tuple, but got <class 'int'>"
+        ):
+            self.connect.read.table(self.tbl_name2).stat.approxQuantile(1, [0.1, 0.5, 0.9], 0.1)
+        with self.assertRaisesRegex(TypeError, "columns should be strings, but got <class 'int'>"):
+            self.connect.read.table(self.tbl_name2).stat.approxQuantile([1], [0.1, 0.5, 0.9], 0.1)
+        with self.assertRaisesRegex(TypeError, "probabilities should be a list or tuple"):
+            self.connect.read.table(self.tbl_name2).stat.approxQuantile(["col1", "col3"], 0.1, 0.1)
+        with self.assertRaises(ValueError) as context:
+            self.connect.read.table(self.tbl_name2).stat.approxQuantile(
+                ["col1", "col3"], [-0.1], 0.1
+            )
+            self.assertTrue(
+                "probabilities should be numerical (float, int) in [0,1]" in str(context.exception)
+            )
+        with self.assertRaises(TypeError) as context:

Review Comment:
   ditto



##########
python/pyspark/sql/tests/connect/test_connect_basic.py:
##########
@@ -1013,6 +1013,42 @@ def test_stat_cov(self):
             self.spark.read.table(self.tbl_name2).stat.cov("col1", "col3"),
         )
 
+    def test_stat_approx_quantile(self):
+        # SPARK-41069: Test the stat.approxQuantile method
+        result = self.connect.read.table(self.tbl_name2).stat.approxQuantile(

Review Comment:
   let's compare the results with PySpark



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: reviews-unsubscribe@spark.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


---------------------------------------------------------------------
To unsubscribe, e-mail: reviews-unsubscribe@spark.apache.org
For additional commands, e-mail: reviews-help@spark.apache.org