You are viewing a plain text version of this content. The canonical link for it is here.
Posted to reviews@spark.apache.org by GitBox <gi...@apache.org> on 2020/03/23 05:40:47 UTC

[GitHub] [spark] zhengruifeng commented on a change in pull request #27982: [SPARK-31222][ML] Make ANOVATest Sparsity-Aware

zhengruifeng commented on a change in pull request #27982: [SPARK-31222][ML] Make ANOVATest Sparsity-Aware
URL: https://github.com/apache/spark/pull/27982#discussion_r396222712
 
 

 ##########
 File path: mllib/src/test/scala/org/apache/spark/ml/stat/ANOVATestSuite.scala
 ##########
 @@ -144,22 +144,30 @@ class ANOVATestSuite
   }
 
   test("test DataFrame with sparse vector") {
-    val df = spark.createDataFrame(Seq(
-      (3, Vectors.sparse(6, Array((0, 6.0), (1, 7.0), (3, 7.0), (4, 6.0)))),
-      (1, Vectors.sparse(6, Array((1, 9.0), (2, 6.0), (4, 5.0), (5, 9.0)))),
-      (3, Vectors.sparse(6, Array((1, 9.0), (2, 3.0), (4, 5.0), (5, 5.0)))),
-      (2, Vectors.dense(Array(0.0, 9.0, 8.0, 5.0, 6.0, 4.0))),
-      (2, Vectors.dense(Array(8.0, 9.0, 6.0, 5.0, 4.0, 4.0))),
-      (3, Vectors.dense(Array(8.0, 9.0, 6.0, 4.0, 0.0, 0.0)))
-    )).toDF("label", "features")
+    val data = Seq(
+      (3, Vectors.dense(Array(6.0, 7.0, 0.0, 7.0, 6.0, 0.0, 0.0))),
+      (1, Vectors.dense(Array(0.0, 9.0, 6.0, 0.0, 5.0, 9.0, 0.0))),
+      (3, Vectors.dense(Array(0.0, 9.0, 3.0, 0.0, 5.0, 5.0, 0.0))),
+      (2, Vectors.dense(Array(0.0, 9.0, 8.0, 5.0, 6.0, 4.0, 0.0))),
+      (2, Vectors.dense(Array(8.0, 9.0, 6.0, 5.0, 4.0, 4.0, 0.0))),
+      (3, Vectors.dense(Array(8.0, 9.0, 6.0, 4.0, 0.0, 0.0, 0.0))))
 
-    val anovaResult = ANOVATest.test(df, "features", "label")
-    val (pValues: Vector, fValues: Vector) =
-      anovaResult.select("pValues", "fValues")
-        .as[(Vector, Vector)].head()
-    assert(pValues ~== Vectors.dense(0.71554175, 0.71554175, 0.34278574, 0.45824059, 0.84633632,
-      0.15673368) relTol 1e-6)
-    assert(fValues ~== Vectors.dense(0.375, 0.375, 1.5625, 1.02364865, 0.17647059,
-      3.66) relTol 1e-6)
+    val df1 = spark.createDataFrame(data.map(t => (t._1, t._2.toDense)))
+      .toDF("label", "features")
+    val df2 = spark.createDataFrame(data.map(t => (t._1, t._2.toSparse)))
+      .toDF("label", "features")
+    val df3 = spark.createDataFrame(data.map(t => (t._1, t._2.compressed)))
+      .toDF("label", "features")
+
+    Seq(df1, df2, df3).foreach { df =>
+      val anovaResult = ANOVATest.test(df, "features", "label")
+      val (pValues: Vector, fValues: Vector) =
+        anovaResult.select("pValues", "fValues")
+          .as[(Vector, Vector)].head()
+      assert(pValues ~== Vectors.dense(0.71554175, 0.71554175, 0.34278574, 0.45824059, 0.84633632,
+        0.15673368, Double.NaN) relTol 1e-6)
 
 Review comment:
   for a column containing only zero values, sklearn will return nan:
   ```python
   X = np.zeros([3,5])
   
   y = [1,2,3]
   
   f_classif(X, y)
   /home/zrf/Applications/anaconda3/lib/python3.7/site-packages/sklearn/feature_selection/_univariate_selection.py:110: RuntimeWarning: invalid value encountered in true_divide
     msw = sswn / float(dfwn)
   Out[24]: (array([nan, nan, nan, nan, nan]), array([nan, nan, nan, nan, nan]))
   ```

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services

---------------------------------------------------------------------
To unsubscribe, e-mail: reviews-unsubscribe@spark.apache.org
For additional commands, e-mail: reviews-help@spark.apache.org