Posted to commits@spark.apache.org by yh...@apache.org on 2016/11/09 17:50:00 UTC

spark git commit: [SPARK-18338][SQL][TEST-MAVEN] Fix test case initialization order under Maven builds

Repository: spark
Updated Branches:
  refs/heads/master 02c5325b8 -> 205e6d586


[SPARK-18338][SQL][TEST-MAVEN] Fix test case initialization order under Maven builds

## What changes were proposed in this pull request?

Test case initialization order differs between Maven and SBT. Maven always creates instances of all test cases first and then runs them all together.

This breaks `ObjectHashAggregateSuite` because the randomized test cases there register a temporary Hive function right before creating each test case, and that function can be cleared while other, successive test cases are being initialized. Under SBT this is fine, since each created test case is executed immediately after the temporary function is created.
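
As a minimal sketch of the failure mode (the suite names, the `TempFunctions` stand-in, and everything else below are illustrative placeholders, not the actual Spark test code):

```scala
import scala.collection.mutable

import org.scalatest.FunSuite

// Stand-in for the shared Hive session's temporary-function registry.
object TempFunctions {
  val registry: mutable.Set[String] = mutable.Set.empty
}

// Flaky pattern: the function is registered in the constructor body,
// right where the test cases are generated.
class SuiteA extends FunSuite {
  TempFunctions.registry += "hive_max" // runs at construction time

  test("hive_max is still registered when the test runs") {
    assert(TempFunctions.registry.contains("hive_max"))
  }
}

// Another suite whose initialization resets the shared session state.
class SuiteB extends FunSuite {
  TempFunctions.registry.clear() // also runs at construction time
}

// SBT:   new SuiteA, run SuiteA (passes), new SuiteB, run SuiteB
// Maven: new SuiteA, new SuiteB, run SuiteA (fails), run SuiteB
```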

To fix this issue, we move the initialization/destruction code into `beforeAll()` and `afterAll()`, tying the temporary function's lifetime to the suite's execution rather than its construction.
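
Sketched with the same stand-in registry, the fixed pattern looks like this (again an illustration only; the real change, which uses `CREATE TEMPORARY FUNCTION` / `DROP TEMPORARY FUNCTION` statements, is in the diff below):

```scala
import org.scalatest.{BeforeAndAfterAll, FunSuite}

class FixedSuite extends FunSuite with BeforeAndAfterAll {

  // Runs once, at the start of this suite's run phase. Under Maven all
  // suite constructors have already finished by then, so none of them
  // can clear the function before the tests use it.
  override protected def beforeAll(): Unit = {
    super.beforeAll()
    TempFunctions.registry += "hive_max"
  }

  // Runs once, after this suite's tests finish.
  override protected def afterAll(): Unit = {
    try TempFunctions.registry -= "hive_max"
    finally super.afterAll()
  }

  test("hive_max is registered for the whole suite") {
    assert(TempFunctions.registry.contains("hive_max"))
  }
}
```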

## How was this patch tested?

Existing tests.

Author: Cheng Lian <li...@databricks.com>

Closes #15802 from liancheng/fix-flaky-object-hash-agg-suite.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/205e6d58
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/205e6d58
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/205e6d58

Branch: refs/heads/master
Commit: 205e6d5867b180a85bad58035c917ca13552a0a5
Parents: 02c5325
Author: Cheng Lian <li...@databricks.com>
Authored: Wed Nov 9 09:49:02 2016 -0800
Committer: Yin Huai <yh...@databricks.com>
Committed: Wed Nov 9 09:49:02 2016 -0800

----------------------------------------------------------------------
 .../execution/ObjectHashAggregateSuite.scala    | 23 +++++++++-----------
 1 file changed, 10 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/205e6d58/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/ObjectHashAggregateSuite.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/ObjectHashAggregateSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/ObjectHashAggregateSuite.scala
index 527626b..93fc5e8 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/ObjectHashAggregateSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/ObjectHashAggregateSuite.scala
@@ -25,11 +25,10 @@ import org.scalatest.Matchers._
 import org.apache.spark.sql._
 import org.apache.spark.sql.catalyst.FunctionIdentifier
 import org.apache.spark.sql.catalyst.analysis.UnresolvedFunction
-import org.apache.spark.sql.catalyst.expressions.{ExpressionEvalHelper, ExpressionInfo, Literal}
+import org.apache.spark.sql.catalyst.expressions.{ExpressionEvalHelper, Literal}
 import org.apache.spark.sql.catalyst.expressions.aggregate.ApproximatePercentile
 import org.apache.spark.sql.execution.aggregate.{HashAggregateExec, ObjectHashAggregateExec, SortAggregateExec}
 import org.apache.spark.sql.functions._
-import org.apache.spark.sql.hive.HiveSessionCatalog
 import org.apache.spark.sql.hive.test.TestHiveSingleton
 import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.sql.test.SQLTestUtils
@@ -43,6 +42,14 @@ class ObjectHashAggregateSuite
 
   import testImplicits._
 
+  protected override def beforeAll(): Unit = {
+    sql(s"CREATE TEMPORARY FUNCTION hive_max AS '${classOf[GenericUDAFMax].getName}'")
+  }
+
+  protected override def afterAll(): Unit = {
+    sql(s"DROP TEMPORARY FUNCTION IF EXISTS hive_max")
+  }
+
   test("typed_count without grouping keys") {
     val df = Seq((1: Integer, 2), (null, 2), (3: Integer, 4)).toDF("a", "b")
 
@@ -199,10 +206,7 @@ class ObjectHashAggregateSuite
     val typed = percentile_approx($"c0", 0.5)
 
     // A Hive UDAF without partial aggregation support
-    val withoutPartial = {
-      registerHiveFunction("hive_max", classOf[GenericUDAFMax])
-      function("hive_max", $"c1")
-    }
+    val withoutPartial = function("hive_max", $"c1")
 
     // A Spark SQL native aggregate function with partial aggregation support that can be executed
     // by the Tungsten `HashAggregateExec`
@@ -420,13 +424,6 @@ class ObjectHashAggregateSuite
     }
   }
 
-  private def registerHiveFunction(functionName: String, clazz: Class[_]): Unit = {
-    val sessionCatalog = spark.sessionState.catalog.asInstanceOf[HiveSessionCatalog]
-    val builder = sessionCatalog.makeFunctionBuilder(functionName, clazz.getName)
-    val info = new ExpressionInfo(clazz.getName, functionName)
-    sessionCatalog.createTempFunction(functionName, info, builder, ignoreIfExists = false)
-  }
-
   private def function(name: String, args: Column*): Column = {
     Column(UnresolvedFunction(FunctionIdentifier(name), args.map(_.expr), isDistinct = false))
   }


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@spark.apache.org
For additional commands, e-mail: commits-help@spark.apache.org