Posted to commits@spark.apache.org by yh...@apache.org on 2016/01/18 19:28:05 UTC

spark git commit: [SPARK-12558][FOLLOW-UP] AnalysisException when multiple functions applied in GROUP BY clause

Repository: spark
Updated Branches:
  refs/heads/master 233d6cee9 -> db9a86058


[SPARK-12558][FOLLOW-UP] AnalysisException when multiple functions applied in GROUP BY clause

Addresses the review comments from Yin on https://github.com/apache/spark/pull/10520.

Author: Dilip Biswal <db...@us.ibm.com>

Closes #10758 from dilipbiswal/spark-12558-followup.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/db9a8605
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/db9a8605
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/db9a8605

Branch: refs/heads/master
Commit: db9a860589bfc4f80d6cdf174a577ca538b82e6d
Parents: 233d6ce
Author: Dilip Biswal <db...@us.ibm.com>
Authored: Mon Jan 18 10:28:01 2016 -0800
Committer: Yin Huai <yh...@databricks.com>
Committed: Mon Jan 18 10:28:01 2016 -0800

----------------------------------------------------------------------
 .../spark/sql/hive/execution/HiveUDFSuite.scala       | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/db9a8605/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDFSuite.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDFSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDFSuite.scala
index dfe33ba..af76ff9 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDFSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDFSuite.scala
@@ -22,7 +22,7 @@ import java.util.{ArrayList, Arrays, Properties}
 
 import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.hive.ql.udf.UDAFPercentile
-import org.apache.hadoop.hive.ql.udf.generic.{GenericUDAFAverage, GenericUDF, GenericUDFOPAnd, GenericUDTFExplode}
+import org.apache.hadoop.hive.ql.udf.generic._
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDF.DeferredObject
 import org.apache.hadoop.hive.serde2.{AbstractSerDe, SerDeStats}
 import org.apache.hadoop.hive.serde2.objectinspector.{ObjectInspector, ObjectInspectorFactory}
@@ -351,10 +351,14 @@ class HiveUDFSuite extends QueryTest with TestHiveSingleton with SQLTestUtils {
   }
 
   test("Hive UDF in group by") {
-    Seq(Tuple1(1451400761)).toDF("test_date").registerTempTable("tab1")
-    val count = sql("select date(cast(test_date as timestamp))" +
-      " from tab1 group by date(cast(test_date as timestamp))").count()
-    assert(count == 1)
+    withTempTable("tab1") {
+      Seq(Tuple1(1451400761)).toDF("test_date").registerTempTable("tab1")
+      sql(s"CREATE TEMPORARY FUNCTION testUDFToDate AS '${classOf[GenericUDFToDate].getName}'")
+      val count = sql("select testUDFToDate(cast(test_date as timestamp))" +
+        " from tab1 group by testUDFToDate(cast(test_date as timestamp))").count()
+      sql("DROP TEMPORARY FUNCTION IF EXISTS testUDFToDate")
+      assert(count == 1)
+    }
   }
 
   test("SPARK-11522 select input_file_name from non-parquet table"){


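For context, the snippet below restates the updated test as a standalone sketch: it registers Hive's GenericUDFToDate as a temporary function and calls it in both the SELECT list and the GROUP BY clause, the pattern that SPARK-12558 fixed. The local SparkContext/HiveContext setup and the object name UdfInGroupBySketch are illustrative only and not part of the commit; the SQL and the UDF mirror what HiveUDFSuite does.

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.hive.HiveContext
import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToDate

object UdfInGroupBySketch {
  def main(args: Array[String]): Unit = {
    // Local context for illustration; any existing HiveContext works the same way.
    val sc = new SparkContext(
      new SparkConf().setAppName("udf-in-group-by").setMaster("local[*]"))
    val hiveContext = new HiveContext(sc)
    import hiveContext.implicits._

    // One-row temp table holding an epoch-seconds value, as in the test.
    Seq(Tuple1(1451400761)).toDF("test_date").registerTempTable("tab1")

    // Expose the Hive UDF under a temporary name.
    hiveContext.sql(
      s"CREATE TEMPORARY FUNCTION testUDFToDate AS '${classOf[GenericUDFToDate].getName}'")

    // The same UDF call appears in both SELECT and GROUP BY; before the
    // SPARK-12558 fix this combination raised an AnalysisException.
    val grouped = hiveContext.sql(
      """SELECT testUDFToDate(CAST(test_date AS TIMESTAMP))
        |FROM tab1
        |GROUP BY testUDFToDate(CAST(test_date AS TIMESTAMP))""".stripMargin)
    assert(grouped.count() == 1)

    // Clean up the temporary function and stop the context.
    hiveContext.sql("DROP TEMPORARY FUNCTION IF EXISTS testUDFToDate")
    sc.stop()
  }
}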