Posted to commits@spark.apache.org by ma...@apache.org on 2014/09/13 05:14:17 UTC

git commit: [SPARK-3515][SQL] Moves test suite setup code to beforeAll rather than in constructor

Repository: spark
Updated Branches:
  refs/heads/master 885d1621b -> 6d887db78


[SPARK-3515][SQL] Moves test suite setup code to beforeAll rather than in constructor

Please refer to the JIRA ticket for details.

**NOTE** We should audit all test suites that perform similar side-effecting initialization in their constructors. This PR fixes only `ParquetMetastoreSuite`, since its constructor-time setup breaks our Jenkins Maven build.
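For context, here is a minimal ScalaTest sketch of the pattern being fixed (the suite names `EagerSetupSuite` and `LazySetupSuite` are hypothetical, not from this commit). Code in a suite's constructor body runs as soon as the class is instantiated, which some runners do for every discovered suite before executing any of them; `beforeAll` defers the work until the suite actually runs:

    import java.nio.file.{Files, Path}

    import org.scalatest.{BeforeAndAfterAll, FunSuite}

    // Anti-pattern: the constructor body runs at class instantiation time,
    // potentially during test discovery, before any test here executes.
    class EagerSetupSuite extends FunSuite {
      val tempDir: Path = Files.createTempDirectory("eager") // side effect in constructor

      test("temp dir exists") {
        assert(Files.exists(tempDir))
      }
    }

    // Preferred: ScalaTest invokes beforeAll immediately before the suite's
    // first test and afterAll right after its last test.
    class LazySetupSuite extends FunSuite with BeforeAndAfterAll {
      private var tempDir: Path = _

      override def beforeAll(): Unit = {
        tempDir = Files.createTempDirectory("lazy")
      }

      override def afterAll(): Unit = {
        Files.deleteIfExists(tempDir)
      }

      test("temp dir exists") {
        assert(Files.exists(tempDir))
      }
    }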

Author: Cheng Lian <li...@gmail.com>

Closes #2375 from liancheng/say-no-to-constructor and squashes the following commits:

0ceb75b [Cheng Lian] Moves test suite setup code to beforeAll rather than in constructor


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/6d887db7
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/6d887db7
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/6d887db7

Branch: refs/heads/master
Commit: 6d887db7891be643f0131b136e82191b5f6eb407
Parents: 885d162
Author: Cheng Lian <li...@gmail.com>
Authored: Fri Sep 12 20:14:09 2014 -0700
Committer: Michael Armbrust <mi...@databricks.com>
Committed: Fri Sep 12 20:14:09 2014 -0700

----------------------------------------------------------------------
 .../sql/parquet/ParquetMetastoreSuite.scala     | 53 +++++++++-----------
 1 file changed, 24 insertions(+), 29 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/6d887db7/sql/hive/src/test/scala/org/apache/spark/sql/parquet/ParquetMetastoreSuite.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/parquet/ParquetMetastoreSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/parquet/ParquetMetastoreSuite.scala
index 0723be7..e380280 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/parquet/ParquetMetastoreSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/parquet/ParquetMetastoreSuite.scala
@@ -20,14 +20,10 @@ package org.apache.spark.sql.parquet
 
 import java.io.File
 
-import org.apache.spark.sql.hive.execution.HiveTableScan
 import org.scalatest.BeforeAndAfterAll
 
-import scala.reflect.ClassTag
-
-import org.apache.spark.sql.{SQLConf, QueryTest}
-import org.apache.spark.sql.execution.{BroadcastHashJoin, ShuffledHashJoin}
-import org.apache.spark.sql.hive.test.TestHive
+import org.apache.spark.sql.QueryTest
+import org.apache.spark.sql.hive.execution.HiveTableScan
 import org.apache.spark.sql.hive.test.TestHive._
 
 case class ParquetData(intField: Int, stringField: String)
@@ -36,27 +32,19 @@ case class ParquetData(intField: Int, stringField: String)
  * Tests for our SerDe -> Native parquet scan conversion.
  */
 class ParquetMetastoreSuite extends QueryTest with BeforeAndAfterAll {
-
   override def beforeAll(): Unit = {
-    setConf("spark.sql.hive.convertMetastoreParquet", "true")
-  }
-
-  override def afterAll(): Unit = {
-    setConf("spark.sql.hive.convertMetastoreParquet", "false")
-  }
-
-  val partitionedTableDir = File.createTempFile("parquettests", "sparksql")
-  partitionedTableDir.delete()
-  partitionedTableDir.mkdir()
-
-  (1 to 10).foreach { p =>
-    val partDir = new File(partitionedTableDir, s"p=$p")
-    sparkContext.makeRDD(1 to 10)
-      .map(i => ParquetData(i, s"part-$p"))
-      .saveAsParquetFile(partDir.getCanonicalPath)
-  }
-
-  sql(s"""
+    val partitionedTableDir = File.createTempFile("parquettests", "sparksql")
+    partitionedTableDir.delete()
+    partitionedTableDir.mkdir()
+
+    (1 to 10).foreach { p =>
+      val partDir = new File(partitionedTableDir, s"p=$p")
+      sparkContext.makeRDD(1 to 10)
+        .map(i => ParquetData(i, s"part-$p"))
+        .saveAsParquetFile(partDir.getCanonicalPath)
+    }
+
+    sql(s"""
     create external table partitioned_parquet
     (
       intField INT,
@@ -70,7 +58,7 @@ class ParquetMetastoreSuite extends QueryTest with BeforeAndAfterAll {
     location '${partitionedTableDir.getCanonicalPath}'
     """)
 
-  sql(s"""
+    sql(s"""
     create external table normal_parquet
     (
       intField INT,
@@ -83,8 +71,15 @@ class ParquetMetastoreSuite extends QueryTest with BeforeAndAfterAll {
     location '${new File(partitionedTableDir, "p=1").getCanonicalPath}'
     """)
 
-  (1 to 10).foreach { p =>
-    sql(s"ALTER TABLE partitioned_parquet ADD PARTITION (p=$p)")
+    (1 to 10).foreach { p =>
+      sql(s"ALTER TABLE partitioned_parquet ADD PARTITION (p=$p)")
+    }
+
+    setConf("spark.sql.hive.convertMetastoreParquet", "true")
+  }
+
+  override def afterAll(): Unit = {
+    setConf("spark.sql.hive.convertMetastoreParquet", "false")
   }
 
   test("project the partitioning column") {

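One design choice visible in the new `beforeAll`: the external tables are created first, and `spark.sql.hive.convertMetastoreParquet` is flipped to `true` only afterwards, with `afterAll` resetting it to `false`. Resetting to a hard-coded `false` assumes that was the value in effect before the suite ran. A minimal sketch of a save-and-restore variant (hypothetical, not part of this commit, and assuming `getConf(key, default)` is available from the same `TestHive._` import that provides `setConf`):

    private var originalConversion: String = _

    override def beforeAll(): Unit = {
      // ... create the tables as above, then remember the conf value that
      // was in effect before this suite ran, defaulting to "false" if unset.
      originalConversion = getConf("spark.sql.hive.convertMetastoreParquet", "false")
      setConf("spark.sql.hive.convertMetastoreParquet", "true")
    }

    override def afterAll(): Unit = {
      // Restore the remembered value rather than assuming it was "false".
      setConf("spark.sql.hive.convertMetastoreParquet", originalConversion)
    }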

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@spark.apache.org
For additional commands, e-mail: commits-help@spark.apache.org