Posted to commits@spark.apache.org by me...@apache.org on 2016/01/19 23:06:56 UTC

spark git commit: [SPARK-11295] Add packages to JUnit output for Python tests

Repository: spark
Updated Branches:
  refs/heads/master c78e2080e -> c6f971b4a


[SPARK-11295] Add packages to JUnit output for Python tests

This improves the grouping and display of test case results in the JUnit XML output.
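
Why the re-import works: when a tests.py module is run as a script, its test
classes are defined in the __main__ module, so unittest (and therefore the
JUnit XML written by xmlrunner) reports them as __main__.SomeTest with no
package prefix. Re-importing the classes from their package path rebinds the
names to copies whose __module__ is the fully qualified package name. A
minimal sketch of the effect, using a hypothetical package "pkg" that
contains this file as pkg/tests.py:

    import unittest

    class MyTest(unittest.TestCase):
        def test_ok(self):
            self.assertTrue(True)

    if __name__ == "__main__":
        print(MyTest.__module__)       # "__main__"
        from pkg.tests import MyTest   # the same trick as this commit
        print(MyTest.__module__)       # "pkg.tests"
        unittest.main()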

Author: Gábor Lipták <gl...@gmail.com>

Closes #9263 from gliptak/SPARK-11295.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/c6f971b4
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/c6f971b4
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/c6f971b4

Branch: refs/heads/master
Commit: c6f971b4aeca7265ab374fa46c5c452461d9b6a7
Parents: c78e208
Author: Gábor Lipták <gl...@gmail.com>
Authored: Tue Jan 19 14:06:53 2016 -0800
Committer: Xiangrui Meng <me...@databricks.com>
Committed: Tue Jan 19 14:06:53 2016 -0800

----------------------------------------------------------------------
 python/pyspark/ml/tests.py        |  1 +
 python/pyspark/mllib/tests.py     | 24 ++++++++++++++----------
 python/pyspark/sql/tests.py       |  1 +
 python/pyspark/streaming/tests.py |  1 +
 python/pyspark/tests.py           |  1 +
 5 files changed, 18 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/c6f971b4/python/pyspark/ml/tests.py
----------------------------------------------------------------------
diff --git a/python/pyspark/ml/tests.py b/python/pyspark/ml/tests.py
index 4eb17bf..9ea639d 100644
--- a/python/pyspark/ml/tests.py
+++ b/python/pyspark/ml/tests.py
@@ -394,6 +394,7 @@ class CrossValidatorTests(PySparkTestCase):
 
 
 if __name__ == "__main__":
+    from pyspark.ml.tests import *
     if xmlrunner:
         unittest.main(testRunner=xmlrunner.XMLTestRunner(output='target/test-reports'))
     else:
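
The xmlrunner block above is repeated in every tests.py touched by this
commit. A hedged, self-contained sketch of the whole pattern (the try/except
guard is an assumption about how xmlrunner is made optional; xmlrunner is
provided by the unittest-xml-reporting package):

    import unittest
    try:
        import xmlrunner
    except ImportError:
        xmlrunner = None

    if __name__ == "__main__":
        if xmlrunner:
            # Writes JUnit-style XML reports under target/test-reports
            unittest.main(testRunner=xmlrunner.XMLTestRunner(output='target/test-reports'))
        else:
            unittest.main()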

http://git-wip-us.apache.org/repos/asf/spark/blob/c6f971b4/python/pyspark/mllib/tests.py
----------------------------------------------------------------------
diff --git a/python/pyspark/mllib/tests.py b/python/pyspark/mllib/tests.py
index 32ed48e..ea7d297 100644
--- a/python/pyspark/mllib/tests.py
+++ b/python/pyspark/mllib/tests.py
@@ -77,21 +77,24 @@ except:
     pass
 
 ser = PickleSerializer()
-sc = SparkContext('local[4]', "MLlib tests")
 
 
 class MLlibTestCase(unittest.TestCase):
     def setUp(self):
-        self.sc = sc
+        self.sc = SparkContext('local[4]', "MLlib tests")
+
+    def tearDown(self):
+        self.sc.stop()
 
 
 class MLLibStreamingTestCase(unittest.TestCase):
     def setUp(self):
-        self.sc = sc
+        self.sc = SparkContext('local[4]', "MLlib tests")
         self.ssc = StreamingContext(self.sc, 1.0)
 
     def tearDown(self):
         self.ssc.stop(False)
+        self.sc.stop()
 
     @staticmethod
     def _eventually(condition, timeout=30.0, catch_assertions=False):
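
The refactor above replaces the shared module-level SparkContext with one
created in setUp and stopped in tearDown. Because only one SparkContext may
be active at a time, a context leaked by one suite would fail every later
one; per-test setup and teardown keeps the suites isolated. A minimal sketch
of the lifecycle (the test body is hypothetical):

    import unittest
    from pyspark import SparkContext

    class ExampleTestCase(unittest.TestCase):
        def setUp(self):
            self.sc = SparkContext('local[4]', "example tests")

        def tearDown(self):
            self.sc.stop()

        def test_parallelize_count(self):
            self.assertEqual(self.sc.parallelize(range(4)).count(), 4)

The remaining hunks below follow mechanically: every reference to the old
module-level sc becomes self.sc.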
@@ -1166,7 +1169,7 @@ class StreamingKMeansTest(MLLibStreamingTestCase):
             clusterWeights=[1.0, 1.0, 1.0, 1.0])
 
         predict_data = [[[1.5, 1.5]], [[-1.5, 1.5]], [[-1.5, -1.5]], [[1.5, -1.5]]]
-        predict_data = [sc.parallelize(batch, 1) for batch in predict_data]
+        predict_data = [self.sc.parallelize(batch, 1) for batch in predict_data]
         predict_stream = self.ssc.queueStream(predict_data)
         predict_val = stkm.predictOn(predict_stream)
 
@@ -1197,7 +1200,7 @@ class StreamingKMeansTest(MLLibStreamingTestCase):
         # classification based in the initial model would have been 0
         # proving that the model is updated.
         batches = [[[-0.5], [0.6], [0.8]], [[0.2], [-0.1], [0.3]]]
-        batches = [sc.parallelize(batch) for batch in batches]
+        batches = [self.sc.parallelize(batch) for batch in batches]
         input_stream = self.ssc.queueStream(batches)
         predict_results = []
 
@@ -1230,7 +1233,7 @@ class LinearDataGeneratorTests(MLlibTestCase):
             self.assertEqual(len(point.features), 3)
 
         linear_data = LinearDataGenerator.generateLinearRDD(
-            sc=sc, nexamples=6, nfeatures=2, eps=0.1,
+            sc=self.sc, nexamples=6, nfeatures=2, eps=0.1,
             nParts=2, intercept=0.0).collect()
         self.assertEqual(len(linear_data), 6)
         for point in linear_data:
@@ -1406,7 +1409,7 @@ class StreamingLinearRegressionWithTests(MLLibStreamingTestCase):
         for i in range(10):
             batch = LinearDataGenerator.generateLinearInput(
                 0.0, [10.0, 10.0], xMean, xVariance, 100, 42 + i, 0.1)
-            batches.append(sc.parallelize(batch))
+            batches.append(self.sc.parallelize(batch))
 
         input_stream = self.ssc.queueStream(batches)
         slr.trainOn(input_stream)
@@ -1430,7 +1433,7 @@ class StreamingLinearRegressionWithTests(MLLibStreamingTestCase):
         for i in range(10):
             batch = LinearDataGenerator.generateLinearInput(
                 0.0, [10.0], [0.0], [1.0 / 3.0], 100, 42 + i, 0.1)
-            batches.append(sc.parallelize(batch))
+            batches.append(self.sc.parallelize(batch))
 
         model_weights = []
         input_stream = self.ssc.queueStream(batches)
@@ -1463,7 +1466,7 @@ class StreamingLinearRegressionWithTests(MLLibStreamingTestCase):
                 0.0, [10.0, 10.0], [0.0, 0.0], [1.0 / 3.0, 1.0 / 3.0],
                 100, 42 + i, 0.1)
             batches.append(
-                sc.parallelize(batch).map(lambda lp: (lp.label, lp.features)))
+                self.sc.parallelize(batch).map(lambda lp: (lp.label, lp.features)))
 
         input_stream = self.ssc.queueStream(batches)
         output_stream = slr.predictOnValues(input_stream)
@@ -1494,7 +1497,7 @@ class StreamingLinearRegressionWithTests(MLLibStreamingTestCase):
         for i in range(10):
             batch = LinearDataGenerator.generateLinearInput(
                 0.0, [10.0], [0.0], [1.0 / 3.0], 100, 42 + i, 0.1)
-            batches.append(sc.parallelize(batch))
+            batches.append(self.sc.parallelize(batch))
 
         predict_batches = [
             b.map(lambda lp: (lp.label, lp.features)) for b in batches]
@@ -1580,6 +1583,7 @@ class ALSTests(MLlibTestCase):
 
 
 if __name__ == "__main__":
+    from pyspark.mllib.tests import *
     if not _have_scipy:
         print("NOTE: Skipping SciPy tests as it does not seem to be installed")
     if xmlrunner:

http://git-wip-us.apache.org/repos/asf/spark/blob/c6f971b4/python/pyspark/sql/tests.py
----------------------------------------------------------------------
diff --git a/python/pyspark/sql/tests.py b/python/pyspark/sql/tests.py
index c03cb93..ae86202 100644
--- a/python/pyspark/sql/tests.py
+++ b/python/pyspark/sql/tests.py
@@ -1259,6 +1259,7 @@ class HiveContextSQLTests(ReusedPySparkTestCase):
 
 
 if __name__ == "__main__":
+    from pyspark.sql.tests import *
     if xmlrunner:
         unittest.main(testRunner=xmlrunner.XMLTestRunner(output='target/test-reports'))
     else:

http://git-wip-us.apache.org/repos/asf/spark/blob/c6f971b4/python/pyspark/streaming/tests.py
----------------------------------------------------------------------
diff --git a/python/pyspark/streaming/tests.py b/python/pyspark/streaming/tests.py
index 86b05d9..24b8126 100644
--- a/python/pyspark/streaming/tests.py
+++ b/python/pyspark/streaming/tests.py
@@ -1635,6 +1635,7 @@ kinesis_test_environ_var = "ENABLE_KINESIS_TESTS"
 are_kinesis_tests_enabled = os.environ.get(kinesis_test_environ_var) == '1'
 
 if __name__ == "__main__":
+    from pyspark.streaming.tests import *
     kafka_assembly_jar = search_kafka_assembly_jar()
     flume_assembly_jar = search_flume_assembly_jar()
     mqtt_assembly_jar = search_mqtt_assembly_jar()

http://git-wip-us.apache.org/repos/asf/spark/blob/c6f971b4/python/pyspark/tests.py
----------------------------------------------------------------------
diff --git a/python/pyspark/tests.py b/python/pyspark/tests.py
index 5bd9447..2372050 100644
--- a/python/pyspark/tests.py
+++ b/python/pyspark/tests.py
@@ -2008,6 +2008,7 @@ class NumPyTests(PySparkTestCase):
 
 
 if __name__ == "__main__":
+    from pyspark.tests import *
     if not _have_scipy:
         print("NOTE: Skipping SciPy tests as it does not seem to be installed")
     if not _have_numpy:

