Posted to commits@spark.apache.org by do...@apache.org on 2020/05/16 14:35:17 UTC
[spark] branch master updated: [SPARK-31732][TESTS] Disable some flaky tests temporarily
This is an automated email from the ASF dual-hosted git repository.
dongjoon pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git
The following commit(s) were added to refs/heads/master by this push:
new 2012d58 [SPARK-31732][TESTS] Disable some flaky tests temporarily
2012d58 is described below
commit 2012d5847520c8aba54e8e3e6a634976a3c7657d
Author: Wenchen Fan <we...@databricks.com>
AuthorDate: Sat May 16 07:33:58 2020 -0700
[SPARK-31732][TESTS] Disable some flaky tests temporarily
### What changes were proposed in this pull request?
It's quite annoying to be blocked by flaky tests in several PRs. This PR disables them. The tests come from three PRs I have been watching recently (the mechanical pattern of the change is sketched after the links below):
https://github.com/apache/spark/pull/28526
https://github.com/apache/spark/pull/28463
https://github.com/apache/spark/pull/28517
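Every hunk in the diff below makes the same mechanical change: ScalaTest's `ignore` is a drop-in replacement for `test` with an identical signature, so the flaky test body stays compiled but is skipped at run time and reported as ignored. A minimal sketch of the pattern (suite name, test names, and the SPARK-XXXXX placeholder are hypothetical; the `AnyFunSuite` import path assumes ScalaTest 3.1+, while older versions use `org.scalatest.FunSuite`):

    import org.scalatest.funsuite.AnyFunSuite

    class FlakyExampleSuite extends AnyFunSuite {

      // TODO (SPARK-XXXXX): re-enable it once the flakiness is fixed.
      // `ignore` keeps the body compiling but skips it when the suite runs.
      ignore("a timing-sensitive check") {
        assert(System.nanoTime() > 0)
      }

      // Other tests in the same suite keep running as usual.
      test("a stable check") {
        assert(1 + 1 == 2)
      }
    }

Each disabled test carries a TODO pointing at a dedicated JIRA ticket (SPARK-31722, SPARK-31723, and SPARK-31728 through SPARK-31731 in the hunks below), so the tests can be re-enabled individually as each flakiness issue is resolved.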
### Why are the changes needed?
To make the PR builder more stable.
### Does this PR introduce _any_ user-facing change?
no
### How was this patch tested?
N/A
Closes #28547 from cloud-fan/test.
Authored-by: Wenchen Fan <we...@databricks.com>
Signed-off-by: Dongjoon Hyun <do...@apache.org>
---
.../org/apache/spark/deploy/history/HistoryServerSuite.scala | 3 ++-
.../org/apache/spark/scheduler/BarrierTaskContextSuite.scala | 6 ++++--
.../apache/spark/sql/kafka010/KafkaMicroBatchSourceSuite.scala | 3 ++-
.../scala/org/apache/spark/sql/kafka010/KafkaRelationSuite.scala | 3 ++-
.../apache/spark/streaming/kafka010/DirectKafkaStreamSuite.scala | 8 +++++---
.../scala/org/apache/spark/streaming/StreamingContextSuite.scala | 3 ++-
6 files changed, 17 insertions(+), 9 deletions(-)
diff --git a/core/src/test/scala/org/apache/spark/deploy/history/HistoryServerSuite.scala b/core/src/test/scala/org/apache/spark/deploy/history/HistoryServerSuite.scala
index 56cc3da..c55b29b 100644
--- a/core/src/test/scala/org/apache/spark/deploy/history/HistoryServerSuite.scala
+++ b/core/src/test/scala/org/apache/spark/deploy/history/HistoryServerSuite.scala
@@ -314,7 +314,8 @@ class HistoryServerSuite extends SparkFunSuite with BeforeAndAfter with Matchers
all (directSiteRelativeLinks) should not startWith (knoxBaseUrl)
}
- test("static relative links are prefixed with uiRoot (spark.ui.proxyBase)") {
+ // TODO (SPARK-31723): re-enable it
+ ignore("static relative links are prefixed with uiRoot (spark.ui.proxyBase)") {
val uiRoot = Option(System.getenv("APPLICATION_WEB_PROXY_BASE")).getOrElse("/testwebproxybase")
val page = new HistoryPage(server)
val request = mock[HttpServletRequest]
diff --git a/core/src/test/scala/org/apache/spark/scheduler/BarrierTaskContextSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/BarrierTaskContextSuite.scala
index c4e5e7c..17bc339 100644
--- a/core/src/test/scala/org/apache/spark/scheduler/BarrierTaskContextSuite.scala
+++ b/core/src/test/scala/org/apache/spark/scheduler/BarrierTaskContextSuite.scala
@@ -39,7 +39,8 @@ class BarrierTaskContextSuite extends SparkFunSuite with LocalSparkContext with
sc = new SparkContext(conf)
}
- test("global sync by barrier() call") {
+ // TODO (SPARK-31730): re-enable it
+ ignore("global sync by barrier() call") {
initLocalClusterSparkContext()
val rdd = sc.makeRDD(1 to 10, 4)
val rdd2 = rdd.barrier().mapPartitions { it =>
@@ -131,7 +132,8 @@ class BarrierTaskContextSuite extends SparkFunSuite with LocalSparkContext with
assert(times2.max - times2.min <= 1000)
}
- test("support multiple barrier() call within a single task") {
+ // TODO (SPARK-31730): re-enable it
+ ignore("support multiple barrier() call within a single task") {
initLocalClusterSparkContext()
val rdd = sc.makeRDD(1 to 10, 4)
val rdd2 = rdd.barrier().mapPartitions { it =>
diff --git a/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaMicroBatchSourceSuite.scala b/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaMicroBatchSourceSuite.scala
index a4601b9..bdad214 100644
--- a/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaMicroBatchSourceSuite.scala
+++ b/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaMicroBatchSourceSuite.scala
@@ -349,7 +349,8 @@ abstract class KafkaMicroBatchSourceSuiteBase extends KafkaSourceSuiteBase {
)
}
- test("subscribing topic by pattern with topic deletions") {
+ // TODO (SPARK-31731): re-enable it
+ ignore("subscribing topic by pattern with topic deletions") {
val topicPrefix = newTopic()
val topic = topicPrefix + "-seems"
val topic2 = topicPrefix + "-bad"
diff --git a/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaRelationSuite.scala b/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaRelationSuite.scala
index 32d0561..e5f3a22 100644
--- a/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaRelationSuite.scala
+++ b/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaRelationSuite.scala
@@ -179,7 +179,8 @@ abstract class KafkaRelationSuiteBase extends QueryTest with SharedSparkSession
("3", Seq(("e", "f".getBytes(UTF_8)), ("e", "g".getBytes(UTF_8))))).toDF)
}
- test("timestamp provided for starting and ending") {
+ // TODO (SPARK-31729): re-enable it
+ ignore("timestamp provided for starting and ending") {
val (topic, timestamps) = prepareTimestampRelatedUnitTest
// timestamp both presented: starting "first" ending "finalized"
diff --git a/external/kafka-0-10/src/test/scala/org/apache/spark/streaming/kafka010/DirectKafkaStreamSuite.scala b/external/kafka-0-10/src/test/scala/org/apache/spark/streaming/kafka010/DirectKafkaStreamSuite.scala
index 925327d..72cf3e8 100644
--- a/external/kafka-0-10/src/test/scala/org/apache/spark/streaming/kafka010/DirectKafkaStreamSuite.scala
+++ b/external/kafka-0-10/src/test/scala/org/apache/spark/streaming/kafka010/DirectKafkaStreamSuite.scala
@@ -332,7 +332,8 @@ class DirectKafkaStreamSuite
}
// Test to verify the offset ranges can be recovered from the checkpoints
- test("offset recovery") {
+ // TODO (SPARK-31722): re-enable it
+ ignore("offset recovery") {
val topic = "recovery"
kafkaTestUtils.createTopic(topic)
testDir = Utils.createTempDir()
@@ -418,8 +419,9 @@ class DirectKafkaStreamSuite
ssc.stop()
}
- // Test to verify the offsets can be recovered from Kafka
- test("offset recovery from kafka") {
+ // Test to verify the offsets can be recovered from Kafka
+ // TODO (SPARK-31722): re-enable it
+ ignore("offset recovery from kafka") {
val topic = "recoveryfromkafka"
kafkaTestUtils.createTopic(topic)
diff --git a/streaming/src/test/scala/org/apache/spark/streaming/StreamingContextSuite.scala b/streaming/src/test/scala/org/apache/spark/streaming/StreamingContextSuite.scala
index 1d66378..4eff464 100644
--- a/streaming/src/test/scala/org/apache/spark/streaming/StreamingContextSuite.scala
+++ b/streaming/src/test/scala/org/apache/spark/streaming/StreamingContextSuite.scala
@@ -293,7 +293,8 @@ class StreamingContextSuite
}
}
- test("stop gracefully") {
+ // TODO (SPARK-31728): re-enable it
+ ignore("stop gracefully") {
val conf = new SparkConf().setMaster(master).setAppName(appName)
conf.set("spark.dummyTimeConfig", "3600s")
val sc = new SparkContext(conf)