Posted to commits@spark.apache.org by gu...@apache.org on 2018/03/08 11:38:40 UTC

spark git commit: [SPARK-23522][PYTHON] always use sys.exit over builtin exit

Repository: spark
Updated Branches:
  refs/heads/master 2cb23a8f5 -> 7013eea11


[SPARK-23522][PYTHON] always use sys.exit over builtin exit

The `exit()` builtin is intended only for interactive use; applications should use `sys.exit()`.

## What changes were proposed in this pull request?

All usage of the builtin `exit()` function is replaced by `sys.exit()`.
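
For illustration (this snippet is not part of the patch), the interactive helpers `exit` and `quit` are injected into builtins by the `site` module, so they are not guaranteed to exist in every runtime (for example, under `python -S`), whereas `sys.exit()` is always importable and simply raises `SystemExit`. A minimal sketch of the pattern this change standardizes on:

```python
import sys

# `exit`/`quit` come from the `site` module and exist for interactive
# sessions; running with `python -S` skips `site`, so a bare exit() can
# raise NameError. `sys.exit()` is always available after `import sys`
# and raises SystemExit explicitly.
if len(sys.argv) != 2:
    print("Usage: example <file>", file=sys.stderr)
    sys.exit(-1)
```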

## How was this patch tested?

I ran `python/run-tests`.

Please review http://spark.apache.org/contributing.html before opening a pull request.

Author: Benjamin Peterson <be...@python.org>

Closes #20682 from benjaminp/sys-exit.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/7013eea1
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/7013eea1
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/7013eea1

Branch: refs/heads/master
Commit: 7013eea11cb32b1e0038dc751c485da5c94a484b
Parents: 2cb23a8
Author: Benjamin Peterson <be...@python.org>
Authored: Thu Mar 8 20:38:34 2018 +0900
Committer: hyukjinkwon <gu...@gmail.com>
Committed: Thu Mar 8 20:38:34 2018 +0900

----------------------------------------------------------------------
 dev/merge_spark_pr.py                                          | 2 +-
 dev/run-tests.py                                               | 2 +-
 examples/src/main/python/avro_inputformat.py                   | 2 +-
 examples/src/main/python/kmeans.py                             | 2 +-
 examples/src/main/python/logistic_regression.py                | 2 +-
 examples/src/main/python/ml/dataframe_example.py               | 2 +-
 examples/src/main/python/mllib/correlations.py                 | 2 +-
 examples/src/main/python/mllib/kmeans.py                       | 2 +-
 examples/src/main/python/mllib/logistic_regression.py          | 2 +-
 examples/src/main/python/mllib/random_rdd_generation.py        | 2 +-
 examples/src/main/python/mllib/sampled_rdds.py                 | 4 ++--
 .../main/python/mllib/streaming_linear_regression_example.py   | 2 +-
 examples/src/main/python/pagerank.py                           | 2 +-
 examples/src/main/python/parquet_inputformat.py                | 2 +-
 examples/src/main/python/sort.py                               | 2 +-
 .../main/python/sql/streaming/structured_kafka_wordcount.py    | 2 +-
 .../main/python/sql/streaming/structured_network_wordcount.py  | 2 +-
 .../sql/streaming/structured_network_wordcount_windowed.py     | 2 +-
 examples/src/main/python/streaming/direct_kafka_wordcount.py   | 2 +-
 examples/src/main/python/streaming/flume_wordcount.py          | 2 +-
 examples/src/main/python/streaming/hdfs_wordcount.py           | 2 +-
 examples/src/main/python/streaming/kafka_wordcount.py          | 2 +-
 examples/src/main/python/streaming/network_wordcount.py        | 2 +-
 .../src/main/python/streaming/network_wordjoinsentiments.py    | 2 +-
 .../src/main/python/streaming/recoverable_network_wordcount.py | 2 +-
 examples/src/main/python/streaming/sql_network_wordcount.py    | 2 +-
 .../src/main/python/streaming/stateful_network_wordcount.py    | 2 +-
 examples/src/main/python/wordcount.py                          | 2 +-
 python/pyspark/accumulators.py                                 | 2 +-
 python/pyspark/broadcast.py                                    | 2 +-
 python/pyspark/conf.py                                         | 2 +-
 python/pyspark/context.py                                      | 2 +-
 python/pyspark/daemon.py                                       | 2 +-
 python/pyspark/find_spark_home.py                              | 2 +-
 python/pyspark/heapq3.py                                       | 3 ++-
 python/pyspark/ml/classification.py                            | 3 ++-
 python/pyspark/ml/clustering.py                                | 4 +++-
 python/pyspark/ml/evaluation.py                                | 3 ++-
 python/pyspark/ml/feature.py                                   | 2 +-
 python/pyspark/ml/image.py                                     | 4 +++-
 python/pyspark/ml/linalg/__init__.py                           | 2 +-
 python/pyspark/ml/recommendation.py                            | 4 +++-
 python/pyspark/ml/regression.py                                | 3 ++-
 python/pyspark/ml/stat.py                                      | 4 +++-
 python/pyspark/ml/tuning.py                                    | 6 ++++--
 python/pyspark/mllib/classification.py                         | 3 ++-
 python/pyspark/mllib/clustering.py                             | 2 +-
 python/pyspark/mllib/evaluation.py                             | 3 ++-
 python/pyspark/mllib/feature.py                                | 2 +-
 python/pyspark/mllib/fpm.py                                    | 4 +++-
 python/pyspark/mllib/linalg/__init__.py                        | 2 +-
 python/pyspark/mllib/linalg/distributed.py                     | 2 +-
 python/pyspark/mllib/random.py                                 | 3 ++-
 python/pyspark/mllib/recommendation.py                         | 3 ++-
 python/pyspark/mllib/regression.py                             | 6 ++++--
 python/pyspark/mllib/stat/_statistics.py                       | 2 +-
 python/pyspark/mllib/tree.py                                   | 3 ++-
 python/pyspark/mllib/util.py                                   | 2 +-
 python/pyspark/profiler.py                                     | 3 ++-
 python/pyspark/rdd.py                                          | 2 +-
 python/pyspark/serializers.py                                  | 2 +-
 python/pyspark/shuffle.py                                      | 3 ++-
 python/pyspark/sql/catalog.py                                  | 3 ++-
 python/pyspark/sql/column.py                                   | 2 +-
 python/pyspark/sql/conf.py                                     | 4 +++-
 python/pyspark/sql/context.py                                  | 2 +-
 python/pyspark/sql/dataframe.py                                | 2 +-
 python/pyspark/sql/functions.py                                | 2 +-
 python/pyspark/sql/group.py                                    | 4 +++-
 python/pyspark/sql/readwriter.py                               | 2 +-
 python/pyspark/sql/session.py                                  | 2 +-
 python/pyspark/sql/streaming.py                                | 2 +-
 python/pyspark/sql/types.py                                    | 2 +-
 python/pyspark/sql/udf.py                                      | 3 ++-
 python/pyspark/sql/window.py                                   | 2 +-
 python/pyspark/streaming/util.py                               | 3 ++-
 python/pyspark/util.py                                         | 4 +++-
 python/pyspark/worker.py                                       | 6 +++---
 python/setup.py                                                | 6 +++---
 79 files changed, 120 insertions(+), 86 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/dev/merge_spark_pr.py
----------------------------------------------------------------------
diff --git a/dev/merge_spark_pr.py b/dev/merge_spark_pr.py
index 6b244d8..5ea205f 100755
--- a/dev/merge_spark_pr.py
+++ b/dev/merge_spark_pr.py
@@ -510,7 +510,7 @@ if __name__ == "__main__":
     import doctest
     (failure_count, test_count) = doctest.testmod()
     if failure_count:
-        exit(-1)
+        sys.exit(-1)
     try:
         main()
     except:

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/dev/run-tests.py
----------------------------------------------------------------------
diff --git a/dev/run-tests.py b/dev/run-tests.py
index fe75ef4..164c1e2 100755
--- a/dev/run-tests.py
+++ b/dev/run-tests.py
@@ -621,7 +621,7 @@ def _test():
     import doctest
     failure_count = doctest.testmod()[0]
     if failure_count:
-        exit(-1)
+        sys.exit(-1)
 
 if __name__ == "__main__":
     _test()

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/examples/src/main/python/avro_inputformat.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/avro_inputformat.py b/examples/src/main/python/avro_inputformat.py
index 6286ba6..a18722c 100644
--- a/examples/src/main/python/avro_inputformat.py
+++ b/examples/src/main/python/avro_inputformat.py
@@ -61,7 +61,7 @@ if __name__ == "__main__":
         Assumes you have Avro data stored in <data_file>. Reader schema can be optionally specified
         in [reader_schema_file].
         """, file=sys.stderr)
-        exit(-1)
+        sys.exit(-1)
 
     path = sys.argv[1]
 

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/examples/src/main/python/kmeans.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/kmeans.py b/examples/src/main/python/kmeans.py
index 92e0a3a..a42d711 100755
--- a/examples/src/main/python/kmeans.py
+++ b/examples/src/main/python/kmeans.py
@@ -49,7 +49,7 @@ if __name__ == "__main__":
 
     if len(sys.argv) != 4:
         print("Usage: kmeans <file> <k> <convergeDist>", file=sys.stderr)
-        exit(-1)
+        sys.exit(-1)
 
     print("""WARN: This is a naive implementation of KMeans Clustering and is given
        as an example! Please refer to examples/src/main/python/ml/kmeans_example.py for an

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/examples/src/main/python/logistic_regression.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/logistic_regression.py b/examples/src/main/python/logistic_regression.py
index 01c9384..bcc4e0f 100755
--- a/examples/src/main/python/logistic_regression.py
+++ b/examples/src/main/python/logistic_regression.py
@@ -48,7 +48,7 @@ if __name__ == "__main__":
 
     if len(sys.argv) != 3:
         print("Usage: logistic_regression <file> <iterations>", file=sys.stderr)
-        exit(-1)
+        sys.exit(-1)
 
     print("""WARN: This is a naive implementation of Logistic Regression and is
       given as an example!

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/examples/src/main/python/ml/dataframe_example.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/ml/dataframe_example.py b/examples/src/main/python/ml/dataframe_example.py
index 109f901..d62cf23 100644
--- a/examples/src/main/python/ml/dataframe_example.py
+++ b/examples/src/main/python/ml/dataframe_example.py
@@ -33,7 +33,7 @@ from pyspark.mllib.util import MLUtils
 if __name__ == "__main__":
     if len(sys.argv) > 2:
         print("Usage: dataframe_example.py <libsvm file>", file=sys.stderr)
-        exit(-1)
+        sys.exit(-1)
     elif len(sys.argv) == 2:
         input = sys.argv[1]
     else:

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/examples/src/main/python/mllib/correlations.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/mllib/correlations.py b/examples/src/main/python/mllib/correlations.py
index 0e13546..089504f 100755
--- a/examples/src/main/python/mllib/correlations.py
+++ b/examples/src/main/python/mllib/correlations.py
@@ -31,7 +31,7 @@ from pyspark.mllib.util import MLUtils
 if __name__ == "__main__":
     if len(sys.argv) not in [1, 2]:
         print("Usage: correlations (<file>)", file=sys.stderr)
-        exit(-1)
+        sys.exit(-1)
     sc = SparkContext(appName="PythonCorrelations")
     if len(sys.argv) == 2:
         filepath = sys.argv[1]

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/examples/src/main/python/mllib/kmeans.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/mllib/kmeans.py b/examples/src/main/python/mllib/kmeans.py
index 002fc75..1bdb3e9 100755
--- a/examples/src/main/python/mllib/kmeans.py
+++ b/examples/src/main/python/mllib/kmeans.py
@@ -36,7 +36,7 @@ def parseVector(line):
 if __name__ == "__main__":
     if len(sys.argv) != 3:
         print("Usage: kmeans <file> <k>", file=sys.stderr)
-        exit(-1)
+        sys.exit(-1)
     sc = SparkContext(appName="KMeans")
     lines = sc.textFile(sys.argv[1])
     data = lines.map(parseVector)

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/examples/src/main/python/mllib/logistic_regression.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/mllib/logistic_regression.py b/examples/src/main/python/mllib/logistic_regression.py
index d4f1d34..87efe17 100755
--- a/examples/src/main/python/mllib/logistic_regression.py
+++ b/examples/src/main/python/mllib/logistic_regression.py
@@ -42,7 +42,7 @@ def parsePoint(line):
 if __name__ == "__main__":
     if len(sys.argv) != 3:
         print("Usage: logistic_regression <file> <iterations>", file=sys.stderr)
-        exit(-1)
+        sys.exit(-1)
     sc = SparkContext(appName="PythonLR")
     points = sc.textFile(sys.argv[1]).map(parsePoint)
     iterations = int(sys.argv[2])

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/examples/src/main/python/mllib/random_rdd_generation.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/mllib/random_rdd_generation.py b/examples/src/main/python/mllib/random_rdd_generation.py
index 729bae3..9a429b5 100755
--- a/examples/src/main/python/mllib/random_rdd_generation.py
+++ b/examples/src/main/python/mllib/random_rdd_generation.py
@@ -29,7 +29,7 @@ from pyspark.mllib.random import RandomRDDs
 if __name__ == "__main__":
     if len(sys.argv) not in [1, 2]:
         print("Usage: random_rdd_generation", file=sys.stderr)
-        exit(-1)
+        sys.exit(-1)
 
     sc = SparkContext(appName="PythonRandomRDDGeneration")
 

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/examples/src/main/python/mllib/sampled_rdds.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/mllib/sampled_rdds.py b/examples/src/main/python/mllib/sampled_rdds.py
index b7033ab..00e7cf4 100755
--- a/examples/src/main/python/mllib/sampled_rdds.py
+++ b/examples/src/main/python/mllib/sampled_rdds.py
@@ -29,7 +29,7 @@ from pyspark.mllib.util import MLUtils
 if __name__ == "__main__":
     if len(sys.argv) not in [1, 2]:
         print("Usage: sampled_rdds <libsvm data file>", file=sys.stderr)
-        exit(-1)
+        sys.exit(-1)
     if len(sys.argv) == 2:
         datapath = sys.argv[1]
     else:
@@ -43,7 +43,7 @@ if __name__ == "__main__":
     numExamples = examples.count()
     if numExamples == 0:
         print("Error: Data file had no samples to load.", file=sys.stderr)
-        exit(1)
+        sys.exit(1)
     print('Loaded data with %d examples from file: %s' % (numExamples, datapath))
 
     # Example: RDD.sample() and RDD.takeSample()

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/examples/src/main/python/mllib/streaming_linear_regression_example.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/mllib/streaming_linear_regression_example.py b/examples/src/main/python/mllib/streaming_linear_regression_example.py
index f600496..714c9a0 100644
--- a/examples/src/main/python/mllib/streaming_linear_regression_example.py
+++ b/examples/src/main/python/mllib/streaming_linear_regression_example.py
@@ -36,7 +36,7 @@ if __name__ == "__main__":
     if len(sys.argv) != 3:
         print("Usage: streaming_linear_regression_example.py <trainingDir> <testDir>",
               file=sys.stderr)
-        exit(-1)
+        sys.exit(-1)
 
     sc = SparkContext(appName="PythonLogisticRegressionWithLBFGSExample")
     ssc = StreamingContext(sc, 1)

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/examples/src/main/python/pagerank.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/pagerank.py b/examples/src/main/python/pagerank.py
index 0d6c253..2c19e87 100755
--- a/examples/src/main/python/pagerank.py
+++ b/examples/src/main/python/pagerank.py
@@ -47,7 +47,7 @@ def parseNeighbors(urls):
 if __name__ == "__main__":
     if len(sys.argv) != 3:
         print("Usage: pagerank <file> <iterations>", file=sys.stderr)
-        exit(-1)
+        sys.exit(-1)
 
     print("WARN: This is a naive implementation of PageRank and is given as an example!\n" +
           "Please refer to PageRank implementation provided by graphx",

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/examples/src/main/python/parquet_inputformat.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/parquet_inputformat.py b/examples/src/main/python/parquet_inputformat.py
index a3f86cf..83041f0 100644
--- a/examples/src/main/python/parquet_inputformat.py
+++ b/examples/src/main/python/parquet_inputformat.py
@@ -45,7 +45,7 @@ if __name__ == "__main__":
                 /path/to/examples/parquet_inputformat.py <data_file>
         Assumes you have Parquet data stored in <data_file>.
         """, file=sys.stderr)
-        exit(-1)
+        sys.exit(-1)
 
     path = sys.argv[1]
 

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/examples/src/main/python/sort.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/sort.py b/examples/src/main/python/sort.py
index 81898cf..d3cd985 100755
--- a/examples/src/main/python/sort.py
+++ b/examples/src/main/python/sort.py
@@ -25,7 +25,7 @@ from pyspark.sql import SparkSession
 if __name__ == "__main__":
     if len(sys.argv) != 2:
         print("Usage: sort <file>", file=sys.stderr)
-        exit(-1)
+        sys.exit(-1)
 
     spark = SparkSession\
         .builder\

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/examples/src/main/python/sql/streaming/structured_kafka_wordcount.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/sql/streaming/structured_kafka_wordcount.py b/examples/src/main/python/sql/streaming/structured_kafka_wordcount.py
index 9e8a552..9210678 100644
--- a/examples/src/main/python/sql/streaming/structured_kafka_wordcount.py
+++ b/examples/src/main/python/sql/streaming/structured_kafka_wordcount.py
@@ -49,7 +49,7 @@ if __name__ == "__main__":
         print("""
         Usage: structured_kafka_wordcount.py <bootstrap-servers> <subscribe-type> <topics>
         """, file=sys.stderr)
-        exit(-1)
+        sys.exit(-1)
 
     bootstrapServers = sys.argv[1]
     subscribeType = sys.argv[2]

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/examples/src/main/python/sql/streaming/structured_network_wordcount.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/sql/streaming/structured_network_wordcount.py b/examples/src/main/python/sql/streaming/structured_network_wordcount.py
index c3284c1..9ac3921 100644
--- a/examples/src/main/python/sql/streaming/structured_network_wordcount.py
+++ b/examples/src/main/python/sql/streaming/structured_network_wordcount.py
@@ -38,7 +38,7 @@ from pyspark.sql.functions import split
 if __name__ == "__main__":
     if len(sys.argv) != 3:
         print("Usage: structured_network_wordcount.py <hostname> <port>", file=sys.stderr)
-        exit(-1)
+        sys.exit(-1)
 
     host = sys.argv[1]
     port = int(sys.argv[2])

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/examples/src/main/python/sql/streaming/structured_network_wordcount_windowed.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/sql/streaming/structured_network_wordcount_windowed.py b/examples/src/main/python/sql/streaming/structured_network_wordcount_windowed.py
index db67255..c4e3bbf 100644
--- a/examples/src/main/python/sql/streaming/structured_network_wordcount_windowed.py
+++ b/examples/src/main/python/sql/streaming/structured_network_wordcount_windowed.py
@@ -53,7 +53,7 @@ if __name__ == "__main__":
         msg = ("Usage: structured_network_wordcount_windowed.py <hostname> <port> "
                "<window duration in seconds> [<slide duration in seconds>]")
         print(msg, file=sys.stderr)
-        exit(-1)
+        sys.exit(-1)
 
     host = sys.argv[1]
     port = int(sys.argv[2])

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/examples/src/main/python/streaming/direct_kafka_wordcount.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/streaming/direct_kafka_wordcount.py b/examples/src/main/python/streaming/direct_kafka_wordcount.py
index 425df30..c5c186c 100644
--- a/examples/src/main/python/streaming/direct_kafka_wordcount.py
+++ b/examples/src/main/python/streaming/direct_kafka_wordcount.py
@@ -39,7 +39,7 @@ from pyspark.streaming.kafka import KafkaUtils
 if __name__ == "__main__":
     if len(sys.argv) != 3:
         print("Usage: direct_kafka_wordcount.py <broker_list> <topic>", file=sys.stderr)
-        exit(-1)
+        sys.exit(-1)
 
     sc = SparkContext(appName="PythonStreamingDirectKafkaWordCount")
     ssc = StreamingContext(sc, 2)

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/examples/src/main/python/streaming/flume_wordcount.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/streaming/flume_wordcount.py b/examples/src/main/python/streaming/flume_wordcount.py
index 5d6e6dc..c8ea92b 100644
--- a/examples/src/main/python/streaming/flume_wordcount.py
+++ b/examples/src/main/python/streaming/flume_wordcount.py
@@ -39,7 +39,7 @@ from pyspark.streaming.flume import FlumeUtils
 if __name__ == "__main__":
     if len(sys.argv) != 3:
         print("Usage: flume_wordcount.py <hostname> <port>", file=sys.stderr)
-        exit(-1)
+        sys.exit(-1)
 
     sc = SparkContext(appName="PythonStreamingFlumeWordCount")
     ssc = StreamingContext(sc, 1)

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/examples/src/main/python/streaming/hdfs_wordcount.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/streaming/hdfs_wordcount.py b/examples/src/main/python/streaming/hdfs_wordcount.py
index f815dd2..f9a5c43 100644
--- a/examples/src/main/python/streaming/hdfs_wordcount.py
+++ b/examples/src/main/python/streaming/hdfs_wordcount.py
@@ -35,7 +35,7 @@ from pyspark.streaming import StreamingContext
 if __name__ == "__main__":
     if len(sys.argv) != 2:
         print("Usage: hdfs_wordcount.py <directory>", file=sys.stderr)
-        exit(-1)
+        sys.exit(-1)
 
     sc = SparkContext(appName="PythonStreamingHDFSWordCount")
     ssc = StreamingContext(sc, 1)

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/examples/src/main/python/streaming/kafka_wordcount.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/streaming/kafka_wordcount.py b/examples/src/main/python/streaming/kafka_wordcount.py
index 704f660..e9ee08b 100644
--- a/examples/src/main/python/streaming/kafka_wordcount.py
+++ b/examples/src/main/python/streaming/kafka_wordcount.py
@@ -39,7 +39,7 @@ from pyspark.streaming.kafka import KafkaUtils
 if __name__ == "__main__":
     if len(sys.argv) != 3:
         print("Usage: kafka_wordcount.py <zk> <topic>", file=sys.stderr)
-        exit(-1)
+        sys.exit(-1)
 
     sc = SparkContext(appName="PythonStreamingKafkaWordCount")
     ssc = StreamingContext(sc, 1)

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/examples/src/main/python/streaming/network_wordcount.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/streaming/network_wordcount.py b/examples/src/main/python/streaming/network_wordcount.py
index 9010faf..f3099d2 100644
--- a/examples/src/main/python/streaming/network_wordcount.py
+++ b/examples/src/main/python/streaming/network_wordcount.py
@@ -35,7 +35,7 @@ from pyspark.streaming import StreamingContext
 if __name__ == "__main__":
     if len(sys.argv) != 3:
         print("Usage: network_wordcount.py <hostname> <port>", file=sys.stderr)
-        exit(-1)
+        sys.exit(-1)
     sc = SparkContext(appName="PythonStreamingNetworkWordCount")
     ssc = StreamingContext(sc, 1)
 

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/examples/src/main/python/streaming/network_wordjoinsentiments.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/streaming/network_wordjoinsentiments.py b/examples/src/main/python/streaming/network_wordjoinsentiments.py
index d51a380..2b5434c 100644
--- a/examples/src/main/python/streaming/network_wordjoinsentiments.py
+++ b/examples/src/main/python/streaming/network_wordjoinsentiments.py
@@ -47,7 +47,7 @@ def print_happiest_words(rdd):
 if __name__ == "__main__":
     if len(sys.argv) != 3:
         print("Usage: network_wordjoinsentiments.py <hostname> <port>", file=sys.stderr)
-        exit(-1)
+        sys.exit(-1)
 
     sc = SparkContext(appName="PythonStreamingNetworkWordJoinSentiments")
     ssc = StreamingContext(sc, 5)

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/examples/src/main/python/streaming/recoverable_network_wordcount.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/streaming/recoverable_network_wordcount.py b/examples/src/main/python/streaming/recoverable_network_wordcount.py
index 52b2639..60167dc 100644
--- a/examples/src/main/python/streaming/recoverable_network_wordcount.py
+++ b/examples/src/main/python/streaming/recoverable_network_wordcount.py
@@ -101,7 +101,7 @@ if __name__ == "__main__":
     if len(sys.argv) != 5:
         print("Usage: recoverable_network_wordcount.py <hostname> <port> "
               "<checkpoint-directory> <output-file>", file=sys.stderr)
-        exit(-1)
+        sys.exit(-1)
     host, port, checkpoint, output = sys.argv[1:]
     ssc = StreamingContext.getOrCreate(checkpoint,
                                        lambda: createContext(host, int(port), output))

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/examples/src/main/python/streaming/sql_network_wordcount.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/streaming/sql_network_wordcount.py b/examples/src/main/python/streaming/sql_network_wordcount.py
index 7f12281..ab3cfc0 100644
--- a/examples/src/main/python/streaming/sql_network_wordcount.py
+++ b/examples/src/main/python/streaming/sql_network_wordcount.py
@@ -48,7 +48,7 @@ def getSparkSessionInstance(sparkConf):
 if __name__ == "__main__":
     if len(sys.argv) != 3:
         print("Usage: sql_network_wordcount.py <hostname> <port> ", file=sys.stderr)
-        exit(-1)
+        sys.exit(-1)
     host, port = sys.argv[1:]
     sc = SparkContext(appName="PythonSqlNetworkWordCount")
     ssc = StreamingContext(sc, 1)

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/examples/src/main/python/streaming/stateful_network_wordcount.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/streaming/stateful_network_wordcount.py b/examples/src/main/python/streaming/stateful_network_wordcount.py
index d7bb61e..d5d1eba 100644
--- a/examples/src/main/python/streaming/stateful_network_wordcount.py
+++ b/examples/src/main/python/streaming/stateful_network_wordcount.py
@@ -39,7 +39,7 @@ from pyspark.streaming import StreamingContext
 if __name__ == "__main__":
     if len(sys.argv) != 3:
         print("Usage: stateful_network_wordcount.py <hostname> <port>", file=sys.stderr)
-        exit(-1)
+        sys.exit(-1)
     sc = SparkContext(appName="PythonStreamingStatefulNetworkWordCount")
     ssc = StreamingContext(sc, 1)
     ssc.checkpoint("checkpoint")

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/examples/src/main/python/wordcount.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/wordcount.py b/examples/src/main/python/wordcount.py
index 3d5e44d..a05e24f 100755
--- a/examples/src/main/python/wordcount.py
+++ b/examples/src/main/python/wordcount.py
@@ -26,7 +26,7 @@ from pyspark.sql import SparkSession
 if __name__ == "__main__":
     if len(sys.argv) != 2:
         print("Usage: wordcount <file>", file=sys.stderr)
-        exit(-1)
+        sys.exit(-1)
 
     spark = SparkSession\
         .builder\

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/pyspark/accumulators.py
----------------------------------------------------------------------
diff --git a/python/pyspark/accumulators.py b/python/pyspark/accumulators.py
index 7def676..f730d29 100644
--- a/python/pyspark/accumulators.py
+++ b/python/pyspark/accumulators.py
@@ -265,4 +265,4 @@ if __name__ == "__main__":
     import doctest
     (failure_count, test_count) = doctest.testmod()
     if failure_count:
-        exit(-1)
+        sys.exit(-1)

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/pyspark/broadcast.py
----------------------------------------------------------------------
diff --git a/python/pyspark/broadcast.py b/python/pyspark/broadcast.py
index 02fc515..b3dfc99 100644
--- a/python/pyspark/broadcast.py
+++ b/python/pyspark/broadcast.py
@@ -162,4 +162,4 @@ if __name__ == "__main__":
     import doctest
     (failure_count, test_count) = doctest.testmod()
     if failure_count:
-        exit(-1)
+        sys.exit(-1)

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/pyspark/conf.py
----------------------------------------------------------------------
diff --git a/python/pyspark/conf.py b/python/pyspark/conf.py
index 491b3a8..ab429d9 100644
--- a/python/pyspark/conf.py
+++ b/python/pyspark/conf.py
@@ -217,7 +217,7 @@ def _test():
     import doctest
     (failure_count, test_count) = doctest.testmod(optionflags=doctest.ELLIPSIS)
     if failure_count:
-        exit(-1)
+        sys.exit(-1)
 
 
 if __name__ == "__main__":

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/pyspark/context.py
----------------------------------------------------------------------
diff --git a/python/pyspark/context.py b/python/pyspark/context.py
index 24905f1..7c66496 100644
--- a/python/pyspark/context.py
+++ b/python/pyspark/context.py
@@ -1035,7 +1035,7 @@ def _test():
     (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
     globs['sc'].stop()
     if failure_count:
-        exit(-1)
+        sys.exit(-1)
 
 
 if __name__ == "__main__":

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/pyspark/daemon.py
----------------------------------------------------------------------
diff --git a/python/pyspark/daemon.py b/python/pyspark/daemon.py
index 7f06d42..7bed521 100644
--- a/python/pyspark/daemon.py
+++ b/python/pyspark/daemon.py
@@ -89,7 +89,7 @@ def manager():
         signal.signal(SIGTERM, SIG_DFL)
         # Send SIGHUP to notify workers of shutdown
         os.kill(0, SIGHUP)
-        exit(code)
+        sys.exit(code)
 
     def handle_sigterm(*args):
         shutdown(1)

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/pyspark/find_spark_home.py
----------------------------------------------------------------------
diff --git a/python/pyspark/find_spark_home.py b/python/pyspark/find_spark_home.py
index 212a618..9cf0e8c 100755
--- a/python/pyspark/find_spark_home.py
+++ b/python/pyspark/find_spark_home.py
@@ -68,7 +68,7 @@ def _find_spark_home():
         return next(path for path in paths if is_spark_home(path))
     except StopIteration:
         print("Could not find valid SPARK_HOME while searching {0}".format(paths), file=sys.stderr)
-        exit(-1)
+        sys.exit(-1)
 
 if __name__ == "__main__":
     print(_find_spark_home())

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/pyspark/heapq3.py
----------------------------------------------------------------------
diff --git a/python/pyspark/heapq3.py b/python/pyspark/heapq3.py
index b27e91a..6af084a 100644
--- a/python/pyspark/heapq3.py
+++ b/python/pyspark/heapq3.py
@@ -884,6 +884,7 @@ except ImportError:
 
 if __name__ == "__main__":
     import doctest
+    import sys
     (failure_count, test_count) = doctest.testmod()
     if failure_count:
-        exit(-1)
+        sys.exit(-1)

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/pyspark/ml/classification.py
----------------------------------------------------------------------
diff --git a/python/pyspark/ml/classification.py b/python/pyspark/ml/classification.py
index 27ad1e8..fbbe3d0 100644
--- a/python/pyspark/ml/classification.py
+++ b/python/pyspark/ml/classification.py
@@ -16,6 +16,7 @@
 #
 
 import operator
+import sys
 from multiprocessing.pool import ThreadPool
 
 from pyspark import since, keyword_only
@@ -2043,4 +2044,4 @@ if __name__ == "__main__":
         except OSError:
             pass
     if failure_count:
-        exit(-1)
+        sys.exit(-1)

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/pyspark/ml/clustering.py
----------------------------------------------------------------------
diff --git a/python/pyspark/ml/clustering.py b/python/pyspark/ml/clustering.py
index 6448b76..b3d5fb1 100644
--- a/python/pyspark/ml/clustering.py
+++ b/python/pyspark/ml/clustering.py
@@ -15,6 +15,8 @@
 # limitations under the License.
 #
 
+import sys
+
 from pyspark import since, keyword_only
 from pyspark.ml.util import *
 from pyspark.ml.wrapper import JavaEstimator, JavaModel, JavaWrapper
@@ -1181,4 +1183,4 @@ if __name__ == "__main__":
         except OSError:
             pass
     if failure_count:
-        exit(-1)
+        sys.exit(-1)

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/pyspark/ml/evaluation.py
----------------------------------------------------------------------
diff --git a/python/pyspark/ml/evaluation.py b/python/pyspark/ml/evaluation.py
index 695d8ab..8eaf076 100644
--- a/python/pyspark/ml/evaluation.py
+++ b/python/pyspark/ml/evaluation.py
@@ -15,6 +15,7 @@
 # limitations under the License.
 #
 
+import sys
 from abc import abstractmethod, ABCMeta
 
 from pyspark import since, keyword_only
@@ -446,4 +447,4 @@ if __name__ == "__main__":
         except OSError:
             pass
     if failure_count:
-        exit(-1)
+        sys.exit(-1)

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/pyspark/ml/feature.py
----------------------------------------------------------------------
diff --git a/python/pyspark/ml/feature.py b/python/pyspark/ml/feature.py
index 04b07e6..f2e357f 100755
--- a/python/pyspark/ml/feature.py
+++ b/python/pyspark/ml/feature.py
@@ -3717,4 +3717,4 @@ if __name__ == "__main__":
         except OSError:
             pass
     if failure_count:
-        exit(-1)
+        sys.exit(-1)

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/pyspark/ml/image.py
----------------------------------------------------------------------
diff --git a/python/pyspark/ml/image.py b/python/pyspark/ml/image.py
index 45c9366..96d702f 100644
--- a/python/pyspark/ml/image.py
+++ b/python/pyspark/ml/image.py
@@ -24,6 +24,8 @@
    :members:
 """
 
+import sys
+
 import numpy as np
 from pyspark import SparkContext
 from pyspark.sql.types import Row, _create_row, _parse_datatype_json_string
@@ -251,7 +253,7 @@ def _test():
         optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
     spark.stop()
     if failure_count:
-        exit(-1)
+        sys.exit(-1)
 
 
 if __name__ == "__main__":

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/pyspark/ml/linalg/__init__.py
----------------------------------------------------------------------
diff --git a/python/pyspark/ml/linalg/__init__.py b/python/pyspark/ml/linalg/__init__.py
index ad1b487..6a611a2 100644
--- a/python/pyspark/ml/linalg/__init__.py
+++ b/python/pyspark/ml/linalg/__init__.py
@@ -1158,7 +1158,7 @@ def _test():
     import doctest
     (failure_count, test_count) = doctest.testmod(optionflags=doctest.ELLIPSIS)
     if failure_count:
-        exit(-1)
+        sys.exit(-1)
 
 if __name__ == "__main__":
     _test()

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/pyspark/ml/recommendation.py
----------------------------------------------------------------------
diff --git a/python/pyspark/ml/recommendation.py b/python/pyspark/ml/recommendation.py
index e8bcbe4..a8eae9b 100644
--- a/python/pyspark/ml/recommendation.py
+++ b/python/pyspark/ml/recommendation.py
@@ -15,6 +15,8 @@
 # limitations under the License.
 #
 
+import sys
+
 from pyspark import since, keyword_only
 from pyspark.ml.util import *
 from pyspark.ml.wrapper import JavaEstimator, JavaModel
@@ -480,4 +482,4 @@ if __name__ == "__main__":
         except OSError:
             pass
     if failure_count:
-        exit(-1)
+        sys.exit(-1)

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/pyspark/ml/regression.py
----------------------------------------------------------------------
diff --git a/python/pyspark/ml/regression.py b/python/pyspark/ml/regression.py
index f0812bd..de0a0fa 100644
--- a/python/pyspark/ml/regression.py
+++ b/python/pyspark/ml/regression.py
@@ -15,6 +15,7 @@
 # limitations under the License.
 #
 
+import sys
 import warnings
 
 from pyspark import since, keyword_only
@@ -1812,4 +1813,4 @@ if __name__ == "__main__":
         except OSError:
             pass
     if failure_count:
-        exit(-1)
+        sys.exit(-1)

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/pyspark/ml/stat.py
----------------------------------------------------------------------
diff --git a/python/pyspark/ml/stat.py b/python/pyspark/ml/stat.py
index 079b083..0eeb5e5 100644
--- a/python/pyspark/ml/stat.py
+++ b/python/pyspark/ml/stat.py
@@ -15,6 +15,8 @@
 # limitations under the License.
 #
 
+import sys
+
 from pyspark import since, SparkContext
 from pyspark.ml.common import _java2py, _py2java
 from pyspark.ml.wrapper import _jvm
@@ -151,4 +153,4 @@ if __name__ == "__main__":
     failure_count, test_count = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
     spark.stop()
     if failure_count:
-        exit(-1)
+        sys.exit(-1)

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/pyspark/ml/tuning.py
----------------------------------------------------------------------
diff --git a/python/pyspark/ml/tuning.py b/python/pyspark/ml/tuning.py
index 6c0cad6..545e24c 100644
--- a/python/pyspark/ml/tuning.py
+++ b/python/pyspark/ml/tuning.py
@@ -15,9 +15,11 @@
 # limitations under the License.
 #
 import itertools
-import numpy as np
+import sys
 from multiprocessing.pool import ThreadPool
 
+import numpy as np
+
 from pyspark import since, keyword_only
 from pyspark.ml import Estimator, Model
 from pyspark.ml.common import _py2java
@@ -727,4 +729,4 @@ if __name__ == "__main__":
     (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
     spark.stop()
     if failure_count:
-        exit(-1)
+        sys.exit(-1)

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/pyspark/mllib/classification.py
----------------------------------------------------------------------
diff --git a/python/pyspark/mllib/classification.py b/python/pyspark/mllib/classification.py
index cce703d..bb28198 100644
--- a/python/pyspark/mllib/classification.py
+++ b/python/pyspark/mllib/classification.py
@@ -16,6 +16,7 @@
 #
 
 from math import exp
+import sys
 import warnings
 
 import numpy
@@ -761,7 +762,7 @@ def _test():
     (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
     spark.stop()
     if failure_count:
-        exit(-1)
+        sys.exit(-1)
 
 if __name__ == "__main__":
     _test()

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/pyspark/mllib/clustering.py
----------------------------------------------------------------------
diff --git a/python/pyspark/mllib/clustering.py b/python/pyspark/mllib/clustering.py
index bb687a7..0cbabab 100644
--- a/python/pyspark/mllib/clustering.py
+++ b/python/pyspark/mllib/clustering.py
@@ -1048,7 +1048,7 @@ def _test():
     (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
     globs['sc'].stop()
     if failure_count:
-        exit(-1)
+        sys.exit(-1)
 
 
 if __name__ == "__main__":

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/pyspark/mllib/evaluation.py
----------------------------------------------------------------------
diff --git a/python/pyspark/mllib/evaluation.py b/python/pyspark/mllib/evaluation.py
index 2cd1da3..36cb033 100644
--- a/python/pyspark/mllib/evaluation.py
+++ b/python/pyspark/mllib/evaluation.py
@@ -15,6 +15,7 @@
 # limitations under the License.
 #
 
+import sys
 import warnings
 
 from pyspark import since
@@ -542,7 +543,7 @@ def _test():
     (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
     spark.stop()
     if failure_count:
-        exit(-1)
+        sys.exit(-1)
 
 
 if __name__ == "__main__":

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/pyspark/mllib/feature.py
----------------------------------------------------------------------
diff --git a/python/pyspark/mllib/feature.py b/python/pyspark/mllib/feature.py
index e5231dc..40ecd2e 100644
--- a/python/pyspark/mllib/feature.py
+++ b/python/pyspark/mllib/feature.py
@@ -819,7 +819,7 @@ def _test():
     (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
     spark.stop()
     if failure_count:
-        exit(-1)
+        sys.exit(-1)
 
 if __name__ == "__main__":
     sys.path.pop(0)

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/pyspark/mllib/fpm.py
----------------------------------------------------------------------
diff --git a/python/pyspark/mllib/fpm.py b/python/pyspark/mllib/fpm.py
index f58ea5d..de18dad 100644
--- a/python/pyspark/mllib/fpm.py
+++ b/python/pyspark/mllib/fpm.py
@@ -15,6 +15,8 @@
 # limitations under the License.
 #
 
+import sys
+
 import numpy
 from numpy import array
 from collections import namedtuple
@@ -197,7 +199,7 @@ def _test():
         except OSError:
             pass
     if failure_count:
-        exit(-1)
+        sys.exit(-1)
 
 
 if __name__ == "__main__":

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/pyspark/mllib/linalg/__init__.py
----------------------------------------------------------------------
diff --git a/python/pyspark/mllib/linalg/__init__.py b/python/pyspark/mllib/linalg/__init__.py
index 7b24b3c..60d96d8 100644
--- a/python/pyspark/mllib/linalg/__init__.py
+++ b/python/pyspark/mllib/linalg/__init__.py
@@ -1370,7 +1370,7 @@ def _test():
     import doctest
     (failure_count, test_count) = doctest.testmod(optionflags=doctest.ELLIPSIS)
     if failure_count:
-        exit(-1)
+        sys.exit(-1)
 
 if __name__ == "__main__":
     _test()

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/pyspark/mllib/linalg/distributed.py
----------------------------------------------------------------------
diff --git a/python/pyspark/mllib/linalg/distributed.py b/python/pyspark/mllib/linalg/distributed.py
index 4cb8025..bba8854 100644
--- a/python/pyspark/mllib/linalg/distributed.py
+++ b/python/pyspark/mllib/linalg/distributed.py
@@ -1377,7 +1377,7 @@ def _test():
     (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
     spark.stop()
     if failure_count:
-        exit(-1)
+        sys.exit(-1)
 
 if __name__ == "__main__":
     _test()

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/pyspark/mllib/random.py
----------------------------------------------------------------------
diff --git a/python/pyspark/mllib/random.py b/python/pyspark/mllib/random.py
index 61213dd..a8833cb 100644
--- a/python/pyspark/mllib/random.py
+++ b/python/pyspark/mllib/random.py
@@ -19,6 +19,7 @@
 Python package for random data generation.
 """
 
+import sys
 from functools import wraps
 
 from pyspark import since
@@ -421,7 +422,7 @@ def _test():
     (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
     spark.stop()
     if failure_count:
-        exit(-1)
+        sys.exit(-1)
 
 
 if __name__ == "__main__":

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/pyspark/mllib/recommendation.py
----------------------------------------------------------------------
diff --git a/python/pyspark/mllib/recommendation.py b/python/pyspark/mllib/recommendation.py
index 8118288..3d4eae8 100644
--- a/python/pyspark/mllib/recommendation.py
+++ b/python/pyspark/mllib/recommendation.py
@@ -16,6 +16,7 @@
 #
 
 import array
+import sys
 from collections import namedtuple
 
 from pyspark import SparkContext, since
@@ -326,7 +327,7 @@ def _test():
     (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
     globs['sc'].stop()
     if failure_count:
-        exit(-1)
+        sys.exit(-1)
 
 
 if __name__ == "__main__":

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/pyspark/mllib/regression.py
----------------------------------------------------------------------
diff --git a/python/pyspark/mllib/regression.py b/python/pyspark/mllib/regression.py
index ea107d4..6be45f5 100644
--- a/python/pyspark/mllib/regression.py
+++ b/python/pyspark/mllib/regression.py
@@ -15,9 +15,11 @@
 # limitations under the License.
 #
 
+import sys
+import warnings
+
 import numpy as np
 from numpy import array
-import warnings
 
 from pyspark import RDD, since
 from pyspark.streaming.dstream import DStream
@@ -837,7 +839,7 @@ def _test():
     (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
     spark.stop()
     if failure_count:
-        exit(-1)
+        sys.exit(-1)
 
 if __name__ == "__main__":
     _test()

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/pyspark/mllib/stat/_statistics.py
----------------------------------------------------------------------
diff --git a/python/pyspark/mllib/stat/_statistics.py b/python/pyspark/mllib/stat/_statistics.py
index 49b2644..3c75b13 100644
--- a/python/pyspark/mllib/stat/_statistics.py
+++ b/python/pyspark/mllib/stat/_statistics.py
@@ -313,7 +313,7 @@ def _test():
     (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
     spark.stop()
     if failure_count:
-        exit(-1)
+        sys.exit(-1)
 
 
 if __name__ == "__main__":

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/pyspark/mllib/tree.py
----------------------------------------------------------------------
diff --git a/python/pyspark/mllib/tree.py b/python/pyspark/mllib/tree.py
index 619fa16..b05734c 100644
--- a/python/pyspark/mllib/tree.py
+++ b/python/pyspark/mllib/tree.py
@@ -17,6 +17,7 @@
 
 from __future__ import absolute_import
 
+import sys
 import random
 
 from pyspark import SparkContext, RDD, since
@@ -654,7 +655,7 @@ def _test():
     (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
     spark.stop()
     if failure_count:
-        exit(-1)
+        sys.exit(-1)
 
 if __name__ == "__main__":
     _test()

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/pyspark/mllib/util.py
----------------------------------------------------------------------
diff --git a/python/pyspark/mllib/util.py b/python/pyspark/mllib/util.py
index 9775580..fc78093 100644
--- a/python/pyspark/mllib/util.py
+++ b/python/pyspark/mllib/util.py
@@ -521,7 +521,7 @@ def _test():
     (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
     spark.stop()
     if failure_count:
-        exit(-1)
+        sys.exit(-1)
 
 
 if __name__ == "__main__":

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/pyspark/profiler.py
----------------------------------------------------------------------
diff --git a/python/pyspark/profiler.py b/python/pyspark/profiler.py
index 44d17bd..3c7656a 100644
--- a/python/pyspark/profiler.py
+++ b/python/pyspark/profiler.py
@@ -19,6 +19,7 @@ import cProfile
 import pstats
 import os
 import atexit
+import sys
 
 from pyspark.accumulators import AccumulatorParam
 
@@ -173,4 +174,4 @@ if __name__ == "__main__":
     import doctest
     (failure_count, test_count) = doctest.testmod()
     if failure_count:
-        exit(-1)
+        sys.exit(-1)

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/pyspark/rdd.py
----------------------------------------------------------------------
diff --git a/python/pyspark/rdd.py b/python/pyspark/rdd.py
index 93b8974..4b44f76 100644
--- a/python/pyspark/rdd.py
+++ b/python/pyspark/rdd.py
@@ -2498,7 +2498,7 @@ def _test():
         globs=globs, optionflags=doctest.ELLIPSIS)
     globs['sc'].stop()
     if failure_count:
-        exit(-1)
+        sys.exit(-1)
 
 
 if __name__ == "__main__":

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/pyspark/serializers.py
----------------------------------------------------------------------
diff --git a/python/pyspark/serializers.py b/python/pyspark/serializers.py
index ebf5493..15753f7 100644
--- a/python/pyspark/serializers.py
+++ b/python/pyspark/serializers.py
@@ -715,4 +715,4 @@ if __name__ == '__main__':
     import doctest
     (failure_count, test_count) = doctest.testmod()
     if failure_count:
-        exit(-1)
+        sys.exit(-1)

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/pyspark/shuffle.py
----------------------------------------------------------------------
diff --git a/python/pyspark/shuffle.py b/python/pyspark/shuffle.py
index e974cda..02c7733 100644
--- a/python/pyspark/shuffle.py
+++ b/python/pyspark/shuffle.py
@@ -23,6 +23,7 @@ import gc
 import itertools
 import operator
 import random
+import sys
 
 import pyspark.heapq3 as heapq
 from pyspark.serializers import BatchedSerializer, PickleSerializer, FlattenedValuesSerializer, \
@@ -810,4 +811,4 @@ if __name__ == "__main__":
     import doctest
     (failure_count, test_count) = doctest.testmod()
     if failure_count:
-        exit(-1)
+        sys.exit(-1)

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/pyspark/sql/catalog.py
----------------------------------------------------------------------
diff --git a/python/pyspark/sql/catalog.py b/python/pyspark/sql/catalog.py
index 6aef0f2..b0d8357 100644
--- a/python/pyspark/sql/catalog.py
+++ b/python/pyspark/sql/catalog.py
@@ -15,6 +15,7 @@
 # limitations under the License.
 #
 
+import sys
 import warnings
 from collections import namedtuple
 
@@ -306,7 +307,7 @@ def _test():
         optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
     spark.stop()
     if failure_count:
-        exit(-1)
+        sys.exit(-1)
 
 if __name__ == "__main__":
     _test()

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/pyspark/sql/column.py
----------------------------------------------------------------------
diff --git a/python/pyspark/sql/column.py b/python/pyspark/sql/column.py
index 43b38a2..e05a7b3 100644
--- a/python/pyspark/sql/column.py
+++ b/python/pyspark/sql/column.py
@@ -660,7 +660,7 @@ def _test():
         optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
     spark.stop()
     if failure_count:
-        exit(-1)
+        sys.exit(-1)
 
 
 if __name__ == "__main__":

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/pyspark/sql/conf.py
----------------------------------------------------------------------
diff --git a/python/pyspark/sql/conf.py b/python/pyspark/sql/conf.py
index 792c420..d929834 100644
--- a/python/pyspark/sql/conf.py
+++ b/python/pyspark/sql/conf.py
@@ -15,6 +15,8 @@
 # limitations under the License.
 #
 
+import sys
+
 from pyspark import since
 from pyspark.rdd import ignore_unicode_prefix
 
@@ -80,7 +82,7 @@ def _test():
     (failure_count, test_count) = doctest.testmod(pyspark.sql.conf, globs=globs)
     spark.stop()
     if failure_count:
-        exit(-1)
+        sys.exit(-1)
 
 if __name__ == "__main__":
     _test()

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/pyspark/sql/context.py
----------------------------------------------------------------------
diff --git a/python/pyspark/sql/context.py b/python/pyspark/sql/context.py
index cc1cd1a..6cb9039 100644
--- a/python/pyspark/sql/context.py
+++ b/python/pyspark/sql/context.py
@@ -543,7 +543,7 @@ def _test():
         optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
     globs['sc'].stop()
     if failure_count:
-        exit(-1)
+        sys.exit(-1)
 
 
 if __name__ == "__main__":

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/pyspark/sql/dataframe.py
----------------------------------------------------------------------
diff --git a/python/pyspark/sql/dataframe.py b/python/pyspark/sql/dataframe.py
index 8f90a36..3fc194d 100644
--- a/python/pyspark/sql/dataframe.py
+++ b/python/pyspark/sql/dataframe.py
@@ -2231,7 +2231,7 @@ def _test():
         optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
     globs['sc'].stop()
     if failure_count:
-        exit(-1)
+        sys.exit(-1)
 
 
 if __name__ == "__main__":

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/pyspark/sql/functions.py
----------------------------------------------------------------------
diff --git a/python/pyspark/sql/functions.py b/python/pyspark/sql/functions.py
index dc1341a..dff5909 100644
--- a/python/pyspark/sql/functions.py
+++ b/python/pyspark/sql/functions.py
@@ -2404,7 +2404,7 @@ def _test():
         optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
     spark.stop()
     if failure_count:
-        exit(-1)
+        sys.exit(-1)
 
 
 if __name__ == "__main__":

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/pyspark/sql/group.py
----------------------------------------------------------------------
diff --git a/python/pyspark/sql/group.py b/python/pyspark/sql/group.py
index ab64653..35cac40 100644
--- a/python/pyspark/sql/group.py
+++ b/python/pyspark/sql/group.py
@@ -15,6 +15,8 @@
 # limitations under the License.
 #
 
+import sys
+
 from pyspark import since
 from pyspark.rdd import ignore_unicode_prefix, PythonEvalType
 from pyspark.sql.column import Column, _to_seq, _to_java_column, _create_column_from_literal
@@ -299,7 +301,7 @@ def _test():
         optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
     spark.stop()
     if failure_count:
-        exit(-1)
+        sys.exit(-1)
 
 
 if __name__ == "__main__":

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/pyspark/sql/readwriter.py
----------------------------------------------------------------------
diff --git a/python/pyspark/sql/readwriter.py b/python/pyspark/sql/readwriter.py
index 9d05ac7..803f561 100644
--- a/python/pyspark/sql/readwriter.py
+++ b/python/pyspark/sql/readwriter.py
@@ -970,7 +970,7 @@ def _test():
         optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
     sc.stop()
     if failure_count:
-        exit(-1)
+        sys.exit(-1)
 
 
 if __name__ == "__main__":

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/pyspark/sql/session.py
----------------------------------------------------------------------
diff --git a/python/pyspark/sql/session.py b/python/pyspark/sql/session.py
index 215bb3e..e82a975 100644
--- a/python/pyspark/sql/session.py
+++ b/python/pyspark/sql/session.py
@@ -830,7 +830,7 @@ def _test():
         optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
     globs['sc'].stop()
     if failure_count:
-        exit(-1)
+        sys.exit(-1)
 
 if __name__ == "__main__":
     _test()

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/pyspark/sql/streaming.py
----------------------------------------------------------------------
diff --git a/python/pyspark/sql/streaming.py b/python/pyspark/sql/streaming.py
index cc622de..e8966c2 100644
--- a/python/pyspark/sql/streaming.py
+++ b/python/pyspark/sql/streaming.py
@@ -930,7 +930,7 @@ def _test():
     globs['spark'].stop()
 
     if failure_count:
-        exit(-1)
+        sys.exit(-1)
 
 
 if __name__ == "__main__":

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/pyspark/sql/types.py
----------------------------------------------------------------------
diff --git a/python/pyspark/sql/types.py b/python/pyspark/sql/types.py
index 1632862..826aab9 100644
--- a/python/pyspark/sql/types.py
+++ b/python/pyspark/sql/types.py
@@ -1890,7 +1890,7 @@ def _test():
     (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
     globs['sc'].stop()
     if failure_count:
-        exit(-1)
+        sys.exit(-1)
 
 
 if __name__ == "__main__":

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/pyspark/sql/udf.py
----------------------------------------------------------------------
diff --git a/python/pyspark/sql/udf.py b/python/pyspark/sql/udf.py
index ce804c1..24dd06c 100644
--- a/python/pyspark/sql/udf.py
+++ b/python/pyspark/sql/udf.py
@@ -20,6 +20,7 @@ User-defined function related classes and functions
 import sys
 import inspect
 import functools
+import sys
 
 from pyspark import SparkContext, since
 from pyspark.rdd import _prepare_for_python_RDD, PythonEvalType, ignore_unicode_prefix
@@ -397,7 +398,7 @@ def _test():
         optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
     spark.stop()
     if failure_count:
-        exit(-1)
+        sys.exit(-1)
 
 
 if __name__ == "__main__":

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/pyspark/sql/window.py
----------------------------------------------------------------------
diff --git a/python/pyspark/sql/window.py b/python/pyspark/sql/window.py
index bb841a9..e667fba 100644
--- a/python/pyspark/sql/window.py
+++ b/python/pyspark/sql/window.py
@@ -264,7 +264,7 @@ def _test():
     SparkContext('local[4]', 'PythonTest')
     (failure_count, test_count) = doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
     if failure_count:
-        exit(-1)
+        sys.exit(-1)
 
 
 if __name__ == "__main__":

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/pyspark/streaming/util.py
----------------------------------------------------------------------
diff --git a/python/pyspark/streaming/util.py b/python/pyspark/streaming/util.py
index abbbf6e..df18447 100644
--- a/python/pyspark/streaming/util.py
+++ b/python/pyspark/streaming/util.py
@@ -18,6 +18,7 @@
 import time
 from datetime import datetime
 import traceback
+import sys
 
 from pyspark import SparkContext, RDD
 
@@ -147,4 +148,4 @@ if __name__ == "__main__":
     import doctest
     (failure_count, test_count) = doctest.testmod()
     if failure_count:
-        exit(-1)
+        sys.exit(-1)

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/pyspark/util.py
----------------------------------------------------------------------
diff --git a/python/pyspark/util.py b/python/pyspark/util.py
index 6837b18..ed1bdd0 100644
--- a/python/pyspark/util.py
+++ b/python/pyspark/util.py
@@ -22,6 +22,8 @@ from py4j.protocol import Py4JJavaError
 
 __all__ = []
 
+import sys
+
 
 def _exception_message(excp):
     """Return the message from an exception as either a str or unicode object.  Supports both
@@ -65,4 +67,4 @@ if __name__ == "__main__":
     import doctest
     (failure_count, test_count) = doctest.testmod()
     if failure_count:
-        exit(-1)
+        sys.exit(-1)

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/pyspark/worker.py
----------------------------------------------------------------------
diff --git a/python/pyspark/worker.py b/python/pyspark/worker.py
index 202cac3..a1a4336 100644
--- a/python/pyspark/worker.py
+++ b/python/pyspark/worker.py
@@ -205,7 +205,7 @@ def main(infile, outfile):
         boot_time = time.time()
         split_index = read_int(infile)
         if split_index == -1:  # for unit tests
-            exit(-1)
+            sys.exit(-1)
 
         version = utf8_deserializer.loads(infile)
         if version != "%d.%d" % sys.version_info[:2]:
@@ -279,7 +279,7 @@ def main(infile, outfile):
             # Write the error to stderr if it happened while serializing
             print("PySpark worker failed with exception:", file=sys.stderr)
             print(traceback.format_exc(), file=sys.stderr)
-        exit(-1)
+        sys.exit(-1)
     finish_time = time.time()
     report_times(outfile, boot_time, init_time, finish_time)
     write_long(shuffle.MemoryBytesSpilled, outfile)
@@ -297,7 +297,7 @@ def main(infile, outfile):
     else:
         # write a different value to tell JVM to not reuse this worker
         write_int(SpecialLengths.END_OF_DATA_SECTION, outfile)
-        exit(-1)
+        sys.exit(-1)
 
 
 if __name__ == '__main__':

http://git-wip-us.apache.org/repos/asf/spark/blob/7013eea1/python/setup.py
----------------------------------------------------------------------
diff --git a/python/setup.py b/python/setup.py
index 6a98401..794cece 100644
--- a/python/setup.py
+++ b/python/setup.py
@@ -26,7 +26,7 @@ from shutil import copyfile, copytree, rmtree
 if sys.version_info < (2, 7):
     print("Python versions prior to 2.7 are not supported for pip installed PySpark.",
           file=sys.stderr)
-    exit(-1)
+    sys.exit(-1)
 
 try:
     exec(open('pyspark/version.py').read())
@@ -98,7 +98,7 @@ if (in_spark):
     except:
         print("Temp path for symlink to parent already exists {0}".format(TEMP_PATH),
               file=sys.stderr)
-        exit(-1)
+        sys.exit(-1)
 
 # If you are changing the versions here, please also change ./python/pyspark/sql/utils.py and
 # ./python/run-tests.py. In case of Arrow, you should also check ./pom.xml.
@@ -140,7 +140,7 @@ try:
 
     if not os.path.isdir(SCRIPTS_TARGET):
         print(incorrect_invocation_message, file=sys.stderr)
-        exit(-1)
+        sys.exit(-1)
 
     # Scripts directive requires a list of each script path and does not take wild cards.
     script_names = os.listdir(SCRIPTS_TARGET)

