Posted to commits@spark.apache.org by rx...@apache.org on 2014/07/16 06:34:11 UTC

git commit: follow pep8 None should be compared using is or is not

Repository: spark
Updated Branches:
  refs/heads/master 9c12de509 -> 563acf5ed


follow pep8 None should be compared using is or is not

http://legacy.python.org/dev/peps/pep-0008/
## Programming Recommendations
- Comparisons to singletons like None should always be done with is or is not, never the equality operators.
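
For illustration (not part of the commit message), a minimal Python sketch of why PEP 8 insists on identity here: == dispatches to a class's __eq__ method, which can be overloaded to claim anything, while `is` tests identity against the None singleton and cannot be intercepted.

    class AlwaysEqual(object):
        # Pathological but legal: claims equality with every value.
        def __eq__(self, other):
            return True

    obj = AlwaysEqual()
    print(obj == None)  # True  -- the overloaded __eq__ answers, not identity
    print(obj is None)  # False -- obj is not the None singleton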

Author: Ken Takagiwa <ke...@Kens-MacBook-Pro.local>

Closes #1422 from giwa/apache_master and squashes the following commits:

7b361f3 [Ken Takagiwa] follow pep8 None should be checked using is or is not


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/563acf5e
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/563acf5e
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/563acf5e

Branch: refs/heads/master
Commit: 563acf5edfbfb2fa756a1f0accde0940592663e9
Parents: 9c12de5
Author: Ken Takagiwa <ke...@Kens-MacBook-Pro.local>
Authored: Tue Jul 15 21:34:05 2014 -0700
Committer: Reynold Xin <rx...@apache.org>
Committed: Tue Jul 15 21:34:05 2014 -0700

----------------------------------------------------------------------
 python/pyspark/cloudpickle.py | 4 ++--
 python/pyspark/conf.py        | 4 ++--
 python/pyspark/rddsampler.py  | 2 +-
 python/pyspark/shell.py       | 4 ++--
 4 files changed, 7 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/563acf5e/python/pyspark/cloudpickle.py
----------------------------------------------------------------------
diff --git a/python/pyspark/cloudpickle.py b/python/pyspark/cloudpickle.py
index eb5dbb8..4fda2a9 100644
--- a/python/pyspark/cloudpickle.py
+++ b/python/pyspark/cloudpickle.py
@@ -243,10 +243,10 @@ class CloudPickler(pickle.Pickler):
         # if func is lambda, def'ed at prompt, is in main, or is nested, then
         # we'll pickle the actual function object rather than simply saving a
         # reference (as is done in default pickler), via save_function_tuple.
-        if islambda(obj) or obj.func_code.co_filename == '<stdin>' or themodule == None:
+        if islambda(obj) or obj.func_code.co_filename == '<stdin>' or themodule is None:
             #Force server to import modules that have been imported in main
             modList = None
-            if themodule == None and not self.savedForceImports:
+            if themodule is None and not self.savedForceImports:
                 mainmod = sys.modules['__main__']
                 if useForcedImports and hasattr(mainmod,'___pyc_forcedImports__'):
                     modList = list(mainmod.___pyc_forcedImports__)
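
For context, the `themodule is None` branch above covers functions whose defining module cannot be resolved. A rough standalone sketch of such a lookup, with a hypothetical helper name rather than cloudpickle's actual code:

    import sys

    def lookup_module(func):
        # getattr falls back to None when __module__ is missing, so the
        # result must be distinguished with `is None`, per PEP 8.
        name = getattr(func, '__module__', None)
        if name is None:
            return None
        return sys.modules.get(name)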

http://git-wip-us.apache.org/repos/asf/spark/blob/563acf5e/python/pyspark/conf.py
----------------------------------------------------------------------
diff --git a/python/pyspark/conf.py b/python/pyspark/conf.py
index 8eff4a2..60fc6ba 100644
--- a/python/pyspark/conf.py
+++ b/python/pyspark/conf.py
@@ -30,7 +30,7 @@ u'My app'
 u'local'
 >>> sc.appName
 u'My app'
->>> sc.sparkHome == None
+>>> sc.sparkHome is None
 True
 
 >>> conf = SparkConf(loadDefaults=False)
@@ -116,7 +116,7 @@ class SparkConf(object):
 
     def setExecutorEnv(self, key=None, value=None, pairs=None):
         """Set an environment variable to be passed to executors."""
-        if (key != None and pairs != None) or (key == None and pairs == None):
+        if (key is not None and pairs is not None) or (key is None and pairs is None):
             raise Exception("Either pass one key-value pair or a list of pairs")
         elif key != None:
             self._jconf.setExecutorEnv(key, value)
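
The pattern being fixed in setExecutorEnv is exclusive-argument validation: callers must supply exactly one of a single key/value pair or a list of pairs. (The `elif key != None:` context line above was not touched by this commit.) A simplified, self-contained sketch of the validated logic, as a hypothetical function without the JVM-backed _jconf:

    def set_executor_env(key=None, value=None, pairs=None):
        # Exactly one calling convention is allowed: key/value or pairs.
        if (key is not None and pairs is not None) or (key is None and pairs is None):
            raise Exception("Either pass one key-value pair or a list of pairs")
        env = {}
        if key is not None:
            env[key] = value
        else:
            env.update(pairs)  # pairs is a list of (key, value) tuples
        return env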

http://git-wip-us.apache.org/repos/asf/spark/blob/563acf5e/python/pyspark/rddsampler.py
----------------------------------------------------------------------
diff --git a/python/pyspark/rddsampler.py b/python/pyspark/rddsampler.py
index 845a267..122bc38 100644
--- a/python/pyspark/rddsampler.py
+++ b/python/pyspark/rddsampler.py
@@ -82,7 +82,7 @@ class RDDSampler(object):
             return (num_arrivals - 1)
     
     def shuffle(self, vals):
-        if self._random == None:
+        if self._random is None:
             self.initRandomGenerator(0)  # this should only ever called on the master so
             # the split does not matter
         
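The rddsampler change guards lazy initialization: None is the "not yet created" sentinel for the random generator, so identity is the right test. A self-contained sketch of the idiom, using a hypothetical class rather than the real RDDSampler:

    import random

    class LazySampler(object):
        def __init__(self):
            self._random = None  # generator is created on first use

        def init_random_generator(self, split):
            self._random = random.Random(split)

        def shuffle(self, vals):
            if self._random is None:           # identity test, per PEP 8
                self.init_random_generator(0)  # mirrors the master-only default above
            self._random.shuffle(vals)
            return vals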

http://git-wip-us.apache.org/repos/asf/spark/blob/563acf5e/python/pyspark/shell.py
----------------------------------------------------------------------
diff --git a/python/pyspark/shell.py b/python/pyspark/shell.py
index ebd714d..2ce5409 100644
--- a/python/pyspark/shell.py
+++ b/python/pyspark/shell.py
@@ -35,7 +35,7 @@ from pyspark.context import SparkContext
 from pyspark.storagelevel import StorageLevel
 
 # this is the equivalent of ADD_JARS
-add_files = os.environ.get("ADD_FILES").split(',') if os.environ.get("ADD_FILES") != None else None
+add_files = os.environ.get("ADD_FILES").split(',') if os.environ.get("ADD_FILES") is not None else None
 
 if os.environ.get("SPARK_EXECUTOR_URI"):
     SparkContext.setSystemProperty("spark.executor.uri", os.environ["SPARK_EXECUTOR_URI"])
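
The first shell.py hunk applies the same rule to an environment-variable lookup: os.environ.get returns None for a missing key, so the sentinel must be tested with `is not None`. A standalone sketch equivalent to the fixed line above:

    import os

    # Split ADD_FILES on commas when the variable is set; otherwise keep
    # None as the "nothing to add" sentinel checked later in shell.py.
    raw = os.environ.get("ADD_FILES")
    add_files = raw.split(',') if raw is not None else None

    if add_files is not None:
        print("Adding files: [%s]" % ", ".join(add_files))
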
@@ -55,7 +55,7 @@ print("Using Python version %s (%s, %s)" % (
     platform.python_build()[1]))
 print("SparkContext available as sc.")
 
-if add_files != None:
+if add_files is not None:
     print("Adding files: [%s]" % ", ".join(add_files))
 
 # The ./bin/pyspark script stores the old PYTHONSTARTUP value in OLD_PYTHONSTARTUP,