You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@spark.apache.org by sr...@apache.org on 2016/09/12 11:21:47 UTC
spark git commit: [SPARK-16992][PYSPARK] use map comprehension in doc
Repository: spark
Updated Branches:
refs/heads/master 4efcdb7fe -> b3c229122
[SPARK-16992][PYSPARK] use map comprehension in doc
Code is equivalent, but a list comprehension is most of the time faster than a map() call.
Author: Gaetan Semet <ga...@xeberon.net>
Closes #14863 from Stibbons/map_comprehension.
Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/b3c22912
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/b3c22912
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/b3c22912
Branch: refs/heads/master
Commit: b3c22912284c2a010a4af3c43dc5e6fd53c68f8c
Parents: 4efcdb7
Author: Gaetan Semet <ga...@xeberon.net>
Authored: Mon Sep 12 12:21:33 2016 +0100
Committer: Sean Owen <so...@cloudera.com>
Committed: Mon Sep 12 12:21:33 2016 +0100
----------------------------------------------------------------------
examples/src/main/python/ml/quantile_discretizer_example.py | 2 +-
examples/src/main/python/ml/vector_slicer_example.py | 4 ++--
examples/src/main/python/sql/hive.py | 2 +-
3 files changed, 4 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/spark/blob/b3c22912/examples/src/main/python/ml/quantile_discretizer_example.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/ml/quantile_discretizer_example.py b/examples/src/main/python/ml/quantile_discretizer_example.py
index 788a0ba..0fc1d19 100644
--- a/examples/src/main/python/ml/quantile_discretizer_example.py
+++ b/examples/src/main/python/ml/quantile_discretizer_example.py
@@ -29,7 +29,7 @@ if __name__ == "__main__":
.getOrCreate()
# $example on$
- data = [(0, 18.0,), (1, 19.0,), (2, 8.0,), (3, 5.0,), (4, 2.2,)]
+ data = [(0, 18.0), (1, 19.0), (2, 8.0), (3, 5.0), (4, 2.2)]
df = spark.createDataFrame(data, ["id", "hour"])
# $example off$
http://git-wip-us.apache.org/repos/asf/spark/blob/b3c22912/examples/src/main/python/ml/vector_slicer_example.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/ml/vector_slicer_example.py b/examples/src/main/python/ml/vector_slicer_example.py
index d2f46b1..68c8cfe 100644
--- a/examples/src/main/python/ml/vector_slicer_example.py
+++ b/examples/src/main/python/ml/vector_slicer_example.py
@@ -32,8 +32,8 @@ if __name__ == "__main__":
# $example on$
df = spark.createDataFrame([
- Row(userFeatures=Vectors.sparse(3, {0: -2.0, 1: 2.3}),),
- Row(userFeatures=Vectors.dense([-2.0, 2.3, 0.0]),)])
+ Row(userFeatures=Vectors.sparse(3, {0: -2.0, 1: 2.3})),
+ Row(userFeatures=Vectors.dense([-2.0, 2.3, 0.0]))])
slicer = VectorSlicer(inputCol="userFeatures", outputCol="features", indices=[1])
http://git-wip-us.apache.org/repos/asf/spark/blob/b3c22912/examples/src/main/python/sql/hive.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/sql/hive.py b/examples/src/main/python/sql/hive.py
index 9b2a2c4..98b4890 100644
--- a/examples/src/main/python/sql/hive.py
+++ b/examples/src/main/python/sql/hive.py
@@ -79,7 +79,7 @@ if __name__ == "__main__":
# You can also use DataFrames to create temporary views within a SparkSession.
Record = Row("key", "value")
- recordsDF = spark.createDataFrame(map(lambda i: Record(i, "val_" + str(i)), range(1, 101)))
+ recordsDF = spark.createDataFrame([Record(i, "val_" + str(i)) for i in range(1, 101)])
recordsDF.createOrReplaceTempView("records")
# Queries can then join DataFrame data with data stored in Hive.
---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@spark.apache.org
For additional commands, e-mail: commits-help@spark.apache.org