Posted to commits@spark.apache.org by sr...@apache.org on 2016/11/02 09:09:22 UTC

spark git commit: [MINOR] Use <= for clarity in Pi examples' Monte Carlo process

Repository: spark
Updated Branches:
  refs/heads/master 2dc048081 -> bcbe44440


[MINOR] Use <= for clarity in Pi examples' Monte Carlo process

## What changes were proposed in this pull request?

If my understanding is correct, we should be looking at the closed disk rather than the open one.

## How was this patch tested?

Ran a simple comparison of the mean squared error of the two approaches (closed vs. open disk):
https://gist.github.com/mrydzy/1cf0e5c316ef9d6fbd91426b91f1969f
The closed disk performed slightly better, but the tested sample wasn't large, so I rely mostly on understanding of the algorithm.
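
For reference, below is a minimal standalone sketch (plain Scala, no Spark) of the kind of MSE comparison described above; the seed, trial count, and sample size are illustrative choices, not taken from the gist. Note that the boundary x*x + y*y == 1 has probability zero under a continuous uniform sample, so `<` and `<=` give the same estimator in distribution; the change is about matching the definition of the closed quarter disk whose area ratio pi/4 the estimator relies on.

```scala
import scala.util.Random

object ClosedVsOpenDiskComparison {
  // One Monte Carlo estimate of pi from n uniform points in [-1, 1] x [-1, 1].
  def estimatePi(n: Int, closedDisk: Boolean, rng: Random): Double = {
    val hits = (1 to n).count { _ =>
      val x = rng.nextDouble() * 2 - 1
      val y = rng.nextDouble() * 2 - 1
      val d = x * x + y * y
      if (closedDisk) d <= 1 else d < 1  // closed vs. open disk
    }
    4.0 * hits / n
  }

  // Mean squared error of a set of estimates against the true value of pi.
  def mse(estimates: Seq[Double]): Double =
    estimates.map(e => (e - math.Pi) * (e - math.Pi)).sum / estimates.size

  def main(args: Array[String]): Unit = {
    val rng = new Random(42)   // arbitrary seed; trials and points are illustrative only
    val trials = 1000
    val pointsPerTrial = 100000
    val closed = Seq.fill(trials)(estimatePi(pointsPerTrial, closedDisk = true, rng))
    val open   = Seq.fill(trials)(estimatePi(pointsPerTrial, closedDisk = false, rng))
    println(s"MSE with <= : ${mse(closed)}")
    println(s"MSE with <  : ${mse(open)}")
  }
}
```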

Author: Maria Rydzy <ma...@gmail.com>

Closes #15687 from mrydzy/master.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/bcbe4444
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/bcbe4444
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/bcbe4444

Branch: refs/heads/master
Commit: bcbe44440e6c871e217f06d2a4696fd41f1d2606
Parents: 2dc0480
Author: Maria Rydzy <ma...@gmail.com>
Authored: Wed Nov 2 09:09:16 2016 +0000
Committer: Sean Owen <so...@cloudera.com>
Committed: Wed Nov 2 09:09:16 2016 +0000

----------------------------------------------------------------------
 examples/src/main/java/org/apache/spark/examples/JavaSparkPi.java | 2 +-
 examples/src/main/python/pi.py                                    | 2 +-
 examples/src/main/scala/org/apache/spark/examples/LocalPi.scala   | 2 +-
 examples/src/main/scala/org/apache/spark/examples/SparkPi.scala   | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/bcbe4444/examples/src/main/java/org/apache/spark/examples/JavaSparkPi.java
----------------------------------------------------------------------
diff --git a/examples/src/main/java/org/apache/spark/examples/JavaSparkPi.java b/examples/src/main/java/org/apache/spark/examples/JavaSparkPi.java
index 7df145e..89855e8 100644
--- a/examples/src/main/java/org/apache/spark/examples/JavaSparkPi.java
+++ b/examples/src/main/java/org/apache/spark/examples/JavaSparkPi.java
@@ -54,7 +54,7 @@ public final class JavaSparkPi {
       public Integer call(Integer integer) {
         double x = Math.random() * 2 - 1;
         double y = Math.random() * 2 - 1;
-        return (x * x + y * y < 1) ? 1 : 0;
+        return (x * x + y * y <= 1) ? 1 : 0;
       }
     }).reduce(new Function2<Integer, Integer, Integer>() {
       @Override

http://git-wip-us.apache.org/repos/asf/spark/blob/bcbe4444/examples/src/main/python/pi.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/pi.py b/examples/src/main/python/pi.py
index e3f0c4a..37029b7 100755
--- a/examples/src/main/python/pi.py
+++ b/examples/src/main/python/pi.py
@@ -38,7 +38,7 @@ if __name__ == "__main__":
     def f(_):
         x = random() * 2 - 1
         y = random() * 2 - 1
-        return 1 if x ** 2 + y ** 2 < 1 else 0
+        return 1 if x ** 2 + y ** 2 <= 1 else 0
 
     count = spark.sparkContext.parallelize(range(1, n + 1), partitions).map(f).reduce(add)
     print("Pi is roughly %f" % (4.0 * count / n))

http://git-wip-us.apache.org/repos/asf/spark/blob/bcbe4444/examples/src/main/scala/org/apache/spark/examples/LocalPi.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/LocalPi.scala b/examples/src/main/scala/org/apache/spark/examples/LocalPi.scala
index 720d92f..121b768 100644
--- a/examples/src/main/scala/org/apache/spark/examples/LocalPi.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/LocalPi.scala
@@ -26,7 +26,7 @@ object LocalPi {
     for (i <- 1 to 100000) {
       val x = random * 2 - 1
       val y = random * 2 - 1
-      if (x*x + y*y < 1) count += 1
+      if (x*x + y*y <= 1) count += 1
     }
     println("Pi is roughly " + 4 * count / 100000.0)
   }

http://git-wip-us.apache.org/repos/asf/spark/blob/bcbe4444/examples/src/main/scala/org/apache/spark/examples/SparkPi.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/SparkPi.scala b/examples/src/main/scala/org/apache/spark/examples/SparkPi.scala
index 272c1a4..a5cacf1 100644
--- a/examples/src/main/scala/org/apache/spark/examples/SparkPi.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/SparkPi.scala
@@ -34,7 +34,7 @@ object SparkPi {
     val count = spark.sparkContext.parallelize(1 until n, slices).map { i =>
       val x = random * 2 - 1
       val y = random * 2 - 1
-      if (x*x + y*y < 1) 1 else 0
+      if (x*x + y*y <= 1) 1 else 0
     }.reduce(_ + _)
     println("Pi is roughly " + 4.0 * count / (n - 1))
     spark.stop()


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@spark.apache.org
For additional commands, e-mail: commits-help@spark.apache.org