You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@spark.apache.org by rx...@apache.org on 2014/07/10 08:45:25 UTC

git commit: Clean up SparkKMeans example's code

Repository: spark
Updated Branches:
  refs/heads/master 553c578de -> 2b18ea982


Clean up SparkKMeans example's code

Remove unused code.

Author: Raymond Liu <ra...@intel.com>

Closes #1352 from colorant/kmeans and squashes the following commits:

ddcd1dd [Raymond Liu] Clean up SparkKMeans example's code


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/2b18ea98
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/2b18ea98
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/2b18ea98

Branch: refs/heads/master
Commit: 2b18ea9826395177ac2203dbf8eb37c220ab8e67
Parents: 553c578
Author: Raymond Liu <ra...@intel.com>
Authored: Wed Jul 9 23:39:29 2014 -0700
Committer: Reynold Xin <rx...@apache.org>
Committed: Wed Jul 9 23:39:29 2014 -0700

----------------------------------------------------------------------
 .../src/main/scala/org/apache/spark/examples/SparkKMeans.scala  | 5 -----
 1 file changed, 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/2b18ea98/examples/src/main/scala/org/apache/spark/examples/SparkKMeans.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/SparkKMeans.scala b/examples/src/main/scala/org/apache/spark/examples/SparkKMeans.scala
index 4d28e0a..79cfedf 100644
--- a/examples/src/main/scala/org/apache/spark/examples/SparkKMeans.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/SparkKMeans.scala
@@ -17,8 +17,6 @@
 
 package org.apache.spark.examples
 
-import java.util.Random
-
 import breeze.linalg.{Vector, DenseVector, squaredDistance}
 
 import org.apache.spark.{SparkConf, SparkContext}
@@ -28,15 +26,12 @@ import org.apache.spark.SparkContext._
  * K-means clustering.
  */
 object SparkKMeans {
-  val R = 1000     // Scaling factor
-  val rand = new Random(42)
 
   def parseVector(line: String): Vector[Double] = {
     DenseVector(line.split(' ').map(_.toDouble))
   }
 
   def closestPoint(p: Vector[Double], centers: Array[Vector[Double]]): Int = {
-    var index = 0
     var bestIndex = 0
     var closest = Double.PositiveInfinity