Posted to commits@spark.apache.org by rx...@apache.org on 2016/02/05 07:22:43 UTC

spark git commit: [SPARK-13208][CORE] Replace use of Pairs with Tuple2s

Repository: spark
Updated Branches:
  refs/heads/master e3c75c639 -> 352102ed0


[SPARK-13208][CORE] Replace use of Pairs with Tuple2s

Another trivial deprecation fix for Scala 2.11: scala.Predef.Pair is a deprecated alias for Tuple2, so its remaining uses are replaced with plain tuple syntax.
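
For context, a minimal sketch (mine, not part of the patch) of the deprecation scalac 2.11 warns about and its tuple replacement:

    object PairDeprecationSketch {
      // In Scala 2.11, scala.Predef.Pair is a deprecated alias: type Pair[+A, +B] = Tuple2[A, B].
      val before: Pair[Int, String] = Pair(1, "one") // compiles, but warns that Pair is deprecated
      val after: (Int, String) = (1, "one")          // preferred: (A, B) is sugar for Tuple2[A, B]

      def main(args: Array[String]): Unit =
        println(before == after) // true: both are the same Tuple2 value
    }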

Author: Jakob Odersky <ja...@odersky.com>

Closes #11089 from jodersky/SPARK-13208.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/352102ed
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/352102ed
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/352102ed

Branch: refs/heads/master
Commit: 352102ed0b7be8c335553d7e0389fd7ce83f5fbf
Parents: e3c75c6
Author: Jakob Odersky <ja...@odersky.com>
Authored: Thu Feb 4 22:22:41 2016 -0800
Committer: Reynold Xin <rx...@databricks.com>
Committed: Thu Feb 4 22:22:41 2016 -0800

----------------------------------------------------------------------
 .../main/scala/org/apache/spark/api/java/JavaDoubleRDD.scala   | 2 +-
 .../main/scala/org/apache/spark/rdd/DoubleRDDFunctions.scala   | 4 ++--
 .../sql/execution/datasources/parquet/ParquetSchemaSuite.scala | 2 +-
 .../org/apache/spark/sql/hive/execution/HiveQuerySuite.scala   | 6 +++---
 4 files changed, 7 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/352102ed/core/src/main/scala/org/apache/spark/api/java/JavaDoubleRDD.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/api/java/JavaDoubleRDD.scala b/core/src/main/scala/org/apache/spark/api/java/JavaDoubleRDD.scala
index 37ae007..13e18a5 100644
--- a/core/src/main/scala/org/apache/spark/api/java/JavaDoubleRDD.scala
+++ b/core/src/main/scala/org/apache/spark/api/java/JavaDoubleRDD.scala
@@ -230,7 +230,7 @@ class JavaDoubleRDD(val srdd: RDD[scala.Double])
    * If the RDD contains infinity, NaN throws an exception
    * If the elements in RDD do not vary (max == min) always returns a single bucket.
    */
-  def histogram(bucketCount: Int): Pair[Array[scala.Double], Array[Long]] = {
+  def histogram(bucketCount: Int): (Array[scala.Double], Array[Long]) = {
     val result = srdd.histogram(bucketCount)
     (result._1, result._2)
   }
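
The signature change is compatible for callers, since Pair[A, B] was only a type alias for Tuple2[A, B]. A hedged usage sketch (my example values, assuming a local SparkContext); it calls the Scala RDD's histogram, which is the method the JavaDoubleRDD wrapper above forwards to:

    import org.apache.spark.{SparkConf, SparkContext}

    object HistogramUsageSketch {
      def main(args: Array[String]): Unit = {
        val sc = new SparkContext(
          new SparkConf().setMaster("local[*]").setAppName("histogram-sketch"))
        val rdd = sc.parallelize(Seq(1.0, 2.0, 3.0, 10.0))
        // The (Array[Double], Array[Long]) result destructures like any Tuple2:
        val (buckets, counts) = rdd.histogram(2)
        println(buckets.mkString(", ")) // 1.0, 5.5, 10.0
        println(counts.mkString(", "))  // 3, 1
        sc.stop()
      }
    }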

http://git-wip-us.apache.org/repos/asf/spark/blob/352102ed/core/src/main/scala/org/apache/spark/rdd/DoubleRDDFunctions.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/rdd/DoubleRDDFunctions.scala b/core/src/main/scala/org/apache/spark/rdd/DoubleRDDFunctions.scala
index c07f346..bd61d04 100644
--- a/core/src/main/scala/org/apache/spark/rdd/DoubleRDDFunctions.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/DoubleRDDFunctions.scala
@@ -103,7 +103,7 @@ class DoubleRDDFunctions(self: RDD[Double]) extends Logging with Serializable {
    * If the RDD contains infinity, NaN throws an exception
    * If the elements in RDD do not vary (max == min) always returns a single bucket.
    */
-  def histogram(bucketCount: Int): Pair[Array[Double], Array[Long]] = self.withScope {
+  def histogram(bucketCount: Int): (Array[Double], Array[Long]) = self.withScope {
     // Scala's built-in range has issues. See #SI-8782
     def customRange(min: Double, max: Double, steps: Int): IndexedSeq[Double] = {
       val span = max - min
@@ -112,7 +112,7 @@ class DoubleRDDFunctions(self: RDD[Double]) extends Logging with Serializable {
     // Compute the minimum and the maximum
     val (max: Double, min: Double) = self.mapPartitions { items =>
       Iterator(items.foldRight(Double.NegativeInfinity,
-        Double.PositiveInfinity)((e: Double, x: Pair[Double, Double]) =>
+        Double.PositiveInfinity)((e: Double, x: (Double, Double)) =>
         (x._1.max(e), x._2.min(e))))
     }.reduce { (maxmin1, maxmin2) =>
       (maxmin1._1.max(maxmin2._1), maxmin1._2.min(maxmin2._2))
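
The second hunk only changes the lambda's parameter annotation from Pair[Double, Double] to the tuple type (Double, Double); the fold itself is untouched. A self-contained sketch of the same (max, min) accumulation, runnable without Spark (data values are my own):

    object MaxMinFoldSketch {
      def main(args: Array[String]): Unit = {
        val items = List(3.0, 1.0, 4.0, 1.5)
        // Fold each element into a running (max, min) tuple, as the patched code does per partition.
        val (max, min) = items.foldRight((Double.NegativeInfinity, Double.PositiveInfinity)) {
          (e: Double, x: (Double, Double)) => (x._1.max(e), x._2.min(e))
        }
        println(s"max=$max min=$min") // max=4.0 min=1.0
      }
    }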

http://git-wip-us.apache.org/repos/asf/spark/blob/352102ed/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetSchemaSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetSchemaSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetSchemaSuite.scala
index d860651..90e3d50 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetSchemaSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetSchemaSuite.scala
@@ -261,7 +261,7 @@ class ParquetSchemaInferenceSuite extends ParquetSchemaTest {
     int96AsTimestamp = true,
     writeLegacyParquetFormat = true)
 
-  testSchemaInference[Tuple1[Pair[Int, String]]](
+  testSchemaInference[Tuple1[(Int, String)]](
     "struct",
     """
       |message root {
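
Because the change here sits inside a type argument, it is worth noting that the rewrite cannot alter the inferred schema: Pair[A, B] dealiases to Tuple2[A, B] at compile time. A small compile-time check (my illustration, not part of the suite):

    object PairAliasCheck {
      def main(args: Array[String]): Unit = {
        // Both lines compile (with a deprecation warning on 2.11) only because the types are equal:
        implicitly[Pair[Int, String] =:= Tuple2[Int, String]]
        implicitly[Tuple1[Pair[Int, String]] =:= Tuple1[(Int, String)]]
        println("Pair[A, B] and (A, B) name the same type")
      }
    }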

http://git-wip-us.apache.org/repos/asf/spark/blob/352102ed/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala
index 9632d27..1337a25 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala
@@ -770,14 +770,14 @@ class HiveQuerySuite extends HiveComparisonTest with BeforeAndAfter {
 
   test("SPARK-2180: HAVING support in GROUP BY clauses (positive)") {
     val fixture = List(("foo", 2), ("bar", 1), ("foo", 4), ("bar", 3))
-      .zipWithIndex.map {case Pair(Pair(value, attr), key) => HavingRow(key, value, attr)}
+      .zipWithIndex.map {case ((value, attr), key) => HavingRow(key, value, attr)}
     TestHive.sparkContext.parallelize(fixture).toDF().registerTempTable("having_test")
     val results =
       sql("SELECT value, max(attr) AS attr FROM having_test GROUP BY value HAVING attr > 3")
       .collect()
-      .map(x => Pair(x.getString(0), x.getInt(1)))
+      .map(x => (x.getString(0), x.getInt(1)))
 
-    assert(results === Array(Pair("foo", 4)))
+    assert(results === Array(("foo", 4)))
     TestHive.reset()
   }
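
The last hunk swaps the deprecated Pair extractor for built-in tuple patterns; zipWithIndex produces ((value, attr), index) elements that destructure directly. A self-contained sketch (HavingRow here is my stand-in for the suite's case class):

    object NestedTuplePatternSketch {
      case class HavingRow(key: Int, value: String, attr: Int) // assumed shape, for illustration

      def main(args: Array[String]): Unit = {
        val fixture = List(("foo", 2), ("bar", 1), ("foo", 4), ("bar", 3))
          .zipWithIndex
          .map { case ((value, attr), key) => HavingRow(key, value, attr) }
        fixture.foreach(println) // HavingRow(0,foo,2), HavingRow(1,bar,1), ...
      }
    }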
 


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@spark.apache.org
For additional commands, e-mail: commits-help@spark.apache.org