Posted to issues@spark.apache.org by "Min Wei (JIRA)" <ji...@apache.org> on 2016/07/26 00:07:20 UTC

[jira] [Created] (SPARK-16725) Migrate Guava to 16+

Min Wei created SPARK-16725:
-------------------------------

             Summary: Migrate Guava to 16+
                 Key: SPARK-16725
                 URL: https://issues.apache.org/jira/browse/SPARK-16725
             Project: Spark
          Issue Type: Bug
          Components: Build
    Affects Versions: 2.0.1
            Reporter: Min Wei
             Fix For: 2.0.1


Currently Spark depends on an old version of Guava, version 14. However, the Spark-Cassandra driver asserts on Guava version 16 and above.

It would be great to update the Guava dependency to version 16+; the diff below sketches the change.
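
For context on the first two hunks: com.google.common.hash.HashCodes was removed from Guava around version 16 and its byte-array factory now lives on HashCode, hence the one-line rename in SecurityManager below. A minimal standalone sketch of the Guava 16+ call (illustration only, not Spark code; the object name and the 32-byte length are arbitrary):

import java.security.SecureRandom

import com.google.common.hash.HashCode

object CookieSketch {
  def main(args: Array[String]): Unit = {
    val rnd = new SecureRandom()
    val secret = new Array[Byte](32)
    rnd.nextBytes(secret)
    // Guava 16+: HashCode.fromBytes replaces the removed HashCodes.fromBytes
    val cookie = HashCode.fromBytes(secret).toString
    println(cookie)
  }
}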

diff --git a/core/src/main/scala/org/apache/spark/SecurityManager.scala b/core/src/main/scala/org/apache/spark/SecurityManager.scala
index f72c7de..abddafe 100644
--- a/core/src/main/scala/org/apache/spark/SecurityManager.scala
+++ b/core/src/main/scala/org/apache/spark/SecurityManager.scala
@@ -23,7 +23,7 @@ import java.security.{KeyStore, SecureRandom}
 import java.security.cert.X509Certificate
 import javax.net.ssl._
 
-import com.google.common.hash.HashCodes
+import com.google.common.hash.HashCode
 import com.google.common.io.Files
 import org.apache.hadoop.io.Text
 
@@ -432,7 +432,7 @@ private[spark] class SecurityManager(sparkConf: SparkConf)
         val secret = new Array[Byte](length)
         rnd.nextBytes(secret)
 
-        val cookie = HashCodes.fromBytes(secret).toString()
+        val cookie = HashCode.fromBytes(secret).toString()
         SparkHadoopUtil.get.addSecretKeyToUserCredentials(SECRET_LOOKUP_KEY, cookie)
         cookie
       } else {
diff --git a/core/src/main/scala/org/apache/spark/SparkEnv.scala b/core/src/main/scala/org/apache/spark/SparkEnv.scala
index af50a6d..02545ae 100644
--- a/core/src/main/scala/org/apache/spark/SparkEnv.scala
+++ b/core/src/main/scala/org/apache/spark/SparkEnv.scala
@@ -72,7 +72,7 @@ class SparkEnv (
 
   // A general, soft-reference map for metadata needed during HadoopRDD split computation
   // (e.g., HadoopFileRDD uses this to cache JobConfs and InputFormats).
-  private[spark] val hadoopJobMetadata = new MapMaker().softValues().makeMap[String, Any]()
+  private[spark] val hadoopJobMetadata = new MapMaker().weakValues().makeMap[String, Any]()
 
   private[spark] var driverTmpDir: Option[String] = None
 
diff --git a/pom.xml b/pom.xml
index d064cb5..7c3e036 100644
--- a/pom.xml
+++ b/pom.xml
@@ -368,8 +368,7 @@
       <dependency>
         <groupId>com.google.guava</groupId>
         <artifactId>guava</artifactId>
-        <version>14.0.1</version>
-        <scope>provided</scope>
+        <version>19.0</version>
       </dependency>
       <!-- End of shaded deps -->
       <dependency>
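
A note on the other two hunks: the SparkEnv change swaps MapMaker.softValues() for weakValues(), presumably because MapMaker's soft-value support is deprecated/removed in newer Guava releases. Weak values can be collected more eagerly than soft values, so the Hadoop job-metadata cache may hold entries for a shorter time. If soft-reference semantics are still wanted, Guava's CacheBuilder keeps them on 16+; a hedged sketch (illustrative names and types only, not the actual Spark code):

import java.util.concurrent.ConcurrentMap

import com.google.common.cache.CacheBuilder

object SoftValueMapSketch {
  def main(args: Array[String]): Unit = {
    // CacheBuilder still supports soft values; asMap() exposes a ConcurrentMap view
    val hadoopJobMetadata: ConcurrentMap[String, AnyRef] =
      CacheBuilder.newBuilder().softValues().build[String, AnyRef]().asMap()

    hadoopJobMetadata.put("jobConf#1", "placeholder value")
    println(hadoopJobMetadata.get("jobConf#1"))
  }
}

The pom.xml hunk also drops the provided scope, so the newer Guava is pulled in with the default compile scope.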




