Posted to commits@spark.apache.org by tg...@apache.org on 2014/10/08 15:51:29 UTC

git commit: [SPARK-3788] [yarn] Fix compareFs to do the right thing for HDFS namespaces (1.1 version).

Repository: spark
Updated Branches:
  refs/heads/branch-1.1 a1f833f75 -> a44af7302


[SPARK-3788] [yarn] Fix compareFs to do the right thing for HDFS namespaces (1.1 version).

HA and viewfs URIs use namespace names instead of host names, so
trying to resolve them through DNS will fail. Make compareFs smarter
about when resolution is actually needed so it avoids that
unnecessary work.
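
For context, here is a minimal standalone sketch of the comparison logic
after this patch. The real method in ClientBase.scala (see the diff below)
operates on FileSystem instances; this version, with the hypothetical name
sameFileSystem, takes plain URIs so it can be tried in isolation. Scheme,
host, and port must all match, and hosts are resolved to canonical names
only when they differ textually, so HDFS namespace names used by HA or
viewfs never trigger a DNS lookup:

    import java.net.{InetAddress, URI, UnknownHostException}

    // Hypothetical helper mirroring the patched compareFs logic.
    def sameFileSystem(src: URI, dst: URI): Boolean = {
      if (src.getScheme == null || src.getScheme != dst.getScheme) {
        return false
      }

      var srcHost = src.getHost
      var dstHost = dst.getHost

      // Namespace names (HA/viewfs) won't resolve, so only hit DNS when the
      // host strings differ and a canonical comparison is actually needed.
      if (srcHost != null && dstHost != null && srcHost != dstHost) {
        try {
          srcHost = InetAddress.getByName(srcHost).getCanonicalHostName
          dstHost = InetAddress.getByName(dstHost).getCanonicalHostName
        } catch {
          case _: UnknownHostException => return false
        }
      }

      // Scala's == is null-safe here; the real code uses Guava's Objects.equal().
      srcHost == dstHost && src.getPort == dst.getPort
    }

    // e.g. sameFileSystem(new URI("hdfs://nameservice1/"), new URI("hdfs://nameservice1/"))
    // returns true without any name resolution.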

Author: Marcelo Vanzin <va...@cloudera.com>

Closes #2650 from vanzin/SPARK-3788-1.1 and squashes the following commits:

174bf71 [Marcelo Vanzin] Update comment.
0e36be7 [Marcelo Vanzin] Use Objects.equal() instead of ==.
772aead [Marcelo Vanzin] [SPARK-3788] [yarn] Fix compareFs to do the right thing for HA, federation (1.1 version).


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/a44af730
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/a44af730
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/a44af730

Branch: refs/heads/branch-1.1
Commit: a44af7302f814204fdbcc7ad620bc6984b376468
Parents: a1f833f
Author: Marcelo Vanzin <va...@cloudera.com>
Authored: Wed Oct 8 08:51:17 2014 -0500
Committer: Thomas Graves <tg...@apache.org>
Committed: Wed Oct 8 08:51:17 2014 -0500

----------------------------------------------------------------------
 .../apache/spark/deploy/yarn/ClientBase.scala   | 29 ++++++++------------
 1 file changed, 11 insertions(+), 18 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/a44af730/yarn/common/src/main/scala/org/apache/spark/deploy/yarn/ClientBase.scala
----------------------------------------------------------------------
diff --git a/yarn/common/src/main/scala/org/apache/spark/deploy/yarn/ClientBase.scala b/yarn/common/src/main/scala/org/apache/spark/deploy/yarn/ClientBase.scala
index 6da3b16..27ee04a 100644
--- a/yarn/common/src/main/scala/org/apache/spark/deploy/yarn/ClientBase.scala
+++ b/yarn/common/src/main/scala/org/apache/spark/deploy/yarn/ClientBase.scala
@@ -24,6 +24,7 @@ import scala.collection.JavaConversions._
 import scala.collection.mutable.{HashMap, ListBuffer, Map}
 import scala.util.{Try, Success, Failure}
 
+import com.google.common.base.Objects
 import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.fs._
 import org.apache.hadoop.fs.permission.FsPermission
@@ -122,15 +123,17 @@ trait ClientBase extends Logging {
   private def compareFs(srcFs: FileSystem, destFs: FileSystem): Boolean = {
     val srcUri = srcFs.getUri()
     val dstUri = destFs.getUri()
-    if (srcUri.getScheme() == null) {
-      return false
-    }
-    if (!srcUri.getScheme().equals(dstUri.getScheme())) {
+    if (srcUri.getScheme() == null || srcUri.getScheme() != dstUri.getScheme()) {
       return false
     }
+
     var srcHost = srcUri.getHost()
     var dstHost = dstUri.getHost()
-    if ((srcHost != null) && (dstHost != null)) {
+
+    // In HA or when using viewfs, the host part of the URI may not actually be a host, but the
+    // name of the HDFS namespace. Those names won't resolve, so avoid even trying if they
+    // match.
+    if (srcHost != null && dstHost != null && srcHost != dstHost) {
       try {
         srcHost = InetAddress.getByName(srcHost).getCanonicalHostName()
         dstHost = InetAddress.getByName(dstHost).getCanonicalHostName()
@@ -138,19 +141,9 @@ trait ClientBase extends Logging {
         case e: UnknownHostException =>
           return false
       }
-      if (!srcHost.equals(dstHost)) {
-        return false
-      }
-    } else if (srcHost == null && dstHost != null) {
-      return false
-    } else if (srcHost != null && dstHost == null) {
-      return false
-    }
-    if (srcUri.getPort() != dstUri.getPort()) {
-      false
-    } else {
-      true
     }
+
+    Objects.equal(srcHost, dstHost) && srcUri.getPort() == dstUri.getPort()
   }
 
   /** Copy the file into HDFS if needed. */
@@ -621,7 +614,7 @@ object ClientBase extends Logging {
     YarnSparkHadoopUtil.addToEnvironment(env, Environment.CLASSPATH.name, path,
             File.pathSeparator)
 
-  /** 
+  /**
    * Get the list of namenodes the user may access.
    */
   private[yarn] def getNameNodesToAccess(sparkConf: SparkConf): Set[Path] = {

