Posted to common-commits@hadoop.apache.org by ra...@apache.org on 2009/03/25 20:23:48 UTC

svn commit: r758416 - in /hadoop/core/trunk: CHANGES.txt src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java src/test/org/apache/hadoop/hdfs/TestDistributedFileSystem.java

Author: rangadi
Date: Wed Mar 25 19:23:33 2009
New Revision: 758416

URL: http://svn.apache.org/viewvc?rev=758416&view=rev
Log:
HADOOP-5191. Accessing HDFS with any ip or hostname should work as long
as it points to the interface NameNode is listening on. (Raghu Angadi)
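
As context for readers of the archive, a minimal sketch of the client-side
behavior this change enables (the address 127.0.0.1:8020 and the path are
illustrative assumptions, not taken from the commit): after the fix, the
DistributedFileSystem keeps the authority the caller supplied, so any IP
or hostname alias bound to the NameNode's listen interface should yield a
working FileSystem.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class IpUriSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical NameNode address; before HADOOP-5191 a URI like this
        // could be rejected when 127.0.0.1 was not the canonical hostname.
        Path p = new Path("hdfs://127.0.0.1:8020/tmp/example");
        FileSystem fs = FileSystem.get(p.toUri(), conf);
        // The filesystem now reports the authority the caller used.
        System.out.println(fs.getUri());   // prints hdfs://127.0.0.1:8020
        fs.close();
      }
    }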

Modified:
    hadoop/core/trunk/CHANGES.txt
    hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java
    hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestDistributedFileSystem.java

Modified: hadoop/core/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=758416&r1=758415&r2=758416&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Wed Mar 25 19:23:33 2009
@@ -316,6 +316,9 @@
     HADOOP-5210. Solves a problem in the progress report of the reduce task.
     (Ravi Gummadi via ddas)
 
+    HADOOP-5191. Accessing HDFS with any ip or hostname should work as long 
+    as it points to the interface NameNode is listening on. (Raghu Angadi)
+
 Release 0.20.0 - Unreleased
 
   INCOMPATIBLE CHANGES

Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java?rev=758416&r1=758415&r2=758416&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java Wed Mar 25 19:23:33 2009
@@ -79,7 +79,7 @@
 
     InetSocketAddress namenode = NameNode.getAddress(uri.getAuthority());
     this.dfs = new DFSClient(namenode, conf, statistics);
-    this.uri = NameNode.getUri(namenode);
+    this.uri = URI.create("hdfs://" + uri.getAuthority());
     this.workingDir = getHomeDirectory();
   }
 
@@ -92,7 +92,8 @@
     if (thatUri.getScheme() != null
         && thatUri.getScheme().equalsIgnoreCase(thisUri.getScheme())
         && thatUri.getPort() == NameNode.DEFAULT_PORT
-        && thisUri.getPort() == -1
+        && (thisUri.getPort() == -1 || 
+            thisUri.getPort() == NameNode.DEFAULT_PORT)
         && thatAuthority.substring(0,thatAuthority.indexOf(":"))
         .equalsIgnoreCase(thisUri.getAuthority()))
       return;
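
The hunk above relaxes checkPath(): a path URI that names the default port
explicitly used to match only when this filesystem's URI carried no port at
all (-1); it now also matches when this URI names the default port
explicitly. A slightly generalized, illustrative paraphrase of that
comparison (not the patched method itself; here a missing port is resolved
to the default before comparing):

    import java.net.URI;

    final class UriMatchSketch {
      // Treat a missing port (-1) as the default, then require scheme,
      // host, and port to agree, ignoring case on scheme and host.
      static boolean sameAuthority(URI thisUri, URI thatUri, int defaultPort) {
        int thisPort = thisUri.getPort() == -1 ? defaultPort : thisUri.getPort();
        int thatPort = thatUri.getPort() == -1 ? defaultPort : thatUri.getPort();
        return thisUri.getScheme().equalsIgnoreCase(thatUri.getScheme())
            && thisUri.getHost().equalsIgnoreCase(thatUri.getHost())
            && thisPort == thatPort;
      }
    }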

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestDistributedFileSystem.java?rev=758416&r1=758415&r2=758416&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestDistributedFileSystem.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestDistributedFileSystem.java Wed Mar 25 19:23:33 2009
@@ -112,6 +112,24 @@
         assertFalse(dfs.dfs.isLeaseCheckerStarted());
         dfs.close();
       }
+      
+      { // test accessing DFS with ip address. should work with any hostname
+        // alias or ip address that points to the interface that NameNode
+        // is listening on. In this case, it is localhost.
+        String uri = "hdfs://127.0.0.1:" + cluster.getNameNodePort() + 
+                      "/test/ipAddress/file";
+        Path path = new Path(uri);
+        FileSystem fs = FileSystem.get(path.toUri(), conf);
+        FSDataOutputStream out = fs.create(path);
+        byte[] buf = new byte[1024];
+        out.write(buf);
+        out.close();
+        
+        FSDataInputStream in = fs.open(path);
+        in.readFully(buf);
+        in.close();
+        fs.close();
+      }
     }
     finally {
       if (cluster != null) {cluster.shutdown();}
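
The new test block writes and reads a file through an IP-address URI against
the MiniDFSCluster. A hypothetical companion check for the hostname-alias
case the test's comment mentions (not part of this commit, and assuming the
same surrounding test context) might look like:

      { // Hypothetical: "localhost" is an alias of the interface the
        // MiniDFSCluster NameNode listens on, so the file written via
        // 127.0.0.1 should be reachable through it as well.
        String alias = "hdfs://localhost:" + cluster.getNameNodePort() +
                       "/test/ipAddress/file";
        Path aliasPath = new Path(alias);
        FileSystem aliasFs = FileSystem.get(aliasPath.toUri(), conf);
        assertTrue(aliasFs.exists(aliasPath));
        aliasFs.close();
      }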