Posted to hdfs-commits@hadoop.apache.org by ji...@apache.org on 2013/12/06 19:01:59 UTC

svn commit: r1548629 - in /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/server/namenode/ src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ src/main/java/org/ap...

Author: jing9
Date: Fri Dec  6 18:01:58 2013
New Revision: 1548629

URL: http://svn.apache.org/r1548629
Log:
HDFS-5312. Generate HTTP/HTTPS URL in DFSUtil#getInfoServer() based on the configured http policy. Contributed by Haohui Mai.

Modified:
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ClusterJspHelper.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAConfiguration.java

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1548629&r1=1548628&r2=1548629&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Fri Dec  6 18:01:58 2013
@@ -233,6 +233,9 @@ Trunk (Unreleased)
 
     HDFS-5630. Hook up cache directive and pool usage statistics. (wang)
 
+    HDFS-5312. Generate HTTP / HTTPS URL in DFSUtil#getInfoServer() based on the 
+    configured http policy. (Haohui Mai via jing9)
+
   OPTIMIZATIONS
     HDFS-5349. DNA_CACHE and DNA_UNCACHE should be by blockId only. (cmccabe)
 

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java?rev=1548629&r1=1548628&r2=1548629&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java Fri Dec  6 18:01:58 2013
@@ -92,6 +92,7 @@ import org.apache.hadoop.security.UserGr
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.ToolRunner;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Charsets;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
@@ -958,39 +959,71 @@ public class DFSUtil {
    * given namenode rpc address.
    * @param conf
    * @param namenodeAddr - namenode RPC address
-   * @param httpsAddress -If true, and if security is enabled, returns server 
-   *                      https address. If false, returns server http address.
+   * @param scheme - the scheme (http / https)
    * @return server http or https address
    * @throws IOException 
    */
-  public static String getInfoServer(InetSocketAddress namenodeAddr,
-      Configuration conf, boolean httpsAddress) throws IOException {
-    boolean securityOn = UserGroupInformation.isSecurityEnabled();
-    String httpAddressKey = (securityOn && httpsAddress) ? 
-        DFS_NAMENODE_HTTPS_ADDRESS_KEY : DFS_NAMENODE_HTTP_ADDRESS_KEY;
-    String httpAddressDefault = (securityOn && httpsAddress) ? 
-        DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT : DFS_NAMENODE_HTTP_ADDRESS_DEFAULT;
-      
-    String suffixes[];
+  public static URI getInfoServer(InetSocketAddress namenodeAddr,
+      Configuration conf, String scheme) throws IOException {
+    String[] suffixes = null;
     if (namenodeAddr != null) {
       // if non-default namenode, try reverse look up 
       // the nameServiceID if it is available
       suffixes = getSuffixIDs(conf, namenodeAddr,
           DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
           DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
+    }
+
+    String authority;
+    if ("http".equals(scheme)) {
+      authority = getSuffixedConf(conf, DFS_NAMENODE_HTTP_ADDRESS_KEY,
+          DFS_NAMENODE_HTTP_ADDRESS_DEFAULT, suffixes);
+    } else if ("https".equals(scheme)) {
+      authority = getSuffixedConf(conf, DFS_NAMENODE_HTTPS_ADDRESS_KEY,
+          DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT, suffixes);
     } else {
-      suffixes = new String[2];
+      throw new IllegalArgumentException("Invalid scheme: " + scheme);
     }
-    String configuredInfoAddr = getSuffixedConf(conf, httpAddressKey,
-        httpAddressDefault, suffixes);
+
     if (namenodeAddr != null) {
-      return substituteForWildcardAddress(configuredInfoAddr,
+      authority = substituteForWildcardAddress(authority,
           namenodeAddr.getHostName());
-    } else {
-      return configuredInfoAddr;
     }
+    return URI.create(scheme + "://" + authority);
+  }
+
+  /**
+   * Look up the HTTP / HTTPS address of the namenode, and replace its hostname
+   * with defaultHost when the address turns out to be a wildcard / local
+   * address.
+   *
+   * @param defaultHost
+   *          The default host name of the namenode.
+   * @param conf
+   *          The configuration
+   * @param scheme
+   *          HTTP or HTTPS
+   * @throws IOException
+   */
+  public static URI getInfoServerWithDefaultHost(String defaultHost,
+      Configuration conf, final String scheme) throws IOException {
+    URI configuredAddr = getInfoServer(null, conf, scheme);
+    String authority = substituteForWildcardAddress(
+        configuredAddr.getAuthority(), defaultHost);
+    return URI.create(scheme + "://" + authority);
+  }
+
+  /**
+   * Determine whether HTTP or HTTPS should be used to connect to the remote
+   * server. Currently the client only connects to the server via HTTPS if the
+   * policy is set to HTTPS_ONLY.
+   *
+   * @return the scheme (HTTP / HTTPS)
+   */
+  public static String getHttpClientScheme(Configuration conf) {
+    HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
+    return policy == HttpConfig.Policy.HTTPS_ONLY ? "https" : "http";
   }
-  
 
   /**
    * Substitute a default host in the case that an address has been configured
@@ -1004,8 +1037,9 @@ public class DFSUtil {
    * @return the substituted address
    * @throws IOException if it is a wildcard address and security is enabled
    */
-  public static String substituteForWildcardAddress(String configuredAddress,
-      String defaultHost) throws IOException {
+  @VisibleForTesting
+  static String substituteForWildcardAddress(String configuredAddress,
+    String defaultHost) throws IOException {
     InetSocketAddress sockAddr = NetUtils.createSocketAddr(configuredAddress);
     InetSocketAddress defaultSockAddr = NetUtils.createSocketAddr(defaultHost
         + ":0");

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java?rev=1548629&r1=1548628&r2=1548629&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java Fri Dec  6 18:01:58 2013
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.na
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.SocketTimeoutException;
+import java.net.URL;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
@@ -79,7 +80,7 @@ public class BackupNode extends NameNode
   /** Name-node RPC address */
   String nnRpcAddress;
   /** Name-node HTTP address */
-  String nnHttpAddress;
+  URL nnHttpAddress;
   /** Checkpoint manager */
   Checkpointer checkpointManager;
   
@@ -313,7 +314,8 @@ public class BackupNode extends NameNode
         NamenodeProtocol.class, UserGroupInformation.getCurrentUser(),
         true).getProxy();
     this.nnRpcAddress = NetUtils.getHostPortString(nnAddress);
-    this.nnHttpAddress = NetUtils.getHostPortString(super.getHttpServerAddress(conf));
+    this.nnHttpAddress = DFSUtil.getInfoServer(nnAddress, conf,
+        DFSUtil.getHttpClientScheme(conf)).toURL();
     // get version and id info from the name-node
     NamespaceInfo nsInfo = null;
     while(!isStopRequested()) {
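
BackupNode now keeps the active NN's web address as a typed java.net.URL, obtained by converting the URI that getInfoServer() returns. URI.toURL() is safe here because the URI is always absolute; a standalone illustration with a made-up address:

    import java.net.MalformedURLException;
    import java.net.URI;
    import java.net.URL;

    public class UriToUrlDemo {
      public static void main(String[] args) throws MalformedURLException {
        URI info = URI.create("http://nn1.example.com:50070");
        // Valid conversion: the URI is absolute and hierarchical.
        URL asUrl = info.toURL();
        System.out.println(asUrl);
      }
    }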

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java?rev=1548629&r1=1548628&r2=1548629&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java Fri Dec  6 18:01:58 2013
@@ -24,11 +24,14 @@ import static org.apache.hadoop.util.Tim
 import java.io.File;
 import java.io.IOException;
 import java.net.InetSocketAddress;
+import java.net.MalformedURLException;
+import java.net.URL;
 import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.protocol.CheckpointCommand;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
@@ -61,6 +64,7 @@ class Checkpointer extends Daemon {
   private String infoBindAddress;
 
   private CheckpointConf checkpointConf;
+  private final Configuration conf;
 
   private BackupImage getFSImage() {
     return (BackupImage)backupNode.getFSImage();
@@ -74,6 +78,7 @@ class Checkpointer extends Daemon {
    * Create a connection to the primary namenode.
    */
   Checkpointer(Configuration conf, BackupNode bnNode)  throws IOException {
+    this.conf = conf;
     this.backupNode = bnNode;
     try {
       initialize(conf);
@@ -274,10 +279,15 @@ class Checkpointer extends Daemon {
         + " New Image Size: " + imageSize);
   }
 
-  private InetSocketAddress getImageListenAddress() {
+  private URL getImageListenAddress() {
     InetSocketAddress httpSocAddr = backupNode.getHttpAddress();
     int httpPort = httpSocAddr.getPort();
-    return new InetSocketAddress(infoBindAddress, httpPort);
+    try {
+      return new URL(DFSUtil.getHttpClientScheme(conf) + "://" + infoBindAddress + ":" + httpPort);
+    } catch (MalformedURLException e) {
+      // Unreachable
+      throw new RuntimeException(e);
+    }
   }
 
   static void rollForwardByApplyingLogs(
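
Because the scheme is now read from configuration, building the listen URL involves a checked MalformedURLException even though the inputs cannot realistically be malformed; the patch wraps it in a RuntimeException. The same pattern in isolation, with invented host and port values:

    import java.net.MalformedURLException;
    import java.net.URL;

    public class ListenUrlDemo {
      static URL listenUrl(String scheme, String host, int port) {
        try {
          return new URL(scheme + "://" + host + ":" + port);
        } catch (MalformedURLException e) {
          // Unreachable for well-formed schemes such as "http" / "https".
          throw new RuntimeException(e);
        }
      }

      public static void main(String[] args) {
        System.out.println(listenUrl("http", "0.0.0.0", 50105));
      }
    }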

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ClusterJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ClusterJspHelper.java?rev=1548629&r1=1548628&r2=1548629&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ClusterJspHelper.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ClusterJspHelper.java Fri Dec  6 18:01:58 2013
@@ -21,6 +21,7 @@ import java.io.BufferedReader;
 import java.io.IOException;
 import java.io.InputStreamReader;
 import java.net.InetSocketAddress;
+import java.net.URI;
 import java.net.URL;
 import java.net.URLConnection;
 import java.util.ArrayList;
@@ -41,7 +42,6 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtil.ConfiguredNNAddress;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
-import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.util.StringUtils;
 import org.codehaus.jackson.JsonNode;
 import org.codehaus.jackson.map.ObjectMapper;
@@ -272,12 +272,13 @@ class ClusterJspHelper {
   static class NamenodeMXBeanHelper {
     private static final ObjectMapper mapper = new ObjectMapper();
     private final String host;
-    private final String httpAddress;
+    private final URI httpAddress;
     
     NamenodeMXBeanHelper(InetSocketAddress addr, Configuration conf)
         throws IOException, MalformedObjectNameException {
       this.host = addr.getHostName();
-      this.httpAddress = DFSUtil.getInfoServer(addr, conf, false);
+      this.httpAddress = DFSUtil.getInfoServer(addr, conf,
+          DFSUtil.getHttpClientScheme(conf));
     }
     
     /** Get the map corresponding to the JSON string */
@@ -356,7 +357,7 @@ class ClusterJspHelper {
       nn.blocksCount = getProperty(props, "TotalBlocks").getLongValue();
       nn.missingBlocksCount = getProperty(props, "NumberOfMissingBlocks")
           .getLongValue();
-      nn.httpAddress = httpAddress;
+      nn.httpAddress = httpAddress.toURL();
       getLiveNodeCount(getProperty(props, "LiveNodes").getValueAsText(), nn);
       getDeadNodeCount(getProperty(props, "DeadNodes").getValueAsText(), nn);
       nn.softwareVersion = getProperty(props, "SoftwareVersion").getTextValue();
@@ -591,12 +592,14 @@ class ClusterJspHelper {
         toXmlItemBlock(doc, "Blocks", Long.toString(nn.blocksCount));
         toXmlItemBlock(doc, "Missing Blocks",
             Long.toString(nn.missingBlocksCount));
-        toXmlItemBlockWithLink(doc, nn.liveDatanodeCount + " (" +
-          nn.liveDecomCount + ")", nn.httpAddress+"/dfsnodelist.jsp?whatNodes=LIVE",
-          "Live Datanode (Decommissioned)");
-        toXmlItemBlockWithLink(doc, nn.deadDatanodeCount + " (" +
-          nn.deadDecomCount + ")", nn.httpAddress+"/dfsnodelist.jsp?whatNodes=DEAD"
-          , "Dead Datanode (Decommissioned)");
+        toXmlItemBlockWithLink(doc, nn.liveDatanodeCount + " ("
+            + nn.liveDecomCount + ")", new URL(nn.httpAddress,
+            "/dfsnodelist.jsp?whatNodes=LIVE"),
+            "Live Datanode (Decommissioned)");
+        toXmlItemBlockWithLink(doc, nn.deadDatanodeCount + " ("
+            + nn.deadDecomCount + ")", new URL(nn.httpAddress,
+            "/dfsnodelist.jsp?whatNodes=DEAD"),
+            "Dead Datanode (Decommissioned)");
         toXmlItemBlock(doc, "Software Version", nn.softwareVersion);
         doc.endTag(); // node
       }
@@ -625,7 +628,7 @@ class ClusterJspHelper {
     int liveDecomCount = 0;
     int deadDatanodeCount = 0;
     int deadDecomCount = 0;
-    String httpAddress = null;
+    URL httpAddress = null;
     String softwareVersion = "";
   }
 
@@ -763,7 +766,8 @@ class ClusterJspHelper {
                 .equals(DecommissionStates.UNKNOWN.toString()))) {
           doc.startTag("node");
           // dn
-          toXmlItemBlockWithLink(doc, dnhost, (dnhost+":"+httpPort),"DataNode");
+          toXmlItemBlockWithLink(doc, dnhost, new URL("http", dnhost, httpPort,
+              ""), "DataNode");
 
           // overall status first
           toXmlItemBlock(doc, OVERALL_STATUS, overallStatus);
@@ -823,11 +827,11 @@ class ClusterJspHelper {
    * link="http://hostname:50070" />
    */
   private static void toXmlItemBlockWithLink(XMLOutputter doc, String value,
-      String url, String label) throws IOException {
+      URL url, String label) throws IOException {
     doc.startTag("item");
     doc.attribute("label", label);
     doc.attribute("value", value);
-    doc.attribute("link", "///" + url);
+    doc.attribute("link", url.toString());
     doc.endTag(); // item
   }
 
@@ -885,7 +889,7 @@ class ClusterJspHelper {
     return out.toString();
   }
 
-  private static String queryMbean(String httpAddress, Configuration conf) 
+  private static String queryMbean(URI httpAddress, Configuration conf)
     throws IOException {
     /**
      * Although the other namenode might support HTTPS, it is fundamentally
@@ -896,7 +900,7 @@ class ClusterJspHelper {
      *
      * As a result, we just hard code the connection as an HTTP connection.
      */
-    URL url = new URL("http://" + httpAddress + JMX_QRY);
+    URL url = new URL(httpAddress.toURL(), JMX_QRY);
     return readOutput(url);
   }
   /**
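
Two URL constructions carry this section: datanode links use the four-argument (protocol, host, port, file) constructor, and the JMX query resolves a relative path against the namenode's base address, which also drops the scheme-relative "///" prefix the old string concatenation emitted. Both in isolation, with hypothetical hostnames (the real query path is the JMX_QRY constant):

    import java.net.MalformedURLException;
    import java.net.URL;

    public class JspLinkDemo {
      public static void main(String[] args) throws MalformedURLException {
        // protocol / host / port / file form, as used for datanode links
        URL dnLink = new URL("http", "dn1.example.com", 50075, "");
        // base + relative spec, as used for the JMX query
        URL base = new URL("http://nn1.example.com:50070");
        URL jmx = new URL(base, "/jmx");
        System.out.println(dnLink + " " + jmx);
      }
    }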

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java?rev=1548629&r1=1548628&r2=1548629&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java Fri Dec  6 18:01:58 2013
@@ -21,6 +21,7 @@ import java.security.PrivilegedException
 import java.util.*;
 import java.io.*;
 import java.net.InetSocketAddress;
+import java.net.URL;
 
 import javax.servlet.ServletContext;
 import javax.servlet.ServletException;
@@ -31,10 +32,8 @@ import javax.servlet.http.HttpServletRes
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSUtil;
@@ -87,8 +86,8 @@ public class GetImageServlet extends Htt
       ServletContext context = getServletContext();
       final FSImage nnImage = NameNodeHttpServer.getFsImageFromContext(context);
       final GetImageParams parsedParams = new GetImageParams(request, response);
-      final Configuration conf = 
-        (Configuration)getServletContext().getAttribute(JspHelper.CURRENT_CONF);
+      final Configuration conf = (Configuration) context
+          .getAttribute(JspHelper.CURRENT_CONF);
       
       if (UserGroupInformation.isSecurityEnabled() && 
           !isValidRequestor(context, request.getUserPrincipal().getName(), conf)) {
@@ -163,7 +162,7 @@ public class GetImageServlet extends Htt
               // issue a HTTP get request to download the new fsimage 
               MD5Hash downloadImageDigest =
                 TransferFsImage.downloadImageToStorage(
-                        parsedParams.getInfoServer(), txid,
+                        parsedParams.getInfoServer(conf), txid,
                         nnImage.getStorage(), true);
               nnImage.saveDigestAndRenameCheckpointImage(txid, downloadImageDigest);
               
@@ -309,7 +308,9 @@ public class GetImageServlet extends Htt
   }
   
   static String getParamStringToPutImage(long txid,
-      InetSocketAddress imageListenAddress, Storage storage) {
+      URL url, Storage storage) {
+    InetSocketAddress imageListenAddress = NetUtils.createSocketAddr(url
+        .getAuthority());
     String machine = !imageListenAddress.isUnresolved()
         && imageListenAddress.getAddress().isAnyLocalAddress() ? null
         : imageListenAddress.getHostName();
@@ -419,11 +420,11 @@ public class GetImageServlet extends Htt
       return isPutImage;
     }
     
-    String getInfoServer() throws IOException{
+    URL getInfoServer(Configuration conf) throws IOException {
       if (machineName == null || remoteport == 0) {
-        throw new IOException ("MachineName and port undefined");
+        throw new IOException("MachineName and port undefined");
       }
-      return machineName + ":" + remoteport;
+      return new URL(DFSUtil.getHttpClientScheme(conf), machineName, remoteport, "");
     }
     
     boolean shouldFetchLatest() {
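
getParamStringToPutImage() now receives the checkpointer's endpoint as a URL and recovers the socket address from its authority via NetUtils. A small sketch of that round trip, using a hypothetical endpoint:

    import java.net.InetSocketAddress;
    import java.net.URL;

    import org.apache.hadoop.net.NetUtils;

    public class AuthorityDemo {
      public static void main(String[] args) throws Exception {
        URL endpoint = new URL("http", "snn.example.com", 50090, "");
        // "snn.example.com:50090" parsed back into a socket address
        InetSocketAddress addr =
            NetUtils.createSocketAddr(endpoint.getAuthority());
        System.out.println(addr.getHostName() + ":" + addr.getPort());
      }
    }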

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java?rev=1548629&r1=1548628&r2=1548629&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java Fri Dec  6 18:01:58 2013
@@ -29,7 +29,9 @@ import java.io.File;
 import java.io.FilenameFilter;
 import java.io.IOException;
 import java.net.InetSocketAddress;
+import java.net.MalformedURLException;
 import java.net.URI;
+import java.net.URL;
 import java.security.PrivilegedAction;
 import java.security.PrivilegedExceptionAction;
 import java.util.Collection;
@@ -111,7 +113,7 @@ public class SecondaryNameNode implement
   private final long starttime = Time.now();
   private volatile long lastCheckpointTime = 0;
 
-  private String fsName;
+  private URL fsName;
   private CheckpointStorage checkpointImage;
 
   private NamenodeProtocol namenode;
@@ -404,7 +406,7 @@ public class SecondaryNameNode implement
    * @throws IOException
    */
   static boolean downloadCheckpointFiles(
-      final String nnHostPort,
+      final URL nnHostPort,
       final FSImage dstImage,
       final CheckpointSignature sig,
       final RemoteEditLogManifest manifest
@@ -467,25 +469,33 @@ public class SecondaryNameNode implement
   /**
    * Returns the Jetty server that the Namenode is listening on.
    */
-  private String getInfoServer() throws IOException {
+  private URL getInfoServer() throws IOException {
     URI fsName = FileSystem.getDefaultUri(conf);
     if (!HdfsConstants.HDFS_URI_SCHEME.equalsIgnoreCase(fsName.getScheme())) {
       throw new IOException("This is not a DFS");
     }
 
-    String configuredAddress = DFSUtil.getInfoServer(null, conf, false);
-    String address = DFSUtil.substituteForWildcardAddress(configuredAddress,
-        fsName.getHost());
-    LOG.debug("Will connect to NameNode at HTTP address: " + address);
-    return address;
+    final String scheme = DFSUtil.getHttpClientScheme(conf);
+    URI address = DFSUtil.getInfoServerWithDefaultHost(fsName.getHost(), conf,
+        scheme);
+    LOG.debug("Will connect to NameNode at " + address);
+    return address.toURL();
   }
   
   /**
    * Return the host:port of where this SecondaryNameNode is listening
    * for image transfers
    */
-  private InetSocketAddress getImageListenAddress() {
-    return new InetSocketAddress(infoBindAddress, infoPort);
+  private URL getImageListenAddress() {
+    StringBuilder sb = new StringBuilder()
+        .append(DFSUtil.getHttpClientScheme(conf)).append("://")
+        .append(infoBindAddress).append(":").append(infoPort);
+    try {
+      return new URL(sb.toString());
+    } catch (MalformedURLException e) {
+      // Unreachable
+      throw new RuntimeException(e);
+    }
   }
 
   /**

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java?rev=1548629&r1=1548628&r2=1548629&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java Fri Dec  6 18:01:58 2013
@@ -17,13 +17,18 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import java.io.*;
-import java.net.*;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.HttpURLConnection;
+import java.net.InetSocketAddress;
+import java.net.URL;
 import java.security.DigestInputStream;
 import java.security.MessageDigest;
 import java.util.ArrayList;
 import java.util.List;
-import java.lang.Math;
 
 import javax.servlet.ServletOutputStream;
 import javax.servlet.ServletResponse;
@@ -41,14 +46,16 @@ import org.apache.hadoop.util.Time;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.server.common.StorageErrorReporter;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
+import org.apache.hadoop.hdfs.server.common.StorageErrorReporter;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
 import org.apache.hadoop.hdfs.web.URLConnectionFactory;
 import org.apache.hadoop.io.MD5Hash;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.util.Time;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Lists;
@@ -76,15 +83,15 @@ public class TransferFsImage {
 
   private static final Log LOG = LogFactory.getLog(TransferFsImage.class);
   
-  public static void downloadMostRecentImageToDirectory(String fsName,
+  public static void downloadMostRecentImageToDirectory(URL infoServer,
       File dir) throws IOException {
     String fileId = GetImageServlet.getParamStringForMostRecentImage();
-    getFileClient(fsName, fileId, Lists.newArrayList(dir),
+    getFileClient(infoServer, fileId, Lists.newArrayList(dir),
         null, false);
   }
 
   public static MD5Hash downloadImageToStorage(
-      String fsName, long imageTxId, Storage dstStorage, boolean needDigest)
+      URL fsName, long imageTxId, Storage dstStorage, boolean needDigest)
       throws IOException {
     String fileid = GetImageServlet.getParamStringForImage(
         imageTxId, dstStorage);
@@ -102,7 +109,7 @@ public class TransferFsImage {
     return hash;
   }
   
-  static void downloadEditsToStorage(String fsName, RemoteEditLog log,
+  static void downloadEditsToStorage(URL fsName, RemoteEditLog log,
       NNStorage dstStorage) throws IOException {
     assert log.getStartTxId() > 0 && log.getEndTxId() > 0 :
       "bad log: " + log;
@@ -156,17 +163,17 @@ public class TransferFsImage {
    * Requests that the NameNode download an image from this node.
    *
    * @param fsName the http address for the remote NN
-   * @param imageListenAddress the host/port where the local node is running an
+   * @param myNNAddress the host/port where the local node is running an
    *                           HTTPServer hosting GetImageServlet
    * @param storage the storage directory to transfer the image from
    * @param txid the transaction ID of the image to be uploaded
    */
-  public static void uploadImageFromStorage(String fsName,
-      InetSocketAddress imageListenAddress,
+  public static void uploadImageFromStorage(URL fsName,
+      URL myNNAddress,
       Storage storage, long txid) throws IOException {
     
     String fileid = GetImageServlet.getParamStringToPutImage(
-        txid, imageListenAddress, storage);
+        txid, myNNAddress, storage);
     // this doesn't directly upload an image, but rather asks the NN
     // to connect back to the 2NN to download the specified image.
     try {
@@ -244,17 +251,11 @@ public class TransferFsImage {
    *                   this storage object will be notified. 
    * @Return a digest of the received file if getChecksum is true
    */
-  static MD5Hash getFileClient(String nnHostPort,
+  static MD5Hash getFileClient(URL infoServer,
       String queryString, List<File> localPaths,
       Storage dstStorage, boolean getChecksum) throws IOException {
-
-    String str = HttpConfig.getSchemePrefix() + nnHostPort + "/getimage?" +
-        queryString;
-    LOG.info("Opening connection to " + str);
-    //
-    // open connection to remote server
-    //
-    URL url = new URL(str);
+    URL url = new URL(infoServer, "/getimage?" + queryString);
+    LOG.info("Opening connection to " + url);
     return doGetUrl(url, localPaths, dstStorage, getChecksum);
   }
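
getFileClient() now forms the transfer URL by resolving the servlet path against the typed info-server URL rather than prefixing a scheme onto a host:port string. The resolution behaves as follows (the base address is invented; the query string matches the one used in the tests below):

    import java.net.MalformedURLException;
    import java.net.URL;

    public class GetImageUrlDemo {
      public static void main(String[] args) throws MalformedURLException {
        URL infoServer = new URL("https://nn1.example.com:50470");
        URL getImage = new URL(infoServer, "/getimage?getimage=1&txid=0");
        // -> https://nn1.example.com:50470/getimage?getimage=1&txid=0
        System.out.println(getImage);
      }
    }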
   

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java?rev=1548629&r1=1548628&r2=1548629&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java Fri Dec  6 18:01:58 2013
@@ -23,6 +23,7 @@ import static org.apache.hadoop.hdfs.DFS
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.URI;
+import java.net.URL;
 import java.security.PrivilegedAction;
 import java.util.Collection;
 import java.util.List;
@@ -69,7 +70,7 @@ public class BootstrapStandby implements
   private String nnId;
   private String otherNNId;
 
-  private String otherHttpAddr;
+  private URL otherHttpAddr;
   private InetSocketAddress otherIpcAddr;
   private Collection<URI> dirsToFormat;
   private List<URI> editUrisToFormat;
@@ -179,6 +180,7 @@ public class BootstrapStandby implements
     // Check with the user before blowing away data.
     if (!Storage.confirmFormat(storage.dirIterable(null),
             force, interactive)) {
+      storage.close();
       return ERR_CODE_ALREADY_FORMATTED;
     }
     
@@ -203,7 +205,7 @@ public class BootstrapStandby implements
 
     // Download that checkpoint into our storage directories.
     MD5Hash hash = TransferFsImage.downloadImageToStorage(
-        otherHttpAddr.toString(), imageTxId,
+        otherHttpAddr, imageTxId,
         storage, true);
     image.saveDigestAndRenameCheckpointImage(imageTxId, hash);
     return 0;
@@ -276,11 +278,10 @@ public class BootstrapStandby implements
         "Could not determine valid IPC address for other NameNode (%s)" +
         ", got: %s", otherNNId, otherIpcAddr);
 
-    otherHttpAddr = DFSUtil.getInfoServer(null, otherNode, false);
-    otherHttpAddr = DFSUtil.substituteForWildcardAddress(otherHttpAddr,
-        otherIpcAddr.getHostName());
-    
-    
+    final String scheme = DFSUtil.getHttpClientScheme(conf);
+    otherHttpAddr = DFSUtil.getInfoServerWithDefaultHost(
+        otherIpcAddr.getHostName(), otherNode, scheme).toURL();
+
     dirsToFormat = FSNamesystem.getNamespaceDirs(conf);
     editUrisToFormat = FSNamesystem.getNamespaceEditsDirs(
         conf, false);

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java?rev=1548629&r1=1548628&r2=1548629&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java Fri Dec  6 18:01:58 2013
@@ -20,7 +20,8 @@ package org.apache.hadoop.hdfs.server.na
 import static org.apache.hadoop.util.Time.now;
 
 import java.io.IOException;
-import java.net.InetSocketAddress;
+import java.net.URI;
+import java.net.URL;
 import java.security.PrivilegedAction;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutionException;
@@ -43,7 +44,6 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.SaveNamespaceCancelledException;
 import org.apache.hadoop.hdfs.server.namenode.TransferFsImage;
 import org.apache.hadoop.hdfs.util.Canceler;
-import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 
@@ -66,8 +66,8 @@ public class StandbyCheckpointer {
   private long lastCheckpointTime;
   private final CheckpointerThread thread;
   private final ThreadFactory uploadThreadFactory;
-  private String activeNNAddress;
-  private InetSocketAddress myNNAddress;
+  private URL activeNNAddress;
+  private URL myNNAddress;
 
   private Object cancelLock = new Object();
   private Canceler canceler;
@@ -94,7 +94,7 @@ public class StandbyCheckpointer {
    */
   private void setNameNodeAddresses(Configuration conf) throws IOException {
     // Look up our own address.
-    String myAddrString = getHttpAddress(conf);
+    myNNAddress = getHttpAddress(conf);
 
     // Look up the active node's address
     Configuration confForActive = HAUtil.getConfForOtherNode(conf);
@@ -103,32 +103,22 @@ public class StandbyCheckpointer {
     // Sanity-check.
     Preconditions.checkArgument(checkAddress(activeNNAddress),
         "Bad address for active NN: %s", activeNNAddress);
-    Preconditions.checkArgument(checkAddress(myAddrString),
-        "Bad address for standby NN: %s", myAddrString);
-    myNNAddress = NetUtils.createSocketAddr(myAddrString);
+    Preconditions.checkArgument(checkAddress(myNNAddress),
+        "Bad address for standby NN: %s", myNNAddress);
   }
   
-  private String getHttpAddress(Configuration conf) throws IOException {
-    String configuredAddr = DFSUtil.getInfoServer(null, conf, false);
-    
-    // Use the hostname from the RPC address as a default, in case
-    // the HTTP address is configured to 0.0.0.0.
-    String hostnameFromRpc = NameNode.getServiceAddress(
-        conf, true).getHostName();
-    try {
-      return DFSUtil.substituteForWildcardAddress(
-          configuredAddr, hostnameFromRpc);
-    } catch (IOException e) {
-      throw new IllegalArgumentException(e);
-    }
+  private URL getHttpAddress(Configuration conf) throws IOException {
+    final String scheme = DFSUtil.getHttpClientScheme(conf);
+    String defaultHost = NameNode.getServiceAddress(conf, true).getHostName();
+    URI addr = DFSUtil.getInfoServerWithDefaultHost(defaultHost, conf, scheme);
+    return addr.toURL();
   }
   
   /**
    * Ensure that the given address is valid and has a port
    * specified.
    */
-  private boolean checkAddress(String addrStr) {
-    InetSocketAddress addr = NetUtils.createSocketAddr(addrStr);
+  private static boolean checkAddress(URL addr) {
     return addr.getPort() != 0;
   }
 
@@ -344,7 +334,7 @@ public class StandbyCheckpointer {
   }
 
   @VisibleForTesting
-  String getActiveNNAddress() {
+  URL getActiveNNAddress() {
     return activeNNAddress;
   }
 }
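
checkAddress() now validates the port straight off the URL. Note that java.net.URL.getPort() returns -1 when the URL carries no explicit port, so only a literal ":0" fails the check:

    import java.net.MalformedURLException;
    import java.net.URL;

    public class PortCheckDemo {
      public static void main(String[] args) throws MalformedURLException {
        URL explicit = new URL("http://nn1.example.com:50070");
        URL implicit = new URL("http://nn1.example.com");
        System.out.println(explicit.getPort()); // 50070
        System.out.println(implicit.getPort()); // -1: no explicit port
      }
    }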

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java?rev=1548629&r1=1548628&r2=1548629&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java Fri Dec  6 18:01:58 2013
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.tools;
 import java.io.File;
 import java.io.IOException;
 import java.net.InetSocketAddress;
+import java.net.URL;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Collections;
@@ -47,9 +48,9 @@ import org.apache.hadoop.hdfs.NameNodePr
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.TransferFsImage;
 import org.apache.hadoop.ipc.RPC;
@@ -547,8 +548,10 @@ public class DFSAdmin extends FsShell {
    * @throws IOException
    */
   public int fetchImage(final String[] argv, final int idx) throws IOException {
-    final String infoServer = DFSUtil.getInfoServer(
-        HAUtil.getAddressOfActive(getDFS()), getConf(), false);
+    Configuration conf = getConf();
+    final URL infoServer = DFSUtil.getInfoServer(
+        HAUtil.getAddressOfActive(getDFS()), conf,
+        DFSUtil.getHttpClientScheme(conf)).toURL();
     SecurityUtil.doAsCurrentUser(new PrivilegedExceptionAction<Void>() {
       @Override
       public Void run() throws Exception {

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java?rev=1548629&r1=1548628&r2=1548629&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java Fri Dec  6 18:01:58 2013
@@ -22,6 +22,7 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
 import java.io.PrintStream;
+import java.net.URI;
 import java.net.URL;
 import java.net.URLConnection;
 import java.net.URLEncoder;
@@ -37,7 +38,6 @@ import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck;
 import org.apache.hadoop.hdfs.web.URLConnectionFactory;
-import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.util.StringUtils;
@@ -227,7 +227,7 @@ public class DFSck extends Configured im
    * @return Returns http address or null if failure.
    * @throws IOException if we can't determine the active NN address
    */
-  private String getCurrentNamenodeAddress() throws IOException {
+  private URI getCurrentNamenodeAddress() throws IOException {
     //String nnAddress = null;
     Configuration conf = getConf();
 
@@ -245,19 +245,21 @@ public class DFSck extends Configured im
       return null;
     }
     
-    return DFSUtil.getInfoServer(HAUtil.getAddressOfActive(fs), conf, false);
+    return DFSUtil.getInfoServer(HAUtil.getAddressOfActive(fs), conf,
+        DFSUtil.getHttpClientScheme(conf));
   }
 
   private int doWork(final String[] args) throws IOException {
-    final StringBuilder url = new StringBuilder(HttpConfig.getSchemePrefix());
+    final StringBuilder url = new StringBuilder();
     
-    String namenodeAddress = getCurrentNamenodeAddress();
+    URI namenodeAddress = getCurrentNamenodeAddress();
     if (namenodeAddress == null) {
       //Error message already output in {@link #getCurrentNamenodeAddress()}
       System.err.println("DFSck exiting.");
       return 0;
     }
-    url.append(namenodeAddress);
+
+    url.append(namenodeAddress.toString());
     System.err.println("Connecting to namenode via " + url.toString());
     
     url.append("/fsck?ugi=").append(ugi.getShortUserName());

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java?rev=1548629&r1=1548628&r2=1548629&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java Fri Dec  6 18:01:58 2013
@@ -19,7 +19,6 @@
 package org.apache.hadoop.hdfs;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY;
@@ -431,20 +430,22 @@ public class TestDFSUtil {
   }
 
   @Test
-  public void testGetInfoServer() throws IOException {
+  public void testGetInfoServer() throws IOException, URISyntaxException {
     HdfsConfiguration conf = new HdfsConfiguration();
-    conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
-    UserGroupInformation.setConfiguration(conf);
     
-    String httpsport = DFSUtil.getInfoServer(null, conf, true);
-    assertEquals("0.0.0.0:"+DFS_NAMENODE_HTTPS_PORT_DEFAULT, httpsport);
-    
-    String httpport = DFSUtil.getInfoServer(null, conf, false);
-    assertEquals("0.0.0.0:"+DFS_NAMENODE_HTTP_PORT_DEFAULT, httpport);
-    
-    String httpAddress = DFSUtil.getInfoServer(new InetSocketAddress(
-        "localhost", 8020), conf, false);
-    assertEquals("localhost:" + DFS_NAMENODE_HTTP_PORT_DEFAULT, httpAddress);
+    URI httpsport = DFSUtil.getInfoServer(null, conf, "https");
+    assertEquals(new URI("https", null, "0.0.0.0",
+        DFS_NAMENODE_HTTPS_PORT_DEFAULT, null, null, null), httpsport);
+    
+    URI httpport = DFSUtil.getInfoServer(null, conf, "http");
+    assertEquals(new URI("http", null, "0.0.0.0",
+        DFS_NAMENODE_HTTP_PORT_DEFAULT, null, null, null), httpport);
+
+    URI httpAddress = DFSUtil.getInfoServer(new InetSocketAddress(
+        "localhost", 8020), conf, "http");
+    assertEquals(
+        URI.create("http://localhost:" + DFS_NAMENODE_HTTP_PORT_DEFAULT),
+        httpAddress);
   }
   
   @Test

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java?rev=1548629&r1=1548628&r2=1548629&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java Fri Dec  6 18:01:58 2013
@@ -34,6 +34,7 @@ import java.io.RandomAccessFile;
 import java.lang.management.ManagementFactory;
 import java.net.InetSocketAddress;
 import java.net.URI;
+import java.net.URL;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
@@ -71,7 +72,6 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
 import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
@@ -218,6 +218,7 @@ public class TestCheckpoint {
     assertTrue("Removed directory wasn't what was expected",
                listRsd.size() > 0 && listRsd.get(listRsd.size() - 1).getRoot().
                toString().indexOf("storageDirToCheck") != -1);
+    nnStorage.close();
   }
 
   /*
@@ -1947,8 +1948,9 @@ public class TestCheckpoint {
           .format(true).build();
       
       NamenodeProtocols nn = cluster.getNameNodeRpc();
-      String fsName = NetUtils.getHostPortString(
-          cluster.getNameNode().getHttpAddress());
+      URL fsName = DFSUtil.getInfoServer(
+          cluster.getNameNode().getServiceRpcAddress(), conf,
+          DFSUtil.getHttpClientScheme(conf)).toURL();
 
       // Make a finalized log on the server side. 
       nn.rollEditLog();
@@ -1980,8 +1982,7 @@ public class TestCheckpoint {
       }
 
       try {
-        InetSocketAddress fakeAddr = new InetSocketAddress(1);
-        TransferFsImage.uploadImageFromStorage(fsName, fakeAddr, dstImage, 0);
+        TransferFsImage.uploadImageFromStorage(fsName, new URL("http://localhost:1234"), dstImage, 0);
         fail("Storage info was not verified");
       } catch (IOException ioe) {
         String msg = StringUtils.stringifyException(ioe);

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java?rev=1548629&r1=1548628&r2=1548629&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java Fri Dec  6 18:01:58 2013
@@ -34,11 +34,11 @@ import javax.servlet.http.HttpServletReq
 import javax.servlet.http.HttpServletResponse;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.http.HttpServer;
 import org.apache.hadoop.http.HttpServerFunctionalTest;
-import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.Test;
@@ -66,8 +66,9 @@ public class TestTransferFsImage {
         new File("/xxxxx-does-not-exist/blah"));
        
     try {
-      String fsName = NetUtils.getHostPortString(
-          cluster.getNameNode().getHttpAddress());
+      URL fsName = DFSUtil.getInfoServer(
+          cluster.getNameNode().getServiceRpcAddress(), conf,
+          DFSUtil.getHttpClientScheme(conf)).toURL();
       String id = "getimage=1&txid=0";
 
       TransferFsImage.getFileClient(fsName, id, localPath, mockStorage, false);      
@@ -98,8 +99,10 @@ public class TestTransferFsImage {
         );
        
     try {
-      String fsName = NetUtils.getHostPortString(
-          cluster.getNameNode().getHttpAddress());
+      URL fsName = DFSUtil.getInfoServer(
+          cluster.getNameNode().getServiceRpcAddress(), conf,
+          DFSUtil.getHttpClientScheme(conf)).toURL();
+
       String id = "getimage=1&txid=0";
 
       TransferFsImage.getFileClient(fsName, id, localPaths, mockStorage, false);      
@@ -123,7 +126,7 @@ public class TestTransferFsImage {
       URL serverURL = HttpServerFunctionalTest.getServerURL(testServer);
       TransferFsImage.timeout = 2000;
       try {
-        TransferFsImage.getFileClient(serverURL.getAuthority(), "txid=1", null,
+        TransferFsImage.getFileClient(serverURL, "txid=1", null,
             null, false);
         fail("TransferImage Should fail with timeout");
       } catch (SocketTimeoutException e) {

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAConfiguration.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAConfiguration.java?rev=1548629&r1=1548628&r2=1548629&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAConfiguration.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAConfiguration.java Fri Dec  6 18:01:58 2013
@@ -24,6 +24,7 @@ import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.net.URI;
+import java.net.URL;
 import java.util.Collection;
 
 import org.apache.hadoop.conf.Configuration;
@@ -86,7 +87,8 @@ public class TestHAConfiguration {
     // 0.0.0.0, it should substitute the address from the RPC configuration
     // above.
     StandbyCheckpointer checkpointer = new StandbyCheckpointer(conf, fsn);
-    assertEquals("1.2.3.2:" + DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT,
+    assertEquals(new URL("http", "1.2.3.2",
+        DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT, ""),
         checkpointer.getActiveNNAddress());
   }