Posted to hdfs-commits@hadoop.apache.org by jg...@apache.org on 2010/07/09 03:11:33 UTC

svn commit: r962380 - in /hadoop/hdfs/trunk: ./ src/java/org/apache/hadoop/hdfs/server/namenode/

Author: jghoman
Date: Fri Jul  9 01:11:33 2010
New Revision: 962380

URL: http://svn.apache.org/viewvc?rev=962380&view=rev
Log:
HDFS-1006. getImage/putImage http requests should be https for the case of security enabled.
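
In other words: with Kerberos security enabled, the fsimage/edits transfers between the NameNode and the SecondaryNameNode move from plain http to Kerberized-SSL https. A minimal, standalone sketch of the scheme selection this patch applies in TransferFsImage.getFileClient (fsName being the NameNode's host:port string):

    import org.apache.hadoop.security.UserGroupInformation;

    class SchemeSketch {
      // https when security is on, plain http otherwise -- the same test
      // the patch adds before building the /getimage URL.
      static String imageUrl(String fsName, String query) {
        String proto = UserGroupInformation.isSecurityEnabled() ? "https://" : "http://";
        return proto + fsName + "/getimage?" + query;
      }
    }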

Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=962380&r1=962379&r2=962380&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Fri Jul  9 01:11:33 2010
@@ -15,6 +15,9 @@ Trunk (unreleased changes)
 
     HDFS-1005. Fsck security. (borya and Kan Zhang via jghoman)
 
+    HDFS-1006. getImage/putImage http requests should be https for the case 
+    of security enabled. (borya and jghoman via jghoman)
+
   IMPROVEMENTS
 
     HDFS-1096. fix for prev. commit. (boryas)

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java?rev=962380&r1=962379&r2=962380&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java Fri Jul  9 01:11:33 2010
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import java.security.PrivilegedExceptionAction;
 import java.util.*;
 import java.io.*;
 import javax.servlet.ServletContext;
@@ -26,6 +27,7 @@ import javax.servlet.http.HttpServletReq
 import javax.servlet.http.HttpServletResponse;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 
 /**
@@ -38,33 +40,41 @@ public class GetImageServlet extends Htt
   private static final long serialVersionUID = -7669068179452648952L;
 
   @SuppressWarnings("unchecked")
-  public void doGet(HttpServletRequest request,
-                    HttpServletResponse response
+  public void doGet(final HttpServletRequest request,
+                    final HttpServletResponse response
                     ) throws ServletException, IOException {
     Map<String,String[]> pmap = request.getParameterMap();
     try {
       ServletContext context = getServletContext();
-      FSImage nnImage = (FSImage)context.getAttribute("name.system.image");
-      TransferFsImage ff = new TransferFsImage(pmap, request, response);
-      if (ff.getImage()) {
-        response.setHeader(TransferFsImage.CONTENT_LENGTH,
-          String.valueOf(nnImage.getFsImageName().length()));
-        // send fsImage
-        TransferFsImage.getFileServer(response.getOutputStream(),
-                                      nnImage.getFsImageName()); 
-      } else if (ff.getEdit()) {
-        response.setHeader(TransferFsImage.CONTENT_LENGTH,
-          String.valueOf(nnImage.getFsEditName().length()));
-        // send edits
-        TransferFsImage.getFileServer(response.getOutputStream(),
-                                      nnImage.getFsEditName());
-      } else if (ff.putImage()) {
-        // issue a HTTP get request to download the new fsimage 
-        nnImage.validateCheckpointUpload(ff.getToken());
-        TransferFsImage.getFileClient(ff.getInfoServer(), "getimage=1", 
-                                      nnImage.getFsImageNameCheckpoint());
-        nnImage.checkpointUploadDone();
-      }
+      final FSImage nnImage = (FSImage)context.getAttribute("name.system.image");
+      final TransferFsImage ff = new TransferFsImage(pmap, request, response);
+      UserGroupInformation.getCurrentUser().doAs(new PrivilegedExceptionAction<Void>() {
+
+        @Override
+        public Void run() throws Exception {
+          if (ff.getImage()) {
+            response.setHeader(TransferFsImage.CONTENT_LENGTH,
+                String.valueOf(nnImage.getFsImageName().length()));
+            // send fsImage
+            TransferFsImage.getFileServer(response.getOutputStream(),
+                nnImage.getFsImageName()); 
+          } else if (ff.getEdit()) {
+            response.setHeader(TransferFsImage.CONTENT_LENGTH,
+                String.valueOf(nnImage.getFsEditName().length()));
+            // send edits
+            TransferFsImage.getFileServer(response.getOutputStream(),
+                nnImage.getFsEditName());
+          } else if (ff.putImage()) {
+            // issue a HTTP get request to download the new fsimage 
+            nnImage.validateCheckpointUpload(ff.getToken());
+            TransferFsImage.getFileClient(ff.getInfoServer(), "getimage=1", 
+                nnImage.getFsImageNameCheckpoint());
+            nnImage.checkpointUploadDone();
+          }
+          return null;
+        }
+      });
+      
     } catch (Exception ie) {
       String errMsg = "GetImage failed. " + StringUtils.stringifyException(ie);
       response.sendError(HttpServletResponse.SC_GONE, errMsg);

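The servlet's transfer logic now runs inside a doAs block, so it executes under the authenticated caller's security context rather than the server's. A condensed sketch of that pattern, with the transfer body left as a placeholder:

    import java.io.IOException;
    import java.security.PrivilegedExceptionAction;
    import org.apache.hadoop.security.UserGroupInformation;

    class DoAsSketch {
      static void serveAsCaller() throws IOException, InterruptedException {
        // Everything inside run() happens with the calling user's credentials.
        UserGroupInformation.getCurrentUser().doAs(
            new PrivilegedExceptionAction<Void>() {
              @Override
              public Void run() throws Exception {
                // placeholder: stream fsimage/edits, or fetch the uploaded image
                return null;
              }
            });
      }
    }
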
Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java?rev=962380&r1=962379&r2=962380&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java Fri Jul  9 01:11:33 2010
@@ -21,6 +21,7 @@ import java.io.File;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.URI;
+import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Date;
@@ -46,7 +47,9 @@ import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.metrics.jvm.JvmMetrics;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.Krb5AndCertsSslSocketConnector;
 import org.apache.hadoop.security.UserGroupInformation;
+
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.StringUtils;
 
@@ -82,6 +85,7 @@ public class SecondaryNameNode implement
   private volatile boolean shouldRun;
   private HttpServer infoServer;
   private int infoPort;
+  private int imagePort;
   private String infoBindAddress;
 
   private Collection<URI> checkpointDirs;
@@ -125,7 +129,7 @@ public class SecondaryNameNode implement
   /**
    * Initialize SecondaryNameNode.
    */
-  private void initialize(Configuration conf) throws IOException {
+  private void initialize(final Configuration conf) throws IOException {
     // initiate Java VM metrics
     JvmMetrics.init("SecondaryNameNode", conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY));
     
@@ -154,23 +158,64 @@ public class SecondaryNameNode implement
                                   DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_SIZE_DEFAULT);
 
     // initialize the webserver for uploading files.
-    InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(
-        conf.get(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
-                 DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT));
-    infoBindAddress = infoSocAddr.getHostName();
-    int tmpInfoPort = infoSocAddr.getPort();
-    infoServer = new HttpServer("secondary", infoBindAddress, tmpInfoPort,
-        tmpInfoPort == 0, conf);
-    infoServer.setAttribute("secondary.name.node", this);
-    infoServer.setAttribute("name.system.image", checkpointImage);
-    this.infoServer.setAttribute("name.conf", conf);
-    infoServer.addInternalServlet("getimage", "/getimage", GetImageServlet.class);
-    infoServer.start();
+    // Kerberized SSL servers must be run from the host principal...
+    DFSUtil.login(conf, DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, 
+        DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY);
+    UserGroupInformation ugi = UserGroupInformation.getLoginUser();
+    try {
+      infoServer = ugi.doAs(new PrivilegedExceptionAction<HttpServer>() {
+
+        @Override
+        public HttpServer run() throws IOException, InterruptedException {
+          LOG.info("Starting web server as: " +
+              UserGroupInformation.getLoginUser().getUserName());
+
+          InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(
+              conf.get(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
+                       DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT));
+
+          infoBindAddress = infoSocAddr.getHostName();
+          int tmpInfoPort = infoSocAddr.getPort();
+          infoServer = new HttpServer("secondary", infoBindAddress, tmpInfoPort,
+              tmpInfoPort == 0, conf);
+          
+          if(UserGroupInformation.isSecurityEnabled()) {
+            System.setProperty("https.cipherSuites", 
+                Krb5AndCertsSslSocketConnector.KRB5_CIPHER_SUITES[0]);
+            InetSocketAddress secInfoSocAddr = 
+              NetUtils.createSocketAddr(infoBindAddress + ":"+ conf.get(
+                "dfs.secondary.https.port", infoBindAddress + ":" + 0));
+            imagePort = secInfoSocAddr.getPort();
+            infoServer.addSslListener(secInfoSocAddr, conf, false, true);
+          }
+          
+          infoServer.setAttribute("secondary.name.node", this);
+          infoServer.setAttribute("name.system.image", checkpointImage);
+          infoServer.setAttribute("name.conf", conf);
+          infoServer.addInternalServlet("getimage", "/getimage",
+              GetImageServlet.class, true);
+          infoServer.start();
+          return infoServer;
+        }
+      });
+    } catch (InterruptedException e) {
+      throw new RuntimeException(e);
+    } finally {
+      // Go back to being the correct Namenode principal
+      LOG.info("Web server init done, returning to: " + 
+          UserGroupInformation.getLoginUser().getUserName());
+      DFSUtil.login(conf, DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY,
+          DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY);
+      
+    }
 
     // The web-server port can be ephemeral... ensure we have the correct info
     infoPort = infoServer.getPort();
+    if(!UserGroupInformation.isSecurityEnabled())
+      imagePort = infoPort;
     conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, infoBindAddress + ":" +infoPort); 
     LOG.info("Secondary Web-server up at: " + infoBindAddress + ":" +infoPort);
+    LOG.info("Secondary image servlet up at: " + infoBindAddress + ":" + imagePort);
     LOG.warn("Checkpoint Period   :" + checkpointPeriod + " secs " +
              "(" + checkpointPeriod/60 + " min)");
     LOG.warn("Log Size Trigger    :" + checkpointSize + " bytes " +
@@ -245,39 +290,49 @@ public class SecondaryNameNode implement
    * files from the name-node.
    * @throws IOException
    */
-  private void downloadCheckpointFiles(CheckpointSignature sig
+  private void downloadCheckpointFiles(final CheckpointSignature sig
                                       ) throws IOException {
-    
-    checkpointImage.cTime = sig.cTime;
-    checkpointImage.checkpointTime = sig.checkpointTime;
-
-    // get fsimage
-    String fileid = "getimage=1";
-    Collection<File> list = checkpointImage.getFiles(NameNodeFile.IMAGE,
-        NameNodeDirType.IMAGE);
-    File[] srcNames = list.toArray(new File[list.size()]);
-    assert srcNames.length > 0 : "No checkpoint targets.";
-    TransferFsImage.getFileClient(fsName, fileid, srcNames);
-    LOG.info("Downloaded file " + srcNames[0].getName() + " size " +
-             srcNames[0].length() + " bytes.");
-
-    // get edits file
-    fileid = "getedit=1";
-    list = getFSImage().getFiles(NameNodeFile.EDITS, NameNodeDirType.EDITS);
-    srcNames = list.toArray(new File[list.size()]);;
-    assert srcNames.length > 0 : "No checkpoint targets.";
-    TransferFsImage.getFileClient(fsName, fileid, srcNames);
-    LOG.info("Downloaded file " + srcNames[0].getName() + " size " +
-        srcNames[0].length() + " bytes.");
-
-    checkpointImage.checkpointUploadDone();
+    try {
+        UserGroupInformation.getCurrentUser().doAs(new PrivilegedExceptionAction<Void>() {
+  
+          @Override
+          public Void run() throws Exception {
+            checkpointImage.cTime = sig.cTime;
+            checkpointImage.checkpointTime = sig.checkpointTime;
+        
+            // get fsimage
+            String fileid = "getimage=1";
+            Collection<File> list = checkpointImage.getFiles(NameNodeFile.IMAGE,
+                NameNodeDirType.IMAGE);
+            File[] srcNames = list.toArray(new File[list.size()]);
+            assert srcNames.length > 0 : "No checkpoint targets.";
+            TransferFsImage.getFileClient(fsName, fileid, srcNames);
+            LOG.info("Downloaded file " + srcNames[0].getName() + " size " +
+                     srcNames[0].length() + " bytes.");
+        
+            // get edits file
+            fileid = "getedit=1";
+            list = getFSImage().getFiles(NameNodeFile.EDITS, NameNodeDirType.EDITS);
+            srcNames = list.toArray(new File[list.size()]);;
+            assert srcNames.length > 0 : "No checkpoint targets.";
+            TransferFsImage.getFileClient(fsName, fileid, srcNames);
+            LOG.info("Downloaded file " + srcNames[0].getName() + " size " +
+                srcNames[0].length() + " bytes.");
+        
+            checkpointImage.checkpointUploadDone();
+            return null;
+          }
+        });
+      } catch (InterruptedException e) {
+        throw new RuntimeException(e);
+      }
   }
 
   /**
    * Copy the new fsimage into the NameNode
    */
   private void putFSImage(CheckpointSignature sig) throws IOException {
-    String fileid = "putimage=1&port=" + infoPort +
+    String fileid = "putimage=1&port=" + imagePort +
       "&machine=" + infoBindAddress + 
       "&token=" + sig.toString();
     LOG.info("Posted URL " + fsName + fileid);
@@ -292,12 +347,21 @@ public class SecondaryNameNode implement
     if (!FSConstants.HDFS_URI_SCHEME.equalsIgnoreCase(fsName.getScheme())) {
       throw new IOException("This is not a DFS");
     }
-    String configuredAddress = conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY,
-                                        DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_DEFAULT);
+
+    String configuredAddress = UserGroupInformation.isSecurityEnabled() ? 
+        conf.get(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY,
+            DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT)
+      : conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY,
+            DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_DEFAULT);
     InetSocketAddress sockAddr = NetUtils.createSocketAddr(configuredAddress);
     if (sockAddr.getAddress().isAnyLocalAddress()) {
+      if(UserGroupInformation.isSecurityEnabled()) {
+        throw new IOException("Cannot use a wildcard address with security. " +
+        		"Must explicitly set bind address for Kerberos");
+      }
       return fsName.getHost() + ":" + sockAddr.getPort();
     } else {
+      LOG.debug("configuredAddress = " + configuredAddress);
       return configuredAddress;
     }
   }

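Two details in the SecondaryNameNode change are worth restating. First, the web server is started as the host (HTTPS) principal and the process then logs back in as the regular NameNode principal, since Kerberized SSL must run from the host principal. Second, getInfoServer now resolves the https address under security and refuses wildcard binds, because a Kerberos principal needs a concrete hostname. A condensed, standalone restatement of that address logic (fsHost stands in for fsName.getHost()):

    import java.io.IOException;
    import java.net.InetSocketAddress;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.net.NetUtils;
    import org.apache.hadoop.security.UserGroupInformation;

    class InfoServerAddressSketch {
      static String resolve(Configuration conf, String fsHost) throws IOException {
        // Secure clusters fetch from the https address; insecure ones from http.
        String addr = UserGroupInformation.isSecurityEnabled()
            ? conf.get(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY,
                       DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT)
            : conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY,
                       DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_DEFAULT);
        InetSocketAddress sock = NetUtils.createSocketAddr(addr);
        if (sock.getAddress().isAnyLocalAddress()) {
          if (UserGroupInformation.isSecurityEnabled()) {
            throw new IOException("Cannot use a wildcard address with security");
          }
          return fsHost + ":" + sock.getPort();  // fall back to the fs URI's host
        }
        return addr;
      }
    }
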
Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java?rev=962380&r1=962379&r2=962380&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java Fri Jul  9 01:11:33 2010
@@ -28,6 +28,8 @@ import javax.servlet.http.HttpServletReq
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.DFSUtil.ErrorSimulator;
+import org.apache.hadoop.security.UserGroupInformation;
+
 
 /**
  * This class provides fetching a specified file from the NameNode.
@@ -154,7 +156,8 @@ class TransferFsImage implements FSConst
   static void getFileClient(String fsName, String id, File[] localPath)
     throws IOException {
     byte[] buf = new byte[BUFFER_SIZE];
-    StringBuilder str = new StringBuilder("http://"+fsName+"/getimage?");
+    String proto = UserGroupInformation.isSecurityEnabled() ? "https://" : "http://";
+    StringBuilder str = new StringBuilder(proto+fsName+"/getimage?");
     str.append(id);
 
     //
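
The hunk above is cut off just before the connection is opened. For orientation, a hypothetical sketch of the download loop that follows (in the real method, each chunk is written to every file in localPath):

    import java.io.InputStream;
    import java.net.URL;

    class FetchSketch {
      // Read the image from the already scheme-qualified URL, chunk by chunk.
      static long fetch(URL imageUrl) throws Exception {
        byte[] buf = new byte[4096];
        long total = 0;
        InputStream in = imageUrl.openStream();
        try {
          int n;
          while ((n = in.read(buf)) > 0) {
            total += n;  // the real code writes buf[0..n) to each local file
          }
        } finally {
          in.close();
        }
        return total;
      }
    }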