Posted to common-commits@hadoop.apache.org by cn...@apache.org on 2015/04/14 19:09:27 UTC

[2/2] hadoop git commit: HDFS-6666. Abort NameNode and DataNode startup if security is enabled but block access token is not enabled. Contributed by Vijay Bhat.

HDFS-6666. Abort NameNode and DataNode startup if security is enabled but block access token is not enabled. Contributed by Vijay Bhat.

(cherry picked from commit d45aa7647b1fecf81860ec7b563085be2af99a0b)

Conflicts:
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecureNameNode.java
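
In practical terms, the patch turns a previously log-only misconfiguration
into a fail-fast startup error. A minimal sketch of the offending
combination, in illustrative Java (not part of the patch; the config key and
helper classes are the ones used in the diff below):

    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.security.SecurityUtil;
    import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;

    HdfsConfiguration conf = new HdfsConfiguration();
    // Kerberos is on...
    SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf);
    // ...but dfs.block.access.token.enable is left at its default of false,
    // so NameNode and DataNode startup now abort. The remedy:
    conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);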


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9779b79e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9779b79e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9779b79e

Branch: refs/heads/branch-2
Commit: 9779b79ed8371cbfecf77e020236e1f751e8efeb
Parents: a75679c
Author: cnauroth <cn...@apache.org>
Authored: Tue Apr 14 09:59:01 2015 -0700
Committer: cnauroth <cn...@apache.org>
Committed: Tue Apr 14 10:03:53 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../server/blockmanagement/BlockManager.java    |  7 ++--
 .../hadoop/hdfs/server/datanode/DataNode.java   | 14 +++++++
 .../hdfs/server/namenode/FSNamesystem.java      | 10 +++--
 .../sasl/SaslDataTransferTestCase.java          | 41 ++++++++++++++++----
 5 files changed, 60 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9779b79e/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2c2603b..d47de9d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -173,6 +173,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-7701. Support reporting per storage type quota and usage
     with hadoop/hdfs shell. (Peter Shi via Arpit Agarwal)
 
+    HDFS-6666. Abort NameNode and DataNode startup if security is enabled but
+    block access token is not enabled. (Vijay Bhat via cnauroth)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9779b79e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index f9097b8..ae13a29 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -359,7 +359,7 @@ public class BlockManager {
   }
 
   private static BlockTokenSecretManager createBlockTokenSecretManager(
-      final Configuration conf) {
+      final Configuration conf) throws IOException {
     final boolean isEnabled = conf.getBoolean(
         DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, 
         DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_DEFAULT);
@@ -367,10 +367,11 @@ public class BlockManager {
 
     if (!isEnabled) {
       if (UserGroupInformation.isSecurityEnabled()) {
-        LOG.error("Security is enabled but block access tokens " +
+        String errMessage = "Security is enabled but block access tokens " +
             "(via " + DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY + ") " +
             "aren't enabled. This may cause issues " +
-            "when clients attempt to talk to a DataNode.");
+            "when clients attempt to connect to a DataNode. Aborting NameNode";
+        throw new IOException(errMessage);
       }
       return null;
     }
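
The hunk above is the NameNode-side check: createBlockTokenSecretManager now
throws instead of merely logging, so the misconfiguration stops BlockManager
construction outright. A hedged test-style sketch of the resulting behavior
(hypothetical; the real coverage lives in the TestSecureNameNode change noted
in the conflicts above, and this assumes a MiniKdc-backed Kerberos setup):

    HdfsConfiguration conf = new HdfsConfiguration();
    SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf);
    UserGroupInformation.setConfiguration(conf);
    // principal/keytab settings, as in createSecureConfig() below, omitted
    // for brevity; dfs.block.access.token.enable deliberately left false
    try {
      new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
      fail("NameNode startup should abort when security is on but " +
          "block access tokens are off");
    } catch (IOException e) {
      GenericTestUtils.assertExceptionContains(
          "Security is enabled but block access tokens", e);
    }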

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9779b79e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 1b3c4ff..eddbddb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -1190,6 +1190,20 @@ public class DataNode extends ReconfigurableBase
     if (!UserGroupInformation.isSecurityEnabled()) {
       return;
     }
+
+    // Abort out of inconsistent state if Kerberos is enabled
+    // but block access tokens are not enabled.
+    boolean isEnabled = conf.getBoolean(
+        DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY,
+        DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_DEFAULT);
+    if (!isEnabled) {
+      String errMessage = "Security is enabled but block access tokens " +
+          "(via " + DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY + ") " +
+          "aren't enabled. This may cause issues " +
+          "when clients attempt to connect to a DataNode. Aborting DataNode";
+      throw new RuntimeException(errMessage);
+    }
+
     SaslPropertiesResolver saslPropsResolver = dnConf.getSaslPropsResolver();
     if (resources != null && saslPropsResolver == null) {
       return;
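
And this is the DataNode-side twin of that check. It throws RuntimeException
rather than IOException, presumably because the enclosing startup path does
not declare a checked exception. Since the two daemons now duplicate the same
test, a shared helper would be one obvious refactoring; a hypothetical sketch
(not part of the patch):

    // Hypothetical helper; the patch inlines this logic in both daemons.
    static void checkBlockTokenConfig(Configuration conf) throws IOException {
      if (!UserGroupInformation.isSecurityEnabled()) {
        return; // no Kerberos, nothing to enforce
      }
      boolean tokensEnabled = conf.getBoolean(
          DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY,
          DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_DEFAULT);
      if (!tokensEnabled) {
        throw new IOException("Security is enabled but block access tokens ("
            + DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY
            + ") aren't enabled. Aborting startup.");
      }
    }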

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9779b79e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index c641886..d2321ea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1234,10 +1234,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
         cacheManager.stopMonitorThread();
         cacheManager.clearDirectiveStats();
       }
-      blockManager.getDatanodeManager().clearPendingCachingCommands();
-      blockManager.getDatanodeManager().setShouldSendCachingCommands(false);
-      // Don't want to keep replication queues when not in Active.
-      blockManager.clearQueues();
+      if (blockManager != null) {
+        blockManager.getDatanodeManager().clearPendingCachingCommands();
+        blockManager.getDatanodeManager().setShouldSendCachingCommands(false);
+        // Don't want to keep replication queues when not in Active.
+        blockManager.clearQueues();
+      }
       initializedReplQueues = false;
     } finally {
       writeUnlock();
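
This FSNamesystem hunk is a consequence of failing fast: if BlockManager
construction aborts on the new check, the shutdown path can run with
blockManager still null, so it needs a null guard before touching the
DatanodeManager and the replication queues.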

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9779b79e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferTestCase.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferTestCase.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferTestCase.java
index 6f2b3aa..98e4b1e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferTestCase.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferTestCase.java
@@ -33,6 +33,7 @@ import static org.junit.Assert.*;
 import java.io.File;
 import java.util.Properties;
 
+import org.apache.commons.lang.RandomStringUtils;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.http.HttpConfig;
@@ -48,10 +49,28 @@ public abstract class SaslDataTransferTestCase {
 
   private static File baseDir;
   private static String hdfsPrincipal;
+  private static String userPrincipal;
   private static MiniKdc kdc;
-  private static String keytab;
+  private static String hdfsKeytab;
+  private static String userKeyTab;
   private static String spnegoPrincipal;
 
+  public static String getUserKeyTab() {
+    return userKeyTab;
+  }
+
+  public static String getUserPrincipal() {
+    return userPrincipal;
+  }
+
+  public static String getHdfsPrincipal() {
+    return hdfsPrincipal;
+  }
+
+  public static String getHdfsKeytab() {
+    return hdfsKeytab;
+  }
+
   @BeforeClass
   public static void initKdc() throws Exception {
     baseDir = new File(System.getProperty("test.build.dir", "target/test-dir"),
@@ -63,11 +82,17 @@ public abstract class SaslDataTransferTestCase {
     kdc = new MiniKdc(kdcConf, baseDir);
     kdc.start();
 
-    String userName = UserGroupInformation.getLoginUser().getShortUserName();
-    File keytabFile = new File(baseDir, userName + ".keytab");
-    keytab = keytabFile.getAbsolutePath();
-    kdc.createPrincipal(keytabFile, userName + "/localhost", "HTTP/localhost");
-    hdfsPrincipal = userName + "/localhost@" + kdc.getRealm();
+    String userName = RandomStringUtils.randomAlphabetic(8);
+    File userKeytabFile = new File(baseDir, userName + ".keytab");
+    userKeyTab = userKeytabFile.getAbsolutePath();
+    kdc.createPrincipal(userKeytabFile, userName + "/localhost");
+    userPrincipal = userName + "/localhost@" + kdc.getRealm();
+
+    String superUserName = "hdfs";
+    File hdfsKeytabFile = new File(baseDir, superUserName + ".keytab");
+    hdfsKeytab = hdfsKeytabFile.getAbsolutePath();
+    kdc.createPrincipal(hdfsKeytabFile, superUserName + "/localhost", "HTTP/localhost");
+    hdfsPrincipal = superUserName + "/localhost@" + kdc.getRealm();
     spnegoPrincipal = "HTTP/localhost@" + kdc.getRealm();
   }
 
@@ -91,9 +116,9 @@ public abstract class SaslDataTransferTestCase {
     HdfsConfiguration conf = new HdfsConfiguration();
     SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf);
     conf.set(DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
-    conf.set(DFS_NAMENODE_KEYTAB_FILE_KEY, keytab);
+    conf.set(DFS_NAMENODE_KEYTAB_FILE_KEY, hdfsKeytab);
     conf.set(DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
-    conf.set(DFS_DATANODE_KEYTAB_FILE_KEY, keytab);
+    conf.set(DFS_DATANODE_KEYTAB_FILE_KEY, hdfsKeytab);
     conf.set(DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, spnegoPrincipal);
     conf.setBoolean(DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
     conf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, dataTransferProtection);
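
The test-case refactoring above replaces the single shared keytab with
separate user and "hdfs" superuser principals and exposes accessors so that
subclasses can log in as either identity. A hedged usage sketch (hypothetical
subclass code, not from the patch):

    // In a test extending SaslDataTransferTestCase:
    UserGroupInformation ugi = UserGroupInformation
        .loginUserFromKeytabAndReturnUGI(getUserPrincipal(), getUserKeyTab());
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        // exercise HDFS as the non-superuser principal here
        return null;
      }
    });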