You are viewing a plain text version of this content. The canonical (HTML) version of this message is available in the Apache mailing list archive.
Posted to hdfs-commits@hadoop.apache.org by ki...@apache.org on 2014/05/31 16:32:44 UTC
svn commit: r1598874 -
/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
Author: kihwal
Date: Sat May 31 14:32:44 2014
New Revision: 1598874
URL: http://svn.apache.org/r1598874
Log:
Fix merge error.
Modified:
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1598874&r1=1598873&r2=1598874&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Sat May 31 14:32:44 2014
@@ -234,6 +234,9 @@ public class DataNode extends Configured
private boolean checkDiskErrorFlag = false;
private Object checkDiskErrorMutex = new Object();
private long lastDiskErrorCheck;
+ private String supergroup;
+ private boolean isPermissionEnabled;
+ private String dnUserName = null;
/**
* Create the DataNode given a configuration, an array of dataDirs,
@@ -255,6 +258,11 @@ public class DataNode extends Configured
this.getHdfsBlockLocationsEnabled = conf.getBoolean(
DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED,
DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED_DEFAULT);
+ this.supergroup = conf.get(DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY,
+ DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT);
+ this.isPermissionEnabled = conf.getBoolean(
+ DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY,
+ DFSConfigKeys.DFS_PERMISSIONS_ENABLED_DEFAULT);
confVersion = "core-" +
conf.get("hadoop.common.configuration.version", "UNSPECIFIED") +
@@ -439,6 +447,33 @@ public class DataNode extends Configured
ipcServer.refreshServiceAcl(conf, new HDFSPolicyProvider());
}
}
+
+ /** Check whether the current user is in the superuser group. */
+ private void checkSuperuserPrivilege() throws IOException, AccessControlException {
+ if (!isPermissionEnabled) {
+ return;
+ }
+ // Try to get the ugi in the RPC call.
+ UserGroupInformation callerUgi = ipcServer.getRemoteUser();
+ if (callerUgi == null) {
+ // This is not from RPC.
+ callerUgi = UserGroupInformation.getCurrentUser();
+ }
+
+ // Is this by the DN user itself?
+ assert dnUserName != null;
+ if (callerUgi.getShortUserName().equals(dnUserName)) {
+ return;
+ }
+
+ // Is the user a member of the super group?
+ List<String> groups = Arrays.asList(callerUgi.getGroupNames());
+ if (groups.contains(supergroup)) {
+ return;
+ }
+ // Not a superuser.
+ throw new AccessControlException();
+ }
/**
* Initialize the datanode's periodic scanners:
@@ -742,6 +777,11 @@ public class DataNode extends Configured
// BlockPoolTokenSecretManager is required to create ipc server.
this.blockPoolTokenSecretManager = new BlockPoolTokenSecretManager();
+
+ // Login is done by now. Set the DN user name.
+ dnUserName = UserGroupInformation.getCurrentUser().getShortUserName();
+ LOG.info("dnUserName = " + dnUserName);
+ LOG.info("supergroup = " + supergroup);
initIpcServer(conf);
metrics = DataNodeMetrics.create(conf, getDisplayName());
@@ -2421,6 +2461,7 @@ public class DataNode extends Configured
@Override // ClientDatanodeProtocol
public void refreshNamenodes() throws IOException {
+ checkSuperuserPrivilege();
conf = new Configuration();
refreshNamenodes(conf);
}
@@ -2428,6 +2469,7 @@ public class DataNode extends Configured
@Override // ClientDatanodeProtocol
public void deleteBlockPool(String blockPoolId, boolean force)
throws IOException {
+ checkSuperuserPrivilege();
LOG.info("deleteBlockPool command received for block pool " + blockPoolId
+ ", force=" + force);
if (blockPoolManager.get(blockPoolId) != null) {
@@ -2443,6 +2485,7 @@ public class DataNode extends Configured
@Override // ClientDatanodeProtocol
public synchronized void shutdownDatanode(boolean forUpgrade) throws IOException {
+ checkSuperuserPrivilege();
LOG.info("shutdownDatanode command received (upgrade=" + forUpgrade +
"). Shutting down Datanode...");
@@ -2609,4 +2652,4 @@ public class DataNode extends Configured
return lastDiskErrorCheck;
}
}
-}
\ No newline at end of file
+}