Posted to common-commits@hadoop.apache.org by om...@apache.org on 2011/03/04 04:44:57 UTC
svn commit: r1077137 [2/5] - in
/hadoop/common/branches/branch-0.20-security-patches: ./
.eclipse.templates/ ivy/
src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/
src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/
src/contrib/stream...
Modified: hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java?rev=1077137&r1=1077136&r2=1077137&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java Fri Mar 4 03:44:54 2011
@@ -38,11 +38,9 @@ import org.apache.hadoop.hdfs.server.dat
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.security.UnixUserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
-
import org.apache.hadoop.util.*;
import org.apache.commons.logging.*;
@@ -76,7 +74,7 @@ public class DFSClient implements FSCons
private static final int TCP_WINDOW_SIZE = 128 * 1024; // 128 KB
public final ClientProtocol namenode;
private final ClientProtocol rpcNamenode;
- final UnixUserGroupInformation ugi;
+ final UserGroupInformation ugi;
volatile boolean clientRunning = true;
Random r = new Random();
final String clientName;
@@ -98,16 +96,12 @@ public class DFSClient implements FSCons
public static ClientProtocol createNamenode( InetSocketAddress nameNodeAddr,
Configuration conf) throws IOException {
- try {
- return createNamenode(createRPCNamenode(nameNodeAddr, conf,
- UnixUserGroupInformation.login(conf, true)));
- } catch (LoginException e) {
- throw (IOException)(new IOException().initCause(e));
- }
+ return createNamenode(createRPCNamenode(nameNodeAddr, conf,
+ UserGroupInformation.getCurrentUser()));
}
private static ClientProtocol createRPCNamenode(InetSocketAddress nameNodeAddr,
- Configuration conf, UnixUserGroupInformation ugi)
+ Configuration conf, UserGroupInformation ugi)
throws IOException {
return (ClientProtocol)RPC.getProxy(ClientProtocol.class,
ClientProtocol.versionID, nameNodeAddr, ugi, conf,
@@ -196,11 +190,7 @@ public class DFSClient implements FSCons
conf.getInt("dfs.client.max.block.acquire.failures",
MAX_BLOCK_ACQUIRE_FAILURES);
- try {
- this.ugi = UnixUserGroupInformation.login(conf, true);
- } catch (LoginException e) {
- throw (IOException)(new IOException().initCause(e));
- }
+ ugi = UserGroupInformation.getCurrentUser();
String taskId = conf.get("mapred.task.id");
if (taskId != null) {
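The pattern repeated throughout this file: UnixUserGroupInformation.login(conf, true) plus its LoginException translation is replaced by UserGroupInformation.getCurrentUser(), which throws IOException directly. A minimal standalone sketch of the new call (illustrative, not part of the commit):

    import java.io.IOException;
    import org.apache.hadoop.security.UserGroupInformation;

    public class CurrentUserSketch {
      public static void main(String[] args) throws IOException {
        // Resolves the caller from the security context (Kerberos login or
        // OS user) instead of an explicit login(conf) call, so the
        // LoginException catch blocks removed above are no longer needed.
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        System.out.println("user=" + ugi.getUserName());
      }
    }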
Modified: hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/DFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/DFSUtil.java?rev=1077137&r1=1077136&r2=1077137&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/DFSUtil.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/DFSUtil.java Fri Mar 4 03:44:54 2011
@@ -18,8 +18,11 @@
package org.apache.hadoop.hdfs;
+import java.io.IOException;
+import org.apache.hadoop.conf.Configuration;
import java.util.StringTokenizer;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.security.UserGroupInformation;
public class DFSUtil {
/**
@@ -47,5 +50,20 @@ public class DFSUtil {
return true;
}
+ /**
+ * If a keytab has been provided, login as that user.
+ */
+ public static void login(final Configuration conf,
+ final String keytabFileKey,
+ final String userNameKey)
+ throws IOException {
+ String keytabFilename = conf.get(keytabFileKey);
+
+ if(keytabFilename == null)
+ return;
+
+ String user = conf.get(userNameKey, System.getProperty("user.name"));
+ UserGroupInformation.loginUserFromKeytab(user, keytabFilename);
+ }
}
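A sketch of how the daemons below invoke this helper at startup; the two configuration keys are the DataNode constants introduced later in this commit:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSUtil;
    import org.apache.hadoop.security.UserGroupInformation;

    public class KeytabLoginSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        UserGroupInformation.setConfiguration(conf);
        // A no-op when dfs.datanode.keytab.file is unset; otherwise performs
        // a Kerberos keytab login as the configured (or OS) user name.
        DFSUtil.login(conf, "dfs.datanode.keytab.file",
            "dfs.datanode.user.name.key");
      }
    }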
Modified: hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/HftpFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/HftpFileSystem.java?rev=1077137&r1=1077136&r2=1077137&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/HftpFileSystem.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/HftpFileSystem.java Fri Mar 4 03:44:54 2011
@@ -36,8 +36,6 @@ import java.util.ArrayList;
import java.util.Random;
import java.util.TimeZone;
-import javax.security.auth.login.LoginException;
-
import org.xml.sax.Attributes;
import org.xml.sax.InputSource;
import org.xml.sax.SAXException;
@@ -60,7 +58,6 @@ import org.apache.hadoop.hdfs.server.nam
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.*;
import org.apache.hadoop.util.Progressable;
-import org.apache.hadoop.util.StringUtils;
/** An implementation of a protocol for accessing filesystems over HTTP.
* The following implementation provides a limited, read-only interface
@@ -96,11 +93,7 @@ public class HftpFileSystem extends File
public void initialize(URI name, Configuration conf) throws IOException {
super.initialize(name, conf);
setConf(conf);
- try {
- this.ugi = UnixUserGroupInformation.login(conf, true);
- } catch (LoginException le) {
- throw new IOException(StringUtils.stringifyException(le));
- }
+ this.ugi = UserGroupInformation.getCurrentUser();
nnAddr = NetUtils.createSocketAddr(name.toString());
}
@@ -138,7 +131,7 @@ public class HftpFileSystem extends File
@Override
public FSDataInputStream open(Path f, int buffersize) throws IOException {
HttpURLConnection connection = null;
- connection = openConnection("/data" + f.toUri().getPath(), "ugi=" + ugi);
+ connection = openConnection("/data" + f.toUri().getPath(), "ugi=" + ugi.getUserName());
connection.setRequestMethod("GET");
connection.connect();
final InputStream in = connection.getInputStream();
@@ -212,7 +205,7 @@ public class HftpFileSystem extends File
XMLReader xr = XMLReaderFactory.createXMLReader();
xr.setContentHandler(this);
HttpURLConnection connection = openConnection("/listPaths" + path,
- "ugi=" + ugi + (recur? "&recursive=yes" : ""));
+ "ugi=" + ugi.getCurrentUser() + (recur? "&recursive=yes" : ""));
connection.setRequestMethod("GET");
connection.connect();
@@ -278,7 +271,7 @@ public class HftpFileSystem extends File
private FileChecksum getFileChecksum(String f) throws IOException {
final HttpURLConnection connection = openConnection(
- "/fileChecksum" + f, "ugi=" + ugi);
+ "/fileChecksum" + f, "ugi=" + ugi.getUserName());
try {
final XMLReader xr = XMLReaderFactory.createXMLReader();
xr.setContentHandler(this);
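Net effect on the wire: the ugi query parameter now carries only the short user name (for example ugi=alice, name illustrative) rather than the comma-separated user,group1,group2 form that UnixUserGroupInformation serialized to, which is why DfsServlet below stops parsing the parameter with split(",").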
Modified: hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/security/AccessTokenHandler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/security/AccessTokenHandler.java?rev=1077137&r1=1077136&r2=1077137&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/security/AccessTokenHandler.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/security/AccessTokenHandler.java Fri Mar 4 03:44:54 2011
@@ -225,7 +225,7 @@ public class AccessTokenHandler {
/** Generate an access token for current user */
public BlockAccessToken generateToken(long blockID, EnumSet<AccessMode> modes)
throws IOException {
- UserGroupInformation ugi = UserGroupInformation.getCurrentUGI();
+ UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
String userID = (ugi == null ? null : ugi.getUserName());
return generateToken(userID, blockID, modes);
}
Modified: hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/balancer/Balancer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/balancer/Balancer.java?rev=1077137&r1=1077136&r2=1077137&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/balancer/Balancer.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/balancer/Balancer.java Fri Mar 4 03:44:54 2011
@@ -75,7 +75,6 @@ import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.NetworkTopology;
-import org.apache.hadoop.security.UnixUserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.StringUtils;
@@ -920,12 +919,7 @@ public class Balancer implements Tool {
methodNameToPolicyMap.put("getBlocks", methodPolicy);
methodNameToPolicyMap.put("getAccessKeys", methodPolicy);
- UserGroupInformation ugi;
- try {
- ugi = UnixUserGroupInformation.login(conf);
- } catch (javax.security.auth.login.LoginException e) {
- throw new IOException(StringUtils.stringifyException(e));
- }
+ UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
return (NamenodeProtocol) RetryProxy.create(
NamenodeProtocol.class,
Modified: hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1077137&r1=1077136&r2=1077137&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java Fri Mar 4 03:44:54 2011
@@ -46,6 +46,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HDFSPolicyProvider;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
@@ -84,10 +85,7 @@ import org.apache.hadoop.ipc.RemoteExcep
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.net.DNS;
import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.security.authorize.ConfiguredPolicy;
-import org.apache.hadoop.security.authorize.PolicyProvider;
-import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
+import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.DiskChecker;
import org.apache.hadoop.util.ReflectionUtils;
@@ -97,6 +95,7 @@ import org.apache.hadoop.util.DiskChecke
import org.apache.hadoop.hdfs.security.BlockAccessToken;
import org.apache.hadoop.hdfs.security.AccessTokenHandler;
import org.apache.hadoop.hdfs.security.ExportedAccessKeys;
+import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
/**********************************************************
* DataNode is a class (and program) that stores a set of
@@ -203,6 +202,9 @@ public class DataNode extends Configured
private static final Random R = new Random();
+ private final static String KEYTAB_FILE_KEY = "dfs.datanode.keytab.file";
+ private final static String USER_NAME_KEY = "dfs.datanode.user.name.key";
+
// For InterDataNodeProtocol
public Server ipcServer;
@@ -213,14 +215,17 @@ public class DataNode extends Configured
static long now() {
return System.currentTimeMillis();
}
-
+
/**
* Create the DataNode given a configuration and an array of dataDirs.
* 'dataDirs' is where the blocks are stored.
*/
- DataNode(Configuration conf,
- AbstractList<File> dataDirs) throws IOException {
+ DataNode(final Configuration conf,
+ final AbstractList<File> dataDirs) throws IOException {
super(conf);
+ UserGroupInformation.setConfiguration(conf);
+ DFSUtil.login(conf, KEYTAB_FILE_KEY, USER_NAME_KEY);
+
datanodeObject = this;
try {
@@ -391,12 +396,7 @@ public class DataNode extends Configured
// set service-level authorization security policy
if (conf.getBoolean(
ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
- PolicyProvider policyProvider =
- (PolicyProvider)(ReflectionUtils.newInstance(
- conf.getClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
- HDFSPolicyProvider.class, PolicyProvider.class),
- conf));
- SecurityUtil.setPolicy(new ConfiguredPolicy(conf, policyProvider));
+ ServiceAuthorizationManager.refresh(conf, new HDFSPolicyProvider());
}
//init ipc server
Modified: hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java?rev=1077137&r1=1077136&r2=1077137&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java Fri Mar 4 03:44:54 2011
@@ -34,7 +34,6 @@ import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.security.UnixUserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation;
/**
@@ -46,29 +45,43 @@ abstract class DfsServlet extends HttpSe
static final Log LOG = LogFactory.getLog(DfsServlet.class.getCanonicalName());
- /** Get {@link UserGroupInformation} from request */
- protected UnixUserGroupInformation getUGI(HttpServletRequest request) {
- String ugi = request.getParameter("ugi");
- try {
- return new UnixUserGroupInformation(ugi.split(","));
- }
- catch(Exception e) {
- LOG.warn("Invalid ugi (= " + ugi + ")");
+ /** Get {@link UserGroupInformation} from request
+ * @throws IOException */
+ protected UserGroupInformation getUGI(HttpServletRequest request)
+ throws IOException {
+ UserGroupInformation u = null;
+ if(UserGroupInformation.isSecurityEnabled()) {
+ String user = request.getRemoteUser();
+ if(user == null)
+ throw new IOException("Security enabled but user not " +
+ "authenticated by filter");
+
+ u = UserGroupInformation.createRemoteUser(user);
+ } else { // Security's not on, pull from url
+ String ugi = request.getParameter("ugi");
+
+ if(ugi == null) // not specified in request
+ ugi = new Configuration().get(JspHelper.WEB_UGI_PROPERTY_NAME);
+
+ if(ugi == null) // not specified in conf either
+ throw new IOException("Cannot determine UGI from request or conf");
+
+ u = UserGroupInformation.createRemoteUser(ugi);
}
- return JspHelper.webUGI;
+
+ if(LOG.isDebugEnabled())
+ LOG.debug("getUGI is returning: " + u.getUserName());
+ return u;
}
/**
* Create a {@link NameNode} proxy from the current {@link ServletContext}.
*/
- protected ClientProtocol createNameNodeProxy(UnixUserGroupInformation ugi
- ) throws IOException {
+ protected ClientProtocol createNameNodeProxy() throws IOException {
ServletContext context = getServletContext();
InetSocketAddress nnAddr = (InetSocketAddress)context.getAttribute("name.node.address");
Configuration conf = new Configuration(
(Configuration)context.getAttribute("name.conf"));
- UnixUserGroupInformation.saveToConf(conf,
- UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);
return DFSClient.createNamenode(nnAddr, conf);
}
@@ -83,7 +96,7 @@ abstract class DfsServlet extends HttpSe
: host.getInfoPort();
final String filename = request.getPathInfo();
return new URI(scheme, null, hostname, port, servletpath,
- "filename=" + filename + "&ugi=" + ugi, null);
+ "filename=" + filename + "&ugi=" + ugi.getUserName(), null);
}
/** Get filename from the request */
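The servlets below consume the UGI returned here through doAs, so privileged work runs as the remote caller. A self-contained sketch of those semantics (user name illustrative):

    import java.io.IOException;
    import java.security.PrivilegedExceptionAction;
    import org.apache.hadoop.security.UserGroupInformation;

    public class DoAsSketch {
      public static void main(String[] args)
          throws IOException, InterruptedException {
        UserGroupInformation ugi = UserGroupInformation.createRemoteUser("alice");
        String who = ugi.doAs(new PrivilegedExceptionAction<String>() {
          @Override
          public String run() throws IOException {
            // Inside run() the current user is the remote caller, so an RPC
            // proxy created here (e.g. createNameNodeProxy) is bound to it.
            return UserGroupInformation.getCurrentUser().getUserName();
          }
        });
        System.out.println(who); // prints "alice"
      }
    }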
Modified: hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1077137&r1=1077136&r2=1077137&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Fri Mar 4 03:44:54 2011
@@ -32,8 +32,6 @@ import org.apache.hadoop.hdfs.server.nam
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.hdfs.security.AccessTokenHandler;
import org.apache.hadoop.hdfs.security.ExportedAccessKeys;
-import org.apache.hadoop.security.PermissionChecker;
-import org.apache.hadoop.security.UnixUserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
@@ -408,11 +406,7 @@ public class FSNamesystem implements FSC
private void setConfigurationParameters(Configuration conf)
throws IOException {
fsNamesystemObject = this;
- try {
- fsOwner = UnixUserGroupInformation.login(conf);
- } catch (LoginException e) {
- throw new IOException(StringUtils.stringifyException(e));
- }
+ fsOwner = UserGroupInformation.getCurrentUser();
LOG.info("fsOwner=" + fsOwner);
this.supergroup = conf.get("dfs.permissions.supergroup", "supergroup");
@@ -738,7 +732,7 @@ public class FSNamesystem implements FSC
getEditLog().logSync();
if (auditLog.isInfoEnabled()) {
final FileStatus stat = dir.getFileInfo(src);
- logAuditEvent(UserGroupInformation.getCurrentUGI(),
+ logAuditEvent(UserGroupInformation.getCurrentUser(),
Server.getRemoteIp(),
"setPermission", src, null, stat);
}
@@ -764,7 +758,7 @@ public class FSNamesystem implements FSC
getEditLog().logSync();
if (auditLog.isInfoEnabled()) {
final FileStatus stat = dir.getFileInfo(src);
- logAuditEvent(UserGroupInformation.getCurrentUGI(),
+ logAuditEvent(UserGroupInformation.getCurrentUser(),
Server.getRemoteIp(),
"setOwner", src, null, stat);
}
@@ -817,7 +811,7 @@ public class FSNamesystem implements FSC
final LocatedBlocks ret = getBlockLocationsInternal(src,
offset, length, Integer.MAX_VALUE, doAccessTime);
if (auditLog.isInfoEnabled()) {
- logAuditEvent(UserGroupInformation.getCurrentUGI(),
+ logAuditEvent(UserGroupInformation.getCurrentUser(),
Server.getRemoteIp(),
"open", src, null, null);
}
@@ -924,7 +918,7 @@ public class FSNamesystem implements FSC
dir.setTimes(src, inode, mtime, atime, true);
if (auditLog.isInfoEnabled()) {
final FileStatus stat = dir.getFileInfo(src);
- logAuditEvent(UserGroupInformation.getCurrentUGI(),
+ logAuditEvent(UserGroupInformation.getCurrentUser(),
Server.getRemoteIp(),
"setTimes", src, null, stat);
}
@@ -951,7 +945,7 @@ public class FSNamesystem implements FSC
boolean status = setReplicationInternal(src, replication);
getEditLog().logSync();
if (status && auditLog.isInfoEnabled()) {
- logAuditEvent(UserGroupInformation.getCurrentUGI(),
+ logAuditEvent(UserGroupInformation.getCurrentUser(),
Server.getRemoteIp(),
"setReplication", src, null, null);
}
@@ -1039,7 +1033,7 @@ public class FSNamesystem implements FSC
getEditLog().logSync();
if (auditLog.isInfoEnabled()) {
final FileStatus stat = dir.getFileInfo(src);
- logAuditEvent(UserGroupInformation.getCurrentUGI(),
+ logAuditEvent(UserGroupInformation.getCurrentUser(),
Server.getRemoteIp(),
"create", src, null, stat);
}
@@ -1271,7 +1265,7 @@ public class FSNamesystem implements FSC
}
if (auditLog.isInfoEnabled()) {
- logAuditEvent(UserGroupInformation.getCurrentUGI(),
+ logAuditEvent(UserGroupInformation.getCurrentUser(),
Server.getRemoteIp(),
"append", src, null, null);
}
@@ -1701,7 +1695,7 @@ public class FSNamesystem implements FSC
getEditLog().logSync();
if (status && auditLog.isInfoEnabled()) {
final FileStatus stat = dir.getFileInfo(dst);
- logAuditEvent(UserGroupInformation.getCurrentUGI(),
+ logAuditEvent(UserGroupInformation.getCurrentUser(),
Server.getRemoteIp(),
"rename", src, dst, stat);
}
@@ -1745,7 +1739,7 @@ public class FSNamesystem implements FSC
boolean status = deleteInternal(src, true);
getEditLog().logSync();
if (status && auditLog.isInfoEnabled()) {
- logAuditEvent(UserGroupInformation.getCurrentUGI(),
+ logAuditEvent(UserGroupInformation.getCurrentUser(),
Server.getRemoteIp(),
"delete", src, null, null);
}
@@ -1801,7 +1795,7 @@ public class FSNamesystem implements FSC
getEditLog().logSync();
if (status && auditLog.isInfoEnabled()) {
final FileStatus stat = dir.getFileInfo(src);
- logAuditEvent(UserGroupInformation.getCurrentUGI(),
+ logAuditEvent(UserGroupInformation.getCurrentUser(),
Server.getRemoteIp(),
"mkdirs", src, null, stat);
}
@@ -2056,7 +2050,7 @@ public class FSNamesystem implements FSC
}
}
if (auditLog.isInfoEnabled()) {
- logAuditEvent(UserGroupInformation.getCurrentUGI(),
+ logAuditEvent(UserGroupInformation.getCurrentUser(),
Server.getRemoteIp(),
"listStatus", src, null, null);
}
@@ -4908,7 +4902,7 @@ public class FSNamesystem implements FSC
public Token<DelegationTokenIdentifier> getDelegationToken(Text renewer)
throws IOException {
- String user = UserGroupInformation.getCurrentUGI().getUserName();
+ String user = UserGroupInformation.getCurrentUser().getUserName();
Text owner = new Text(user);
DelegationTokenIdentifier dtId = new DelegationTokenIdentifier(owner, renewer);
return new Token<DelegationTokenIdentifier>(dtId, dtSecretManager);
@@ -4916,13 +4910,13 @@ public class FSNamesystem implements FSC
public Boolean renewDelegationToken(Token<DelegationTokenIdentifier> token)
throws InvalidToken, IOException {
- String renewer = UserGroupInformation.getCurrentUGI().getUserName();
+ String renewer = UserGroupInformation.getCurrentUser().getUserName();
return dtSecretManager.renewToken(token, renewer);
}
public Boolean cancelDelegationToken(Token<DelegationTokenIdentifier> token)
throws IOException {
- String canceller = UserGroupInformation.getCurrentUGI().getUserName();
+ String canceller = UserGroupInformation.getCurrentUser().getUserName();
return dtSecretManager.cancelToken(token, canceller);
}
}
Modified: hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java?rev=1077137&r1=1077136&r2=1077137&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java Fri Mar 4 03:44:54 2011
@@ -17,23 +17,63 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
-import java.util.*;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.Stack;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.permission.*;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.security.PermissionChecker;
import org.apache.hadoop.security.UserGroupInformation;
/** Perform permission checking in {@link FSNamesystem}. */
-class FSPermissionChecker extends PermissionChecker {
+class FSPermissionChecker {
static final Log LOG = LogFactory.getLog(UserGroupInformation.class);
+ private final UserGroupInformation ugi;
+ public final String user;
+ private final Set<String> groups = new HashSet<String>();
+ public final boolean isSuper;
+
FSPermissionChecker(String fsOwner, String supergroup
) throws AccessControlException{
- super(fsOwner, supergroup);
+ try {
+ ugi = UserGroupInformation.getCurrentUser();
+ } catch (IOException e) {
+ throw new AccessControlException(e);
+ }
+
+ groups.addAll(Arrays.asList(ugi.getGroupNames()));
+ user = ugi.getUserName();
+
+ isSuper = user.equals(fsOwner) || groups.contains(supergroup);
+ }
+
+ /**
+ * Check if the caller's groups contain the given group.
+ * @param group group to check
+ */
+ public boolean containsGroup(String group) {return groups.contains(group);}
+
+ /**
+ * Verify if the caller has the required permission. This will result in
+ * an exception if the caller is not allowed to access the resource.
+ * @param owner owner of the system
+ * @param supergroup supergroup of the system
+ */
+ public static void checkSuperuserPrivilege(UserGroupInformation owner,
+ String supergroup)
+ throws AccessControlException {
+ FSPermissionChecker checker =
+ new FSPermissionChecker(owner.getUserName(), supergroup);
+ if (!checker.isSuper) {
+ throw new AccessControlException("Access denied for user "
+ + checker.user + ". Superuser privilege is required");
+ }
}
/**
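Usage sketch for the new static helper, callable from namenode code in this package; fsOwner is the UGI captured in FSNamesystem above and "supergroup" the dfs.permissions.supergroup default:

    // Throws AccessControlException unless the current caller is the
    // filesystem owner or a member of the configured supergroup.
    FSPermissionChecker.checkSuperuserPrivilege(fsOwner, "supergroup");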
Modified: hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java?rev=1077137&r1=1077136&r2=1077137&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java Fri Mar 4 03:44:54 2011
@@ -21,6 +21,7 @@ import java.io.IOException;
import java.io.PrintWriter;
import java.net.URI;
import java.net.URISyntaxException;
+import java.security.PrivilegedExceptionAction;
import javax.net.SocketFactory;
import javax.servlet.ServletContext;
@@ -38,7 +39,6 @@ import org.apache.hadoop.hdfs.server.com
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.UnixUserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation;
import org.znerd.xmlenc.XMLOutputter;
@@ -76,7 +76,6 @@ public class FileChecksumServlets {
/** {@inheritDoc} */
public void doGet(HttpServletRequest request, HttpServletResponse response
) throws ServletException, IOException {
- final UnixUserGroupInformation ugi = getUGI(request);
final PrintWriter out = response.getWriter();
final String filename = getFilename(request, response);
final XMLOutputter xml = new XMLOutputter(out, "UTF-8");
@@ -85,19 +84,27 @@ public class FileChecksumServlets {
final Configuration conf = new Configuration(DataNode.getDataNode().getConf());
final int socketTimeout = conf.getInt("dfs.socket.timeout", HdfsConstants.READ_TIMEOUT);
final SocketFactory socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class);
- UnixUserGroupInformation.saveToConf(conf,
- UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);
- final ClientProtocol nnproxy = DFSClient.createNamenode(conf);
try {
+ ClientProtocol nnproxy = getUGI(request).doAs(new PrivilegedExceptionAction<ClientProtocol>() {
+ @Override
+ public ClientProtocol run() throws IOException {
+ return DFSClient.createNamenode(conf);
+ }
+ });
+
final MD5MD5CRC32FileChecksum checksum = DFSClient.getFileChecksum(
filename, nnproxy, socketFactory, socketTimeout);
MD5MD5CRC32FileChecksum.write(xml, checksum);
} catch(IOException ioe) {
new RemoteException(ioe.getClass().getName(), ioe.getMessage()
).writeXml(filename, xml);
+ } catch (InterruptedException e) {
+ new RemoteException(e.getClass().getName(), e.getMessage()
+ ).writeXml(filename, xml);
+
}
xml.endDocument();
}
}
-}
\ No newline at end of file
+}
Modified: hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java?rev=1077137&r1=1077136&r2=1077137&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java Fri Mar 4 03:44:54 2011
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.na
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
+import java.security.PrivilegedExceptionAction;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
@@ -28,7 +29,7 @@ import org.apache.hadoop.hdfs.protocol.C
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
/** Redirect queries about the hosted filesystem to an appropriate datanode.
* @see org.apache.hadoop.hdfs.HftpFileSystem
@@ -36,7 +37,7 @@ import org.apache.hadoop.security.UnixUs
public class FileDataServlet extends DfsServlet {
/** Create a redirection URI */
- protected URI createUri(FileStatus i, UnixUserGroupInformation ugi,
+ protected URI createUri(FileStatus i, UserGroupInformation ugi,
ClientProtocol nnproxy, HttpServletRequest request)
throws IOException, URISyntaxException {
String scheme = request.getScheme();
@@ -51,7 +52,8 @@ public class FileDataServlet extends Dfs
"https".equals(scheme)
? (Integer)getServletContext().getAttribute("datanode.https.port")
: host.getInfoPort(),
- "/streamFile", "filename=" + i.getPath() + "&ugi=" + ugi, null);
+ "/streamFile", "filename=" + i.getPath() +
+ "&ugi=" + ugi.getUserName(), null);
}
private static JspHelper jspHelper = null;
@@ -85,12 +87,20 @@ public class FileDataServlet extends Dfs
*/
public void doGet(HttpServletRequest request, HttpServletResponse response)
throws IOException {
- final UnixUserGroupInformation ugi = getUGI(request);
- final ClientProtocol nnproxy = createNameNodeProxy(ugi);
+ final UserGroupInformation ugi = getUGI(request);
try {
- final String path = request.getPathInfo() != null
- ? request.getPathInfo() : "/";
+ final ClientProtocol nnproxy = ugi
+ .doAs(new PrivilegedExceptionAction<ClientProtocol>() {
+ @Override
+ public ClientProtocol run() throws IOException {
+ return createNameNodeProxy();
+ }
+ });
+
+ final String path = request.getPathInfo() != null ?
+ request.getPathInfo() : "/";
+
FileStatus info = nnproxy.getFileInfo(path);
if ((info != null) && !info.isDir()) {
response.sendRedirect(createUri(info, ugi, nnproxy,
@@ -104,6 +114,8 @@ public class FileDataServlet extends Dfs
response.getWriter().println(e.toString());
} catch (IOException e) {
response.sendError(400, e.getMessage());
+ } catch (InterruptedException e) {
+ response.sendError(400, e.getMessage());
}
}
Modified: hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java?rev=1077137&r1=1077136&r2=1077137&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java Fri Mar 4 03:44:54 2011
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.na
import java.io.IOException;
import java.io.PrintWriter;
+import java.security.PrivilegedExceptionAction;
import java.util.Map;
import javax.servlet.ServletContext;
@@ -27,7 +28,6 @@ import javax.servlet.http.HttpServletRes
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
-import org.apache.hadoop.security.UnixUserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation;
/**
@@ -44,19 +44,25 @@ public class FsckServlet extends DfsServ
final Map<String,String[]> pmap = request.getParameterMap();
final PrintWriter out = response.getWriter();
- final UnixUserGroupInformation ugi = getUGI(request);
- UserGroupInformation.setCurrentUser(ugi);
-
- final ServletContext context = getServletContext();
- final Configuration conf = new Configuration((Configuration) context.getAttribute("name.conf"));
- UnixUserGroupInformation.saveToConf(conf,
- UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);
-
- final NameNode nn = (NameNode) context.getAttribute("name.node");
- final int totalDatanodes = nn.namesystem.getNumberOfDatanodes(DatanodeReportType.LIVE);
- final short minReplication = nn.namesystem.getMinReplication();
-
- new NamenodeFsck(conf, nn, nn.getNetworkTopology(), pmap, out,
- totalDatanodes, minReplication).fsck();
+ final UserGroupInformation ugi = getUGI(request);
+ try {
+ ugi.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+ final ServletContext context = getServletContext();
+ final Configuration conf = new Configuration((Configuration) context.getAttribute("name.conf"));
+
+ final NameNode nn = (NameNode) context.getAttribute("name.node");
+ final int totalDatanodes = nn.namesystem.getNumberOfDatanodes(DatanodeReportType.LIVE);
+ final short minReplication = nn.namesystem.getMinReplication();
+
+ new NamenodeFsck(conf, nn, nn.getNetworkTopology(), pmap, out,
+ totalDatanodes, minReplication).fsck();
+ return null;
+ }
+ });
+ } catch (InterruptedException e) {
+ response.sendError(400, e.getMessage());
+ }
}
}
Modified: hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/JspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/JspHelper.java?rev=1077137&r1=1077136&r2=1077137&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/JspHelper.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/JspHelper.java Fri Mar 4 03:44:54 2011
@@ -43,7 +43,6 @@ import org.apache.hadoop.hdfs.server.dat
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.UnixUserGroupInformation;
import org.apache.hadoop.hdfs.security.BlockAccessToken;
public class JspHelper {
@@ -52,10 +51,7 @@ public class JspHelper {
static FSNamesystem fsn = null;
public static InetSocketAddress nameNodeAddr;
public static final Configuration conf = new Configuration();
- public static final UnixUserGroupInformation webUGI
- = UnixUserGroupInformation.createImmutable(
- conf.getStrings(WEB_UGI_PROPERTY_NAME));
-
+
public static final int defaultChunkSizeToView =
conf.getInt("dfs.default.chunk.view.size", 32 * 1024);
static Random rand = new Random();
@@ -68,9 +64,6 @@ public class JspHelper {
else {
nameNodeAddr = fsn.getDFSNameNodeAddress();
}
-
- UnixUserGroupInformation.saveToConf(conf,
- UnixUserGroupInformation.UGI_PROPERTY_NAME, webUGI);
}
public DatanodeID randomNode() throws IOException {
Modified: hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java?rev=1077137&r1=1077136&r2=1077137&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java Fri Mar 4 03:44:54 2011
@@ -21,13 +21,13 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.hdfs.HftpFileSystem;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.security.UnixUserGroupInformation;
import org.apache.hadoop.util.VersionInfo;
import org.znerd.xmlenc.*;
import java.io.IOException;
import java.io.PrintWriter;
+import java.security.PrivilegedExceptionAction;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.HashMap;
@@ -125,7 +125,6 @@ public class ListPathsServlet extends Df
*/
public void doGet(HttpServletRequest request, HttpServletResponse response)
throws ServletException, IOException {
- final UnixUserGroupInformation ugi = getUGI(request);
final PrintWriter out = response.getWriter();
final XMLOutputter doc = new XMLOutputter(out, "UTF-8");
try {
@@ -134,8 +133,15 @@ public class ListPathsServlet extends Df
final boolean recur = "yes".equals(root.get("recursive"));
final Pattern filter = Pattern.compile(root.get("filter"));
final Pattern exclude = Pattern.compile(root.get("exclude"));
- ClientProtocol nnproxy = createNameNodeProxy(ugi);
-
+
+ ClientProtocol nnproxy =
+ getUGI(request).doAs(new PrivilegedExceptionAction<ClientProtocol>() {
+ @Override
+ public ClientProtocol run() throws IOException {
+ return createNameNodeProxy();
+ }
+ });
+
doc.declaration();
doc.startTag("listing");
for (Map.Entry<String,String> m : root.entrySet()) {
@@ -173,6 +179,9 @@ public class ListPathsServlet extends Df
if (doc != null) {
doc.endDocument();
}
+ } catch (InterruptedException e) {
+ LOG.warn("ListPathServlet encountered InterruptedException", e);
+ response.sendError(400, e.getMessage());
} finally {
if (out != null) {
out.close();
Modified: hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1077137&r1=1077136&r2=1077137&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java Fri Mar 4 03:44:54 2011
@@ -26,6 +26,7 @@ import org.apache.hadoop.fs.Trash;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HDFSPolicyProvider;
import org.apache.hadoop.hdfs.protocol.*;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
@@ -50,17 +51,17 @@ import org.apache.hadoop.util.StringUtil
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.hdfs.security.ExportedAccessKeys;
-import org.apache.hadoop.security.RefreshUserToGroupMappingsProtocol;
-import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AuthorizationException;
-import org.apache.hadoop.security.authorize.ConfiguredPolicy;
import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.hdfs.security.token.DelegationTokenIdentifier;
+import org.apache.hadoop.security.Groups;
+import org.apache.hadoop.security.RefreshUserToGroupMappingsProtocol;
import java.io.*;
import java.net.*;
@@ -189,12 +190,7 @@ public class NameNode implements ClientP
if (serviceAuthEnabled =
conf.getBoolean(
ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
- PolicyProvider policyProvider =
- (PolicyProvider)(ReflectionUtils.newInstance(
- conf.getClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
- HDFSPolicyProvider.class, PolicyProvider.class),
- conf));
- SecurityUtil.setPolicy(new ConfiguredPolicy(conf, policyProvider));
+ ServiceAuthorizationManager.refresh(conf, new HDFSPolicyProvider());
}
// create rpc server
@@ -262,6 +258,9 @@ public class NameNode implements ClientP
LOG.info("Web-server up at: " + infoHost + ":" + infoPort);
}
+ private final static String KEYTAB_FILE_KEY = "dfs.namenode.keytab.file";
+ private final static String USER_NAME_KEY = "dfs.namenode.user.name.key";
+
/**
* Start NameNode.
* <p>
@@ -285,6 +284,9 @@ public class NameNode implements ClientP
* @throws IOException
*/
public NameNode(Configuration conf) throws IOException {
+ UserGroupInformation.setConfiguration(conf);
+ DFSUtil.login(conf, KEYTAB_FILE_KEY, USER_NAME_KEY);
+
try {
initialize(conf);
} catch (IOException e) {
@@ -401,7 +403,7 @@ public class NameNode implements ClientP
+ MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
}
namesystem.startFile(src,
- new PermissionStatus(UserGroupInformation.getCurrentUGI().getUserName(),
+ new PermissionStatus(UserGroupInformation.getCurrentUser().getUserName(),
null, masked),
clientName, clientMachine, overwrite, replication, blockSize);
myMetrics.numFilesCreated.inc();
@@ -566,7 +568,7 @@ public class NameNode implements ClientP
+ MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
}
return namesystem.mkdirs(src,
- new PermissionStatus(UserGroupInformation.getCurrentUGI().getUserName(),
+ new PermissionStatus(UserGroupInformation.getCurrentUser().getUserName(),
null, masked));
}
@@ -920,14 +922,15 @@ public class NameNode implements ClientP
throw new AuthorizationException("Service Level Authorization not enabled!");
}
- SecurityUtil.getPolicy().refresh();
+ ServiceAuthorizationManager.refresh(
+ new Configuration(), new HDFSPolicyProvider());
}
@Override
public void refreshUserToGroupsMappings(Configuration conf) throws IOException {
LOG.info("Refreshing all user-to-groups mappings. Requested by user: " +
- UserGroupInformation.getCurrentUGI().getUserName());
- SecurityUtil.getUserToGroupsMappingService(conf).refresh();
+ UserGroupInformation.getCurrentUser().getUserName());
+ Groups.getUserToGroupsMappingService(conf).refresh();
}
private static void printUsage() {
Modified: hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/PermissionChecker.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/PermissionChecker.java?rev=1077137&r1=1077136&r2=1077137&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/PermissionChecker.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/PermissionChecker.java Fri Mar 4 03:44:54 2011
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
+import java.io.IOException;
import java.util.*;
import org.apache.commons.logging.Log;
@@ -30,30 +31,48 @@ import org.apache.hadoop.security.UserGr
class PermissionChecker {
static final Log LOG = LogFactory.getLog(UserGroupInformation.class);
+ private final UserGroupInformation ugi;
final String user;
private final Set<String> groups = new HashSet<String>();
final boolean isSuper;
PermissionChecker(String fsOwner, String supergroup
) throws AccessControlException{
- UserGroupInformation ugi = UserGroupInformation.getCurrentUGI();
+ try {
+ ugi = UserGroupInformation.getCurrentUser();
+ } catch (IOException e) {
+ throw new AccessControlException(e);
+ }
if (LOG.isDebugEnabled()) {
LOG.debug("ugi=" + ugi);
}
- if (ugi != null) {
- user = ugi.getUserName();
- groups.addAll(Arrays.asList(ugi.getGroupNames()));
- isSuper = user.equals(fsOwner) || groups.contains(supergroup);
- }
- else {
- throw new AccessControlException("ugi = null");
- }
+ user = ugi.getUserName();
+ groups.addAll(Arrays.asList(ugi.getGroupNames()));
+ isSuper = user.equals(fsOwner) || groups.contains(supergroup);
}
boolean containsGroup(String group) {return groups.contains(group);}
/**
+ * Verify if the caller has the required permission. This will result in
+ * an exception if the caller is not allowed to access the resource.
+ * @param owner owner of the system
+ * @param supergroup supergroup of the system
+ */
+ public static void checkSuperuserPrivilege(UserGroupInformation owner,
+ String supergroup)
+ throws AccessControlException {
+ PermissionChecker checker =
+ new PermissionChecker(owner.getUserName(), supergroup);
+ if (!checker.isSuper) {
+ throw new AccessControlException("Access denied for user "
+ + checker.user + ". Superuser privilege is required");
+ }
+ }
+
+
+ /**
* Check whether current user have permissions to access the path.
* Traverse is always checked.
*
Modified: hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/StreamFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/StreamFile.java?rev=1077137&r1=1077136&r2=1077137&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/StreamFile.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/StreamFile.java Fri Mar 4 03:44:54 2011
@@ -21,10 +21,11 @@ import javax.servlet.*;
import javax.servlet.http.*;
import java.io.*;
import java.net.*;
+import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.conf.*;
public class StreamFile extends DfsServlet {
@@ -39,11 +40,18 @@ public class StreamFile extends DfsServl
/** getting a client for connecting to dfs */
protected DFSClient getDFSClient(HttpServletRequest request)
- throws IOException {
- Configuration conf = new Configuration(masterConf);
- UnixUserGroupInformation.saveToConf(conf,
- UnixUserGroupInformation.UGI_PROPERTY_NAME, getUGI(request));
- return new DFSClient(nameNodeAddr, conf);
+ throws IOException, InterruptedException {
+ final Configuration conf = new Configuration(masterConf);
+
+ DFSClient client =
+ getUGI(request).doAs(new PrivilegedExceptionAction<DFSClient>() {
+ @Override
+ public DFSClient run() throws IOException {
+ return new DFSClient(nameNodeAddr, conf);
+ }
+ });
+
+ return client;
}
public void doGet(HttpServletRequest request, HttpServletResponse response)
@@ -55,7 +63,15 @@ public class StreamFile extends DfsServl
out.print("Invalid input");
return;
}
- DFSClient dfs = getDFSClient(request);
+
+ DFSClient dfs;
+ try {
+ dfs = getDFSClient(request);
+ } catch (InterruptedException e) {
+ response.sendError(400, e.getMessage());
+ return;
+ }
+
FSInputStream in = dfs.open(filename);
OutputStream os = response.getOutputStream();
response.setHeader("Content-Disposition", "attachment; filename=\"" +
Modified: hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java?rev=1077137&r1=1077136&r2=1077137&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java Fri Mar 4 03:44:54 2011
@@ -20,8 +20,6 @@ package org.apache.hadoop.hdfs.tools;
import java.io.IOException;
import java.util.List;
-import javax.security.auth.login.LoginException;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem.DiskStatus;
@@ -40,7 +38,7 @@ import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.RefreshUserToGroupMappingsProtocol;
-import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.ToolRunner;
@@ -611,16 +609,9 @@ public class DFSAdmin extends FsShell {
return 0;
}
- private static UnixUserGroupInformation getUGI(Configuration conf)
+ private static UserGroupInformation getUGI()
throws IOException {
- UnixUserGroupInformation ugi = null;
- try {
- ugi = UnixUserGroupInformation.login(conf, true);
- } catch (LoginException e) {
- throw (IOException)(new IOException(
- "Failed to get the current user's information.").initCause(e));
- }
- return ugi;
+ return UserGroupInformation.getCurrentUser();
}
/**
@@ -637,7 +628,7 @@ public class DFSAdmin extends FsShell {
(RefreshAuthorizationPolicyProtocol)
RPC.getProxy(RefreshAuthorizationPolicyProtocol.class,
RefreshAuthorizationPolicyProtocol.versionID,
- NameNode.getAddress(conf), getUGI(conf), conf,
+ NameNode.getAddress(conf), getUGI(), conf,
NetUtils.getSocketFactory(conf,
RefreshAuthorizationPolicyProtocol.class));
@@ -661,7 +652,7 @@ public class DFSAdmin extends FsShell {
(RefreshUserToGroupMappingsProtocol)
RPC.getProxy(RefreshUserToGroupMappingsProtocol.class,
RefreshUserToGroupMappingsProtocol.versionID,
- NameNode.getAddress(conf), getUGI(conf), conf,
+ NameNode.getAddress(conf), getUGI(), conf,
NetUtils.getSocketFactory(conf,
RefreshUserToGroupMappingsProtocol.class));
Modified: hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/tools/DFSck.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/tools/DFSck.java?rev=1077137&r1=1077136&r2=1077137&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/tools/DFSck.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/tools/DFSck.java Fri Mar 4 03:44:54 2011
@@ -25,13 +25,10 @@ import java.net.URL;
import java.net.URLConnection;
import java.net.URLEncoder;
-import javax.security.auth.login.LoginException;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck;
import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.UnixUserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
@@ -71,11 +68,10 @@ public class DFSck extends Configured im
/**
* Filesystem checker.
* @param conf current Configuration
- * @throws LoginException if login failed
*/
- public DFSck(Configuration conf) throws LoginException {
+ public DFSck(Configuration conf) throws IOException {
super(conf);
- this.ugi = UnixUserGroupInformation.login(conf, true);
+ this.ugi = UserGroupInformation.getCurrentUser();
}
private String getInfoServer() throws IOException {
@@ -112,7 +108,7 @@ public class DFSck extends Configured im
}
final StringBuffer url = new StringBuffer("http://");
- url.append(getInfoServer()).append("/fsck?ugi=").append(ugi).append("&path=");
+ url.append(getInfoServer()).append("/fsck?ugi=").append(ugi.getUserName()).append("&path=");
String dir = "/";
// find top-level dir first
Modified: hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/IsolationRunner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/IsolationRunner.java?rev=1077137&r1=1077136&r2=1077137&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/IsolationRunner.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/IsolationRunner.java Fri Mar 4 03:44:54 2011
@@ -27,6 +27,7 @@ import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalDirAllocator;
import org.apache.hadoop.fs.Path;
@@ -142,8 +143,9 @@ public class IsolationRunner {
*/
boolean run(String[] args)
throws ClassNotFoundException, IOException, InterruptedException {
- if (args.length != 1) {
- System.out.println("Usage: IsolationRunner <path>/job.xml");
+ if (args.length < 1) {
+ System.out.println("Usage: IsolationRunner <path>/job.xml " +
+ "<optional-user-name>");
return false;
}
File jobFilename = new File(args[0]);
@@ -151,7 +153,14 @@ public class IsolationRunner {
System.out.println(jobFilename + " is not a valid job file.");
return false;
}
+ String user;
+ if (args.length > 1) {
+ user = args[1];
+ } else {
+ user = UserGroupInformation.getCurrentUser().getUserName();
+ }
JobConf conf = new JobConf(new Path(jobFilename.toString()));
+ conf.setUser(user);
TaskAttemptID taskId = TaskAttemptID.forName(conf.get("mapred.task.id"));
if (taskId == null) {
System.out.println("mapred.task.id not found in configuration;" +
Modified: hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/JobClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/JobClient.java?rev=1077137&r1=1077136&r2=1077137&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/JobClient.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/JobClient.java Fri Mar 4 03:44:54 2011
@@ -41,6 +41,7 @@ import java.util.List;
import java.util.Map;
import javax.security.auth.login.LoginException;
+import java.security.PrivilegedExceptionAction;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -66,7 +67,7 @@ import org.apache.hadoop.mapreduce.JobSu
import org.apache.hadoop.mapreduce.security.TokenStorage;
import org.apache.hadoop.mapreduce.split.JobSplitWriter;
import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Tool;
@@ -430,7 +431,8 @@ public class JobClient extends Configure
private JobSubmissionProtocol createRPCProxy(InetSocketAddress addr,
Configuration conf) throws IOException {
return (JobSubmissionProtocol) RPC.getProxy(JobSubmissionProtocol.class,
- JobSubmissionProtocol.versionID, addr, getUGI(conf), conf,
+ JobSubmissionProtocol.versionID, addr,
+ UserGroupInformation.getCurrentUser(), conf,
NetUtils.getSocketFactory(conf, JobSubmissionProtocol.class));
}
@@ -561,12 +563,6 @@ public class JobClient extends Configure
String libjars = job.get("tmpjars");
String archives = job.get("tmparchives");
- /*
- * set this user's id in job configuration, so later job files can be
- * accessed using this user's id
- */
- setUGIAndUserGroupNames(job);
-
//
// Figure out what fs the JobTracker is using. Copy the
// job to it, under a temporary name. This allows DFS to work,
@@ -662,32 +658,6 @@ public class JobClient extends Configure
"See JobConf(Class) or JobConf#setJar(String).");
}
}
-
- /**
- * Set the UGI, user name and the group name for the job.
- *
- * @param job
- * @throws IOException
- */
- void setUGIAndUserGroupNames(JobConf job)
- throws IOException {
- UnixUserGroupInformation ugi = getUGI(job);
- job.setUser(ugi.getUserName());
- if (ugi.getGroupNames().length > 0) {
- job.set("group.name", ugi.getGroupNames()[0]);
- }
- }
-
- private UnixUserGroupInformation getUGI(Configuration job) throws IOException {
- UnixUserGroupInformation ugi = null;
- try {
- ugi = UnixUserGroupInformation.login(job, true);
- } catch (LoginException e) {
- throw (IOException)(new IOException(
- "Failed to get the current user's information.").initCause(e));
- }
- return ugi;
- }
/**
* Submit a job to the MR system.
@@ -836,7 +806,8 @@ public class JobClient extends Configure
// sort the splits into order based on size, so that the biggest
// go first
Arrays.sort(array, new SplitComparator());
- JobSplitWriter.createSplitFiles(jobSubmitDir, conf, array);
+ JobSplitWriter.createSplitFiles(jobSubmitDir, conf,
+ jobSubmitDir.getFileSystem(conf), array);
return array.length;
}
@@ -878,7 +849,8 @@ public class JobClient extends Configure
}
}
});
- JobSplitWriter.createSplitFiles(jobSubmitDir, job, splits);
+ JobSplitWriter.createSplitFiles(jobSubmitDir, job,
+ jobSubmitDir.getFileSystem(job), splits);
return splits.length;
}
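
Two threads run through the JobClient hunks: the JobTracker RPC proxy is now built with UserGroupInformation.getCurrentUser() rather than a config-derived UGI, which lets setUGIAndUserGroupNames and getUGI disappear entirely, and JobSplitWriter.createSplitFiles receives the FileSystem explicitly. A sketch of the new split-writing call, names as in the patch:

    // Resolve the submit directory's FileSystem once and pass it through,
    // instead of letting JobSplitWriter re-derive it from the Configuration.
    FileSystem submitFs = jobSubmitDir.getFileSystem(conf);  // throws IOException
    JobSplitWriter.createSplitFiles(jobSubmitDir, conf, submitFs, array);
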
Modified: hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/JobHistory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/JobHistory.java?rev=1077137&r1=1077136&r2=1077137&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/JobHistory.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/JobHistory.java Fri Mar 4 03:44:54 2011
@@ -54,6 +54,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.StringUtils;
/**
@@ -827,7 +828,10 @@ public class JobHistory {
*/
private static String getNewJobHistoryFileName(JobConf jobConf, JobID id) {
return JOBTRACKER_UNIQUE_STRING
- + id.toString() + "_" + getUserName(jobConf) + "_"
+ + id.toString() + "_" +
+ UserGroupInformation.createRemoteUser(getUserName(jobConf)).
+ getShortUserName()
+ + "_"
+ trimJobName(getJobName(jobConf));
}
@@ -872,9 +876,9 @@ public class JobHistory {
private static synchronized String getJobHistoryFileName(JobConf jobConf,
JobID id, Path dir, FileSystem fs)
throws IOException {
- String user = getUserName(jobConf);
+ String user = UserGroupInformation.createRemoteUser(getUserName(jobConf)).
+ getShortUserName();
String jobName = trimJobName(getJobName(jobConf));
-
if (LOG_DIR == null) {
return null;
}
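
Job-history file names now embed the short user name, so a Kerberos principal collapses to its local part before reaching the file system. A sketch, assuming default auth_to_local rules; the example principal is illustrative:

    String raw = getUserName(jobConf);          // may be "alice/host@EXAMPLE.COM"
    String shortName = UserGroupInformation
        .createRemoteUser(raw)
        .getShortUserName();                    // "alice" under default rules
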
Modified: hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/JobInProgress.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/JobInProgress.java?rev=1077137&r1=1077136&r2=1077137&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/JobInProgress.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/JobInProgress.java Fri Mar 4 03:44:54 2011
@@ -18,6 +18,7 @@
package org.apache.hadoop.mapred;
import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
@@ -51,7 +52,7 @@ import org.apache.hadoop.metrics.Metrics
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
-import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.mapreduce.TaskType;
@@ -289,18 +290,20 @@ class JobInProgress {
* to the tracker.
*/
public JobInProgress(JobID jobid, JobTracker jobtracker,
- JobConf default_conf) throws IOException {
+ JobConf default_conf)
+ throws IOException, InterruptedException {
this(jobid, jobtracker, default_conf, 0);
}
public JobInProgress(JobID jobid, JobTracker jobtracker,
- JobConf default_conf, int rCount) throws IOException {
+ JobConf default_conf, int rCount)
+ throws IOException, InterruptedException {
this(jobtracker, default_conf, null, rCount, null);
}
- JobInProgress(JobTracker jobtracker, JobConf default_conf,
+ JobInProgress(JobTracker jobtracker, final JobConf default_conf,
JobInfo jobInfo, int rCount, TokenStorage ts)
- throws IOException {
+ throws IOException, InterruptedException {
this.restartCount = rCount;
this.jobId = JobID.downgrade(jobInfo.getJobID());
String url = "http://" + jobtracker.getJobTrackerMachine() + ":"
@@ -314,12 +317,12 @@ class JobInProgress {
// use the user supplied token to add user credentials to the conf
jobSubmitDir = jobInfo.getJobSubmitDir();
- String user = jobInfo.getUser().toString();
- conf = new JobConf();
- conf.set(UnixUserGroupInformation.UGI_PROPERTY_NAME,
- new UnixUserGroupInformation(user,
- new String[]{UnixUserGroupInformation.DEFAULT_GROUP}).toString());
- fs = jobSubmitDir.getFileSystem(conf);
+ user = jobInfo.getUser().toString();
+ UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
+ fs = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
+ public FileSystem run() throws IOException {
+ return jobSubmitDir.getFileSystem(default_conf);
+ }});
this.localJobFile = default_conf.getLocalPath(JobTracker.SUBDIR
+"/"+jobId + ".xml");
Path jobFilePath = JobSubmissionFiles.getJobConfPath(jobSubmitDir);
@@ -451,6 +454,13 @@ class JobInProgress {
public boolean inited() {
return tasksInited.get();
}
+
+ /**
+ * Get the user for the job
+ */
+ public String getUser() {
+ return user;
+ }
boolean hasRestarted() {
return restartCount > 0;
@@ -2852,7 +2862,7 @@ class JobInProgress {
Path tempDir = jobtracker.getSystemDirectoryForJob(getJobID());
new CleanupQueue().addToQueue(new PathDeletionContext(
- jobtracker.getFileSystem(tempDir), tempDir.toUri().getPath()));
+ jobtracker.getFileSystem(), tempDir.toUri().getPath()));
} catch (IOException e) {
LOG.warn("Error cleaning up "+profile.getJobID()+": "+e);
}
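
JobInProgress no longer smuggles the submitter through the UGI_PROPERTY_NAME config key; it impersonates the submitter with doAs when opening the submit directory, which is also why the constructors now declare InterruptedException. The pattern as it appears in the patch:

    UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
    FileSystem fs = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
      public FileSystem run() throws IOException {
        // Runs with "user" as the effective caller, not the JobTracker.
        return jobSubmitDir.getFileSystem(default_conf);
      }
    });
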
Modified: hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/JobQueueClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/JobQueueClient.java?rev=1077137&r1=1077136&r2=1077137&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/JobQueueClient.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/JobQueueClient.java Fri Mar 4 03:44:54 2011
@@ -148,7 +148,7 @@ class JobQueueClient extends Configured
private void displayQueueAclsInfoForCurrentUser() throws IOException {
QueueAclsInfo[] queueAclsInfoList = jc.getQueueAclsForCurrentUser();
- UserGroupInformation ugi = UserGroupInformation.readFrom(getConf());
+ UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
if (queueAclsInfoList.length > 0) {
System.out.println("Queue acls for user : "
+ ugi.getUserName());
Modified: hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/JobTracker.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/JobTracker.java?rev=1077137&r1=1077136&r2=1077137&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/JobTracker.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/JobTracker.java Fri Mar 4 03:44:54 2011
@@ -32,6 +32,7 @@ import java.io.Writer;
import java.net.BindException;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;
+import java.security.PrivilegedExceptionAction;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
@@ -55,8 +56,6 @@ import java.util.Vector;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CopyOnWriteArrayList;
-import javax.security.auth.login.LoginException;
-
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@@ -87,14 +86,10 @@ import org.apache.hadoop.net.Node;
import org.apache.hadoop.net.NodeBase;
import org.apache.hadoop.net.ScriptBasedMapping;
import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.security.PermissionChecker;
+import org.apache.hadoop.security.Groups;
import org.apache.hadoop.security.RefreshUserToGroupMappingsProtocol;
-import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.security.UnixUserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AuthorizationException;
-import org.apache.hadoop.security.authorize.ConfiguredPolicy;
-import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
import org.apache.hadoop.util.HostsFileReader;
@@ -1494,7 +1489,6 @@ public class JobTracker implements MRCon
void updateRestartCount() throws IOException {
Path restartFile = getRestartCountFile();
Path tmpRestartFile = getTempRestartCountFile();
- FileSystem fs = restartFile.getFileSystem(conf);
FsPermission filePerm = new FsPermission(SYSTEM_FILE_PERMISSION);
// read the count from the jobtracker info file
@@ -1621,7 +1615,7 @@ public class JobTracker implements MRCon
// 2. Check if the user has appropriate access
// Get the user group info for the job's owner
UserGroupInformation ugi =
- UserGroupInformation.readFrom(job.getJobConf());
+ UserGroupInformation.createRemoteUser(job.getJobConf().getUser());
LOG.info("Submitting job " + id + " on behalf of user "
+ ugi.getUserName() + " in groups : "
+ StringUtils.arrayToString(ugi.getGroupNames()));
@@ -1926,15 +1920,27 @@ public class JobTracker implements MRCon
JobTracker(JobConf conf) throws IOException, InterruptedException {
this(conf, generateNewIdentifier());
}
+
+ public static final String JT_USER_NAME = "mapreduce.jobtracker.user.name";
+ public static final String JT_KEYTAB_FILE =
+ "mapreduce.jobtracker.keytab.file";
- JobTracker(JobConf conf, String identifier)
+ JobTracker(final JobConf conf, String identifier)
throws IOException, InterruptedException {
// find the owner of the process
- try {
- mrOwner = UnixUserGroupInformation.login(conf);
- } catch (LoginException e) {
- throw new IOException(StringUtils.stringifyException(e));
+ // get the desired principal to load
+ String keytabFilename = conf.get(JT_KEYTAB_FILE);
+ UserGroupInformation.setConfiguration(conf);
+ if (keytabFilename != null) {
+ String desiredUser = conf.get(JT_USER_NAME,
+ System.getProperty("user.name"));
+ UserGroupInformation.loginUserFromKeytab(desiredUser,
+ keytabFilename);
+ mrOwner = UserGroupInformation.getLoginUser();
+ } else {
+ mrOwner = UserGroupInformation.getCurrentUser();
}
+
supergroup = conf.get("mapred.permissions.supergroup", "supergroup");
LOG.info("Starting jobtracker with owner as " + mrOwner.getUserName()
+ " and supergroup as " + supergroup);
@@ -1999,12 +2005,7 @@ public class JobTracker implements MRCon
// Set service-level authorization security policy
if (conf.getBoolean(
ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
- PolicyProvider policyProvider =
- (PolicyProvider)(ReflectionUtils.newInstance(
- conf.getClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
- MapReducePolicyProvider.class, PolicyProvider.class),
- conf));
- SecurityUtil.setPolicy(new ConfiguredPolicy(conf, policyProvider));
+ ServiceAuthorizationManager.refresh(conf, new MapReducePolicyProvider());
}
int handlerCount = conf.getInt("mapred.job.tracker.handler.count", 10);
@@ -2071,7 +2072,10 @@ public class JobTracker implements MRCon
try {
// if we haven't contacted the namenode go ahead and do it
if (fs == null) {
- fs = FileSystem.get(conf);
+ fs = mrOwner.doAs(new PrivilegedExceptionAction<FileSystem>() {
+ public FileSystem run() throws IOException {
+ return FileSystem.get(conf);
+ }});
}
// clean up the system dir, which will only work if hdfs is out of
// safe mode
@@ -2087,7 +2091,7 @@ public class JobTracker implements MRCon
if (!systemDirStatus.getPermission().equals(SYSTEM_DIR_PERMISSION)) {
LOG.warn("Incorrect permissions on " + systemDir +
". Setting it to " + SYSTEM_DIR_PERMISSION);
- fs.setPermission(systemDir, SYSTEM_DIR_PERMISSION);
+ fs.setPermission(systemDir, new FsPermission(SYSTEM_DIR_PERMISSION));
}
} catch (FileNotFoundException fnf) {} //ignore
// Make sure that the backup data is preserved
@@ -2144,10 +2148,15 @@ public class JobTracker implements MRCon
// Initialize history DONE folder
if (historyInitialized) {
JobHistory.initDone(conf, fs);
- String historyLogDir =
+ final String historyLogDir =
JobHistory.getCompletedJobHistoryLocation().toString();
infoServer.setAttribute("historyLogDir", historyLogDir);
- FileSystem historyFS = new Path(historyLogDir).getFileSystem(conf);
+ FileSystem historyFS = mrOwner.doAs(new PrivilegedExceptionAction<FileSystem>() {
+ public FileSystem run() throws IOException {
+ return new Path(historyLogDir).getFileSystem(conf);
+ }
+ });
infoServer.setAttribute("fileSys", historyFS);
}
@@ -2218,14 +2227,6 @@ public class JobTracker implements MRCon
}
/**
- * Get the FileSystem for the given path. This can be used to resolve
- * filesystem for job history, local job files or mapred.system.dir path.
- */
- FileSystem getFileSystem(Path path) throws IOException {
- return path.getFileSystem(conf);
- }
-
- /**
* Get JobTracker's LocalFileSystem handle. This is used by jobs for
* localizing job files to the local disk.
*/
@@ -3515,7 +3516,7 @@ public class JobTracker implements MRCon
//job already running, don't start twice
return jobs.get(jobId).getStatus();
}
- UserGroupInformation ugi = UserGroupInformation.getCurrentUGI();
+ UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
JobInfo jobInfo = new JobInfo(jobId, new Text(ugi.getUserName()),
new Path(jobSubmitDir));
JobInProgress job = null;
@@ -3573,7 +3574,7 @@ public class JobTracker implements MRCon
new Path(conf.get("mapreduce.jobtracker.staging.root.dir",
"/tmp/hadoop/mapred/staging"));
FileSystem fs = stagingRootDir.getFileSystem(conf);
- String user = UserGroupInformation.getCurrentUGI().getUserName();
+ String user = UserGroupInformation.getCurrentUser().getUserName();
return fs.makeQualified(new Path(stagingRootDir,
user+"/.staging")).toString();
}
@@ -3609,7 +3610,7 @@ public class JobTracker implements MRCon
QueueManager.QueueOperation oper)
throws IOException {
// get the user group info
- UserGroupInformation ugi = UserGroupInformation.getCurrentUGI();
+ UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
checkAccess(job, oper, ugi);
}
@@ -3774,8 +3775,8 @@ public class JobTracker implements MRCon
* @param priority new priority of the job
*/
public synchronized void setJobPriority(JobID jobid,
- String priority)
- throws IOException {
+ String priority)
+ throws IOException {
JobInProgress job = jobs.get(jobid);
if (null == job) {
LOG.info("setJobPriority(): JobId " + jobid.toString()
@@ -4270,12 +4271,35 @@ public class JobTracker implements MRCon
}
/**
+ * Is the current user a super user?
+ * @return true, if it is a super user
+ * @throws IOException if there are problems getting the current user
+ */
+ private synchronized boolean isSuperUser() throws IOException {
+ UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+ if (mrOwner.getUserName().equals(ugi.getUserName()) ) {
+ return true;
+ }
+ String[] groups = ugi.getGroupNames();
+ for(int i=0; i < groups.length; ++i) {
+ if (groups[i].equals(supergroup)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ /**
* Rereads the config to get hosts and exclude list file names.
* Rereads the files to update the hosts and exclude lists.
*/
public synchronized void refreshNodes() throws IOException {
// check access
- PermissionChecker.checkSuperuserPrivilege(mrOwner, supergroup);
+ if (!isSuperUser()) {
+ String user = UserGroupInformation.getCurrentUser().getUserName();
+ throw new AccessControlException(user +
+ " is not authorized to refresh nodes.");
+ }
// call the actual api
refreshHosts();
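
refreshNodes previously delegated to PermissionChecker.checkSuperuserPrivilege; the guard is now local to the JobTracker: the caller qualifies if it is the JobTracker's own principal or belongs to the configured supergroup. A compact restatement of the new check:

    UserGroupInformation caller = UserGroupInformation.getCurrentUser();
    boolean isSuper = mrOwner.getUserName().equals(caller.getUserName());
    for (String group : caller.getGroupNames()) {
      isSuper |= group.equals(supergroup);
    }
    if (!isSuper) {
      throw new AccessControlException(caller.getUserName()
          + " is not authorized to refresh nodes.");
    }
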
@@ -4423,7 +4447,7 @@ public class JobTracker implements MRCon
@Override
public QueueAclsInfo[] getQueueAclsForCurrentUser() throws IOException{
return queueManager.getQueueAcls(
- UserGroupInformation.getCurrentUGI());
+ UserGroupInformation.getCurrentUser());
}
private synchronized JobStatus[] getJobStatus(Collection<JobInProgress> jips,
boolean toComplete) {
@@ -4461,7 +4485,7 @@ public class JobTracker implements MRCon
ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
throw new AuthorizationException("Service Level Authorization not enabled!");
}
- SecurityUtil.getPolicy().refresh();
+ ServiceAuthorizationManager.refresh(conf, new MapReducePolicyProvider());
}
private void initializeTaskMemoryRelatedConfig() {
@@ -4514,13 +4538,13 @@ public class JobTracker implements MRCon
limitMaxMemForReduceTasks).append(")"));
}
-
+
@Override
public void refreshUserToGroupsMappings(Configuration conf) throws IOException {
LOG.info("Refreshing all user-to-groups mappings. Requested by user: " +
- UserGroupInformation.getCurrentUGI().getUserName());
+ UserGroupInformation.getCurrentUser().getUserName());
- SecurityUtil.getUserToGroupsMappingService(conf).refresh();
+ Groups.getUserToGroupsMappingService(conf).refresh();
}
private boolean perTaskMemoryConfigurationSetOnJT() {
@@ -4578,7 +4602,7 @@ public class JobTracker implements MRCon
@Override
public void refreshQueueAcls() throws IOException{
LOG.info("Refreshing queue acls. requested by : " +
- UserGroupInformation.getCurrentUGI().getUserName());
+ UserGroupInformation.getCurrentUser().getUserName());
this.queueManager.refreshAcls(new Configuration(this.conf));
}
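
The two admin refresh paths likewise lose their SecurityUtil indirection; both replacement calls appear verbatim in the hunks above:

    // Reload the service-level authorization policy ...
    ServiceAuthorizationManager.refresh(conf, new MapReducePolicyProvider());
    // ... and the user-to-groups mapping service.
    Groups.getUserToGroupsMappingService(conf).refresh();
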
Modified: hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/LinuxTaskController.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/LinuxTaskController.java?rev=1077137&r1=1077136&r2=1077137&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/LinuxTaskController.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/LinuxTaskController.java Fri Mar 4 03:44:54 2011
@@ -393,7 +393,7 @@ class LinuxTaskController extends TaskCo
throws IOException {
String[] taskControllerCmd = new String[3 + cmdArgs.size()];
taskControllerCmd[0] = getTaskControllerExecutablePath();
- taskControllerCmd[1] = userName;
+ taskControllerCmd[1] = TaskTracker.getShortUserName(userName);
taskControllerCmd[2] = String.valueOf(command.ordinal());
int i = 3;
for (String cmdArg : cmdArgs) {
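
The setuid task-controller now receives the short user name in argv[1], so a full Kerberos principal never reaches the native binary. TaskTracker.getShortUserName is a helper introduced elsewhere in this patch; the call-site shape:

    String[] taskControllerCmd = new String[3 + cmdArgs.size()];
    taskControllerCmd[0] = getTaskControllerExecutablePath();
    taskControllerCmd[1] = TaskTracker.getShortUserName(userName);  // short name only
    taskControllerCmd[2] = String.valueOf(command.ordinal());
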
Modified: hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/LocalJobRunner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/LocalJobRunner.java?rev=1077137&r1=1077136&r2=1077137&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/LocalJobRunner.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/LocalJobRunner.java Fri Mar 4 03:44:54 2011
@@ -189,6 +189,8 @@ class LocalJobRunner implements JobSubmi
MapTask map = new MapTask(systemJobFile.toString(),
mapId, i,
taskSplitMetaInfos[i].getSplitIndex(), 1);
+ map.setUser(UserGroupInformation.getCurrentUser().
+ getShortUserName());
JobConf localConf = new JobConf(job);
TaskRunner.setupChildMapredLocalDirs(map, localConf);
@@ -197,6 +199,7 @@ class LocalJobRunner implements JobSubmi
mapOutputFiles.put(mapId, mapOutput);
map.setJobFile(localJobFile.toString());
+ localConf.setUser(map.getUser());
map.localizeConfiguration(localConf);
map.setConf(localConf);
map_tasks += 1;
@@ -216,6 +219,8 @@ class LocalJobRunner implements JobSubmi
ReduceTask reduce =
new ReduceTask(systemJobFile.toString(), reduceId, 0, mapIds.size(),
1);
+ reduce.setUser(UserGroupInformation.getCurrentUser().
+ getShortUserName());
JobConf localConf = new JobConf(job);
TaskRunner.setupChildMapredLocalDirs(reduce, localConf);
// move map output to reduce input
@@ -240,6 +245,7 @@ class LocalJobRunner implements JobSubmi
}
if (!this.isInterrupted()) {
reduce.setJobFile(localJobFile.toString());
+ localConf.setUser(reduce.getUser());
reduce.localizeConfiguration(localConf);
reduce.setConf(localConf);
reduce_tasks += 1;
@@ -499,7 +505,7 @@ class LocalJobRunner implements JobSubmi
Path stagingRootDir =
new Path(conf.get("mapreduce.jobtracker.staging.root.dir",
"/tmp/hadoop/mapred/staging"));
- UserGroupInformation ugi = UserGroupInformation.getCurrentUGI();
+ UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
String user;
if (ugi != null) {
user = ugi.getUserName() + rand.nextInt();
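
LocalJobRunner now stamps each map and reduce task with the current short user name and mirrors it into the task-local JobConf; its staging directory is likewise derived from the current user, with a random suffix to keep concurrent local jobs apart. A sketch of the naming, values as in the patch:

    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    String user = ugi.getUserName() + rand.nextInt();   // random suffix per job
    Path stagingRoot = new Path(conf.get(
        "mapreduce.jobtracker.staging.root.dir", "/tmp/hadoop/mapred/staging"));
    Path staging = new Path(stagingRoot, user + "/.staging");
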
Modified: hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/QueueManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/QueueManager.java?rev=1077137&r1=1077136&r2=1077137&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/QueueManager.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/QueueManager.java Fri Mar 4 03:44:54 2011
@@ -31,7 +31,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.SecurityUtil.AccessControlList;
+import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.util.StringUtils;
/**
@@ -186,20 +186,11 @@ class QueueManager {
}
// Check the ACL list
- boolean allowed = acl.allAllowed();
+ boolean allowed = acl.isAllAllowed();
if (!allowed) {
// Check the allowed users list
- if (acl.getUsers().contains(ugi.getUserName())) {
+ if (acl.isUserAllowed(ugi)) {
allowed = true;
- } else {
- // Check the allowed groups list
- Set<String> allowedGroups = acl.getGroups();
- for (String group : ugi.getGroupNames()) {
- if (allowedGroups.contains(group)) {
- allowed = true;
- break;
- }
- }
}
}
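
With the move to org.apache.hadoop.security.authorize.AccessControlList, the hand-rolled walk over user and group lists collapses into one call, since isUserAllowed checks both itself. The whole check reduces to a sketch like:

    // Equivalent of the simplified QueueManager check (names from the patch).
    static boolean hasAccess(AccessControlList acl, UserGroupInformation ugi) {
      return acl.isAllAllowed() || acl.isUserAllowed(ugi);
    }
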