Posted to hdfs-commits@hadoop.apache.org by at...@apache.org on 2011/11/15 03:39:29 UTC
svn commit: r1202013 [3/3] - in
/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs: ./
src/main/java/ src/main/java/org/apache/hadoop/hdfs/
src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/
src/main/java/org/apache/hadoop/hdfs/...
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java?rev=1202013&r1=1202012&r2=1202013&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java Tue Nov 15 02:39:13 2011
@@ -25,12 +25,14 @@ import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.InetSocketAddress;
+import java.net.MalformedURLException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
+import java.util.StringTokenizer;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -62,7 +64,6 @@ import org.apache.hadoop.hdfs.server.nam
import org.apache.hadoop.hdfs.web.resources.AccessTimeParam;
import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
import org.apache.hadoop.hdfs.web.resources.BufferSizeParam;
-import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam;
import org.apache.hadoop.hdfs.web.resources.DeleteOpParam;
import org.apache.hadoop.hdfs.web.resources.DestinationParam;
import org.apache.hadoop.hdfs.web.resources.GetOpParam;
@@ -81,6 +82,7 @@ import org.apache.hadoop.hdfs.web.resour
import org.apache.hadoop.hdfs.web.resources.RenameOptionSetParam;
import org.apache.hadoop.hdfs.web.resources.RenewerParam;
import org.apache.hadoop.hdfs.web.resources.ReplicationParam;
+import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam;
import org.apache.hadoop.hdfs.web.resources.UserParam;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.RemoteException;
@@ -90,6 +92,8 @@ import org.apache.hadoop.security.Securi
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authorize.AuthorizationException;
+import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.TokenRenewer;
@@ -112,17 +116,24 @@ public class WebHdfsFileSystem extends F
private static final KerberosUgiAuthenticator AUTH = new KerberosUgiAuthenticator();
/** Delegation token kind */
public static final Text TOKEN_KIND = new Text("WEBHDFS delegation");
+ /** Token selector */
+ public static final AbstractDelegationTokenSelector<DelegationTokenIdentifier> DT_SELECTOR
+ = new AbstractDelegationTokenSelector<DelegationTokenIdentifier>(TOKEN_KIND) {};
+
+ private static DelegationTokenRenewer<WebHdfsFileSystem> DT_RENEWER = null;
- private static final DelegationTokenRenewer<WebHdfsFileSystem> dtRenewer
- = new DelegationTokenRenewer<WebHdfsFileSystem>(WebHdfsFileSystem.class);
- static {
- dtRenewer.start();
+ private static synchronized void addRenewAction(final WebHdfsFileSystem webhdfs) {
+ if (DT_RENEWER == null) {
+ DT_RENEWER = new DelegationTokenRenewer<WebHdfsFileSystem>(WebHdfsFileSystem.class);
+ DT_RENEWER.start();
+ }
+
+ DT_RENEWER.addRenewAction(webhdfs);
}
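
Previously the renewer thread was created and started in a static initializer, so it ran in every JVM that loaded WebHdfsFileSystem; after this change it starts lazily, only when the first renew action is registered. A generic sketch of that lazy-start pattern (class name, renewal period, and action type are invented for illustration):

    import java.util.List;
    import java.util.concurrent.CopyOnWriteArrayList;

    public class LazyRenewerSketch {
      private static final List<Runnable> ACTIONS = new CopyOnWriteArrayList<Runnable>();
      private static Thread renewer = null;  // not started until first use

      static synchronized void addRenewAction(final Runnable action) {
        if (renewer == null) {
          renewer = new Thread(new Runnable() {
            @Override
            public void run() {
              for (;;) {
                for (Runnable r : ACTIONS) {
                  r.run();  // stand-in for renewing one token
                }
                try {
                  Thread.sleep(60 * 1000L);
                } catch (InterruptedException e) {
                  return;
                }
              }
            }
          }, "token-renewer");
          renewer.setDaemon(true);  // do not keep the JVM alive for renewals
          renewer.start();
        }
        ACTIONS.add(action);
      }
    }
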
private final UserGroupInformation ugi;
private InetSocketAddress nnAddr;
private Token<?> delegationToken;
- private Token<?> renewToken;
private final AuthenticatedURL.Token authToken = new AuthenticatedURL.Token();
private Path workingDir;
@@ -151,8 +162,7 @@ public class WebHdfsFileSystem extends F
protected void initDelegationToken() throws IOException {
// look for webhdfs token, then try hdfs
final Text serviceName = SecurityUtil.buildTokenService(nnAddr);
- Token<?> token = webhdfspTokenSelector.selectToken(
- serviceName, ugi.getTokens());
+ Token<?> token = DT_SELECTOR.selectToken(serviceName, ugi.getTokens());
if (token == null) {
token = DelegationTokenSelector.selectHdfsDelegationToken(
nnAddr, ugi, getConf());
@@ -169,7 +179,7 @@ public class WebHdfsFileSystem extends F
if (token != null) {
setDelegationToken(token);
if (createdToken) {
- dtRenewer.addRenewAction(this);
+ addRenewAction(this);
LOG.debug("Created new DT for " + token.getService());
} else {
LOG.debug("Found existing DT for " + token.getService());
@@ -193,9 +203,14 @@ public class WebHdfsFileSystem extends F
}
}
+ /** @return the home directory. */
+ public static String getHomeDirectoryString(final UserGroupInformation ugi) {
+ return "/user/" + ugi.getShortUserName();
+ }
+
@Override
public Path getHomeDirectory() {
- return makeQualified(new Path("/user/" + ugi.getShortUserName()));
+ return makeQualified(new Path(getHomeDirectoryString(ugi)));
}
@Override
@@ -217,7 +232,7 @@ public class WebHdfsFileSystem extends F
return f.isAbsolute()? f: new Path(workingDir, f);
}
- private static Map<?, ?> jsonParse(final InputStream in) throws IOException {
+ static Map<?, ?> jsonParse(final InputStream in) throws IOException {
if (in == null) {
throw new IOException("The input stream is null.");
}
@@ -243,13 +258,16 @@ public class WebHdfsFileSystem extends F
final RemoteException re = JsonUtil.toRemoteException(m);
throw re.unwrapRemoteException(AccessControlException.class,
- DSQuotaExceededException.class,
+ InvalidToken.class,
+ AuthenticationException.class,
+ AuthorizationException.class,
FileAlreadyExistsException.class,
FileNotFoundException.class,
ParentNotDirectoryException.class,
+ UnresolvedPathException.class,
SafeModeException.class,
- NSQuotaExceededException.class,
- UnresolvedPathException.class);
+ DSQuotaExceededException.class,
+ NSQuotaExceededException.class);
}
return null;
}
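
For context, validateResponse rebuilds the server-side exception from the JSON body as a RemoteException and then unwraps it to the matching local type. A minimal standalone sketch of that unwrap step (the class name and message below are invented):

    import java.io.FileNotFoundException;
    import java.io.IOException;

    import org.apache.hadoop.ipc.RemoteException;

    public class UnwrapSketch {
      public static void main(String[] args) {
        // A RemoteException as the client would rebuild it from the JSON response.
        final RemoteException re = new RemoteException(
            FileNotFoundException.class.getName(), "File does not exist: /foo");

        // unwrapRemoteException returns the matching local exception type when
        // the remote class name is in the lookup list, else the RemoteException.
        final IOException unwrapped =
            re.unwrapRemoteException(FileNotFoundException.class);
        System.out.println(unwrapped.getClass().getSimpleName()); // FileNotFoundException
      }
    }
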
@@ -326,14 +344,13 @@ public class WebHdfsFileSystem extends F
final URL url = toUrl(op, fspath, parameters);
//connect and get response
- final HttpURLConnection conn = getHttpUrlConnection(url);
+ HttpURLConnection conn = getHttpUrlConnection(url);
try {
conn.setRequestMethod(op.getType().toString());
- conn.setDoOutput(op.getDoOutput());
if (op.getDoOutput()) {
- conn.setRequestProperty("Expect", "100-Continue");
- conn.setInstanceFollowRedirects(true);
+ conn = twoStepWrite(conn, op);
}
+ conn.setDoOutput(op.getDoOutput());
conn.connect();
return conn;
} catch (IOException e) {
@@ -341,6 +358,35 @@ public class WebHdfsFileSystem extends F
throw e;
}
}
+
+ /**
+ * Two-step Create/Append:
+ * Step 1) Submit an HTTP request with neither auto-redirect nor data.
+ * Step 2) Submit another HTTP request, with data, to the URL from the Location header.
+ *
+ * The reason for the two-step create/append is to prevent clients from
+ * sending data before the redirect. This issue is addressed by the
+ * "Expect: 100-continue" header in HTTP/1.1; see RFC 2616, Section 8.2.3.
+ * Unfortunately, some software libraries (e.g. the Jetty 6 HTTP server and
+ * the Java 6 HTTP client) do not correctly implement "Expect: 100-continue",
+ * so the two-step create/append is a temporary workaround for those bugs.
+ */
+ static HttpURLConnection twoStepWrite(HttpURLConnection conn,
+ final HttpOpParam.Op op) throws IOException {
+ //Step 1) Submit an HTTP request with neither auto-redirect nor data.
+ conn.setInstanceFollowRedirects(false);
+ conn.setDoOutput(false);
+ conn.connect();
+ validateResponse(HttpOpParam.TemporaryRedirectOp.valueOf(op), conn);
+ final String redirect = conn.getHeaderField("Location");
+ conn.disconnect();
+
+ //Step 2) Submit another HTTP request, with data, to the URL from the Location header.
+ conn = (HttpURLConnection)new URL(redirect).openConnection();
+ conn.setRequestMethod(op.getType().toString());
+ return conn;
+ }
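
To make the flow concrete, a minimal client-side sketch of the same two-step CREATE, written with plain java.net against a hypothetical cluster (host, port, path, and user are invented; error handling omitted):

    import java.io.OutputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class TwoStepCreateSketch {
      public static void main(String[] args) throws Exception {
        // Step 1: ask the NameNode where to write, without sending any data.
        final URL create = new URL("http://namenode:50070/webhdfs/v1/tmp/a.txt"
            + "?op=CREATE&user.name=alice");
        final HttpURLConnection nn = (HttpURLConnection)create.openConnection();
        nn.setRequestMethod("PUT");
        nn.setInstanceFollowRedirects(false);  // we want the 307, not the final URL
        nn.connect();
        final String location = nn.getHeaderField("Location");
        nn.disconnect();                       // expect 307 Temporary Redirect

        // Step 2: send the file content to the DataNode URL from the Location header.
        final HttpURLConnection dn =
            (HttpURLConnection)new URL(location).openConnection();
        dn.setRequestMethod("PUT");
        dn.setDoOutput(true);
        final OutputStream out = dn.getOutputStream();
        try {
          out.write("Hello, webhdfs!".getBytes("UTF-8"));
        } finally {
          out.close();
        }
        System.out.println("HTTP " + dn.getResponseCode()); // expect 201 Created
        dn.disconnect();
      }
    }
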
/**
* Run a http operation.
@@ -388,9 +434,9 @@ public class WebHdfsFileSystem extends F
private FileStatus makeQualified(HdfsFileStatus f, Path parent) {
return new FileStatus(f.getLen(), f.isDir(), f.getReplication(),
- f.getBlockSize(), f.getModificationTime(),
- f.getAccessTime(),
+ f.getBlockSize(), f.getModificationTime(), f.getAccessTime(),
f.getPermission(), f.getOwner(), f.getGroup(),
+ f.isSymlink() ? new Path(f.getSymlink()) : null,
f.getFullPath(parent).makeQualified(getUri(), getWorkingDirectory()));
}
@@ -471,7 +517,7 @@ public class WebHdfsFileSystem extends F
DFSConfigKeys.DFS_REPLICATION_DEFAULT);
}
- private FSDataOutputStream write(final HttpOpParam.Op op,
+ FSDataOutputStream write(final HttpOpParam.Op op,
final HttpURLConnection conn, final int bufferSize) throws IOException {
return new FSDataOutputStream(new BufferedOutputStream(
conn.getOutputStream(), bufferSize), statistics) {
@@ -480,7 +526,11 @@ public class WebHdfsFileSystem extends F
try {
super.close();
} finally {
- validateResponse(op, conn);
+ try {
+ validateResponse(op, conn);
+ } finally {
+ conn.disconnect();
+ }
}
}
};
@@ -532,24 +582,84 @@ public class WebHdfsFileSystem extends F
statistics.incrementReadOps(1);
final HttpOpParam.Op op = GetOpParam.Op.OPEN;
final URL url = toUrl(op, f, new BufferSizeParam(buffersize));
- ByteRangeInputStream str = getByteRangeInputStream(url);
- return new FSDataInputStream(str);
+ return new FSDataInputStream(new OffsetUrlInputStream(
+ new OffsetUrlOpener(url), new OffsetUrlOpener(null)));
}
- private class URLOpener extends ByteRangeInputStream.URLOpener {
-
- public URLOpener(URL u) {
- super(u);
+ class OffsetUrlOpener extends ByteRangeInputStream.URLOpener {
+ /** The url with offset parameter */
+ private URL offsetUrl;
+
+ OffsetUrlOpener(final URL url) {
+ super(url);
}
+ /** Open connection with offset url. */
@Override
- public HttpURLConnection openConnection() throws IOException {
+ protected HttpURLConnection openConnection() throws IOException {
return getHttpUrlConnection(offsetUrl);
}
+
+ /** Set up the offset url before opening the connection. */
+ @Override
+ protected HttpURLConnection openConnection(final long offset) throws IOException {
+ offsetUrl = offset == 0L? url: new URL(url + "&" + new OffsetParam(offset));
+ final HttpURLConnection conn = openConnection();
+ conn.setRequestMethod("GET");
+ return conn;
+ }
}
-
- private ByteRangeInputStream getByteRangeInputStream(URL url) {
- return new ByteRangeInputStream(new URLOpener(url), new URLOpener(null));
+
+ private static final String OFFSET_PARAM_PREFIX = OffsetParam.NAME + "=";
+
+ /** Remove offset parameter, if there is any, from the url */
+ static URL removeOffsetParam(final URL url) throws MalformedURLException {
+ String query = url.getQuery();
+ if (query == null) {
+ return url;
+ }
+ final String lower = query.toLowerCase();
+ if (!lower.startsWith(OFFSET_PARAM_PREFIX)
+ && !lower.contains("&" + OFFSET_PARAM_PREFIX)) {
+ return url;
+ }
+
+ //rebuild query
+ StringBuilder b = null;
+ for(final StringTokenizer st = new StringTokenizer(query, "&");
+ st.hasMoreTokens();) {
+ final String token = st.nextToken();
+ if (!token.toLowerCase().startsWith(OFFSET_PARAM_PREFIX)) {
+ if (b == null) {
+ b = new StringBuilder("?").append(token);
+ } else {
+ b.append('&').append(token);
+ }
+ }
+ }
+ query = b == null? "": b.toString();
+
+ final String urlStr = url.toString();
+ return new URL(urlStr.substring(0, urlStr.indexOf('?')) + query);
+ }
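
The behavior is easiest to see with examples. A self-contained sketch of the same rewriting idea, with invented URLs (the fast-path check from the method above is omitted for brevity):

    import java.net.URL;
    import java.util.StringTokenizer;

    public class RemoveOffsetSketch {
      // Same idea as removeOffsetParam above: drop any "offset=" query parameter.
      static URL removeOffset(final URL url) throws Exception {
        final String query = url.getQuery();
        if (query == null) {
          return url;
        }
        StringBuilder b = null;
        for (final StringTokenizer st = new StringTokenizer(query, "&");
            st.hasMoreTokens();) {
          final String token = st.nextToken();
          if (!token.toLowerCase().startsWith("offset=")) {
            b = b == null? new StringBuilder("?").append(token)
                         : b.append('&').append(token);
          }
        }
        final String s = url.toString();
        return new URL(s.substring(0, s.indexOf('?')) + (b == null? "": b.toString()));
      }

      public static void main(String[] args) throws Exception {
        // offset in the middle (any case): the other parameters survive in order
        System.out.println(removeOffset(
            new URL("http://test/Abc?op=OPEN&OFFset=10&length=99")));
        // prints http://test/Abc?op=OPEN&length=99

        // offset as the only parameter: the '?' disappears too
        System.out.println(removeOffset(new URL("http://test/Abc?offset=10")));
        // prints http://test/Abc
      }
    }
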
+
+ static class OffsetUrlInputStream extends ByteRangeInputStream {
+ OffsetUrlInputStream(OffsetUrlOpener o, OffsetUrlOpener r) {
+ super(o, r);
+ }
+
+ @Override
+ protected void checkResponseCode(final HttpURLConnection connection
+ ) throws IOException {
+ validateResponse(GetOpParam.Op.OPEN, connection);
+ }
+
+ /** Remove offset parameter before returning the resolved url. */
+ @Override
+ protected URL getResolvedUrl(final HttpURLConnection connection
+ ) throws MalformedURLException {
+ return removeOffsetParam(connection.getURL());
+ }
}
@Override
@@ -558,8 +668,8 @@ public class WebHdfsFileSystem extends F
final HttpOpParam.Op op = GetOpParam.Op.LISTSTATUS;
final Map<?, ?> json = run(op, f);
- final Map<?, ?> rootmap = (Map<?, ?>)json.get(HdfsFileStatus.class.getSimpleName() + "es");
- final Object[] array = (Object[])rootmap.get(HdfsFileStatus.class.getSimpleName());
+ final Map<?, ?> rootmap = (Map<?, ?>)json.get(FileStatus.class.getSimpleName() + "es");
+ final Object[] array = (Object[])rootmap.get(FileStatus.class.getSimpleName());
//convert FileStatus
final FileStatus[] statuses = new FileStatus[array.length];
@@ -577,7 +687,7 @@ public class WebHdfsFileSystem extends F
final HttpOpParam.Op op = GetOpParam.Op.GETDELEGATIONTOKEN;
final Map<?, ?> m = run(op, null, new RenewerParam(renewer));
final Token<DelegationTokenIdentifier> token = JsonUtil.toDelegationToken(m);
- token.setService(new Text(getCanonicalServiceName()));
+ SecurityUtil.setTokenService(token, nnAddr);
return token;
}
@@ -590,23 +700,14 @@ public class WebHdfsFileSystem extends F
@Override
public Token<?> getRenewToken() {
- return renewToken;
+ return delegationToken;
}
@Override
public <T extends TokenIdentifier> void setDelegationToken(
final Token<T> token) {
synchronized(this) {
- renewToken = token;
- // emulate the 203 usage of the tokens
- // by setting the kind and service as if they were hdfs tokens
- delegationToken = new Token<T>(token);
- // NOTE: the remote nn must be configured to use hdfs
- delegationToken.setKind(DelegationTokenIdentifier.HDFS_DELEGATION_KIND);
- // no need to change service because we aren't exactly sure what it
- // should be. we can guess, but it might be wrong if the local conf
- // value is incorrect. the service is a client side field, so the remote
- // end does not care about the value
+ delegationToken = token;
}
}
@@ -641,7 +742,7 @@ public class WebHdfsFileSystem extends F
final long offset, final long length) throws IOException {
statistics.incrementReadOps(1);
- final HttpOpParam.Op op = GetOpParam.Op.GETFILEBLOCKLOCATIONS;
+ final HttpOpParam.Op op = GetOpParam.Op.GET_BLOCK_LOCATIONS;
final Map<?, ?> m = run(op, p, new OffsetParam(offset),
new LengthParam(length));
return DFSUtil.locatedBlocks2Locations(JsonUtil.toLocatedBlocks(m));
@@ -666,15 +767,6 @@ public class WebHdfsFileSystem extends F
return JsonUtil.toMD5MD5CRC32FileChecksum(m);
}
- private static final DtSelector webhdfspTokenSelector = new DtSelector();
-
- private static class DtSelector
- extends AbstractDelegationTokenSelector<DelegationTokenIdentifier> {
- private DtSelector() {
- super(TOKEN_KIND);
- }
- }
-
/** Delegation token renewer. */
public static class DtRenewer extends TokenRenewer {
@Override
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ExceptionHandler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ExceptionHandler.java?rev=1202013&r1=1202012&r2=1202013&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ExceptionHandler.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ExceptionHandler.java Tue Nov 15 02:39:13 2011
@@ -30,14 +30,25 @@ import javax.ws.rs.ext.Provider;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hdfs.web.JsonUtil;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.security.authorize.AuthorizationException;
import com.sun.jersey.api.ParamException;
+import com.sun.jersey.api.container.ContainerException;
/** Handle exceptions. */
@Provider
public class ExceptionHandler implements ExceptionMapper<Exception> {
public static final Log LOG = LogFactory.getLog(ExceptionHandler.class);
+ private static Exception toCause(Exception e) {
+ final Throwable t = e.getCause();
+ if (t != null && t instanceof Exception) {
+ e = (Exception)e.getCause();
+ }
+ return e;
+ }
+
private @Context HttpServletResponse response;
@Override
@@ -55,12 +66,20 @@ public class ExceptionHandler implements
e = new IllegalArgumentException("Invalid value for webhdfs parameter \""
+ paramexception.getParameterName() + "\": "
+ e.getCause().getMessage(), e);
- }
+ }
+ if (e instanceof ContainerException) {
+ e = toCause(e);
+ }
+ if (e instanceof RemoteException) {
+ e = ((RemoteException)e).unwrapRemoteException();
+ }
//Map response status
final Response.Status s;
if (e instanceof SecurityException) {
s = Response.Status.UNAUTHORIZED;
+ } else if (e instanceof AuthorizationException) {
+ s = Response.Status.UNAUTHORIZED;
} else if (e instanceof FileNotFoundException) {
s = Response.Status.NOT_FOUND;
} else if (e instanceof IOException) {
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java?rev=1202013&r1=1202012&r2=1202013&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java Tue Nov 15 02:39:13 2011
@@ -24,15 +24,18 @@ public class GetOpParam extends HttpOpPa
/** Get operations. */
public static enum Op implements HttpOpParam.Op {
OPEN(HttpURLConnection.HTTP_OK),
- GETFILEBLOCKLOCATIONS(HttpURLConnection.HTTP_OK),
GETFILESTATUS(HttpURLConnection.HTTP_OK),
LISTSTATUS(HttpURLConnection.HTTP_OK),
GETCONTENTSUMMARY(HttpURLConnection.HTTP_OK),
GETFILECHECKSUM(HttpURLConnection.HTTP_OK),
+ GETHOMEDIRECTORY(HttpURLConnection.HTTP_OK),
GETDELEGATIONTOKEN(HttpURLConnection.HTTP_OK),
+ /** GET_BLOCK_LOCATIONS is a private unstable op. */
+ GET_BLOCK_LOCATIONS(HttpURLConnection.HTTP_OK),
+
NULL(HttpURLConnection.HTTP_NOT_IMPLEMENTED);
final int expectedHttpResponseCode;
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/HttpOpParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/HttpOpParam.java?rev=1202013&r1=1202012&r2=1202013&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/HttpOpParam.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/HttpOpParam.java Tue Nov 15 02:39:13 2011
@@ -17,6 +17,9 @@
*/
package org.apache.hadoop.hdfs.web.resources;
+import javax.ws.rs.core.Response;
+
+
/** Http operation parameter. */
public abstract class HttpOpParam<E extends Enum<E> & HttpOpParam.Op>
extends EnumParam<E> {
@@ -46,6 +49,49 @@ public abstract class HttpOpParam<E exte
public String toQueryString();
}
+ /** Expects HTTP response 307 "Temporary Redirect". */
+ public static class TemporaryRedirectOp implements Op {
+ static final TemporaryRedirectOp CREATE = new TemporaryRedirectOp(PutOpParam.Op.CREATE);
+ static final TemporaryRedirectOp APPEND = new TemporaryRedirectOp(PostOpParam.Op.APPEND);
+
+ /** Get an object for the given op. */
+ public static TemporaryRedirectOp valueOf(final Op op) {
+ if (op == CREATE.op) {
+ return CREATE;
+ } else if (op == APPEND.op) {
+ return APPEND;
+ }
+ throw new IllegalArgumentException(op + " not found.");
+ }
+
+ private final Op op;
+
+ private TemporaryRedirectOp(final Op op) {
+ this.op = op;
+ }
+
+ @Override
+ public Type getType() {
+ return op.getType();
+ }
+
+ @Override
+ public boolean getDoOutput() {
+ return op.getDoOutput();
+ }
+
+ /** Override the original expected response with "Temporary Redirect". */
+ @Override
+ public int getExpectedHttpResponseCode() {
+ return Response.Status.TEMPORARY_REDIRECT.getStatusCode();
+ }
+
+ @Override
+ public String toQueryString() {
+ return op.toQueryString();
+ }
+ }
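
A small usage sketch of the override (assuming the hadoop-hdfs classes above are on the classpath; CREATE's usual expected code is 201 Created):

    import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
    import org.apache.hadoop.hdfs.web.resources.PutOpParam;

    public class RedirectOpSketch {
      public static void main(String[] args) {
        // Step 1 of the two-step write validates against the wrapped op,
        // which overrides the expected status to 307 Temporary Redirect.
        final HttpOpParam.Op step1 =
            HttpOpParam.TemporaryRedirectOp.valueOf(PutOpParam.Op.CREATE);
        System.out.println(step1.getExpectedHttpResponseCode()); // 307
        System.out.println(step1.toQueryString()); // same query string as CREATE
      }
    }
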
+
HttpOpParam(final Domain<E> domain, final E value) {
super(domain, value);
}
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/UserProvider.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/UserProvider.java?rev=1202013&r1=1202012&r2=1202013&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/UserProvider.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/UserProvider.java Tue Nov 15 02:39:13 2011
@@ -53,7 +53,8 @@ public class UserProvider
return JspHelper.getUGI(servletcontext, request, conf,
AuthenticationMethod.KERBEROS, false);
} catch (IOException e) {
- throw new RuntimeException(e);
+ throw new SecurityException(
+ "Failed to obtain user group information: " + e, e);
}
}
Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Nov 15 02:39:13 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:1152502-1196451
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:1152502-1202009
/hadoop/core/branches/branch-0.19/hdfs/src/main/native:713112
/hadoop/core/branches/branch-0.19/mapred/src/c++/libhdfs:713112
/hadoop/core/trunk/src/c++/libhdfs:776175-784663
Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Nov 15 02:39:13 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:1159757-1196451
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:1159757-1202009
/hadoop/core/branches/branch-0.19/hdfs/src/main/webapps/datanode:713112
/hadoop/core/branches/branch-0.19/hdfs/src/webapps/datanode:713112
/hadoop/core/trunk/src/webapps/datanode:776175-784663
Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Nov 15 02:39:13 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:1152502-1196451
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:1152502-1202009
/hadoop/core/branches/branch-0.19/hdfs/src/main/webapps/hdfs:713112
/hadoop/core/branches/branch-0.19/hdfs/src/webapps/hdfs:713112
/hadoop/core/trunk/src/webapps/hdfs:776175-784663
Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Nov 15 02:39:13 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:1152502-1196451
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:1152502-1202009
/hadoop/core/branches/branch-0.19/hdfs/src/main/webapps/secondary:713112
/hadoop/core/branches/branch-0.19/hdfs/src/webapps/secondary:713112
/hadoop/core/trunk/src/webapps/secondary:776175-784663
Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Nov 15 02:39:13 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:1159757-1196451
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:1159757-1202009
/hadoop/core/branches/branch-0.19/hdfs/src/test/hdfs:713112
/hadoop/core/trunk/src/test/hdfs:776175-785643
/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs:987665-1095512
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java?rev=1202013&r1=1202012&r2=1202013&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java Tue Nov 15 02:39:13 2011
@@ -139,15 +139,17 @@ public class BlockReaderTestUtil {
ExtendedBlock block = testBlock.getBlock();
DatanodeInfo[] nodes = testBlock.getLocations();
targetAddr = NetUtils.createSocketAddr(nodes[0].getName());
- sock = new Socket();
+ sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
sock.connect(targetAddr, HdfsServerConstants.READ_TIMEOUT);
sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
return BlockReaderFactory.newBlockReader(
+ new DFSClient.Conf(conf),
sock, targetAddr.toString()+ ":" + block.getBlockId(), block,
testBlock.getBlockToken(),
offset, lenToRead,
- conf.getInt("io.file.buffer.size", 4096));
+ conf.getInt("io.file.buffer.size", 4096),
+ true, "");
}
/**
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestByteRangeInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestByteRangeInputStream.java?rev=1202013&r1=1202012&r2=1202013&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestByteRangeInputStream.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestByteRangeInputStream.java Tue Nov 15 02:39:13 2011
@@ -31,10 +31,10 @@ import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;
-import org.apache.hadoop.hdfs.ByteRangeInputStream.URLOpener;
import org.junit.Test;
-class MockHttpURLConnection extends HttpURLConnection {
+public class TestByteRangeInputStream {
+public static class MockHttpURLConnection extends HttpURLConnection {
public MockHttpURLConnection(URL u) {
super(u);
}
@@ -85,54 +85,18 @@ class MockHttpURLConnection extends Http
responseCode = resCode;
}
}
-
-public class TestByteRangeInputStream {
- @Test
- public void testRemoveOffset() throws IOException {
- { //no offset
- String s = "http://test/Abc?Length=99";
- assertEquals(s, ByteRangeInputStream.removeOffsetParam(new URL(s)).toString());
- }
-
- { //no parameters
- String s = "http://test/Abc";
- assertEquals(s, ByteRangeInputStream.removeOffsetParam(new URL(s)).toString());
- }
-
- { //offset as first parameter
- String s = "http://test/Abc?offset=10&Length=99";
- assertEquals("http://test/Abc?Length=99",
- ByteRangeInputStream.removeOffsetParam(new URL(s)).toString());
- }
-
- { //offset as second parameter
- String s = "http://test/Abc?op=read&OFFset=10&Length=99";
- assertEquals("http://test/Abc?op=read&Length=99",
- ByteRangeInputStream.removeOffsetParam(new URL(s)).toString());
- }
-
- { //offset as last parameter
- String s = "http://test/Abc?Length=99&offset=10";
- assertEquals("http://test/Abc?Length=99",
- ByteRangeInputStream.removeOffsetParam(new URL(s)).toString());
- }
-
- { //offset as the only parameter
- String s = "http://test/Abc?offset=10";
- assertEquals("http://test/Abc",
- ByteRangeInputStream.removeOffsetParam(new URL(s)).toString());
- }
- }
@Test
public void testByteRange() throws IOException {
- URLOpener ospy = spy(new URLOpener(new URL("http://test/")));
+ HftpFileSystem.RangeHeaderUrlOpener ospy = spy(
+ new HftpFileSystem.RangeHeaderUrlOpener(new URL("http://test/")));
doReturn(new MockHttpURLConnection(ospy.getURL())).when(ospy)
.openConnection();
- URLOpener rspy = spy(new URLOpener((URL) null));
+ HftpFileSystem.RangeHeaderUrlOpener rspy = spy(
+ new HftpFileSystem.RangeHeaderUrlOpener((URL) null));
doReturn(new MockHttpURLConnection(rspy.getURL())).when(rspy)
.openConnection();
- ByteRangeInputStream is = new ByteRangeInputStream(ospy, rspy);
+ ByteRangeInputStream is = new HftpFileSystem.RangeHeaderInputStream(ospy, rspy);
assertEquals("getPos wrong", 0, is.getPos());
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientBlockVerification.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientBlockVerification.java?rev=1202013&r1=1202012&r2=1202013&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientBlockVerification.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientBlockVerification.java Tue Nov 15 02:39:13 2011
@@ -20,11 +20,12 @@ package org.apache.hadoop.hdfs;
import java.util.List;
-import org.apache.hadoop.hdfs.RemoteBlockReader;
import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
import org.apache.hadoop.fs.Path;
+import org.apache.log4j.Level;
import org.junit.Test;
import org.junit.AfterClass;
@@ -40,6 +41,9 @@ public class TestClientBlockVerification
static final int FILE_SIZE_K = 256;
static LocatedBlock testBlock = null;
+ static {
+ ((Log4JLogger)RemoteBlockReader2.LOG).getLogger().setLevel(Level.ALL);
+ }
@BeforeClass
public static void setupCluster() throws Exception {
final int REPLICATION_FACTOR = 1;
@@ -54,7 +58,7 @@ public class TestClientBlockVerification
*/
@Test
public void testBlockVerification() throws Exception {
- RemoteBlockReader reader = (RemoteBlockReader)spy(
+ RemoteBlockReader2 reader = (RemoteBlockReader2)spy(
util.getBlockReader(testBlock, 0, FILE_SIZE_K * 1024));
util.readAndCheckEOS(reader, FILE_SIZE_K * 1024, true);
verify(reader).sendReadResult(reader.dnSock, Status.CHECKSUM_OK);
@@ -66,7 +70,7 @@ public class TestClientBlockVerification
*/
@Test
public void testIncompleteRead() throws Exception {
- RemoteBlockReader reader = (RemoteBlockReader)spy(
+ RemoteBlockReader2 reader = (RemoteBlockReader2)spy(
util.getBlockReader(testBlock, 0, FILE_SIZE_K * 1024));
util.readAndCheckEOS(reader, FILE_SIZE_K / 2 * 1024, false);
@@ -84,7 +88,7 @@ public class TestClientBlockVerification
@Test
public void testCompletePartialRead() throws Exception {
// Ask for half the file
- RemoteBlockReader reader = (RemoteBlockReader)spy(
+ RemoteBlockReader2 reader = (RemoteBlockReader2)spy(
util.getBlockReader(testBlock, 0, FILE_SIZE_K * 1024 / 2));
// And read half the file
util.readAndCheckEOS(reader, FILE_SIZE_K * 1024 / 2, true);
@@ -104,7 +108,7 @@ public class TestClientBlockVerification
for (int length : lengths) {
DFSClient.LOG.info("Testing startOffset = " + startOffset + " and " +
" len=" + length);
- RemoteBlockReader reader = (RemoteBlockReader)spy(
+ RemoteBlockReader2 reader = (RemoteBlockReader2)spy(
util.getBlockReader(testBlock, startOffset, length));
util.readAndCheckEOS(reader, length, true);
verify(reader).sendReadResult(reader.dnSock, Status.CHECKSUM_OK);
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java?rev=1202013&r1=1202012&r2=1202013&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java Tue Nov 15 02:39:13 2011
@@ -28,7 +28,6 @@ import org.apache.commons.logging.LogFac
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.RemoteBlockReader;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSInputStream;
import org.apache.hadoop.hdfs.SocketCache;
@@ -76,20 +75,20 @@ public class TestConnCache {
* It verifies that all invocation to DFSInputStream.getBlockReader()
* use the same socket.
*/
- private class MockGetBlockReader implements Answer<RemoteBlockReader> {
- public RemoteBlockReader reader = null;
+ private class MockGetBlockReader implements Answer<RemoteBlockReader2> {
+ public RemoteBlockReader2 reader = null;
private Socket sock = null;
- public RemoteBlockReader answer(InvocationOnMock invocation) throws Throwable {
- RemoteBlockReader prevReader = reader;
- reader = (RemoteBlockReader) invocation.callRealMethod();
+ public RemoteBlockReader2 answer(InvocationOnMock invocation) throws Throwable {
+ RemoteBlockReader2 prevReader = reader;
+ reader = (RemoteBlockReader2) invocation.callRealMethod();
if (sock == null) {
sock = reader.dnSock;
- } else if (prevReader != null && prevReader.hasSentStatusCode()) {
- // Can't reuse socket if the previous BlockReader didn't read till EOS.
+ } else if (prevReader != null) {
assertSame("DFSInputStream should use the same socket",
sock, reader.dnSock);
- } return reader;
+ }
+ return reader;
}
}
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java?rev=1202013&r1=1202012&r2=1202013&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java Tue Nov 15 02:39:13 2011
@@ -74,7 +74,7 @@ public class TestDataTransferProtocol ex
"org.apache.hadoop.hdfs.TestDataTransferProtocol");
private static final DataChecksum DEFAULT_CHECKSUM =
- DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32, 512);
+ DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32C, 512);
DatanodeID datanode;
InetSocketAddress dnAddr;
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDfsOverAvroRpc.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDfsOverAvroRpc.java?rev=1202013&r1=1202012&r2=1202013&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDfsOverAvroRpc.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDfsOverAvroRpc.java Tue Nov 15 02:39:13 2011
@@ -19,11 +19,14 @@ package org.apache.hadoop.hdfs;
import java.io.IOException;
+import org.junit.Test;
+
/** Test for simple signs of life using Avro RPC. Not an exhaustive test
* yet, just enough to catch fundamental problems using Avro reflection to
* infer namenode RPC protocols. */
public class TestDfsOverAvroRpc extends TestLocalDFS {
+ @Test(timeout=20000)
public void testWorkingDirectory() throws IOException {
System.setProperty("hdfs.rpc.engine",
"org.apache.hadoop.ipc.AvroRpcEngine");
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLocalDFS.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLocalDFS.java?rev=1202013&r1=1202012&r2=1202013&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLocalDFS.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLocalDFS.java Tue Nov 15 02:39:13 2011
@@ -17,7 +17,8 @@
*/
package org.apache.hadoop.hdfs;
-import junit.framework.TestCase;
+import org.junit.Test;
+import static org.junit.Assert.*;
import java.io.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
@@ -27,7 +28,7 @@ import org.apache.hadoop.fs.Path;
* This class tests the DFS class via the FileSystem interface in a single node
* mini-cluster.
*/
-public class TestLocalDFS extends TestCase {
+public class TestLocalDFS {
private void writeFile(FileSystem fileSys, Path name) throws IOException {
DataOutputStream stm = fileSys.create(name);
@@ -59,6 +60,7 @@ public class TestLocalDFS extends TestCa
/**
* Tests get/set working directory in DFS.
*/
+ @Test(timeout=20000)
public void testWorkingDirectory() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSeekBug.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSeekBug.java?rev=1202013&r1=1202012&r2=1202013&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSeekBug.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSeekBug.java Tue Nov 15 02:39:13 2011
@@ -28,6 +28,7 @@ import org.apache.hadoop.fs.ChecksumFile
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IOUtils;
/**
* This class tests the presence of seek bug as described
@@ -67,12 +68,12 @@ public class TestSeekBug extends TestCas
stm.read(actual, 0, actual.length);
// Now read a byte array that is bigger than the internal buffer
actual = new byte[100000];
- stm.read(actual, 0, actual.length);
+ IOUtils.readFully(stm, actual, 0, actual.length);
checkAndEraseData(actual, 128, expected, "First Read Test");
// now do a small seek, within the range that is already read
stm.seek(96036); // 4 byte seek
actual = new byte[128];
- stm.read(actual, 0, actual.length);
+ IOUtils.readFully(stm, actual, 0, actual.length);
checkAndEraseData(actual, 96036, expected, "Seek Bug");
// all done
stm.close();
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java?rev=1202013&r1=1202012&r2=1202013&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java Tue Nov 15 02:39:13 2011
@@ -23,29 +23,46 @@ package org.apache.hadoop.hdfs.security;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
+import java.net.HttpURLConnection;
import java.net.InetAddress;
import java.net.NetworkInterface;
+import java.net.URL;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Enumeration;
+import java.util.Map;
-import junit.framework.Assert;
+import javax.servlet.http.HttpServletResponse;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
+import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
+import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
+import org.apache.hadoop.hdfs.web.resources.DoAsParam;
+import org.apache.hadoop.hdfs.web.resources.ExceptionHandler;
+import org.apache.hadoop.hdfs.web.resources.GetOpParam;
+import org.apache.hadoop.hdfs.web.resources.PutOpParam;
import org.apache.hadoop.security.TestDoAsEffectiveUser;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.ProxyUsers;
import org.apache.hadoop.security.token.Token;
+import org.apache.log4j.Level;
import org.junit.After;
+import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
@@ -89,6 +106,7 @@ public class TestDelegationTokenForProxy
@Before
public void setUp() throws Exception {
config = new HdfsConfiguration();
+ config.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
config.setLong(
DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
config.setLong(
@@ -137,4 +155,63 @@ public class TestDelegationTokenForProxy
}
}
+ @Test
+ public void testWebHdfsDoAs() throws Exception {
+ WebHdfsTestUtil.LOG.info("START: testWebHdfsDoAs()");
+ ((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
+ ((Log4JLogger)ExceptionHandler.LOG).getLogger().setLevel(Level.ALL);
+ final UserGroupInformation ugi = UserGroupInformation.createRemoteUser(REAL_USER);
+ WebHdfsTestUtil.LOG.info("ugi.getShortUserName()=" + ugi.getShortUserName());
+ final WebHdfsFileSystem webhdfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, config);
+
+ final Path root = new Path("/");
+ cluster.getFileSystem().setPermission(root, new FsPermission((short)0777));
+
+ {
+ //test GETHOMEDIRECTORY with doAs
+ final URL url = WebHdfsTestUtil.toUrl(webhdfs,
+ GetOpParam.Op.GETHOMEDIRECTORY, root, new DoAsParam(PROXY_USER));
+ final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+ final Map<?, ?> m = WebHdfsTestUtil.connectAndGetJson(conn, HttpServletResponse.SC_OK);
+ conn.disconnect();
+
+ final Object responsePath = m.get(Path.class.getSimpleName());
+ WebHdfsTestUtil.LOG.info("responsePath=" + responsePath);
+ Assert.assertEquals("/user/" + PROXY_USER, responsePath);
+ }
+
+ {
+ //test GETHOMEDIRECTORY with DOas
+ final URL url = WebHdfsTestUtil.toUrl(webhdfs,
+ GetOpParam.Op.GETHOMEDIRECTORY, root, new DoAsParam(PROXY_USER) {
+ @Override
+ public String getName() {
+ return "DOas";
+ }
+ });
+ final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+ final Map<?, ?> m = WebHdfsTestUtil.connectAndGetJson(conn, HttpServletResponse.SC_OK);
+ conn.disconnect();
+
+ final Object responsePath = m.get(Path.class.getSimpleName());
+ WebHdfsTestUtil.LOG.info("responsePath=" + responsePath);
+ Assert.assertEquals("/user/" + PROXY_USER, responsePath);
+ }
+
+ {
+ //test create file with doAs
+ final Path f = new Path("/testWebHdfsDoAs/a.txt");
+ final PutOpParam.Op op = PutOpParam.Op.CREATE;
+ final URL url = WebHdfsTestUtil.toUrl(webhdfs, op, f, new DoAsParam(PROXY_USER));
+ HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+ conn = WebHdfsTestUtil.twoStepWrite(conn, op);
+ final FSDataOutputStream out = WebHdfsTestUtil.write(webhdfs, op, conn, 4096);
+ out.write("Hello, webhdfs user!".getBytes());
+ out.close();
+
+ final FileStatus status = webhdfs.getFileStatus(f);
+ WebHdfsTestUtil.LOG.info("status.getOwner()=" + status.getOwner());
+ Assert.assertEquals(PROXY_USER, status.getOwner());
+ }
+ }
}
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java?rev=1202013&r1=1202012&r2=1202013&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java Tue Nov 15 02:39:13 2011
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.bl
import static org.junit.Assert.*;
import java.io.IOException;
+import java.util.ArrayList;
import java.util.List;
import java.util.Map.Entry;
@@ -355,25 +356,35 @@ public class TestBlockManager {
bm.blocksMap.addINode(blockInfo, iNode);
return blockInfo;
}
-
+
private DatanodeDescriptor[] scheduleSingleReplication(Block block) {
- assertEquals("Block not initially pending replication",
- 0, bm.pendingReplications.getNumReplicas(block));
- assertTrue("computeReplicationWork should indicate replication is needed",
- bm.computeReplicationWorkForBlock(block, 1));
+ // list for priority 1
+ List<Block> list_p1 = new ArrayList<Block>();
+ list_p1.add(block);
+
+ // list of lists for each priority
+ List<List<Block>> list_all = new ArrayList<List<Block>>();
+ list_all.add(new ArrayList<Block>()); // for priority 0
+ list_all.add(list_p1); // for priority 1
+
+ assertEquals("Block not initially pending replication", 0,
+ bm.pendingReplications.getNumReplicas(block));
+ assertEquals(
+ "computeReplicationWork should indicate replication is needed", 1,
+ bm.computeReplicationWorkForBlocks(list_all));
assertTrue("replication is pending after work is computed",
bm.pendingReplications.getNumReplicas(block) > 0);
-
- LinkedListMultimap<DatanodeDescriptor, BlockTargetPair> repls =
- getAllPendingReplications();
+
+ LinkedListMultimap<DatanodeDescriptor, BlockTargetPair> repls = getAllPendingReplications();
assertEquals(1, repls.size());
- Entry<DatanodeDescriptor, BlockTargetPair> repl = repls.entries().iterator().next();
+ Entry<DatanodeDescriptor, BlockTargetPair> repl = repls.entries()
+ .iterator().next();
DatanodeDescriptor[] targets = repl.getValue().targets;
-
+
DatanodeDescriptor[] pipeline = new DatanodeDescriptor[1 + targets.length];
pipeline[0] = repl.getKey();
System.arraycopy(targets, 0, pipeline, 1, targets.length);
-
+
return pipeline;
}
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java?rev=1202013&r1=1202012&r2=1202013&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java Tue Nov 15 02:39:13 2011
@@ -137,15 +137,15 @@ public class TestBlockTokenWithDFS {
try {
DatanodeInfo[] nodes = lblock.getLocations();
targetAddr = NetUtils.createSocketAddr(nodes[0].getName());
- s = new Socket();
+ s = NetUtils.getDefaultSocketFactory(conf).createSocket();
s.connect(targetAddr, HdfsServerConstants.READ_TIMEOUT);
s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
String file = BlockReaderFactory.getFileName(targetAddr,
"test-blockpoolid", block.getBlockId());
- blockReader = BlockReaderFactory.newBlockReader(s, file, block,
- lblock.getBlockToken(), 0, -1,
- conf.getInt("io.file.buffer.size", 4096));
+ blockReader = BlockReaderFactory.newBlockReader(
+ conf, s, file, block,
+ lblock.getBlockToken(), 0, -1);
} catch (IOException ex) {
if (ex instanceof InvalidBlockTokenException) {
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java?rev=1202013&r1=1202012&r2=1202013&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java Tue Nov 15 02:39:13 2011
@@ -204,13 +204,13 @@ public class SimulatedFSDataset impleme
@Override
synchronized public BlockWriteStreams createStreams(boolean isCreate,
- int bytesPerChunk, int checksumSize) throws IOException {
+ DataChecksum requestedChecksum) throws IOException {
if (finalized) {
throw new IOException("Trying to write to a finalized replica "
+ theBlock);
} else {
SimulatedOutputStream crcStream = new SimulatedOutputStream();
- return new BlockWriteStreams(oStream, crcStream);
+ return new BlockWriteStreams(oStream, crcStream, requestedChecksum);
}
}
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java?rev=1202013&r1=1202012&r2=1202013&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java Tue Nov 15 02:39:13 2011
@@ -56,6 +56,7 @@ import static org.junit.Assert.*;
public class TestDataNodeVolumeFailure {
final private int block_size = 512;
MiniDFSCluster cluster = null;
+ private Configuration conf;
int dn_num = 2;
int blocks_num = 30;
short repl=2;
@@ -74,7 +75,7 @@ public class TestDataNodeVolumeFailure {
@Before
public void setUp() throws Exception {
// bring up a cluster of 2
- Configuration conf = new HdfsConfiguration();
+ conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, block_size);
// Allow a single volume failure (there are two volumes)
conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);
@@ -264,7 +265,7 @@ public class TestDataNodeVolumeFailure {
targetAddr = NetUtils.createSocketAddr(datanode.getName());
- s = new Socket();
+ s = NetUtils.getDefaultSocketFactory(conf).createSocket();
s.connect(targetAddr, HdfsServerConstants.READ_TIMEOUT);
s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
@@ -272,8 +273,8 @@ public class TestDataNodeVolumeFailure {
"test-blockpoolid",
block.getBlockId());
BlockReader blockReader =
- BlockReaderFactory.newBlockReader(s, file, block, lblock
- .getBlockToken(), 0, -1, 4096);
+ BlockReaderFactory.newBlockReader(conf, s, file, block, lblock
+ .getBlockToken(), 0, -1);
// nothing - if it fails - it will throw an exception
}
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeJsp.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeJsp.java?rev=1202013&r1=1202012&r2=1202013&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeJsp.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeJsp.java Tue Nov 15 02:39:13 2011
@@ -67,7 +67,8 @@ public class TestDatanodeJsp {
String viewFilePage = DFSTestUtil.urlGet(url);
- assertTrue("page should show preview of file contents", viewFilePage.contains(FILE_DATA));
+ assertTrue("page should show preview of file contents, got: " + viewFilePage,
+ viewFilePage.contains(FILE_DATA));
if (!doTail) {
assertTrue("page should show link to download file", viewFilePage
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java?rev=1202013&r1=1202012&r2=1202013&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java Tue Nov 15 02:39:13 2011
@@ -64,7 +64,8 @@ public class TestSimulatedFSDataset exte
// we pass expected len as zero, - fsdataset should use the sizeof actual
// data written
ReplicaInPipelineInterface bInfo = fsdataset.createRbw(b);
- BlockWriteStreams out = bInfo.createStreams(true, 512, 4);
+ BlockWriteStreams out = bInfo.createStreams(true,
+ DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32, 512));
try {
OutputStream dataOut = out.dataOut;
assertEquals(0, fsdataset.getLength(b));
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java?rev=1202013&r1=1202012&r2=1202013&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java Tue Nov 15 02:39:13 2011
@@ -323,9 +323,10 @@ public class TestListCorruptFileBlocks {
FSNamesystem.CorruptFileBlockInfo[] cfb = corruptFileBlocks
.toArray(new FSNamesystem.CorruptFileBlockInfo[0]);
// now get the 2nd and 3rd files that are corrupt
+ String[] cookie = new String[]{"1"};
Collection<FSNamesystem.CorruptFileBlockInfo> nextCorruptFileBlocks =
namenode.getNamesystem()
- .listCorruptFileBlocks("/corruptData", cfb[0].block.getBlockName());
+ .listCorruptFileBlocks("/corruptData", cookie);
FSNamesystem.CorruptFileBlockInfo[] ncfb = nextCorruptFileBlocks
.toArray(new FSNamesystem.CorruptFileBlockInfo[0]);
numCorrupt = nextCorruptFileBlocks.size();
@@ -333,9 +334,9 @@ public class TestListCorruptFileBlocks {
assertTrue(ncfb[0].block.getBlockName()
.equalsIgnoreCase(cfb[1].block.getBlockName()));
- corruptFileBlocks =
- namenode.getNamesystem().listCorruptFileBlocks("/corruptData",
- ncfb[1].block.getBlockName());
+ corruptFileBlocks =
+ namenode.getNamesystem()
+ .listCorruptFileBlocks("/corruptData", cookie);
numCorrupt = corruptFileBlocks.size();
assertTrue(numCorrupt == 0);
// Do a listing on a dir which doesn't have any corrupt blocks and
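
The listCorruptFileBlocks calls above now page with an opaque cookie instead of the last block name: the test passes a one-element String[] and the namesystem advances it in place, returning an empty collection once everything has been listed. Under that assumption, a full iteration looks roughly like this sketch (imports and the namenode handle come from the test):

    // Drain all corrupt-file records under /corruptData, assuming the
    // cookie array is updated in place by each call.
    String[] cookie = new String[]{"1"};
    Collection<FSNamesystem.CorruptFileBlockInfo> batch;
    do {
      batch = namenode.getNamesystem()
          .listCorruptFileBlocks("/corruptData", cookie);
      for (FSNamesystem.CorruptFileBlockInfo info : batch) {
        System.out.println(info);  // or collect for assertions
      }
    } while (!batch.isEmpty());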
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java?rev=1202013&r1=1202012&r2=1202013&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java Tue Nov 15 02:39:13 2011
@@ -23,9 +23,8 @@ import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
-import java.net.URI;
import java.net.URL;
-import java.security.PrivilegedExceptionAction;
+import java.util.Map;
import javax.servlet.http.HttpServletResponse;
@@ -34,12 +33,12 @@ import org.apache.hadoop.fs.BlockLocatio
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemContractBaseTest;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.web.resources.DoAsParam;
import org.apache.hadoop.hdfs.web.resources.GetOpParam;
import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
import org.apache.hadoop.hdfs.web.resources.PutOpParam;
@@ -51,6 +50,8 @@ public class TestWebHdfsFileSystemContra
private static final Configuration conf = new Configuration();
private static final MiniDFSCluster cluster;
private String defaultWorkingDirectory;
+
+ private UserGroupInformation ugi;
static {
conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
@@ -68,20 +69,11 @@ public class TestWebHdfsFileSystemContra
@Override
protected void setUp() throws Exception {
- final String uri = WebHdfsFileSystem.SCHEME + "://"
- + conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
-
//get file system as a non-superuser
final UserGroupInformation current = UserGroupInformation.getCurrentUser();
- final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
+ ugi = UserGroupInformation.createUserForTesting(
current.getShortUserName() + "x", new String[]{"user"});
- fs = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
- @Override
- public FileSystem run() throws Exception {
- return FileSystem.get(new URI(uri), conf);
- }
- });
-
+ fs = WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, conf);
defaultWorkingDirectory = fs.getWorkingDirectory().toUri().getPath();
}
@@ -263,9 +255,29 @@ public class TestWebHdfsFileSystemContra
public void testResponseCode() throws IOException {
final WebHdfsFileSystem webhdfs = (WebHdfsFileSystem)fs;
+ final Path root = new Path("/");
final Path dir = new Path("/test/testUrl");
assertTrue(webhdfs.mkdirs(dir));
+ {//test GETHOMEDIRECTORY
+ final URL url = webhdfs.toUrl(GetOpParam.Op.GETHOMEDIRECTORY, root);
+ final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+ final Map<?, ?> m = WebHdfsTestUtil.connectAndGetJson(
+ conn, HttpServletResponse.SC_OK);
+ assertEquals(WebHdfsFileSystem.getHomeDirectoryString(ugi),
+ m.get(Path.class.getSimpleName()));
+ conn.disconnect();
+ }
+
+ {//test GETHOMEDIRECTORY with unauthorized doAs
+ final URL url = webhdfs.toUrl(GetOpParam.Op.GETHOMEDIRECTORY, root,
+ new DoAsParam(ugi.getShortUserName() + "proxy"));
+ final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+ conn.connect();
+ assertEquals(HttpServletResponse.SC_UNAUTHORIZED, conn.getResponseCode());
+ conn.disconnect();
+ }
+
{//test set owner with empty parameters
final URL url = webhdfs.toUrl(PutOpParam.Op.SETOWNER, dir);
final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
@@ -280,7 +292,7 @@ public class TestWebHdfsFileSystemContra
final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod(op.getType().toString());
conn.connect();
- assertEquals(HttpServletResponse.SC_FORBIDDEN, conn.getResponseCode());
+ assertEquals(HttpServletResponse.SC_OK, conn.getResponseCode());
assertFalse(webhdfs.setReplication(dir, (short)1));
conn.disconnect();
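
The two blocks added to testResponseCode hit GETHOMEDIRECTORY over plain HTTP: once as the test user (expecting 200 plus a JSON Path entry) and once with an unauthorized doas= proxy parameter (expecting 401). The same probes can be issued outside the test with a bare HttpURLConnection; the sketch below assumes the usual WebHDFS URL shape (/webhdfs/v1/<path>?op=...) and uses placeholder host and user names:

    import java.net.HttpURLConnection;
    import java.net.URL;

    // "namenode:50070" and "alice" are placeholders for the cluster's
    // HTTP address and the calling user.
    final String base =
        "http://namenode:50070/webhdfs/v1/?op=GETHOMEDIRECTORY";

    // Authorized: expect 200 and a body such as {"Path":"/user/alice"}.
    HttpURLConnection ok = (HttpURLConnection)
        new URL(base + "&user.name=alice").openConnection();
    ok.connect();
    System.out.println(ok.getResponseCode());      // 200
    ok.disconnect();

    // Unauthorized proxy user: expect 401 (SC_UNAUTHORIZED), matching
    // the assertion above.
    HttpURLConnection denied = (HttpURLConnection)
        new URL(base + "&user.name=alice&doas=aliceproxy").openConnection();
    denied.connect();
    System.out.println(denied.getResponseCode());  // 401
    denied.disconnect();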
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java?rev=1202013&r1=1202012&r2=1202013&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java Tue Nov 15 02:39:13 2011
@@ -75,7 +75,7 @@ public class TestWebHdfsUrl {
+ "&token=" + tokenString, renewTokenUrl.getQuery());
Token<DelegationTokenIdentifier> delegationToken = new Token<DelegationTokenIdentifier>(
token);
- delegationToken.setKind(DelegationTokenIdentifier.HDFS_DELEGATION_KIND);
+ delegationToken.setKind(WebHdfsFileSystem.TOKEN_KIND);
Assert.assertEquals(
generateUrlQueryPrefix(PutOpParam.Op.CANCELDELEGATIONTOKEN,
ugi.getUserName())