Posted to hdfs-commits@hadoop.apache.org by sz...@apache.org on 2011/10/28 01:13:26 UTC

svn commit: r1190077 - in /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/ src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/ src/main/java/org/apache/hadoop/h...

Author: szetszwo
Date: Thu Oct 27 23:13:26 2011
New Revision: 1190077

URL: http://svn.apache.org/viewvc?rev=1190077&view=rev
Log:
HDFS-2432. Webhdfs: response FORBIDDEN when setReplication on non-files; clear umask before creating a file; throw IllegalArgumentException if setOwner with both owner and group empty; throw FileNotFoundException if getFileStatus on non-existing files; fix bugs in getBlockLocations; and changed getFileChecksum json response root to "FileChecksum".

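For context, a minimal client-side sketch (not part of this commit) of the behavior these fixes produce through WebHdfsFileSystem; the endpoint URI and paths below are made up:

    import java.io.FileNotFoundException;
    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class WebHdfsBehaviorSketch {
      public static void main(String[] args) throws Exception {
        // Hypothetical WebHDFS endpoint.
        final FileSystem fs = FileSystem.get(
            URI.create("webhdfs://namenode:50070/"), new Configuration());

        // setReplication on a directory now returns false; on the wire the
        // namenode replies 403 FORBIDDEN instead of 200 OK.
        final boolean applied = fs.setReplication(new Path("/some/dir"), (short)2);
        System.out.println("setReplication on a directory: " + applied);

        // getFileStatus on a non-existing path now fails on the server side
        // with FileNotFoundException (HTTP 404) instead of a null status.
        try {
          fs.getFileStatus(new Path("/no/such/file"));
        } catch (FileNotFoundException e) {
          System.out.println("expected: " + e.getMessage());
        }
      }
    }
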
Modified:
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1190077&r1=1190076&r2=1190077&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Thu Oct 27 23:13:26 2011
@@ -1208,6 +1208,12 @@ Release 0.23.0 - Unreleased
     HDFS-2494. Close the streams and DFSClient in DatanodeWebHdfsMethods.
     (Uma Maheswara Rao G via szetszwo)
 
+    HDFS-2432. Webhdfs: response FORBIDDEN when setReplication on non-files;
+    clear umask before creating a file; throw IllegalArgumentException if
+    setOwner with both owner and group empty; throw FileNotFoundException if
+    getFileStatus on non-existing files; fix bugs in getBlockLocations; and
+    changed getFileChecksum json response root to "FileChecksum".  (szetszwo)
+
   BREAKDOWN OF HDFS-1073 SUBTASKS
 
     HDFS-1521. Persist transaction ID on disk between NN restarts.

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java?rev=1190077&r1=1190076&r2=1190077&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java Thu Oct 27 23:13:26 2011
@@ -48,6 +48,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -152,6 +153,8 @@ public class DatanodeWebHdfsMethods {
     {
       final Configuration conf = new Configuration(datanode.getConf());
       final InetSocketAddress nnRpcAddr = NameNode.getAddress(conf);
+      conf.set(FsPermission.UMASK_LABEL, "000");
+
       final int b = bufferSize.getValue(conf);
       DFSClient dfsclient = new DFSClient(nnRpcAddr, conf);
       FSDataOutputStream out = null;
@@ -307,12 +310,12 @@ public class DatanodeWebHdfsMethods {
     final DataNode datanode = (DataNode)context.getAttribute("datanode");
     final Configuration conf = new Configuration(datanode.getConf());
     final InetSocketAddress nnRpcAddr = NameNode.getAddress(conf);
-    final DFSClient dfsclient = new DFSClient(nnRpcAddr, conf);
 
     switch(op.getValue()) {
     case OPEN:
     {
       final int b = bufferSize.getValue(conf);
+      final DFSClient dfsclient = new DFSClient(nnRpcAddr, conf);
       DFSDataInputStream in = null;
       try {
         in = new DFSClient.DFSDataInputStream(
@@ -355,13 +358,13 @@ public class DatanodeWebHdfsMethods {
     case GETFILECHECKSUM:
     {
       MD5MD5CRC32FileChecksum checksum = null;
-      DFSClient client = dfsclient;
+      DFSClient dfsclient = new DFSClient(nnRpcAddr, conf);
       try {
-        checksum = client.getFileChecksum(fullpath);
-        client.close();
-        client = null;
+        checksum = dfsclient.getFileChecksum(fullpath);
+        dfsclient.close();
+        dfsclient = null;
       } finally {
-        IOUtils.cleanup(LOG, client);
+        IOUtils.cleanup(LOG, dfsclient);
       }
       final String js = JsonUtil.toJsonString(checksum);
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();

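The umask is cleared above because the permission arriving with a create request already has the client's umask applied (WebHdfsFileSystem sends applyUMask(permission), as in the mkdirs change further down), so letting the datanode-side DFSClient apply its own default umask would mask the value a second time; the GET-path refactor likewise moves DFSClient construction into the branches that use it, so each can close it reliably. A minimal sketch of the double masking this avoids, with made-up permission values:

    import org.apache.hadoop.fs.permission.FsPermission;

    public class UmaskSketch {
      public static void main(String[] args) {
        // The client sends a permission that is already umask-adjusted,
        // e.g. 777 under a 022 umask becomes 755.
        final FsPermission clientMasked =
            new FsPermission((short)0777).applyUMask(new FsPermission((short)0022));
        System.out.println(clientMasked); // rwxr-xr-x

        // A non-zero server-side umask would tighten it a second time:
        System.out.println(
            clientMasked.applyUMask(new FsPermission((short)0027))); // rwxr-x---

        // With UMASK_LABEL set to "000", the server-side mask is a no-op.
        System.out.println(
            clientMasked.applyUMask(new FsPermission((short)0000))); // rwxr-xr-x
      }
    }
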
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java?rev=1190077&r1=1190076&r2=1190077&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java Thu Oct 27 23:13:26 2011
@@ -42,7 +42,9 @@ import javax.ws.rs.QueryParam;
 import javax.ws.rs.core.Context;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.ResponseBuilder;
 import javax.ws.rs.core.StreamingOutput;
+import javax.ws.rs.core.Response.Status;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -318,10 +320,15 @@ public class NamenodeWebHdfsMethods {
     {
       final boolean b = np.setReplication(fullpath, replication.getValue(conf));
       final String js = JsonUtil.toJsonString("boolean", b);
-      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
+      final ResponseBuilder r = b? Response.ok(): Response.status(Status.FORBIDDEN);
+      return r.entity(js).type(MediaType.APPLICATION_JSON).build();
     }
     case SETOWNER:
     {
+      if (owner.getValue() == null && group.getValue() == null) {
+        throw new IllegalArgumentException("Both owner and group are empty.");
+      }
+
       np.setOwner(fullpath, owner.getValue(), group.getValue());
       return Response.ok().type(MediaType.APPLICATION_JSON).build();
     }
@@ -487,13 +494,17 @@ public class NamenodeWebHdfsMethods {
       final long offsetValue = offset.getValue();
       final Long lengthValue = length.getValue();
       final LocatedBlocks locatedblocks = np.getBlockLocations(fullpath,
-          offsetValue, lengthValue != null? lengthValue: offsetValue + 1);
+          offsetValue, lengthValue != null? lengthValue: Long.MAX_VALUE);
       final String js = JsonUtil.toJsonString(locatedblocks);
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
     case GETFILESTATUS:
     {
       final HdfsFileStatus status = np.getFileInfo(fullpath);
+      if (status == null) {
+        throw new FileNotFoundException("File does not exist: " + fullpath);
+      }
+
       final String js = JsonUtil.toJsonString(status, true);
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }

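The getBlockLocations fix above is easy to miss: with the "length" parameter absent, the old default requested offset + 1 bytes, i.e. a single byte for offset 0, while the new default covers the rest of the file. A toy restatement of the two defaults, not the server code:

    public class DefaultLengthSketch {
      public static void main(String[] args) {
        final long offsetValue = 0L;
        final Long lengthValue = null; // "length" parameter omitted

        // Old default: offset + 1 bytes, i.e. one byte at offset 0.
        final long oldDefault =
            lengthValue != null? lengthValue: offsetValue + 1;
        // New default: everything from the offset onward.
        final long newDefault =
            lengthValue != null? lengthValue: Long.MAX_VALUE;

        System.out.println("old=" + oldDefault + ", new=" + newDefault);
      }
    }
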
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java?rev=1190077&r1=1190076&r2=1190077&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java Thu Oct 27 23:13:26 2011
@@ -27,6 +27,7 @@ import java.util.Map;
 import java.util.TreeMap;
 
 import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtil;
@@ -334,7 +335,7 @@ public class JsonUtil {
     } else {
       final Object[] a = new Object[array.size()];
       for(int i = 0; i < array.size(); i++) {
-        a[i] = toJsonMap(array.get(0));
+        a[i] = toJsonMap(array.get(i));
       }
       return a;
     }
@@ -436,7 +437,7 @@ public class JsonUtil {
     m.put("algorithm", checksum.getAlgorithmName());
     m.put("length", checksum.getLength());
     m.put("bytes", StringUtils.byteToHexString(checksum.getBytes()));
-    return toJsonString(MD5MD5CRC32FileChecksum.class, m);
+    return toJsonString(FileChecksum.class, m);
   }
 
   /** Convert a Json map to a MD5MD5CRC32FileChecksum. */
@@ -446,8 +447,7 @@ public class JsonUtil {
       return null;
     }
 
-    final Map<?, ?> m = (Map<?, ?>)json.get(
-        MD5MD5CRC32FileChecksum.class.getSimpleName());
+    final Map<?, ?> m = (Map<?, ?>)json.get(FileChecksum.class.getSimpleName());
     final String algorithm = (String)m.get("algorithm");
     final int length = (int)(long)(Long)m.get("length");
     final byte[] bytes = StringUtils.hexStringToByte((String)m.get("bytes"));

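Besides the copy-and-paste fix in the array conversion (every element was previously mapped from array.get(0)), the checksum response root switches from the concrete class name to the FileChecksum interface name. A small sketch of the two root keys, with the field values only indicated:

    import org.apache.hadoop.fs.FileChecksum;
    import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;

    public class ChecksumRootSketch {
      public static void main(String[] args) {
        // Old root key: {"MD5MD5CRC32FileChecksum": {"algorithm": ..., ...}}
        System.out.println(MD5MD5CRC32FileChecksum.class.getSimpleName());
        // New root key: {"FileChecksum": {"algorithm": ..., "length": ...,
        //                                 "bytes": ...}}
        System.out.println(FileChecksum.class.getSimpleName());
      }
    }
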
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java?rev=1190077&r1=1190076&r2=1190077&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java Thu Oct 27 23:13:26 2011
@@ -154,19 +154,18 @@ public class WebHdfsFileSystem extends H
     return f.isAbsolute()? f: new Path(workingDir, f);
   }
 
-  @SuppressWarnings("unchecked")
-  private static <T> T jsonParse(final InputStream in) throws IOException {
+  private static Map<?, ?> jsonParse(final InputStream in) throws IOException {
     if (in == null) {
       throw new IOException("The input stream is null.");
     }
-    return (T)JSON.parse(new InputStreamReader(in));
+    return (Map<?, ?>)JSON.parse(new InputStreamReader(in));
   }
 
-  private static void validateResponse(final HttpOpParam.Op op,
+  private static Map<?, ?> validateResponse(final HttpOpParam.Op op,
       final HttpURLConnection conn) throws IOException {
     final int code = conn.getResponseCode();
     if (code != op.getExpectedHttpResponseCode()) {
-      final Map<String, Object> m;
+      final Map<?, ?> m;
       try {
         m = jsonParse(conn.getErrorStream());
       } catch(IOException e) {
@@ -175,6 +174,10 @@ public class WebHdfsFileSystem extends H
             + ", message=" + conn.getResponseMessage(), e);
       }
 
+      if (m.get(RemoteException.class.getSimpleName()) == null) {
+        return m;
+      }
+
       final RemoteException re = JsonUtil.toRemoteException(m);
       throw re.unwrapRemoteException(AccessControlException.class,
           DSQuotaExceededException.class,
@@ -185,6 +188,7 @@ public class WebHdfsFileSystem extends H
           NSQuotaExceededException.class,
           UnresolvedPathException.class);
     }
+    return null;
   }
 
   URL toUrl(final HttpOpParam.Op op, final Path fspath,
@@ -235,15 +239,15 @@ public class WebHdfsFileSystem extends H
    * @param op http operation
    * @param fspath file system path
    * @param parameters parameters for the operation
-   * @return a JSON object, e.g. Object[], Map<String, Object>, etc.
+   * @return a JSON object, e.g. Object[], Map<?, ?>, etc.
    * @throws IOException
    */
-  private <T> T run(final HttpOpParam.Op op, final Path fspath,
+  private Map<?, ?> run(final HttpOpParam.Op op, final Path fspath,
       final Param<?,?>... parameters) throws IOException {
     final HttpURLConnection conn = httpConnect(op, fspath, parameters);
-    validateResponse(op, conn);
     try {
-      return WebHdfsFileSystem.<T>jsonParse(conn.getInputStream());
+      final Map<?, ?> m = validateResponse(op, conn);
+      return m != null? m: jsonParse(conn.getInputStream());
     } finally {
       conn.disconnect();
     }
@@ -258,7 +262,7 @@ public class WebHdfsFileSystem extends H
 
   private HdfsFileStatus getHdfsFileStatus(Path f) throws IOException {
     final HttpOpParam.Op op = GetOpParam.Op.GETFILESTATUS;
-    final Map<String, Object> json = run(op, f);
+    final Map<?, ?> json = run(op, f);
     final HdfsFileStatus status = JsonUtil.toFileStatus(json, true);
     if (status == null) {
       throw new FileNotFoundException("File does not exist: " + f);
@@ -284,7 +288,7 @@ public class WebHdfsFileSystem extends H
   public boolean mkdirs(Path f, FsPermission permission) throws IOException {
     statistics.incrementWriteOps(1);
     final HttpOpParam.Op op = PutOpParam.Op.MKDIRS;
-    final Map<String, Object> json = run(op, f,
+    final Map<?, ?> json = run(op, f,
         new PermissionParam(applyUMask(permission)));
     return (Boolean)json.get("boolean");
   }
@@ -293,7 +297,7 @@ public class WebHdfsFileSystem extends H
   public boolean rename(final Path src, final Path dst) throws IOException {
     statistics.incrementWriteOps(1);
     final HttpOpParam.Op op = PutOpParam.Op.RENAME;
-    final Map<String, Object> json = run(op, src,
+    final Map<?, ?> json = run(op, src,
         new DestinationParam(makeQualified(dst).toUri().getPath()));
     return (Boolean)json.get("boolean");
   }
@@ -333,8 +337,7 @@ public class WebHdfsFileSystem extends H
      ) throws IOException {
     statistics.incrementWriteOps(1);
     final HttpOpParam.Op op = PutOpParam.Op.SETREPLICATION;
-    final Map<String, Object> json = run(op, p,
-        new ReplicationParam(replication));
+    final Map<?, ?> json = run(op, p, new ReplicationParam(replication));
     return (Boolean)json.get("boolean");
   }
 
@@ -403,7 +406,7 @@ public class WebHdfsFileSystem extends H
   @Override
   public boolean delete(Path f, boolean recursive) throws IOException {
     final HttpOpParam.Op op = DeleteOpParam.Op.DELETE;
-    final Map<String, Object> json = run(op, f, new RecursiveParam(recursive));
+    final Map<?, ?> json = run(op, f, new RecursiveParam(recursive));
     return (Boolean)json.get("boolean");
   }
 
@@ -428,8 +431,7 @@ public class WebHdfsFileSystem extends H
     //convert FileStatus
     final FileStatus[] statuses = new FileStatus[array.length];
     for(int i = 0; i < array.length; i++) {
-      @SuppressWarnings("unchecked")
-      final Map<String, Object> m = (Map<String, Object>)array[i];
+      final Map<?, ?> m = (Map<?, ?>)array[i];
       statuses[i] = makeQualified(JsonUtil.toFileStatus(m, false), f);
     }
     return statuses;
@@ -439,7 +441,7 @@ public class WebHdfsFileSystem extends H
   public Token<DelegationTokenIdentifier> getDelegationToken(final String renewer
       ) throws IOException {
     final HttpOpParam.Op op = GetOpParam.Op.GETDELEGATIONTOKEN;
-    final Map<String, Object> m = run(op, null, new RenewerParam(renewer));
+    final Map<?, ?> m = run(op, null, new RenewerParam(renewer));
     final Token<DelegationTokenIdentifier> token = JsonUtil.toDelegationToken(m); 
     token.setService(new Text(getCanonicalServiceName()));
     return token;
@@ -467,7 +469,7 @@ public class WebHdfsFileSystem extends H
     statistics.incrementReadOps(1);
 
     final HttpOpParam.Op op = GetOpParam.Op.GETFILEBLOCKLOCATIONS;
-    final Map<String, Object> m = run(op, p, new OffsetParam(offset),
+    final Map<?, ?> m = run(op, p, new OffsetParam(offset),
         new LengthParam(length));
     return DFSUtil.locatedBlocks2Locations(JsonUtil.toLocatedBlocks(m));
   }
@@ -477,7 +479,7 @@ public class WebHdfsFileSystem extends H
     statistics.incrementReadOps(1);
 
     final HttpOpParam.Op op = GetOpParam.Op.GETCONTENTSUMMARY;
-    final Map<String, Object> m = run(op, p);
+    final Map<?, ?> m = run(op, p);
     return JsonUtil.toContentSummary(m);
   }
 
@@ -487,7 +489,7 @@ public class WebHdfsFileSystem extends H
     statistics.incrementReadOps(1);
   
     final HttpOpParam.Op op = GetOpParam.Op.GETFILECHECKSUM;
-    final Map<String, Object> m = run(op, p);
+    final Map<?, ?> m = run(op, p);
     return JsonUtil.toMD5MD5CRC32FileChecksum(m);
   }
 }

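The reworked validateResponse is what lets the FORBIDDEN setReplication case still deliver {"boolean": false} to the caller: on an unexpected HTTP code the error body is parsed and, when it carries no RemoteException, returned as the result instead of being thrown. A condensed, hypothetical stand-in for the private method, with the HttpURLConnection details stubbed out as plain parameters:

    import java.io.IOException;
    import java.util.Map;

    import org.apache.hadoop.ipc.RemoteException;

    public class ValidateResponseSketch {
      /** Hypothetical stand-in for WebHdfsFileSystem#validateResponse. */
      static Map<?, ?> validateResponse(final int code, final int expected,
          final Map<?, ?> errorBody) throws IOException {
        if (code != expected) {
          if (errorBody.get(RemoteException.class.getSimpleName()) == null) {
            // Unexpected code but no server-side exception: the body itself
            // is the result, e.g. {"boolean": false} for setReplication
            // on a directory.
            return errorBody;
          }
          // Otherwise unwrap and rethrow, as the real method does.
          throw new IOException("RemoteException in response body");
        }
        return null; // expected code: caller reads the normal input stream
      }

      public static void main(String[] args) throws IOException {
        final Map<String, Object> body = new java.util.TreeMap<String, Object>();
        body.put("boolean", Boolean.FALSE);
        System.out.println(validateResponse(403, 200, body)); // {boolean=false}
      }
    }
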
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java?rev=1190077&r1=1190076&r2=1190077&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java Thu Oct 27 23:13:26 2011
@@ -27,6 +27,8 @@ import java.net.URI;
 import java.net.URL;
 import java.security.PrivilegedExceptionAction;
 
+import javax.servlet.http.HttpServletResponse;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -39,6 +41,7 @@ import org.apache.hadoop.fs.permission.F
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.web.resources.GetOpParam;
+import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
 import org.apache.hadoop.hdfs.web.resources.PutOpParam;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -257,4 +260,50 @@ public class TestWebHdfsFileSystemContra
       WebHdfsFileSystem.LOG.info("This is expected.", e);
     }
   }
+
+  public void testResponseCode() throws IOException {
+    final WebHdfsFileSystem webhdfs = (WebHdfsFileSystem)fs;
+    final Path dir = new Path("/test/testUrl");
+    assertTrue(webhdfs.mkdirs(dir));
+
+    {//test set owner with empty parameters
+      final URL url = webhdfs.toUrl(PutOpParam.Op.SETOWNER, dir);
+      final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+      conn.connect();
+      assertEquals(HttpServletResponse.SC_BAD_REQUEST, conn.getResponseCode());
+      conn.disconnect();
+    }
+
+    {//test set replication on a directory
+      final HttpOpParam.Op op = PutOpParam.Op.SETREPLICATION;
+      final URL url = webhdfs.toUrl(op, dir);
+      final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+      conn.setRequestMethod(op.getType().toString());
+      conn.connect();
+      assertEquals(HttpServletResponse.SC_FORBIDDEN, conn.getResponseCode());
+      
+      assertFalse(webhdfs.setReplication(dir, (short)1));
+      conn.disconnect();
+    }
+
+    {//test get file status for a non-existent file.
+      final Path p = new Path(dir, "non-exist");
+      final URL url = webhdfs.toUrl(GetOpParam.Op.GETFILESTATUS, p);
+      final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+      conn.connect();
+      assertEquals(HttpServletResponse.SC_NOT_FOUND, conn.getResponseCode());
+      conn.disconnect();
+    }
+
+    {//test set permission with empty parameters
+      final HttpOpParam.Op op = PutOpParam.Op.SETPERMISSION;
+      final URL url = webhdfs.toUrl(op, dir);
+      final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+      conn.setRequestMethod(op.getType().toString());
+      conn.connect();
+      assertEquals(HttpServletResponse.SC_OK, conn.getResponseCode());
+      assertEquals((short)0755, webhdfs.getFileStatus(dir).getPermission().toShort());
+      conn.disconnect();
+    }
+  }
 }