You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by zh...@apache.org on 2015/02/23 20:37:02 UTC

[14/52] [abbrv] hadoop git commit: HDFS-7656. Expose truncate API for HDFS httpfs. (yliu)

HDFS-7656. Expose truncate API for HDFS httpfs. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2fd02afe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2fd02afe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2fd02afe

Branch: refs/heads/HDFS-7285
Commit: 2fd02afeca3710f487b6a039a65c1a666322b229
Parents: 64a8375
Author: yliu <yl...@apache.org>
Authored: Thu Feb 19 08:36:31 2015 +0800
Committer: yliu <yl...@apache.org>
Committed: Thu Feb 19 08:36:31 2015 +0800

----------------------------------------------------------------------
 .../hadoop/fs/http/client/HttpFSFileSystem.java | 24 ++++++++++-
 .../hadoop/fs/http/server/FSOperations.java     | 43 +++++++++++++++++++-
 .../http/server/HttpFSParametersProvider.java   | 20 +++++++++
 .../hadoop/fs/http/server/HttpFSServer.java     | 10 +++++
 .../fs/http/client/BaseTestHttpFSWith.java      | 40 ++++++++++++++++--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  2 +
 6 files changed, 133 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2fd02afe/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
index 5b079e9..20b212e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
@@ -109,12 +109,15 @@ public class HttpFSFileSystem extends FileSystem
   public static final String XATTR_VALUE_PARAM = "xattr.value";
   public static final String XATTR_SET_FLAG_PARAM = "flag";
   public static final String XATTR_ENCODING_PARAM = "encoding";
+  public static final String NEW_LENGTH_PARAM = "newlength";
 
   public static final Short DEFAULT_PERMISSION = 0755;
   public static final String ACLSPEC_DEFAULT = "";
 
   public static final String RENAME_JSON = "boolean";
 
+  public static final String TRUNCATE_JSON = "boolean";
+
   public static final String DELETE_JSON = "boolean";
 
   public static final String MKDIRS_JSON = "boolean";
@@ -191,7 +194,7 @@ public class HttpFSFileSystem extends FileSystem
     GETHOMEDIRECTORY(HTTP_GET), GETCONTENTSUMMARY(HTTP_GET),
     GETFILECHECKSUM(HTTP_GET),  GETFILEBLOCKLOCATIONS(HTTP_GET),
     INSTRUMENTATION(HTTP_GET), GETACLSTATUS(HTTP_GET),
-    APPEND(HTTP_POST), CONCAT(HTTP_POST),
+    APPEND(HTTP_POST), CONCAT(HTTP_POST), TRUNCATE(HTTP_POST),
     CREATE(HTTP_PUT), MKDIRS(HTTP_PUT), RENAME(HTTP_PUT), SETOWNER(HTTP_PUT),
     SETPERMISSION(HTTP_PUT), SETREPLICATION(HTTP_PUT), SETTIMES(HTTP_PUT),
     MODIFYACLENTRIES(HTTP_PUT), REMOVEACLENTRIES(HTTP_PUT),
@@ -568,6 +571,25 @@ public class HttpFSFileSystem extends FileSystem
   }
 
   /**
+   * Truncate a file.
+   * 
+   * @param f the file to be truncated.
+   * @param newLength The size the file is to be truncated to.
+   *
+   * @throws IOException if the truncate operation fails.
+   */
+  @Override
+  public boolean truncate(Path f, long newLength) throws IOException {
+    Map<String, String> params = new HashMap<String, String>();
+    params.put(OP_PARAM, Operation.TRUNCATE.toString());
+    params.put(NEW_LENGTH_PARAM, Long.toString(newLength));
+    HttpURLConnection conn = getConnection(Operation.TRUNCATE.getMethod(),
+        params, f, true);
+    JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
+    return (Boolean) json.get(TRUNCATE_JSON);
+  }
+
+  /**
    * Concat existing files together.
    * @param f the path to the target destination.
    * @param psrcs the paths to the sources to use for the concatenation.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2fd02afe/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
index 4b72a51..bc290a2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
@@ -364,7 +364,7 @@ public class FSOperations {
   }
 
   /**
-   * Executor that performs an append FileSystemAccess files system operation.
+   * Executor that performs a concat FileSystemAccess files system operation.
    */
   @InterfaceAudience.Private
   public static class FSConcat implements FileSystemAccess.FileSystemExecutor<Void> {
@@ -405,6 +405,47 @@ public class FSOperations {
   }
 
   /**
+   * Executor that performs a truncate FileSystemAccess files system operation.
+   */
+  @InterfaceAudience.Private
+  public static class FSTruncate implements 
+      FileSystemAccess.FileSystemExecutor<JSONObject> {
+    private Path path;
+    private long newLength;
+
+    /**
+     * Creates a Truncate executor.
+     *
+     * @param path path of the file to be truncated.
+     * @param newLength The size the file is to be truncated to.
+     */
+    public FSTruncate(String path, long newLength) {
+      this.path = new Path(path);
+      this.newLength = newLength;
+    }
+
+    /**
+     * Executes the filesystem operation.
+     *
+     * @param fs filesystem instance to use.
+     *
+     * @return <code>true</code> if the file has been truncated to the desired length,
+     *         <code>false</code> if a background process of adjusting the 
+     *         length of the last block has been started, and clients should 
+     *         wait for it to complete before proceeding with further file 
+     *         updates.
+     *
+     * @throws IOException thrown if an IO error occurred.
+     */
+    @Override
+    public JSONObject execute(FileSystem fs) throws IOException {
+      boolean result = fs.truncate(path, newLength);
+      return toJSON(HttpFSFileSystem.TRUNCATE_JSON.toLowerCase(), result);
+    }
+
+  }
+
+  /**
    * Executor that performs a content-summary FileSystemAccess files system operation.
    */
   @InterfaceAudience.Private

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2fd02afe/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
index fb06667..73853c4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
@@ -63,6 +63,7 @@ public class HttpFSParametersProvider extends ParametersProvider {
     PARAMS_DEF.put(Operation.INSTRUMENTATION, new Class[]{});
     PARAMS_DEF.put(Operation.APPEND, new Class[]{DataParam.class});
     PARAMS_DEF.put(Operation.CONCAT, new Class[]{SourcesParam.class});
+    PARAMS_DEF.put(Operation.TRUNCATE, new Class[]{NewLengthParam.class});
     PARAMS_DEF.put(Operation.CREATE,
       new Class[]{PermissionParam.class, OverwriteParam.class,
                   ReplicationParam.class, BlockSizeParam.class, DataParam.class});
@@ -290,6 +291,25 @@ public class HttpFSParametersProvider extends ParametersProvider {
   }
 
   /**
+   * Class for newlength parameter.
+   */
+  @InterfaceAudience.Private
+  public static class NewLengthParam extends LongParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = HttpFSFileSystem.NEW_LENGTH_PARAM;
+
+    /**
+     * Constructor, defaults the new length to 0.
+     */
+    public NewLengthParam() {
+      super(NAME, 0L);
+    }
+  }
+
+  /**
    * Class for overwrite parameter.
    */
   @InterfaceAudience.Private

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2fd02afe/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
index 9103718..1f903ba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.FilterParam;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.GroupParam;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.LenParam;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.ModifiedTimeParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.NewLengthParam;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OffsetParam;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OperationParam;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OverwriteParam;
@@ -427,6 +428,15 @@ public class HttpFSServer {
         response = Response.ok().build();
         break;
       }
+      case TRUNCATE: {
+        Long newLength = params.get(NewLengthParam.NAME, NewLengthParam.class);
+        FSOperations.FSTruncate command = 
+            new FSOperations.FSTruncate(path, newLength);
+        JSONObject json = fsExecute(user, command);
+        AUDIT_LOG.info("Truncate [{}] to length [{}]", path, newLength);
+        response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
+        break;
+      }
       default: {
         throw new IOException(
           MessageFormat.format("Invalid HTTP POST operation [{0}]",

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2fd02afe/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
index f063e33..2cc67d4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
@@ -24,12 +24,14 @@ import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.http.server.HttpFSServerWebApp;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.AppendTestUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -192,7 +194,7 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
       Assert.fail("the create should have failed because the file exists " +
                   "and override is FALSE");
     } catch (IOException ex) {
-System.out.println("#");
+      System.out.println("#");
     } catch (Exception ex) {
       Assert.fail(ex.toString());
     }
@@ -222,6 +224,31 @@ System.out.println("#");
     }
   }
 
+  private void testTruncate() throws Exception {
+    if (!isLocalFS()) {
+      final short repl = 3;
+      final int blockSize = 1024;
+      final int numOfBlocks = 2;
+      FileSystem fs = FileSystem.get(getProxiedFSConf());
+      fs.mkdirs(getProxiedFSTestDir());
+      Path file = new Path(getProxiedFSTestDir(), "foo.txt");
+      final byte[] data = FileSystemTestHelper.getFileData(
+          numOfBlocks, blockSize);
+      FileSystemTestHelper.createFile(fs, file, data, blockSize, repl);
+
+      // Truncate at a block boundary so no lease recovery is needed.
+      final int newLength = blockSize;
+
+      boolean isReady = fs.truncate(file, newLength);
+      Assert.assertTrue("Recovery is not expected.", isReady);
+
+      FileStatus fileStatus = fs.getFileStatus(file);
+      Assert.assertEquals(newLength, fileStatus.getLen());
+      AppendTestUtil.checkFullFile(fs, file, newLength, data, file.toString());
+
+      fs.close();
+    }
+  }
+
   private void testConcat() throws Exception {
     Configuration config = getProxiedFSConf();
     config.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
@@ -784,9 +811,10 @@ System.out.println("#");
   }
 
   protected enum Operation {
-    GET, OPEN, CREATE, APPEND, CONCAT, RENAME, DELETE, LIST_STATUS, WORKING_DIRECTORY, MKDIRS,
-    SET_TIMES, SET_PERMISSION, SET_OWNER, SET_REPLICATION, CHECKSUM, CONTENT_SUMMARY,
-    FILEACLS, DIRACLS, SET_XATTR, GET_XATTRS, REMOVE_XATTR, LIST_XATTRS
+    GET, OPEN, CREATE, APPEND, TRUNCATE, CONCAT, RENAME, DELETE, LIST_STATUS, 
+    WORKING_DIRECTORY, MKDIRS, SET_TIMES, SET_PERMISSION, SET_OWNER, 
+    SET_REPLICATION, CHECKSUM, CONTENT_SUMMARY, FILEACLS, DIRACLS, SET_XATTR,
+    GET_XATTRS, REMOVE_XATTR, LIST_XATTRS
   }
 
   private void operation(Operation op) throws Exception {
@@ -803,8 +831,12 @@ System.out.println("#");
       case APPEND:
         testAppend();
         break;
+      case TRUNCATE:
+        testTruncate();
+        break;
       case CONCAT:
         testConcat();
+        break;
       case RENAME:
         testRename();
         break;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2fd02afe/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3735e90..80a086a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -337,6 +337,8 @@ Release 2.7.0 - UNRELEASED
     HDFS-7584. Enable Quota Support for Storage Types (See breakdown of
     tasks below)
 
+    HDFS-7656. Expose truncate API for HDFS httpfs. (yliu)
+
   IMPROVEMENTS
 
     HDFS-7055. Add tracing to DFSInputStream (cmccabe)