Posted to common-commits@hadoop.apache.org by zh...@apache.org on 2015/02/06 22:47:07 UTC
[35/50] [abbrv] hadoop git commit: HDFS-7655. Expose truncate API for Web HDFS. (yliu)
HDFS-7655. Expose truncate API for Web HDFS. (yliu)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/04fe613b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/04fe613b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/04fe613b
Branch: refs/heads/HDFS-EC
Commit: 04fe613b567761a18f9f2147a4e9b89fdd7a2cec
Parents: 237996a
Author: yliu <yl...@apache.org>
Authored: Thu Feb 5 23:45:06 2015 +0800
Committer: Zhe Zhang <zh...@apache.org>
Committed: Fri Feb 6 13:45:51 2015 -0800
----------------------------------------------------------------------
.../apache/hadoop/fs/FileSystemTestHelper.java | 27 +++++++----
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 +
.../web/resources/NamenodeWebHdfsMethods.java | 30 ++++++++----
.../hadoop/hdfs/web/WebHdfsFileSystem.java | 8 ++++
.../hdfs/web/resources/NewLengthParam.java | 49 ++++++++++++++++++++
.../hadoop/hdfs/web/resources/PostOpParam.java | 2 +
.../hdfs/web/TestFSMainOperationsWebHdfs.java | 29 ++++++++++++
7 files changed, 129 insertions(+), 18 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/04fe613b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java
index a5d8403..4a88c51 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java
@@ -22,7 +22,6 @@ import java.io.FileNotFoundException;
import java.net.URI;
import java.util.Random;
-
import org.apache.commons.lang.RandomStringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.token.Token;
@@ -127,28 +126,36 @@ public class FileSystemTestHelper {
*/
public static long createFile(FileSystem fSys, Path path, int numBlocks,
int blockSize, short numRepl, boolean createParent) throws IOException {
- FSDataOutputStream out =
- fSys.create(path, false, 4096, numRepl, blockSize );
+ return createFile(fSys, path, getFileData(numBlocks, blockSize),
+ blockSize, numRepl);
+ }
- byte[] data = getFileData(numBlocks, blockSize);
- out.write(data, 0, data.length);
- out.close();
+ public static long createFile(FileSystem fSys, Path path, byte[] data,
+ int blockSize, short numRepl) throws IOException {
+ FSDataOutputStream out =
+ fSys.create(path, false, 4096, numRepl, blockSize);
+ try {
+ out.write(data, 0, data.length);
+ } finally {
+ out.close();
+ }
return data.length;
}
-
public static long createFile(FileSystem fSys, Path path, int numBlocks,
int blockSize, boolean createParent) throws IOException {
- return createFile(fSys, path, numBlocks, blockSize, fSys.getDefaultReplication(path), true);
+ return createFile(fSys, path, numBlocks, blockSize,
+ fSys.getDefaultReplication(path), true);
}
public static long createFile(FileSystem fSys, Path path, int numBlocks,
int blockSize) throws IOException {
- return createFile(fSys, path, numBlocks, blockSize, true);
+ return createFile(fSys, path, numBlocks, blockSize, true);
}
public static long createFile(FileSystem fSys, Path path) throws IOException {
- return createFile(fSys, path, DEFAULT_NUM_BLOCKS, DEFAULT_BLOCK_SIZE, DEFAULT_NUM_REPL, true);
+ return createFile(fSys, path, DEFAULT_NUM_BLOCKS, DEFAULT_BLOCK_SIZE,
+ DEFAULT_NUM_REPL, true);
}
public long createFile(FileSystem fSys, String name) throws IOException {
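[Editorial note] The refactoring above splits file creation so a caller can supply its own payload and keep a reference to it for later verification, and it moves the close() into a finally block so a failed write no longer leaks the stream. A minimal usage sketch, assuming a configured FileSystem and that getFileData is accessible as in the diff (the path and sizes here are illustrative, not part of the commit):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.FileSystemTestHelper;
    import org.apache.hadoop.fs.Path;

    public class CreateFileSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path file = new Path("/tmp/truncate-demo");

        // Build deterministic contents (2 blocks of 1024 bytes) and keep
        // the array so the caller can verify the data after a truncate.
        byte[] data = FileSystemTestHelper.getFileData(2, 1024);

        // New overload from the diff above: write a caller-supplied
        // payload with the given block size and replication.
        FileSystemTestHelper.createFile(fs, file, data, 1024, (short) 3);
      }
    }

This is what the new testTruncate below relies on: it hands the helper a byte[] it keeps, then checks the surviving prefix after truncation.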
http://git-wip-us.apache.org/repos/asf/hadoop/blob/04fe613b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 62ab1f9..ac73ab9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -306,6 +306,8 @@ Release 2.7.0 - UNRELEASED
HDFS-6673. Add delimited format support to PB OIV tool. (Eddy Xu via wang)
+ HDFS-7655. Expose truncate API for Web HDFS. (yliu)
+
IMPROVEMENTS
HDFS-7055. Add tracing to DFSInputStream (cmccabe)
http://git-wip-us.apache.org/repos/asf/hadoop/blob/04fe613b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index e688bb6..0a6f133 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -57,7 +57,7 @@ import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.hdfs.StorageType;
+import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.XAttrHelper;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
@@ -614,10 +614,12 @@ public class NamenodeWebHdfsMethods {
@QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
final BufferSizeParam bufferSize,
@QueryParam(ExcludeDatanodesParam.NAME) @DefaultValue(ExcludeDatanodesParam.DEFAULT)
- final ExcludeDatanodesParam excludeDatanodes
+ final ExcludeDatanodesParam excludeDatanodes,
+ @QueryParam(NewLengthParam.NAME) @DefaultValue(NewLengthParam.DEFAULT)
+ final NewLengthParam newLength
) throws IOException, InterruptedException {
return post(ugi, delegation, username, doAsUser, ROOT, op, concatSrcs,
- bufferSize, excludeDatanodes);
+ bufferSize, excludeDatanodes, newLength);
}
/** Handle HTTP POST request. */
@@ -641,11 +643,13 @@ public class NamenodeWebHdfsMethods {
@QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
final BufferSizeParam bufferSize,
@QueryParam(ExcludeDatanodesParam.NAME) @DefaultValue(ExcludeDatanodesParam.DEFAULT)
- final ExcludeDatanodesParam excludeDatanodes
+ final ExcludeDatanodesParam excludeDatanodes,
+ @QueryParam(NewLengthParam.NAME) @DefaultValue(NewLengthParam.DEFAULT)
+ final NewLengthParam newLength
) throws IOException, InterruptedException {
init(ugi, delegation, username, doAsUser, path, op, concatSrcs, bufferSize,
- excludeDatanodes);
+ excludeDatanodes, newLength);
return ugi.doAs(new PrivilegedExceptionAction<Response>() {
@Override
@@ -653,7 +657,7 @@ public class NamenodeWebHdfsMethods {
try {
return post(ugi, delegation, username, doAsUser,
path.getAbsolutePath(), op, concatSrcs, bufferSize,
- excludeDatanodes);
+ excludeDatanodes, newLength);
} finally {
reset();
}
@@ -670,9 +674,11 @@ public class NamenodeWebHdfsMethods {
final PostOpParam op,
final ConcatSourcesParam concatSrcs,
final BufferSizeParam bufferSize,
- final ExcludeDatanodesParam excludeDatanodes
+ final ExcludeDatanodesParam excludeDatanodes,
+ final NewLengthParam newLength
) throws IOException, URISyntaxException {
final NameNode namenode = (NameNode)context.getAttribute("name.node");
+ final NamenodeProtocols np = getRPCServer(namenode);
switch(op.getValue()) {
case APPEND:
@@ -684,9 +690,17 @@ public class NamenodeWebHdfsMethods {
}
case CONCAT:
{
- getRPCServer(namenode).concat(fullpath, concatSrcs.getAbsolutePaths());
+ np.concat(fullpath, concatSrcs.getAbsolutePaths());
return Response.ok().build();
}
+ case TRUNCATE:
+ {
+ // We treat each rest request as a separate client.
+ final boolean b = np.truncate(fullpath, newLength.getValue(),
+ "DFSClient_" + DFSUtil.getSecureRandom().nextLong());
+ final String js = JsonUtil.toJsonString("boolean", b);
+ return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
+ }
default:
throw new UnsupportedOperationException(op + " is not supported");
}
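[Editorial note] At the REST level, the new case maps truncate onto an HTTP POST with op=TRUNCATE and the newlength query parameter defined by NewLengthParam. A hedged sketch of the wire interaction using plain java.net; the host, port, path, and user.name value are placeholders for your cluster, not part of the commit:

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class WebHdfsTruncateRestSketch {
      public static void main(String[] args) throws Exception {
        // Placeholder namenode host/port and file path.
        URL url = new URL("http://namenode:50070/webhdfs/v1/tmp/truncate-demo"
            + "?op=TRUNCATE&newlength=1024&user.name=hdfs");

        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("POST");
        try (BufferedReader in = new BufferedReader(
            new InputStreamReader(conn.getInputStream()))) {
          // The handler above replies {"boolean": true|false}: true when
          // the file is immediately usable at the new length, false when
          // recovery of the last block is still in progress.
          System.out.println(conn.getResponseCode() + " " + in.readLine());
        } finally {
          conn.disconnect();
        }
      }
    }

Note the comment in the diff: each REST request is treated as a separate client, hence the randomized "DFSClient_" name passed to the namenode's truncate RPC.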
http://git-wip-us.apache.org/repos/asf/hadoop/blob/04fe613b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 460e78b..938f7c7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -1161,6 +1161,14 @@ public class WebHdfsFileSystem extends FileSystem
}
@Override
+ public boolean truncate(Path f, long newLength) throws IOException {
+ statistics.incrementWriteOps(1);
+
+ final HttpOpParam.Op op = PostOpParam.Op.TRUNCATE;
+ return new FsPathBooleanRunner(op, f, new NewLengthParam(newLength)).run();
+ }
+
+ @Override
public boolean delete(Path f, boolean recursive) throws IOException {
final HttpOpParam.Op op = DeleteOpParam.Op.DELETE;
return new FsPathBooleanRunner(op, f,
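[Editorial note] With the override above, truncate becomes reachable through the standard FileSystem API over webhdfs://. A minimal client sketch, assuming a reachable cluster (URI and path are placeholders):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TruncateClientSketch {
      public static void main(String[] args) throws Exception {
        // Placeholder URI; point this at a real namenode.
        FileSystem fs = FileSystem.get(
            URI.create("webhdfs://namenode:50070"), new Configuration());

        // Delegates to the FsPathBooleanRunner shown in the diff, which
        // issues POST ...?op=TRUNCATE&newlength=512 and parses the
        // boolean JSON reply.
        boolean ready = fs.truncate(new Path("/tmp/truncate-demo"), 512L);
        System.out.println(ready
            ? "truncate complete"
            : "waiting on last-block recovery");
      }
    }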
http://git-wip-us.apache.org/repos/asf/hadoop/blob/04fe613b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/NewLengthParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/NewLengthParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/NewLengthParam.java
new file mode 100644
index 0000000..83aba9e
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/NewLengthParam.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+/** NewLength parameter. */
+public class NewLengthParam extends LongParam {
+ /** Parameter name. */
+ public static final String NAME = "newlength";
+ /** Default parameter value. */
+ public static final String DEFAULT = NULL;
+
+ private static final Domain DOMAIN = new Domain(NAME);
+
+ /**
+ * Constructor.
+ * @param value the parameter value.
+ */
+ public NewLengthParam(final Long value) {
+ super(DOMAIN, value, 0L, null);
+ }
+
+ /**
+ * Constructor.
+ * @param str a string representation of the parameter value.
+ */
+ public NewLengthParam(final String str) {
+ this(DOMAIN.parse(str));
+ }
+
+ @Override
+ public String getName() {
+ return NAME;
+ }
+}
\ No newline at end of file
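[Editorial note] The constructor passes a minimum of 0L and no maximum to the LongParam range check, so a negative newlength should be rejected when the query parameter is parsed, before any RPC reaches the namenode. A small sketch of that boundary; the assumption that the range check surfaces as an IllegalArgumentException follows the usual LongParam behavior and is not shown in this diff:

    import org.apache.hadoop.hdfs.web.resources.NewLengthParam;

    public class NewLengthParamSketch {
      public static void main(String[] args) {
        // Parsed from the query string: valid, at or above the 0L minimum.
        NewLengthParam ok = new NewLengthParam("1024");
        System.out.println(ok.getName() + "=" + ok.getValue());

        // Below the 0L minimum passed to super(): expected to be
        // rejected at parse time rather than at the namenode.
        try {
          new NewLengthParam("-1");
        } catch (IllegalArgumentException e) {
          System.out.println("rejected: " + e.getMessage());
        }
      }
    }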
http://git-wip-us.apache.org/repos/asf/hadoop/blob/04fe613b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java
index 54034f0..13f792e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java
@@ -27,6 +27,8 @@ public class PostOpParam extends HttpOpParam<PostOpParam.Op> {
CONCAT(false, HttpURLConnection.HTTP_OK),
+ TRUNCATE(false, HttpURLConnection.HTTP_OK),
+
NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED);
final boolean doOutputAndRedirect;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/04fe613b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java
index 4975a87..80369fd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java
@@ -29,11 +29,13 @@ import java.security.PrivilegedExceptionAction;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FSMainOperationsBaseTest;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.AppendTestUtil;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -136,6 +138,33 @@ public class TestFSMainOperationsWebHdfs extends FSMainOperationsBaseTest {
Assert.assertEquals(1024*4, fileStatus.getLen());
}
+ @Test
+ public void testTruncate() throws Exception {
+ final short repl = 3;
+ final int blockSize = 1024;
+ final int numOfBlocks = 2;
+ Path dir = getTestRootPath(fSys, "test/hadoop");
+ Path file = getTestRootPath(fSys, "test/hadoop/file");
+
+ final byte[] data = getFileData(numOfBlocks, blockSize);
+ createFile(fSys, file, data, blockSize, repl);
+
+ final int newLength = blockSize;
+
+ boolean isReady = fSys.truncate(file, newLength);
+
+ Assert.assertTrue("Recovery is not expected.", isReady);
+
+ FileStatus fileStatus = fSys.getFileStatus(file);
+ Assert.assertEquals(fileStatus.getLen(), newLength);
+ AppendTestUtil.checkFullFile(fSys, file, newLength, data, file.toString());
+
+ ContentSummary cs = fSys.getContentSummary(dir);
+ Assert.assertEquals("Bad disk space usage", cs.getSpaceConsumed(),
+ newLength * repl);
+ Assert.assertTrue("Deleted", fSys.delete(dir, true));
+ }
+
// Test that WebHdfsFileSystem.jsonParse() closes the connection's input
// stream.
// Closing the inputstream in jsonParse will allow WebHDFS to reuse