You are viewing a plain text version of this content. The canonical link for it is here.
Posted to hdfs-commits@hadoop.apache.org by cn...@apache.org on 2013/08/06 00:11:31 UTC
svn commit: r1510780 - in
/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs:
CHANGES.txt src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
Author: cnauroth
Date: Mon Aug 5 22:11:30 2013
New Revision: 1510780
URL: http://svn.apache.org/r1510780
Log:
HDFS-4905. Merging change r1510773 from trunk to branch-2.
Modified:
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1510780&r1=1510779&r2=1510780&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Mon Aug 5 22:11:30 2013
@@ -58,6 +58,9 @@ Release 2.1.1-beta - UNRELEASED
HDFS-5061. Make FSNameSystem#auditLoggers an unmodifiable list.
(Arpit Agarwal via suresh)
+ HDFS-4905. Add appendToFile command to "hdfs dfs". (Arpit Agarwal via
+ cnauroth)
+
OPTIMIZATIONS
BUG FIXES
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java?rev=1510780&r1=1510779&r2=1510780&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java Mon Aug 5 22:11:30 2013
@@ -17,17 +17,7 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.io.ByteArrayOutputStream;
-import java.io.DataOutputStream;
-import java.io.File;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.io.PrintStream;
-import java.io.PrintWriter;
+import java.io.*;
import java.security.Permission;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
@@ -42,10 +32,7 @@ import java.util.zip.GZIPOutputStream;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSInputChecker;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FsShell;
-import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -63,6 +50,9 @@ import org.apache.hadoop.util.ToolRunner
import org.junit.Test;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.CoreMatchers.not;
+import static org.junit.Assert.*;
/**
* This class tests commands from DFSShell.
@@ -101,6 +91,18 @@ public class TestDFSShell {
return f;
}
+ /**
+ * Creates a new local file of the given length filled with pseudo-random
+ * bytes. The file must not already exist.
+ *
+ * @param fileLength number of bytes to write into the file
+ * @param f local file to create
+ * @return the same {@code f}, for call chaining
+ * @throws IOException if the file cannot be created or written
+ */
+ static File createLocalFileWithRandomData(int fileLength, File f)
+ throws IOException {
+ assertTrue(!f.exists());
+ f.createNewFile();
+ FileOutputStream out = new FileOutputStream(f.toString());
+ try {
+ byte[] buffer = new byte[fileLength];
+ // Actually fill the buffer so the file contains random data, as the
+ // method name promises; previously it was written all zeros.
+ new java.util.Random().nextBytes(buffer);
+ out.write(buffer);
+ out.flush();
+ } finally {
+ // Close unconditionally so a failed write() does not leak the stream.
+ out.close();
+ }
+ return f;
+ }
+
static void show(String s) {
System.out.println(Thread.currentThread().getStackTrace()[2] + " " + s);
}
@@ -1732,6 +1734,85 @@ public class TestDFSShell {
}
}
+
+ /** Verifies that "-appendToFile" creates the target on first use and
+ * appends on subsequent uses, growing it by the combined input size. */
+ @Test (timeout = 300000)
+ public void testAppendToFile() throws Exception {
+ final int bytesPerInputFile = 1024 * 1024;
+ File localDir = new File(TEST_ROOT_DIR, "testAppendtoFileDir");
+ localDir.mkdirs();
+
+ File localFile1 = new File(localDir, "file1");
+ File localFile2 = new File(localDir, "file2");
+ createLocalFileWithRandomData(bytesPerInputFile, localFile1);
+ createLocalFileWithRandomData(bytesPerInputFile, localFile2);
+
+ Configuration conf = new HdfsConfiguration();
+ MiniDFSCluster cluster =
+ new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+ cluster.waitActive();
+
+ try {
+ FileSystem fs = cluster.getFileSystem();
+ assertTrue("Not a HDFS: " + fs.getUri(),
+ fs instanceof DistributedFileSystem);
+
+ Path target = new Path("/remoteFile");
+ FsShell shell = new FsShell();
+ shell.setConf(conf);
+ String[] args = new String[] { "-appendToFile",
+ localFile1.toString(), localFile2.toString(), target.toString() };
+
+ // First run: the target does not exist yet, so the command creates it
+ // and writes both local files, yielding twice the per-file length.
+ assertThat(ToolRunner.run(shell, args), is(0));
+ assertThat(fs.getFileStatus(target).getLen(),
+ is((long) bytesPerInputFile * 2));
+
+ // Second run: the target exists, so the same inputs are appended and
+ // the file length doubles.
+ assertThat(ToolRunner.run(shell, args), is(0));
+ assertThat(fs.getFileStatus(target).getLen(),
+ is((long) bytesPerInputFile * 4));
+ } finally {
+ cluster.shutdown();
+ }
+ }
+
+ /** Verifies that "-appendToFile" rejects invalid argument lists with a
+ * non-zero exit code: too few arguments, and stdin ("-") mixed with
+ * other input files. */
+ @Test (timeout = 300000)
+ public void testAppendToFileBadArgs() throws Exception {
+ final int localFileLength = 1024 * 1024;
+ File localDir = new File(TEST_ROOT_DIR, "testAppendToFileBadArgsDir");
+ localDir.mkdirs();
+
+ File localFile = new File(localDir, "file1");
+ createLocalFileWithRandomData(localFileLength, localFile);
+
+ Configuration conf = new HdfsConfiguration();
+ MiniDFSCluster cluster =
+ new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+ cluster.waitActive();
+
+ try {
+ FileSystem fs = cluster.getFileSystem();
+ assertTrue("Not a HDFS: " + fs.getUri(),
+ fs instanceof DistributedFileSystem);
+
+ FsShell shell = new FsShell();
+ shell.setConf(conf);
+
+ // Too few arguments: a source but no destination must fail.
+ String[] args =
+ new String[] { "-appendToFile", localFile.toString() };
+ assertThat(ToolRunner.run(shell, args), not(0));
+
+ // Mixing stdin ("-") with named input files must also fail.
+ Path target = new Path("/remoteFile");
+ args = new String[] {
+ "-appendToFile", localFile.toString(), "-", target.toString() };
+ assertThat(ToolRunner.run(shell, args), not(0));
+ } finally {
+ cluster.shutdown();
+ }
+ }
+
/**
* Test that the server trash configuration is respected when
* the client configuration is not set.