Posted to common-commits@hadoop.apache.org by dh...@apache.org on 2009/03/11 17:55:45 UTC

svn commit: r752522 - in /hadoop/core/branches/branch-0.19: ./ conf/ src/c++/libhdfs/ src/c++/libhdfs/tests/conf/ src/hdfs/org/apache/hadoop/hdfs/ src/hdfs/org/apache/hadoop/hdfs/protocol/ src/hdfs/org/apache/hadoop/hdfs/server/datanode/ src/hdfs/org/a...

Author: dhruba
Date: Wed Mar 11 16:55:44 2009
New Revision: 752522

URL: http://svn.apache.org/viewvc?rev=752522&view=rev
Log:
HADOOP-5332. Appending to files is not allowed (by default) unless
dfs.support.append is set to true. (dhruba)
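
As a rough illustration of the behavior after this change (not part of the
commit itself): a client append succeeds only when the NameNode was started
with dfs.support.append set to true; with the new default of false, the call
fails with an IOException. The file path and error handling below are
hypothetical.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    import java.io.IOException;

    public class AppendExample {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path p = new Path("/tmp/append-demo");  // hypothetical path

        try {
          // Allowed only when the NameNode runs with dfs.support.append=true.
          FSDataOutputStream out = fs.append(p);
          out.write("more data".getBytes());
          out.close();
        } catch (IOException e) {
          // With the default of false, the NameNode rejects the append.
          System.err.println("append() rejected: " + e.getMessage());
        }
      }
    }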


Modified:
    hadoop/core/branches/branch-0.19/CHANGES.txt
    hadoop/core/branches/branch-0.19/conf/hadoop-default.xml
    hadoop/core/branches/branch-0.19/src/c++/libhdfs/hdfs_test.c
    hadoop/core/branches/branch-0.19/src/c++/libhdfs/tests/conf/hadoop-site.xml
    hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java
    hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
    hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
    hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestFileAppend2.java
    hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestFileAppend3.java
    hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestFileCreationDelete.java
    hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestLeaseRecovery.java
    hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestQuota.java
    hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestRenameWhileOpen.java

Modified: hadoop/core/branches/branch-0.19/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/CHANGES.txt?rev=752522&r1=752521&r2=752522&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/CHANGES.txt (original)
+++ hadoop/core/branches/branch-0.19/CHANGES.txt Wed Mar 11 16:55:44 2009
@@ -58,6 +58,9 @@
     HADOOP-5421. Removes the test TestRecoveryManager.java from the 0.19 branch
     as it has compilation issues. (ddas) 
 
+    HADOOP-5332. Appending to files is not allowed (by default) unless
+    dfs.support.append is set to true. (dhruba)
+
 Release 0.19.1 - 2009-02-23
 
   INCOMPATIBLE CHANGES

Modified: hadoop/core/branches/branch-0.19/conf/hadoop-default.xml
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/conf/hadoop-default.xml?rev=752522&r1=752521&r2=752522&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/conf/hadoop-default.xml (original)
+++ hadoop/core/branches/branch-0.19/conf/hadoop-default.xml Wed Mar 11 16:55:44 2009
@@ -582,6 +582,15 @@
 </property>
 
 <property>
+  <name>dfs.support.append</name>
+  <value>false</value>
+  <description>Does HDFS allow appends to files?
+               This is currently set to false because there are bugs in the
+               "append code" and is not supported in any prodction cluster.
+  </description>
+</property>
+
+<property>
   <name>fs.s3.block.size</name>
   <value>67108864</value>
   <description>Block size to use when writing files to S3.</description>
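
The flag defaults to false everywhere it is read, so a deployment or test
that wants append has to opt in explicitly. A minimal sketch of the opt-in,
mirroring what the modified tests below do:

    Configuration conf = new Configuration();
    // Must also be in effect on the NameNode; setting it only on the
    // client does not enable append on a running cluster.
    conf.setBoolean("dfs.support.append", true);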

Modified: hadoop/core/branches/branch-0.19/src/c++/libhdfs/hdfs_test.c
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/src/c%2B%2B/libhdfs/hdfs_test.c?rev=752522&r1=752521&r2=752522&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/src/c++/libhdfs/hdfs_test.c (original)
+++ hadoop/core/branches/branch-0.19/src/c++/libhdfs/hdfs_test.c Wed Mar 11 16:55:44 2009
@@ -317,7 +317,7 @@
         totalResult += (result ? 0 : 1);
     }
 
-    if (0) { // disable append tests in 0.19.x
+    {
       // TEST APPENDS
       const char *writePath = "/tmp/appends";
 

Modified: hadoop/core/branches/branch-0.19/src/c++/libhdfs/tests/conf/hadoop-site.xml
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/src/c%2B%2B/libhdfs/tests/conf/hadoop-site.xml?rev=752522&r1=752521&r2=752522&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/src/c++/libhdfs/tests/conf/hadoop-site.xml (original)
+++ hadoop/core/branches/branch-0.19/src/c++/libhdfs/tests/conf/hadoop-site.xml Wed Mar 11 16:55:44 2009
@@ -30,4 +30,11 @@
   </description>
 </property>
 
+<property>
+  <name>dfs.support.append</name>
+  <value>true</value>
+  <description>Allow appends to files.
+  </description>
+</property>
+
 </configuration>

Modified: hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java?rev=752522&r1=752521&r2=752522&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java (original)
+++ hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java Wed Mar 11 16:55:44 2009
@@ -175,11 +175,9 @@
   /** This optional operation is not yet supported. */
   public FSDataOutputStream append(Path f, int bufferSize,
       Progressable progress) throws IOException {
-    // disable append() in 0.19.x
-    throw new UnsupportedOperationException("HDFS does not support append yet");
-        
-    //DFSOutputStream op = (DFSOutputStream)dfs.append(getPathName(f), bufferSize, progress);
-    //return new FSDataOutputStream(op, statistics, op.getInitialLen());
+
+    DFSOutputStream op = (DFSOutputStream)dfs.append(getPathName(f), bufferSize, progress);
+    return new FSDataOutputStream(op, statistics, op.getInitialLen());
   }
 
   public FSDataOutputStream create(Path f, FsPermission permission,

Modified: hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java?rev=752522&r1=752521&r2=752522&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java (original)
+++ hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java Wed Mar 11 16:55:44 2009
@@ -116,6 +116,9 @@
    * @throws AccessControlException if permission to append file is 
    * denied by the system. As usually on the client side the exception will 
    * be wrapped into {@link org.apache.hadoop.ipc.RemoteException}.
+   * Allows appending to an existing file if the server is
+   * configured with the parameter dfs.support.append set to true;
+   * otherwise it throws an IOException.
    * @throws IOException if other errors occur.
    */
   public LocatedBlock append(String src, String clientName) throws IOException;
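
Since the server-side IOException travels back to the client wrapped in a
RemoteException, a caller that wants the original exception type can unwrap
it, roughly as DFSClient does. A sketch (the "namenode" proxy variable and
arguments are illustrative):

    import org.apache.hadoop.ipc.RemoteException;

    try {
      namenode.append(src, clientName);
    } catch (RemoteException re) {
      // Recreates the original server-side exception when its class is
      // available on the client; otherwise rethrows it as an IOException.
      throw re.unwrapRemoteException();
    }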

Modified: hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java?rev=752522&r1=752521&r2=752522&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java (original)
+++ hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java Wed Mar 11 16:55:44 2009
@@ -297,6 +297,7 @@
     
     FSVolume(File currentDir, Configuration conf) throws IOException {
       this.reserved = conf.getLong("dfs.datanode.du.reserved", 0);
+      boolean supportAppend = conf.getBoolean("dfs.support.append", false);
       File parent = currentDir.getParentFile();
 
       this.detachDir = new File(parent, "detach");
@@ -308,7 +309,11 @@
       //
       this.tmpDir = new File(parent, "tmp");
       if (tmpDir.exists()) {
-        FileUtil.fullyDelete(tmpDir);
+        if (supportAppend) {
+          recoverDetachedBlocks(currentDir, tmpDir);
+        } else {
+          FileUtil.fullyDelete(tmpDir);
+        }
       }
       this.dataDir = new FSDir(currentDir);
       if (!tmpDir.mkdirs()) {

Modified: hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=752522&r1=752521&r2=752522&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java Wed Mar 11 16:55:44 2009
@@ -148,7 +148,7 @@
    */
   private void initialize(String address, Configuration conf) throws IOException {
     InetSocketAddress socAddr = NameNode.getAddress(address);
-    this.supportAppends = conf.getBoolean("dfs.support.append", true);
+    this.supportAppends = conf.getBoolean("dfs.support.append", false);
     this.handlerCount = conf.getInt("dfs.namenode.handler.count", 10);
     this.server = RPC.getServer(this, socAddr.getHostName(), socAddr.getPort(),
                                 handlerCount, false, conf);

Modified: hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestFileAppend2.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestFileAppend2.java?rev=752522&r1=752521&r2=752522&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestFileAppend2.java (original)
+++ hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestFileAppend2.java Wed Mar 11 16:55:44 2009
@@ -123,13 +123,12 @@
    * Verify that all data exists in file.
    */ 
   public void testSimpleAppend() throws IOException {
-    /* HDFS append() is temporarily disabled in 0.19 */
-    if (true) return;
     Configuration conf = new Configuration();
     if (simulatedStorage) {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
     }
     conf.setInt("dfs.datanode.handler.count", 50);
+    conf.setBoolean("dfs.support.append", true);
     initBuffer(fileSize);
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
     FileSystem fs = cluster.getFileSystem();
@@ -371,8 +371,6 @@
    * Test that appends to files at random offsets.
    */
   public void testComplexAppend() throws IOException {
-    /* HDFS append() is temporarily disabled in 0.19 */
-    if (true) return;
     initBuffer(fileSize);
     Configuration conf = new Configuration();
     conf.setInt("heartbeat.recheck.interval", 2000);
@@ -381,6 +379,7 @@
     conf.setInt("dfs.socket.timeout", 30000);
     conf.setInt("dfs.datanode.socket.write.timeout", 30000);
     conf.setInt("dfs.datanode.handler.count", 50);
+    conf.setBoolean("dfs.support.append", true);
 
     MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, 
                                                 true, null);

Modified: hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestFileAppend3.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestFileAppend3.java?rev=752522&r1=752521&r2=752522&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestFileAppend3.java (original)
+++ hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestFileAppend3.java Wed Mar 11 16:55:44 2009
@@ -52,6 +52,7 @@
         AppendTestUtil.LOG.info("setUp()");
         conf = new Configuration();
         conf.setInt("io.bytes.per.checksum", 512);
+        conf.setBoolean("dfs.support.append", true);
         buffersize = conf.getInt("io.file.buffer.size", 4096);
         cluster = new MiniDFSCluster(conf, DATANODE_NUM, true, null);
         fs = (DistributedFileSystem)cluster.getFileSystem();
@@ -67,8 +68,6 @@
 
   /** TC1: Append on block boundary. */
   public void testTC1() throws Exception {
-    /* HDFS append() is temporarily disabled in 0.19 */
-    if (true) return;
     final Path p = new Path("/TC1/foo");
     System.out.println("p=" + p);
 
@@ -94,8 +93,6 @@
 
   /** TC2: Append on non-block boundary. */
   public void testTC2() throws Exception {
-    /* HDFS append() is temporarily disabled in 0.19 */
-    if (true) return;
     final Path p = new Path("/TC2/foo");
     System.out.println("p=" + p);
 
@@ -121,8 +118,6 @@
 
   /** TC5: Only one simultaneous append. */
   public void testTC5() throws Exception {
-    /* HDFS append() is temporarily disabled in 0.19 */
-    if (true) return;
     final Path p = new Path("/TC5/foo");
     System.out.println("p=" + p);
 
@@ -150,8 +145,6 @@
 
   /** TC7: Corrupted replicas are present. */
   public void testTC7() throws Exception {
-    /* HDFS append() is temporarily disabled in 0.19 */
-    if (true) return;
     final short repl = 2;
     final Path p = new Path("/TC7/foo");
     System.out.println("p=" + p);
@@ -197,8 +190,6 @@
 
   /** TC11: Racing rename */
   public void testTC11() throws Exception {
-    /* HDFS append() is temporarily disabled in 0.19 */
-    if (true) return;
     final Path p = new Path("/TC11/foo");
     System.out.println("p=" + p);
 
@@ -252,8 +243,6 @@
 
   /** TC12: Append to partial CRC chunk */
   public void testTC12() throws Exception {
-    /* HDFS append() is temporarily disabled in 0.19 */
-    if (true) return;
     final Path p = new Path("/TC12/foo");
     System.out.println("p=" + p);
     
@@ -278,4 +267,4 @@
     //c. Reopen file and read 25687+5877 bytes of data from file. Close file.
     AppendTestUtil.check(fs, p, len1 + len2);
   }
-}
\ No newline at end of file
+}

Modified: hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestFileCreationDelete.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestFileCreationDelete.java?rev=752522&r1=752521&r2=752522&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestFileCreationDelete.java (original)
+++ hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestFileCreationDelete.java Wed Mar 11 16:55:44 2009
@@ -37,10 +37,8 @@
   }
 
   public void testFileCreationDeleteParent() throws IOException {
-    /* XXX This test is temporarily disabled since sync() is not supported in
-     * 0.19.1.*/
-    if (true) return;
     Configuration conf = new Configuration();
+    conf.setBoolean("dfs.support.append", true);
     final int MAX_IDLE_TIME = 2000; // 2s
     conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
     conf.setInt("heartbeat.recheck.interval", 1000);

Modified: hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestLeaseRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestLeaseRecovery.java?rev=752522&r1=752521&r2=752522&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestLeaseRecovery.java (original)
+++ hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestLeaseRecovery.java Wed Mar 11 16:55:44 2009
@@ -58,6 +58,7 @@
     final int ORG_FILE_SIZE = 3000; 
     Configuration conf = new Configuration();
     conf.setLong("dfs.block.size", BLOCK_SIZE);
+    conf.setBoolean("dfs.support.append", true);
     MiniDFSCluster cluster = null;
 
     try {

Modified: hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestQuota.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestQuota.java?rev=752522&r1=752521&r2=752522&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestQuota.java (original)
+++ hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestQuota.java Wed Mar 11 16:55:44 2009
@@ -429,6 +429,7 @@
     // set a smaller block size so that we can test with smaller 
     // diskspace quotas
     conf.set("dfs.block.size", "512");
+    conf.setBoolean("dfs.support.append", true);
     final MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
     final FileSystem fs = cluster.getFileSystem();
     assertTrue("Not a HDFS: "+fs.getUri(),
@@ -547,7 +548,6 @@
       c = dfs.getContentSummary(dstPath);
       assertEquals(c.getSpaceConsumed(), 3 * fileSpace);
       
-      /* NOTE: append() is not supported in 0.18.
       OutputStream out = dfs.append(file2);
       // appending 1 fileLen should succeed
       out.write(new byte[fileLen]);
@@ -579,10 +579,6 @@
       // verify space after partial append
       c = dfs.getContentSummary(dstPath);
       assertEquals(c.getSpaceConsumed(), 5 * fileSpace);
-      == end of append test == */
-      
-      // reduce quota for quotaDir1 to account for not appending 
-      dfs.setQuota(quotaDir1, FSConstants.QUOTA_DONT_SET, 3 * fileSpace);
       
       // Test set replication :
       
@@ -591,7 +587,7 @@
       
       // verify that space is reduced by file2Len
       c = dfs.getContentSummary(dstPath);
-      assertEquals(c.getSpaceConsumed(), 3 * fileSpace - file2Len);
+      assertEquals(c.getSpaceConsumed(), 5 * fileSpace - file2Len);
       
       // now try to increase the replication and expect an error.
       hasException = false;
@@ -604,7 +600,7 @@
 
       // verify space consumed remains unchanged.
       c = dfs.getContentSummary(dstPath);
-      assertEquals(c.getSpaceConsumed(), 3 * fileSpace - file2Len);
+      assertEquals(c.getSpaceConsumed(), 5 * fileSpace - file2Len);
       
       // now increase the quota for quotaDir1 and quotaDir20
       dfs.setQuota(quotaDir1, FSConstants.QUOTA_DONT_SET, 10 * fileSpace);
@@ -614,7 +610,7 @@
       dfs.setReplication(file2, (short)(replication+1));
       // verify increase in space
       c = dfs.getContentSummary(dstPath);
-      assertEquals(c.getSpaceConsumed(), 3 * fileSpace + file2Len);
+      assertEquals(c.getSpaceConsumed(), 5 * fileSpace + file2Len);
       
     } finally {
       cluster.shutdown();

Modified: hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestRenameWhileOpen.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestRenameWhileOpen.java?rev=752522&r1=752521&r2=752522&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestRenameWhileOpen.java (original)
+++ hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestRenameWhileOpen.java Wed Mar 11 16:55:44 2009
@@ -42,15 +42,13 @@
    * move /user/dir1 /user/dir3
    */
   public void testWhileOpenRenameParent() throws IOException {
-    /* XXX This test is temporarily disabled since sync() is not supported in
-     * 0.18.3. This is a 0.18.3 only change. */
-    if (true) return;
     Configuration conf = new Configuration();
     final int MAX_IDLE_TIME = 2000; // 2s
     conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
     conf.setInt("heartbeat.recheck.interval", 1000);
     conf.setInt("dfs.heartbeat.interval", 1);
     conf.setInt("dfs.safemode.threshold.pct", 1);
+    conf.setBoolean("dfs.support.append", true);
 
     // create cluster
     System.out.println("Test 1*****************************");
@@ -117,15 +115,13 @@
    * move /user/dir1 /user/dir3
    */
   public void testWhileOpenRenameParentToNonexistentDir() throws IOException {
-    /* XXX This test is temporarily disabled since sync() is not supported in
-     * 0.19.1. */
-    if (true) return;
     Configuration conf = new Configuration();
     final int MAX_IDLE_TIME = 2000; // 2s
     conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
     conf.setInt("heartbeat.recheck.interval", 1000);
     conf.setInt("dfs.heartbeat.interval", 1);
     conf.setInt("dfs.safemode.threshold.pct", 1);
+    conf.setBoolean("dfs.support.append", true);
     System.out.println("Test 2************************************");
 
     // create cluster
@@ -192,15 +188,13 @@
    * move /user/dir1/file1 /user/dir2/
    */
   public void testWhileOpenRenameToExistentDirectory() throws IOException {
-    /* XXX This test is temporarily disabled since sync() is not supported in
-     * 0.19.1 */
-    if (true) return;
     Configuration conf = new Configuration();
     final int MAX_IDLE_TIME = 2000; // 2s
     conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
     conf.setInt("heartbeat.recheck.interval", 1000);
     conf.setInt("dfs.heartbeat.interval", 1);
     conf.setInt("dfs.safemode.threshold.pct", 1);
+    conf.setBoolean("dfs.support.append", true);
     System.out.println("Test 3************************************");
 
     // create cluster
@@ -257,15 +251,13 @@
    * move /user/dir1/file1 /user/dir2/
    */
   public void testWhileOpenRenameToNonExistentDirectory() throws IOException {
-    /* XXX This test is temporarily disabled since sync() is not supported in
-     * 0.19.1 */
-    if (true) return;
     Configuration conf = new Configuration();
     final int MAX_IDLE_TIME = 2000; // 2s
     conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
     conf.setInt("heartbeat.recheck.interval", 1000);
     conf.setInt("dfs.heartbeat.interval", 1);
     conf.setInt("dfs.safemode.threshold.pct", 1);
+    conf.setBoolean("dfs.support.append", true);
     System.out.println("Test 4************************************");
 
     // create cluster