Posted to hdfs-commits@hadoop.apache.org by ha...@apache.org on 2009/10/30 21:02:48 UTC

svn commit: r831436 - in /hadoop/hdfs/trunk: ./ lib/ src/java/org/apache/hadoop/hdfs/ src/test/hdfs/org/apache/hadoop/hdfs/ src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/ src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/

Author: hairong
Date: Fri Oct 30 20:02:45 2009
New Revision: 831436

URL: http://svn.apache.org/viewvc?rev=831436&view=rev
Log:
HDFS-731. Support new Syncable interface in HDFS. Contributed by Hairong Kuang.
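
For context, the interface this change implements is org.apache.hadoop.fs.Syncable
from hadoop-common. The sketch below is inferred from the @Override'd methods in the
DFSClient.java diff in this commit (sync/hflush/hsync) and the removed @deprecated
Javadoc; the exact Javadoc wording in hadoop-common may differ.

    import java.io.IOException;

    /** Sketch of org.apache.hadoop.fs.Syncable, as implemented by this commit. */
    public interface Syncable {
      /** @deprecated As of HDFS 0.21.0, replaced by {@link #hflush()}. */
      @Deprecated
      void sync() throws IOException;

      /** Flush client buffers so the data is visible to new readers. */
      void hflush() throws IOException;

      /** Flush client buffers and sync data to durable storage on each replica. */
      void hsync() throws IOException;
    }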

Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/lib/hadoop-core-0.22.0-dev.jar
    hadoop/hdfs/trunk/lib/hadoop-core-test-0.22.0-dev.jar
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/FileAppendTest4.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestAbandonBlock.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationClient.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationDelete.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestRenameWhileOpen.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRestart.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestAccessTokenWithDFS.java

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=831436&r1=831435&r2=831436&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Fri Oct 30 20:02:45 2009
@@ -137,6 +137,8 @@
     HDFS-631. Rename configuration keys towards API standardization and
     backward compatibility. (Jitendra Nath Pandey via suresh)
 
+    HDFS-731. Support new Syncable interface in HDFS. (hairong)
+
   IMPROVEMENTS
 
     HDFS-381. Remove blocks from DataNode maps when corresponding file

Modified: hadoop/hdfs/trunk/lib/hadoop-core-0.22.0-dev.jar
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/lib/hadoop-core-0.22.0-dev.jar?rev=831436&r1=831435&r2=831436&view=diff
==============================================================================
Binary files - no diff available.

Modified: hadoop/hdfs/trunk/lib/hadoop-core-test-0.22.0-dev.jar
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/lib/hadoop-core-test-0.22.0-dev.jar?rev=831436&r1=831435&r2=831436&view=diff
==============================================================================
Binary files - no diff available.

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java?rev=831436&r1=831435&r2=831436&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java Fri Oct 30 20:02:45 2009
@@ -3495,23 +3495,24 @@
       }
     }
   
-    /**
-     * @deprecated As of HDFS 0.21.0, replaced by hflush
-     * @see #hflush()
-     */
+    @Override
     @Deprecated
     public synchronized void sync() throws IOException {
       hflush();
     }
     
     /**
-     * All data is flushed out to datanodes.
+     * Flushes out to all replicas of the block.
+     * The data is in the buffers of the DNs,
+     * but not necessarily in the DNs' OS buffers.
+     *
      * It is a synchronous operation. When it returns,
      * it guarantees that flushed data becomes visible to new readers.
      * It is not guaranteed that data has been flushed to 
      * persistent store on the datanode. 
      * Block allocations are persisted on namenode.
      */
+    @Override
     public synchronized void hflush() throws IOException {
       checkOpen();
       isClosed();
@@ -3562,6 +3563,18 @@
     }
 
     /**
+     * The expected semantics: all data has been flushed out to all replicas,
+     * and all replicas have done the POSIX fsync equivalent, i.e. the OS has
+     * flushed it to the disk device (but the disk may still have it in its
+     * cache).
+     *
+     * Right now it is implemented as hflush by default.
+     */
+    @Override
+    public synchronized void hsync() throws IOException {
+      hflush();
+    }
+    
+    /**
      * Waits till all existing data is flushed and confirmations 
      * received from datanodes. 
      */

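As a usage sketch of the new semantics (assuming a reachable HDFS cluster and the
standard FileSystem API; the path and payload below are hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class SyncableDemo {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        FSDataOutputStream out = fs.create(new Path("/tmp/syncable-demo.txt"));
        out.write("first record\n".getBytes());
        out.hflush(); // visible to new readers; not necessarily on DN disks
        out.write("second record\n".getBytes());
        out.hsync();  // with this commit, defaults to the same behavior as hflush()
        out.close();
      }
    }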
Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/FileAppendTest4.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/FileAppendTest4.java?rev=831436&r1=831435&r2=831436&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/FileAppendTest4.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/FileAppendTest4.java Fri Oct 30 20:02:45 2009
@@ -99,7 +99,7 @@
           // append flushedBytes bytes to the file
           out = fs.append(p);
           out.write(contents, oldFileLen, flushedBytes1);
-          out.sync();
+          out.hflush();
 
           // write another flushedBytes2 bytes to the file
           out.write(contents, oldFileLen + flushedBytes1, flushedBytes2);

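The remaining test changes below are the same mechanical rename; in client code
the migration is simply (out being any FSDataOutputStream):

    // before (deprecated as of HDFS 0.21.0, forwards to hflush()):
    out.sync();
    // after (same visibility guarantee, non-deprecated name):
    out.hflush();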
Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestAbandonBlock.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestAbandonBlock.java?rev=831436&r1=831435&r2=831436&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestAbandonBlock.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestAbandonBlock.java Fri Oct 30 20:02:45 2009
@@ -47,7 +47,7 @@
       for(int i = 0; i < 1024; i++) {
         fout.write(123);
       }
-      fout.sync();
+      fout.hflush();
   
       //try reading the block by someone
       final DFSClient dfsclient = new DFSClient(NameNode.getAddress(CONF), CONF);

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java?rev=831436&r1=831435&r2=831436&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java Fri Oct 30 20:02:45 2009
@@ -189,14 +189,14 @@
       // write to file
       int mid = AppendTestUtil.FILE_SIZE /2;
       stm.write(fileContents, 0, mid);
-      stm.sync();
+      stm.hflush();
       System.out.println("Wrote and Flushed first part of file.");
 
       // write the remainder of the file
       stm.write(fileContents, mid, AppendTestUtil.FILE_SIZE - mid);
       System.out.println("Written second part of file");
-      stm.sync();
-      stm.sync();
+      stm.hflush();
+      stm.hflush();
       System.out.println("Wrote and Flushed second part of file.");
 
       // verify that full blocks are sane
@@ -244,7 +244,7 @@
       int start = 0;
       for (start = 0; (start + 29) < AppendTestUtil.FILE_SIZE; ) {
         stm.write(fileContents, start, 29);
-        stm.sync();
+        stm.hflush();
         start += 29;
       }
       stm.write(fileContents, start, AppendTestUtil.FILE_SIZE -start);

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java?rev=831436&r1=831435&r2=831436&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java Fri Oct 30 20:02:45 2009
@@ -236,7 +236,7 @@
     FSDataOutputStream out = fs.append(p);
     final int len2 = (int)BLOCK_SIZE/2; 
     AppendTestUtil.write(out, len1, len2);
-    out.sync();
+    out.hflush();
     
     //c. Rename file to file.new.
     final Path pnew = new Path(p + ".new");
@@ -327,7 +327,7 @@
     stm = fs.append(p);
     // Append to a partial CRC chunk
     stm.write(fileContents, 1, 1);
-    stm.sync();
+    stm.hflush();
     // The partial CRC chunk is not full yet; close the file
     stm.close();
     System.out.println("Append 1 byte and closed the file " + p);
@@ -341,11 +341,11 @@
     // append to a partial CRC chunk
     stm.write(fileContents, 2, 1);
     // The partial chunk is not full yet, force to send a packet to DN
-    stm.sync();
+    stm.hflush();
     System.out.println("Append and flush 1 byte");
     // The partial chunk is not full yet, force to send another packet to DN
     stm.write(fileContents, 3, 2);
-    stm.sync();
+    stm.hflush();
     System.out.println("Append and flush 2 byte");
 
     // fill up the partial chunk and close the file

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java?rev=831436&r1=831435&r2=831436&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java Fri Oct 30 20:02:45 2009
@@ -512,7 +512,7 @@
 
       // write two full blocks.
       writeFile(stm, numBlocks * blockSize);
-      stm.sync();
+      stm.hflush();
 
       // rename file while keeping it open.
       Path fileRenamed = new Path("/filestatusRenamed.dat");
@@ -880,7 +880,7 @@
   }
 
   /**
-   * Create a file, write something, fsync but not close.
+   * Create a file, write something, hflush but not close.
    * Then change lease period and wait for lease recovery.
    * Finally, read the block directly from each Datanode and verify the content.
    */
@@ -905,7 +905,7 @@
       final Path fpath = new Path(f);
       FSDataOutputStream out = TestFileCreation.createFile(dfs, fpath, DATANODE_NUM);
       out.write("something".getBytes());
-      out.sync();
+      out.hflush();
 
       // set the soft and hard limit to be 1 second so that the
       // namenode triggers lease recovery
@@ -991,7 +991,7 @@
       final Path fpath = new Path(f);
       FSDataOutputStream out = TestFileCreation.createFile(dfs, fpath, DATANODE_NUM);
       out.write("something_dhruba".getBytes());
-      out.sync();    // ensure that block is allocated
+      out.hflush();    // ensure that block is allocated
 
       // shutdown last datanode in pipeline.
       cluster.stopDataNode(2);

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationClient.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationClient.java?rev=831436&r1=831435&r2=831436&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationClient.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationClient.java Fri Oct 30 20:02:45 2009
@@ -129,7 +129,7 @@
         for(; running; i++) {
           System.out.println(getName() + " writes " + i);
           out.write(i);
-          out.sync();
+          out.hflush();
           sleep(100);
         }
       }

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationDelete.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationDelete.java?rev=831436&r1=831435&r2=831436&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationDelete.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationDelete.java Fri Oct 30 20:02:45 2009
@@ -59,7 +59,7 @@
       System.out.println("testFileCreationDeleteParent: "
           + "Created file " + file1);
       TestFileCreation.writeFile(stm1, 1000);
-      stm1.sync();
+      stm1.hflush();
 
       // create file2.
       Path file2 = new Path("/file2");
@@ -67,7 +67,7 @@
       System.out.println("testFileCreationDeleteParent: "
           + "Created file " + file2);
       TestFileCreation.writeFile(stm2, 1000);
-      stm2.sync();
+      stm2.hflush();
 
       // rm dir
       fs.delete(dir, true);

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery2.java?rev=831436&r1=831435&r2=831436&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery2.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery2.java Fri Oct 30 20:02:45 2009
@@ -79,9 +79,9 @@
       System.out.println("size=" + size);
       stm.write(buffer, 0, size);
 
-      // sync file
-      AppendTestUtil.LOG.info("sync");
-      stm.sync();
+      // hflush file
+      AppendTestUtil.LOG.info("hflush");
+      stm.hflush();
       AppendTestUtil.LOG.info("leasechecker.interruptAndJoin()");
       dfs.dfs.leasechecker.interruptAndJoin();
 

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestRenameWhileOpen.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestRenameWhileOpen.java?rev=831436&r1=831435&r2=831436&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestRenameWhileOpen.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestRenameWhileOpen.java Fri Oct 30 20:02:45 2009
@@ -71,7 +71,7 @@
       System.out.println("testFileCreationDeleteParent: "
           + "Created file " + file1);
       TestFileCreation.writeFile(stm1);
-      stm1.sync();
+      stm1.hflush();
 
       // create file2.
       Path dir2 = new Path("/user/dir2");
@@ -80,7 +80,7 @@
       System.out.println("testFileCreationDeleteParent: "
           + "Created file " + file2);
       TestFileCreation.writeFile(stm2);
-      stm2.sync();
+      stm2.hflush();
 
       // move dir1 while file1 is open
       Path dir3 = new Path("/user/dir3");
@@ -155,7 +155,7 @@
       System.out.println("testFileCreationDeleteParent: "
           + "Created file " + file1);
       TestFileCreation.writeFile(stm1);
-      stm1.sync();
+      stm1.hflush();
 
       // create file2.
       Path dir2 = new Path("/user/dir2");
@@ -164,7 +164,7 @@
       System.out.println("testFileCreationDeleteParent: "
           + "Created file " + file2);
       TestFileCreation.writeFile(stm2);
-      stm2.sync();
+      stm2.hflush();
 
       // move dir1 while file1 is open
       Path dir3 = new Path("/user/dir3");
@@ -228,7 +228,7 @@
       System.out.println("testFileCreationDeleteParent: " +
                          "Created file " + file1);
       TestFileCreation.writeFile(stm1);
-      stm1.sync();
+      stm1.hflush();
 
       Path dir2 = new Path("/user/dir2");
       fs.mkdirs(dir2);
@@ -291,7 +291,7 @@
       System.out.println("testFileCreationDeleteParent: "
           + "Created file " + file1);
       TestFileCreation.writeFile(stm1);
-      stm1.sync();
+      stm1.hflush();
 
       Path dir2 = new Path("/user/dir2");
 

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRestart.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRestart.java?rev=831436&r1=831435&r2=831436&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRestart.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRestart.java Fri Oct 30 20:02:45 2009
@@ -96,7 +96,7 @@
       new Random().nextBytes(writeBuf);
       out = fs.create(src);
       out.write(writeBuf);
-      out.sync();
+      out.hflush();
       DataNode dn = cluster.getDataNodes().get(0);
       for (FSVolume volume : ((FSDataset)dn.data).volumes.volumes) {
         File currentDir = volume.getDir().getParentFile();

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestAccessTokenWithDFS.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestAccessTokenWithDFS.java?rev=831436&r1=831435&r2=831436&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestAccessTokenWithDFS.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestAccessTokenWithDFS.java Fri Oct 30 20:02:45 2009
@@ -203,7 +203,7 @@
       stm = fs.append(fileToAppend);
       int mid = rawData.length - 1;
       stm.write(rawData, 1, mid - 1);
-      stm.sync();
+      stm.hflush();
 
       /*
        * wait till token used in stm expires
@@ -255,7 +255,7 @@
       // write a partial block
       int mid = rawData.length - 1;
       stm.write(rawData, 0, mid);
-      stm.sync();
+      stm.hflush();
 
       /*
        * wait till token used in stm expires