You are viewing a plain text version of this content. The canonical (HTML) link was present in the original archive page but is not reproduced in this plain-text export.
Posted to hdfs-commits@hadoop.apache.org by sz...@apache.org on 2012/10/25 20:53:25 UTC

svn commit: r1402270 - in /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: CHANGES.txt src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java

Author: szetszwo
Date: Thu Oct 25 18:53:25 2012
New Revision: 1402270

URL: http://svn.apache.org/viewvc?rev=1402270&view=rev
Log:
HDFS-3948. Do not use hflush in TestWebHDFS.testNamenodeRestart() since the out stream returned by WebHdfsFileSystem does not support it. Contributed by Jing Zhao

Modified:
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1402270&r1=1402269&r2=1402270&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Thu Oct 25 18:53:25 2012
@@ -496,6 +496,10 @@ Release 2.0.3-alpha - Unreleased 
     HDFS-4022. Replication not happening for appended block.
     (Vinay via umamahesh)
 
+    HDFS-3948. Do not use hflush in TestWebHDFS.testNamenodeRestart() since the
+    out stream returned by WebHdfsFileSystem does not support it. (Jing Zhao
+    via szetszwo)
+
 Release 2.0.2-alpha - 2012-09-07 
 
   INCOMPATIBLE CHANGES

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java?rev=1402270&r1=1402269&r2=1402270&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java Thu Oct 25 18:53:25 2012
@@ -876,7 +876,14 @@ public class TestDFSClientRetries {
       new Random().nextBytes(bytes);
       out4.write(bytes);
       out4.write(bytes);
-      out4.hflush();
+      if (isWebHDFS) {
+        // WebHDFS does not support hflush. To avoid DataNode communicating with
+        // NN while we're shutting down NN, we call out4.close() to finish
+        // writing the data
+        out4.close();
+      } else {
+        out4.hflush();
+      }
 
       //shutdown namenode
       assertTrue(HdfsUtils.isHealthy(uri));
@@ -889,10 +896,12 @@ public class TestDFSClientRetries {
         public void run() {
           try {
             //write some more data and then close the file
-            out4.write(bytes);
-            out4.write(bytes);
-            out4.write(bytes);
-            out4.close();
+            if (!isWebHDFS) {
+              out4.write(bytes);
+              out4.write(bytes);
+              out4.write(bytes);
+              out4.close();
+            }
           } catch (Exception e) {
             exceptions.add(e);
           }
@@ -975,7 +984,11 @@ public class TestDFSClientRetries {
           Assert.assertEquals(String.format("count=%d", count),
               bytes[count % bytes.length], (byte)r);
         }
-        Assert.assertEquals(5 * bytes.length, count);
+        if (!isWebHDFS) {
+          Assert.assertEquals(5 * bytes.length, count);
+        } else {
+          Assert.assertEquals(2 * bytes.length, count);
+        }
         in.close();
       }