Posted to hdfs-commits@hadoop.apache.org by at...@apache.org on 2013/06/18 23:05:16 UTC

svn commit: r1494303 - in /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/ src/test/java/org/apache/hadoop/hdfs/

Author: atm
Date: Tue Jun 18 21:05:16 2013
New Revision: 1494303

URL: http://svn.apache.org/r1494303
Log:
HDFS-4906. HDFS Output streams should not accept writes after being closed. Contributed by Aaron T. Myers.
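
For context, a minimal client-side sketch of the behavior this change introduces (assuming an HDFS FileSystem is reachable through the default Configuration; the class name, path, and payload are illustrative):

    import java.io.OutputStream;
    import java.nio.channels.ClosedChannelException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class WriteAfterCloseSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        OutputStream out = fs.create(new Path("/example"));
        out.write("foo".getBytes());
        out.close();
        try {
          out.write("bar".getBytes());
        } catch (ClosedChannelException cce) {
          // After this change the stream rejects the write with a
          // ClosedChannelException; previously it threw a generic
          // IOException with the message "DFSOutputStream is closed".
        }
        out.close(); // A second close remains a no-op.
      }
    }

Throwing the standard java.nio ClosedChannelException lets callers catch the condition by type instead of string-matching the exception message, which is what the TestMultiThreadedHflush change below switches to.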

Added:
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClose.java
Modified:
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1494303&r1=1494302&r2=1494303&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Tue Jun 18 21:05:16 2013
@@ -582,6 +582,9 @@ Release 2.1.0-beta - UNRELEASED
     HDFS-4845. FSNamesystem.deleteInternal should acquire write-lock before
     changing the inode map.  (Arpit Agarwal via szetszwo)
 
+    HDFS-4906. HDFS Output streams should not accept writes after being
+    closed. (atm)
+
   BREAKDOWN OF HDFS-347 SUBTASKS AND RELATED JIRAS
 
     HDFS-4353. Encapsulate connections to peers in Peer and PeerServer classes.

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java?rev=1494303&r1=1494302&r2=1494303&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java Tue Jun 18 21:05:16 2013
@@ -30,6 +30,7 @@ import java.io.OutputStream;
 import java.net.InetSocketAddress;
 import java.net.Socket;
 import java.nio.BufferOverflowException;
+import java.nio.channels.ClosedChannelException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.EnumSet;
@@ -1299,10 +1300,10 @@ public class DFSOutputStream extends FSO
     return sock;
   }
 
-  private void isClosed() throws IOException {
+  protected void checkClosed() throws IOException {
     if (closed) {
       IOException e = lastException;
-      throw e != null ? e : new IOException("DFSOutputStream is closed");
+      throw e != null ? e : new ClosedChannelException();
     }
   }
 
@@ -1471,7 +1472,7 @@ public class DFSOutputStream extends FSO
           break;
         }
       }
-      isClosed();
+      checkClosed();
       queueCurrentPacket();
     }
   }
@@ -1481,7 +1482,7 @@ public class DFSOutputStream extends FSO
   protected synchronized void writeChunk(byte[] b, int offset, int len, byte[] checksum) 
                                                         throws IOException {
     dfsClient.checkOpen();
-    isClosed();
+    checkClosed();
 
     int cklen = checksum.length;
     int bytesPerChecksum = this.checksum.getBytesPerChecksum(); 
@@ -1608,7 +1609,7 @@ public class DFSOutputStream extends FSO
   private void flushOrSync(boolean isSync, EnumSet<SyncFlag> syncFlags)
       throws IOException {
     dfsClient.checkOpen();
-    isClosed();
+    checkClosed();
     try {
       long toWaitFor;
       long lastBlockLength = -1L;
@@ -1695,7 +1696,7 @@ public class DFSOutputStream extends FSO
           // If we got an error here, it might be because some other thread called
           // close before our hflush completed. In that case, we should throw an
           // exception that the stream is closed.
-          isClosed();
+          checkClosed();
           // If we aren't closed but failed to sync, we should expose that to the
           // caller.
           throw ioe;
@@ -1740,7 +1741,7 @@ public class DFSOutputStream extends FSO
    */
   public synchronized int getCurrentBlockReplication() throws IOException {
     dfsClient.checkOpen();
-    isClosed();
+    checkClosed();
     if (streamer == null) {
       return blockReplication; // no pipeline, return repl factor of file
     }
@@ -1759,7 +1760,7 @@ public class DFSOutputStream extends FSO
     long toWaitFor;
     synchronized (this) {
       dfsClient.checkOpen();
-      isClosed();
+      checkClosed();
       //
       // If there is data in the current buffer, send it across
       //
@@ -1776,7 +1777,7 @@ public class DFSOutputStream extends FSO
     }
     synchronized (dataQueue) {
       while (!closed) {
-        isClosed();
+        checkClosed();
         if (lastAckedSeqno >= seqno) {
           break;
         }
@@ -1788,7 +1789,7 @@ public class DFSOutputStream extends FSO
         }
       }
     }
-    isClosed();
+    checkClosed();
   }
 
   private synchronized void start() {

Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClose.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClose.java?rev=1494303&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClose.java (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClose.java Tue Jun 18 21:05:16 2013
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.channels.ClosedChannelException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.Test;
+
+public class TestClose {
+
+  @Test
+  public void testWriteAfterClose() throws IOException {
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .build();
+    
+    try {
+      final byte[] data = "foo".getBytes();
+      
+      FileSystem fs = FileSystem.get(conf);
+      OutputStream out = fs.create(new Path("/test"));
+      
+      out.write(data);
+      out.close();
+      try {
+        // Should fail.
+        out.write(data);
+        fail("Should not have been able to write more data after file is closed.");
+      } catch (ClosedChannelException cce) {
+        // We got the correct exception. Ignoring.
+      }
+      // Should succeed. Double closes are OK.
+      out.close();
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+  
+}

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java?rev=1494303&r1=1494302&r2=1494303&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java Tue Jun 18 21:05:16 2013
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs;
 
 import java.io.IOException;
+import java.nio.channels.ClosedChannelException;
 import java.util.ArrayList;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
@@ -169,12 +170,9 @@ public class TestMultiThreadedHflush {
                 while (true) {
                   try {
                     stm.hflush();
-                  } catch (IOException ioe) {
-                    if (!ioe.toString().contains("DFSOutputStream is closed")) {
-                      throw ioe;
-                    } else {
-                      return;
-                    }
+                  } catch (ClosedChannelException ioe) {
+                    // Expected exception caught. Ignoring.
+                    return;
                   }
                 }
               } catch (Throwable t) {