Posted to common-commits@hadoop.apache.org by st...@apache.org on 2014/07/03 14:04:52 UTC

svn commit: r1607596 [1/5] - in /hadoop/common/trunk/hadoop-common-project/hadoop-common/src: main/java/org/apache/hadoop/fs/ main/java/org/apache/hadoop/fs/ftp/ main/java/org/apache/hadoop/fs/s3/ main/java/org/apache/hadoop/fs/s3native/ site/markdown/...

Author: stevel
Date: Thu Jul  3 12:04:50 2014
New Revision: 1607596

URL: http://svn.apache.org/r1607596
Log:
HADOOP-9361: Strictly define FileSystem APIs

Added:
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSExceptionMessages.java   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/site/markdown/
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/extending.md
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/fsdatainputstream.md
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/index.md
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/introduction.md
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/model.md
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/notation.md
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/testing.md
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractBondedFSContract.java   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractConcatTest.java   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMkdirTest.java   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRenameTest.java   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContract.java   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContractTestBase.java   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractOptions.java   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ftp/
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ftp/FTPContract.java   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ftp/TestFTPContractCreate.java   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ftp/TestFTPContractDelete.java   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ftp/TestFTPContractMkdir.java   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ftp/TestFTPContractOpen.java   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ftp/TestFTPContractRename.java   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ftp/package.html   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/LocalFSContract.java   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractAppend.java   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractCreate.java   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractDelete.java   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractLoaded.java   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractMkdir.java   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractOpen.java   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractRename.java   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractSeek.java   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/RawlocalFSContract.java   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawLocalContractUnderlyingFileBehavior.java   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawlocalContractAppend.java   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawlocalContractCreate.java   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawlocalContractDelete.java   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawlocalContractMkdir.java   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawlocalContractOpen.java   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawlocalContractRename.java   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawlocalContractSeek.java   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/s3n/
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/s3n/NativeS3Contract.java   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/s3n/TestS3NContractCreate.java   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/s3n/TestS3NContractDelete.java   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/s3n/TestS3NContractMkdir.java   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/s3n/TestS3NContractOpen.java   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/s3n/TestS3NContractRename.java   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/s3n/TestS3NContractRootDir.java   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/s3n/TestS3NContractSeek.java   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/resources/contract/
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/resources/contract/ftp.xml   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/resources/contract/localfs.xml   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/resources/contract/rawlocal.xml   (with props)
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/resources/contract/s3n.xml   (with props)
Modified:
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BufferedFSInputStream.java
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStream.java
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputChecker.java
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPInputStream.java
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3FileSystem.java
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/Jets3tNativeFileSystemStore.java
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java

Modified: hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BufferedFSInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BufferedFSInputStream.java?rev=1607596&r1=1607595&r2=1607596&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BufferedFSInputStream.java (original)
+++ hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BufferedFSInputStream.java Thu Jul  3 12:04:50 2014
@@ -18,6 +18,7 @@
 package org.apache.hadoop.fs;
 
 import java.io.BufferedInputStream;
+import java.io.EOFException;
 import java.io.FileDescriptor;
 import java.io.IOException;
 
@@ -51,6 +52,9 @@ implements Seekable, PositionedReadable,
 
   @Override
   public long getPos() throws IOException {
+    if (in == null) {
+      throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
+    }
     return ((FSInputStream)in).getPos()-(count-pos);
   }
 
@@ -66,8 +70,11 @@ implements Seekable, PositionedReadable,
 
   @Override
   public void seek(long pos) throws IOException {
-    if( pos<0 ) {
-      return;
+    if (in == null) {
+      throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
+    }
+    if (pos < 0) {
+      throw new EOFException(FSExceptionMessages.NEGATIVE_SEEK);
     }
     if (this.pos != this.count) {
       // optimize: check if the pos is in the buffer

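Editor's note: the two guards above tighten the seek contract of the buffered stream: calling getPos() or seek() on a closed stream now fails with an IOException, and a negative seek target raises an EOFException instead of being silently ignored (FSInputChecker and RawLocalFileSystem below get the same treatment). A minimal caller-side sketch of the new behaviour, assuming an ordinary readable local file (the path is illustrative only):

    import java.io.EOFException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class NegativeSeekContractSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.getLocal(new Configuration());
        // Illustrative path: any readable local file will do.
        try (FSDataInputStream in = fs.open(new Path("/tmp/example.txt"))) {
          try {
            in.seek(-1);               // silently ignored before this patch
          } catch (EOFException expected) {
            // now rejected with FSExceptionMessages.NEGATIVE_SEEK
          }
        }
      }
    }
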
Modified: hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java?rev=1607596&r1=1607595&r2=1607596&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java (original)
+++ hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java Thu Jul  3 12:04:50 2014
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.fs;
 
+import java.io.EOFException;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStream;
@@ -318,8 +319,8 @@ public abstract class ChecksumFileSystem
 
     @Override
     public synchronized void seek(long pos) throws IOException {
-      if(pos>getFileLength()) {
-        throw new IOException("Cannot seek after EOF");
+      if (pos > getFileLength()) {
+        throw new EOFException("Cannot seek after EOF");
       }
       super.seek(pos);
     }

Modified: hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStream.java?rev=1607596&r1=1607595&r2=1607596&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStream.java (original)
+++ hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStream.java Thu Jul  3 12:04:50 2014
@@ -67,7 +67,10 @@ public class FSDataOutputStream extends 
 
     @Override
     public void close() throws IOException {
-      out.close();
+      // ensure close works even if a null reference was passed in
+      if (out != null) {
+        out.close();
+      }
     }
   }
 

Added: hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSExceptionMessages.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSExceptionMessages.java?rev=1607596&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSExceptionMessages.java (added)
+++ hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSExceptionMessages.java Thu Jul  3 12:04:50 2014
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs;
+
+/**
+ * Standard strings to use in exception messages in filesystems
+ * HDFS is used as the reference source of the strings
+ */
+public class FSExceptionMessages {
+
+  /**
+   * The operation failed because the stream is closed: {@value}
+   */
+  public static final String STREAM_IS_CLOSED = "Stream is closed!";
+
+  /**
+   * Negative offset seek forbidden : {@value}
+   */
+  public static final String NEGATIVE_SEEK =
+    "Cannot seek to a negative offset";
+
+  /**
+   * Seeks : {@value}
+   */
+  public static final String CANNOT_SEEK_PAST_EOF =
+      "Attempted to seek or read past the end of the file";
+}

Propchange: hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSExceptionMessages.java
------------------------------------------------------------------------------
    svn:eol-style = native

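Editor's note: these constants are meant to be reused by stream implementations so that every filesystem reports the same message text for the same failure. A sketch of how a Seekable stream might use them (the class and fields here are hypothetical, not part of the patch):

    import java.io.EOFException;
    import java.io.IOException;
    import org.apache.hadoop.fs.FSExceptionMessages;

    // Hypothetical stream showing the intended use of the shared messages.
    class ExampleSeekableStream {
      private boolean closed;
      private long pos;
      private long length;

      public void seek(long target) throws IOException {
        if (closed) {
          throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
        }
        if (target < 0) {
          throw new EOFException(FSExceptionMessages.NEGATIVE_SEEK);
        }
        if (target > length) {
          throw new EOFException(FSExceptionMessages.CANNOT_SEEK_PAST_EOF);
        }
        pos = target;
      }
    }
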
Modified: hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputChecker.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputChecker.java?rev=1607596&r1=1607595&r2=1607596&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputChecker.java (original)
+++ hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputChecker.java Thu Jul  3 12:04:50 2014
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.fs;
 
+import java.io.EOFException;
 import java.io.IOException;
 import java.io.InputStream;
 import java.util.zip.Checksum;
@@ -394,8 +395,8 @@ abstract public class FSInputChecker ext
 
   @Override
   public synchronized void seek(long pos) throws IOException {
-    if( pos<0 ) {
-      return;
+    if( pos < 0 ) {
+      throw new EOFException(FSExceptionMessages.NEGATIVE_SEEK);
     }
     // optimize: check if the pos is in the buffer
     long start = chunkPos - this.count;

Modified: hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java?rev=1607596&r1=1607595&r2=1607596&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java (original)
+++ hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java Thu Jul  3 12:04:50 2014
@@ -23,6 +23,7 @@ import com.google.common.annotations.Vis
 
 import java.io.BufferedOutputStream;
 import java.io.DataOutput;
+import java.io.EOFException;
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileNotFoundException;
@@ -105,6 +106,10 @@ public class RawLocalFileSystem extends 
     
     @Override
     public void seek(long pos) throws IOException {
+      if (pos < 0) {
+        throw new EOFException(
+          FSExceptionMessages.NEGATIVE_SEEK);
+      }
       fis.getChannel().position(pos);
       this.position = pos;
     }
@@ -256,7 +261,7 @@ public class RawLocalFileSystem extends 
       boolean createParent, int bufferSize, short replication, long blockSize,
       Progressable progress) throws IOException {
     if (exists(f) && !overwrite) {
-      throw new IOException("File already exists: "+f);
+      throw new FileAlreadyExistsException("File already exists: " + f);
     }
     Path parent = f.getParent();
     if (parent != null && !mkdirs(parent)) {
@@ -272,7 +277,7 @@ public class RawLocalFileSystem extends 
       EnumSet<CreateFlag> flags, int bufferSize, short replication, long blockSize,
       Progressable progress) throws IOException {
     if (exists(f) && !flags.contains(CreateFlag.OVERWRITE)) {
-      throw new IOException("File already exists: "+f);
+      throw new FileAlreadyExistsException("File already exists: " + f);
     }
     return new FSDataOutputStream(new BufferedOutputStream(
         new LocalFSFileOutputStream(f, false), bufferSize), statistics);
@@ -344,6 +349,10 @@ public class RawLocalFileSystem extends 
   @Override
   public boolean delete(Path p, boolean recursive) throws IOException {
     File f = pathToFile(p);
+    if (!f.exists()) {
+      //no path, return false "nothing to delete"
+      return false;
+    }
     if (f.isFile()) {
       return f.delete();
     } else if (!recursive && f.isDirectory() && 
@@ -412,10 +421,14 @@ public class RawLocalFileSystem extends 
     if(parent != null) {
       File parent2f = pathToFile(parent);
       if(parent2f != null && parent2f.exists() && !parent2f.isDirectory()) {
-        throw new FileAlreadyExistsException("Parent path is not a directory: " 
+        throw new ParentNotDirectoryException("Parent path is not a directory: "
             + parent);
       }
     }
+    if (p2f.exists() && !p2f.isDirectory()) {
+      throw new FileNotFoundException("Destination exists" +
+              " and is not a directory: " + p2f.getCanonicalPath());
+    }
     return (parent == null || mkdirs(parent)) &&
       (p2f.mkdir() || p2f.isDirectory());
   }

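Editor's note: the create() and delete() changes above align the raw local filesystem with HDFS semantics: creating an existing file without the overwrite flag fails with FileAlreadyExistsException, and deleting a nonexistent path returns false rather than failing. A caller-side sketch, assuming write access to /tmp (paths illustrative only):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileAlreadyExistsException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class RawLocalSemanticsSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.getLocal(new Configuration()).getRawFileSystem();

        Path missing = new Path("/tmp/does-not-exist");
        boolean deleted = fs.delete(missing, true);   // now false, not an exception
        System.out.println("deleted = " + deleted);

        Path existing = new Path("/tmp/raw-local-example");
        fs.create(existing, true).close();            // create (or overwrite) the file
        try {
          fs.create(existing, false);                 // overwrite == false
        } catch (FileAlreadyExistsException expected) {
          // raised instead of a plain IOException after this patch
        }
      }
    }
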
Modified: hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java?rev=1607596&r1=1607595&r2=1607596&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java (original)
+++ hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java Thu Jul  3 12:04:50 2014
@@ -20,6 +20,7 @@ package org.apache.hadoop.fs.ftp;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStream;
+import java.net.ConnectException;
 import java.net.URI;
 
 import org.apache.commons.logging.Log;
@@ -33,11 +34,14 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.Progressable;
 
 /**
@@ -56,6 +60,12 @@ public class FTPFileSystem extends FileS
   public static final int DEFAULT_BUFFER_SIZE = 1024 * 1024;
 
   public static final int DEFAULT_BLOCK_SIZE = 4 * 1024;
+  public static final String FS_FTP_USER_PREFIX = "fs.ftp.user.";
+  public static final String FS_FTP_HOST = "fs.ftp.host";
+  public static final String FS_FTP_HOST_PORT = "fs.ftp.host.port";
+  public static final String FS_FTP_PASSWORD_PREFIX = "fs.ftp.password.";
+  public static final String E_SAME_DIRECTORY_ONLY =
+      "only same directory renames are supported";
 
   private URI uri;
 
@@ -75,11 +85,11 @@ public class FTPFileSystem extends FileS
     super.initialize(uri, conf);
     // get host information from uri (overrides info in conf)
     String host = uri.getHost();
-    host = (host == null) ? conf.get("fs.ftp.host", null) : host;
+    host = (host == null) ? conf.get(FS_FTP_HOST, null) : host;
     if (host == null) {
       throw new IOException("Invalid host specified");
     }
-    conf.set("fs.ftp.host", host);
+    conf.set(FS_FTP_HOST, host);
 
     // get port information from uri, (overrides info in conf)
     int port = uri.getPort();
@@ -96,11 +106,11 @@ public class FTPFileSystem extends FileS
       }
     }
     String[] userPasswdInfo = userAndPassword.split(":");
-    conf.set("fs.ftp.user." + host, userPasswdInfo[0]);
+    conf.set(FS_FTP_USER_PREFIX + host, userPasswdInfo[0]);
     if (userPasswdInfo.length > 1) {
-      conf.set("fs.ftp.password." + host, userPasswdInfo[1]);
+      conf.set(FS_FTP_PASSWORD_PREFIX + host, userPasswdInfo[1]);
     } else {
-      conf.set("fs.ftp.password." + host, null);
+      conf.set(FS_FTP_PASSWORD_PREFIX + host, null);
     }
     setConf(conf);
     this.uri = uri;
@@ -115,23 +125,24 @@ public class FTPFileSystem extends FileS
   private FTPClient connect() throws IOException {
     FTPClient client = null;
     Configuration conf = getConf();
-    String host = conf.get("fs.ftp.host");
-    int port = conf.getInt("fs.ftp.host.port", FTP.DEFAULT_PORT);
-    String user = conf.get("fs.ftp.user." + host);
-    String password = conf.get("fs.ftp.password." + host);
+    String host = conf.get(FS_FTP_HOST);
+    int port = conf.getInt(FS_FTP_HOST_PORT, FTP.DEFAULT_PORT);
+    String user = conf.get(FS_FTP_USER_PREFIX + host);
+    String password = conf.get(FS_FTP_PASSWORD_PREFIX + host);
     client = new FTPClient();
     client.connect(host, port);
     int reply = client.getReplyCode();
     if (!FTPReply.isPositiveCompletion(reply)) {
-      throw new IOException("Server - " + host
-          + " refused connection on port - " + port);
+      throw NetUtils.wrapException(host, port,
+                   NetUtils.UNKNOWN_HOST, 0,
+                   new ConnectException("Server response " + reply));
     } else if (client.login(user, password)) {
       client.setFileTransferMode(FTP.BLOCK_TRANSFER_MODE);
       client.setFileType(FTP.BINARY_FILE_TYPE);
       client.setBufferSize(DEFAULT_BUFFER_SIZE);
     } else {
       throw new IOException("Login failed on server - " + host + ", port - "
-          + port);
+          + port + " as user '" + user + "'");
     }
 
     return client;
@@ -179,7 +190,7 @@ public class FTPFileSystem extends FileS
     FileStatus fileStat = getFileStatus(client, absolute);
     if (fileStat.isDirectory()) {
       disconnect(client);
-      throw new IOException("Path " + file + " is a directory.");
+      throw new FileNotFoundException("Path " + file + " is a directory.");
     }
     client.allocate(bufferSize);
     Path parent = absolute.getParent();
@@ -214,12 +225,18 @@ public class FTPFileSystem extends FileS
     final FTPClient client = connect();
     Path workDir = new Path(client.printWorkingDirectory());
     Path absolute = makeAbsolute(workDir, file);
-    if (exists(client, file)) {
-      if (overwrite) {
-        delete(client, file);
+    FileStatus status;
+    try {
+      status = getFileStatus(client, file);
+    } catch (FileNotFoundException fnfe) {
+      status = null;
+    }
+    if (status != null) {
+      if (overwrite && !status.isDirectory()) {
+        delete(client, file, false);
       } else {
         disconnect(client);
-        throw new IOException("File already exists: " + file);
+        throw new FileAlreadyExistsException("File already exists: " + file);
       }
     }
     
@@ -272,14 +289,13 @@ public class FTPFileSystem extends FileS
    * Convenience method, so that we don't open a new connection when using this
    * method from within another method. Otherwise every API invocation incurs
    * the overhead of opening/closing a TCP connection.
+   * @throws IOException on IO problems other than FileNotFoundException
    */
-  private boolean exists(FTPClient client, Path file) {
+  private boolean exists(FTPClient client, Path file) throws IOException {
     try {
       return getFileStatus(client, file) != null;
     } catch (FileNotFoundException fnfe) {
       return false;
-    } catch (IOException ioe) {
-      throw new FTPException("Failed to get file status", ioe);
     }
   }
 
@@ -294,12 +310,6 @@ public class FTPFileSystem extends FileS
     }
   }
 
-  /** @deprecated Use delete(Path, boolean) instead */
-  @Deprecated
-  private boolean delete(FTPClient client, Path file) throws IOException {
-    return delete(client, file, false);
-  }
-
   /**
    * Convenience method, so that we don't open a new connection when using this
    * method from within another method. Otherwise every API invocation incurs
@@ -310,9 +320,14 @@ public class FTPFileSystem extends FileS
     Path workDir = new Path(client.printWorkingDirectory());
     Path absolute = makeAbsolute(workDir, file);
     String pathName = absolute.toUri().getPath();
-    FileStatus fileStat = getFileStatus(client, absolute);
-    if (fileStat.isFile()) {
-      return client.deleteFile(pathName);
+    try {
+      FileStatus fileStat = getFileStatus(client, absolute);
+      if (fileStat.isFile()) {
+        return client.deleteFile(pathName);
+      }
+    } catch (FileNotFoundException e) {
+      //the file is not there
+      return false;
     }
     FileStatus[] dirEntries = listStatus(client, absolute);
     if (dirEntries != null && dirEntries.length > 0 && !(recursive)) {
@@ -491,7 +506,7 @@ public class FTPFileSystem extends FileS
         created = created && client.makeDirectory(pathName);
       }
     } else if (isFile(client, absolute)) {
-      throw new IOException(String.format(
+      throw new ParentNotDirectoryException(String.format(
           "Can't make directory for path %s since it is a file.", absolute));
     }
     return created;
@@ -528,6 +543,23 @@ public class FTPFileSystem extends FileS
   }
 
   /**
+   * Probe for a path being a parent of another
+   * @param parent parent path
+   * @param child possible child path
+   * @return true if the parent's path matches the start of the child's
+   */
+  private boolean isParentOf(Path parent, Path child) {
+    URI parentURI = parent.toUri();
+    String parentPath = parentURI.getPath();
+    if (!parentPath.endsWith("/")) {
+      parentPath += "/";
+    }
+    URI childURI = child.toUri();
+    String childPath = childURI.getPath();
+    return childPath.startsWith(parentPath);
+  }
+
+  /**
    * Convenience method, so that we don't open a new connection when using this
    * method from within another method. Otherwise every API invocation incurs
    * the overhead of opening/closing a TCP connection.
@@ -544,20 +576,31 @@ public class FTPFileSystem extends FileS
     Path absoluteSrc = makeAbsolute(workDir, src);
     Path absoluteDst = makeAbsolute(workDir, dst);
     if (!exists(client, absoluteSrc)) {
-      throw new IOException("Source path " + src + " does not exist");
+      throw new FileNotFoundException("Source path " + src + " does not exist");
+    }
+    if (isDirectory(absoluteDst)) {
+      // destination is a directory: rename goes underneath it with the
+      // source name
+      absoluteDst = new Path(absoluteDst, absoluteSrc.getName());
     }
     if (exists(client, absoluteDst)) {
-      throw new IOException("Destination path " + dst
-          + " already exist, cannot rename!");
+      throw new FileAlreadyExistsException("Destination path " + dst
+          + " already exists");
     }
     String parentSrc = absoluteSrc.getParent().toUri().toString();
     String parentDst = absoluteDst.getParent().toUri().toString();
-    String from = src.getName();
-    String to = dst.getName();
+    if (isParentOf(absoluteSrc, absoluteDst)) {
+      throw new IOException("Cannot rename " + absoluteSrc + " under itself"
+      + " : "+ absoluteDst);
+    }
+
     if (!parentSrc.equals(parentDst)) {
-      throw new IOException("Cannot rename parent(source): " + parentSrc
-          + ", parent(destination):  " + parentDst);
+      throw new IOException("Cannot rename source: " + absoluteSrc
+          + " to " + absoluteDst
+          + " -"+ E_SAME_DIRECTORY_ONLY);
     }
+    String from = absoluteSrc.getName();
+    String to = absoluteDst.getName();
     client.changeWorkingDirectory(parentSrc);
     boolean renamed = client.rename(from, to);
     return renamed;

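Editor's note: the configuration keys previously hard-coded as string literals are now published as public constants on FTPFileSystem, so callers can set them without risking typos. A connection-setup sketch, where the host, credentials and path are placeholders:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.ftp.FTPFileSystem;

    public class FtpConfigSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        String host = "ftp.example.org";                        // placeholder host
        conf.set(FTPFileSystem.FS_FTP_HOST, host);
        conf.setInt(FTPFileSystem.FS_FTP_HOST_PORT, 21);
        conf.set(FTPFileSystem.FS_FTP_USER_PREFIX + host, "user");
        conf.set(FTPFileSystem.FS_FTP_PASSWORD_PREFIX + host, "secret");

        FileSystem fs = FileSystem.get(URI.create("ftp://" + host + "/"), conf);
        System.out.println(fs.exists(new Path("/pub")));        // placeholder path
      }
    }
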
Modified: hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPInputStream.java?rev=1607596&r1=1607595&r2=1607596&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPInputStream.java (original)
+++ hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPInputStream.java Thu Jul  3 12:04:50 2014
@@ -103,7 +103,7 @@ public class FTPInputStream extends FSIn
   @Override
   public synchronized void close() throws IOException {
     if (closed) {
-      throw new IOException("Stream closed");
+      return;
     }
     super.close();
     closed = true;

Modified: hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3FileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3FileSystem.java?rev=1607596&r1=1607595&r2=1607596&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3FileSystem.java (original)
+++ hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3FileSystem.java Thu Jul  3 12:04:50 2014
@@ -32,6 +32,7 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -226,7 +227,7 @@ public class S3FileSystem extends FileSy
       if (overwrite) {
         delete(file, true);
       } else {
-        throw new IOException("File already exists: " + file);
+        throw new FileAlreadyExistsException("File already exists: " + file);
       }
     } else {
       Path parent = file.getParent();

Modified: hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/Jets3tNativeFileSystemStore.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/Jets3tNativeFileSystemStore.java?rev=1607596&r1=1607595&r2=1607596&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/Jets3tNativeFileSystemStore.java (original)
+++ hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/Jets3tNativeFileSystemStore.java Thu Jul  3 12:04:50 2014
@@ -22,6 +22,7 @@ import static org.apache.hadoop.fs.s3nat
 
 import java.io.BufferedInputStream;
 import java.io.ByteArrayInputStream;
+import java.io.EOFException;
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileNotFoundException;
@@ -32,17 +33,19 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSExceptionMessages;
 import org.apache.hadoop.fs.s3.S3Credentials;
 import org.apache.hadoop.fs.s3.S3Exception;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.security.AccessControlException;
 import org.jets3t.service.S3Service;
 import org.jets3t.service.S3ServiceException;
 import org.jets3t.service.ServiceException;
 import org.jets3t.service.StorageObjectsChunk;
+import org.jets3t.service.impl.rest.HttpException;
 import org.jets3t.service.impl.rest.httpclient.RestS3Service;
 import org.jets3t.service.model.MultipartPart;
 import org.jets3t.service.model.MultipartUpload;
@@ -51,6 +54,8 @@ import org.jets3t.service.model.S3Object
 import org.jets3t.service.model.StorageObject;
 import org.jets3t.service.security.AWSCredentials;
 import org.jets3t.service.utils.MultipartUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
@@ -66,8 +71,8 @@ class Jets3tNativeFileSystemStore implem
 
   private String serverSideEncryptionAlgorithm;
   
-  public static final Log LOG =
-      LogFactory.getLog(Jets3tNativeFileSystemStore.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(Jets3tNativeFileSystemStore.class);
 
   @Override
   public void initialize(URI uri, Configuration conf) throws IOException {
@@ -79,7 +84,7 @@ class Jets3tNativeFileSystemStore implem
             s3Credentials.getSecretAccessKey());
       this.s3Service = new RestS3Service(awsCredentials);
     } catch (S3ServiceException e) {
-      handleS3ServiceException(e);
+      handleException(e);
     }
     multipartEnabled =
         conf.getBoolean("fs.s3n.multipart.uploads.enabled", false);
@@ -115,16 +120,10 @@ class Jets3tNativeFileSystemStore implem
         object.setMd5Hash(md5Hash);
       }
       s3Service.putObject(bucket, object);
-    } catch (S3ServiceException e) {
-      handleS3ServiceException(e);
+    } catch (ServiceException e) {
+      handleException(e, key);
     } finally {
-      if (in != null) {
-        try {
-          in.close();
-        } catch (IOException e) {
-          // ignore
-        }
-      }
+      IOUtils.closeStream(in);
     }
   }
 
@@ -147,10 +146,8 @@ class Jets3tNativeFileSystemStore implem
     try {
       mpUtils.uploadObjects(bucket.getName(), s3Service,
                             objectsToUploadAsMultipart, null);
-    } catch (ServiceException e) {
-      handleServiceException(e);
     } catch (Exception e) {
-      throw new S3Exception(e);
+      handleException(e, key);
     }
   }
   
@@ -163,8 +160,8 @@ class Jets3tNativeFileSystemStore implem
       object.setContentLength(0);
       object.setServerSideEncryptionAlgorithm(serverSideEncryptionAlgorithm);
       s3Service.putObject(bucket, object);
-    } catch (S3ServiceException e) {
-      handleS3ServiceException(e);
+    } catch (ServiceException e) {
+      handleException(e, key);
     }
   }
 
@@ -172,20 +169,21 @@ class Jets3tNativeFileSystemStore implem
   public FileMetadata retrieveMetadata(String key) throws IOException {
     StorageObject object = null;
     try {
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("Getting metadata for key: " + key + " from bucket:" + bucket.getName());
-      }
+      LOG.debug("Getting metadata for key: {} from bucket: {}",
+          key, bucket.getName());
       object = s3Service.getObjectDetails(bucket.getName(), key);
       return new FileMetadata(key, object.getContentLength(),
           object.getLastModifiedDate().getTime());
 
     } catch (ServiceException e) {
-      // Following is brittle. Is there a better way?
-      if ("NoSuchKey".equals(e.getErrorCode())) {
-        return null; //return null if key not found
+      try {
+        // process
+        handleException(e, key);
+        return null;
+      } catch (FileNotFoundException fnfe) {
+        // and downgrade missing files
+        return null;
       }
-      handleServiceException(e);
-      return null; //never returned - keep compiler happy
     } finally {
       if (object != null) {
         object.closeDataInputStream();
@@ -204,13 +202,12 @@ class Jets3tNativeFileSystemStore implem
   @Override
   public InputStream retrieve(String key) throws IOException {
     try {
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("Getting key: " + key + " from bucket:" + bucket.getName());
-      }
+      LOG.debug("Getting key: {} from bucket: {}",
+          key, bucket.getName());
       S3Object object = s3Service.getObject(bucket.getName(), key);
       return object.getDataInputStream();
     } catch (ServiceException e) {
-      handleServiceException(key, e);
+      handleException(e, key);
       return null; //return null if key not found
     }
   }
@@ -228,15 +225,14 @@ class Jets3tNativeFileSystemStore implem
   public InputStream retrieve(String key, long byteRangeStart)
           throws IOException {
     try {
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("Getting key: " + key + " from bucket:" + bucket.getName() + " with byteRangeStart: " + byteRangeStart);
-      }
+      LOG.debug("Getting key: {} from bucket: {} with byteRangeStart: {}",
+          key, bucket.getName(), byteRangeStart);
       S3Object object = s3Service.getObject(bucket, key, null, null, null,
                                             null, byteRangeStart, null);
       return object.getDataInputStream();
     } catch (ServiceException e) {
-      handleServiceException(key, e);
-      return null; //return null if key not found
+      handleException(e, key);
+      return null;
     }
   }
 
@@ -254,17 +250,19 @@ class Jets3tNativeFileSystemStore implem
   }
 
   /**
-   *
-   * @return
-   * This method returns null if the list could not be populated
-   * due to S3 giving ServiceException
-   * @throws IOException
+   * list objects
+   * @param prefix prefix
+   * @param delimiter delimiter
+   * @param maxListingLength max no. of entries
+   * @param priorLastKey last key in any previous search
+   * @return a list of matches
+   * @throws IOException on any reported failure
    */
 
   private PartialListing list(String prefix, String delimiter,
       int maxListingLength, String priorLastKey) throws IOException {
     try {
-      if (prefix.length() > 0 && !prefix.endsWith(PATH_DELIMITER)) {
+      if (!prefix.isEmpty() && !prefix.endsWith(PATH_DELIMITER)) {
         prefix += PATH_DELIMITER;
       }
       StorageObjectsChunk chunk = s3Service.listObjectsChunked(bucket.getName(),
@@ -279,24 +277,20 @@ class Jets3tNativeFileSystemStore implem
       }
       return new PartialListing(chunk.getPriorLastKey(), fileMetadata,
           chunk.getCommonPrefixes());
-    } catch (S3ServiceException e) {
-      handleS3ServiceException(e);
-      return null; //never returned - keep compiler happy
     } catch (ServiceException e) {
-      handleServiceException(e);
-      return null; //return null if list could not be populated
+      handleException(e, prefix);
+      return null; // never returned - keep compiler happy
     }
   }
 
   @Override
   public void delete(String key) throws IOException {
     try {
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("Deleting key:" + key + "from bucket" + bucket.getName());
-      }
+      LOG.debug("Deleting key: {} from bucket: {}",
+          key, bucket.getName());
       s3Service.deleteObject(bucket, key);
     } catch (ServiceException e) {
-      handleServiceException(key, e);
+      handleException(e, key);
     }
   }
 
@@ -304,7 +298,7 @@ class Jets3tNativeFileSystemStore implem
     try {
       s3Service.renameObject(bucket.getName(), srcKey, new S3Object(dstKey));
     } catch (ServiceException e) {
-      handleServiceException(e);
+      handleException(e, srcKey);
     }
   }
   
@@ -329,7 +323,7 @@ class Jets3tNativeFileSystemStore implem
       s3Service.copyObject(bucket.getName(), srcKey, bucket.getName(),
           dstObject, false);
     } catch (ServiceException e) {
-      handleServiceException(srcKey, e);
+      handleException(e, srcKey);
     }
   }
 
@@ -364,19 +358,22 @@ class Jets3tNativeFileSystemStore implem
       Collections.reverse(listedParts);
       s3Service.multipartCompleteUpload(multipartUpload, listedParts);
     } catch (ServiceException e) {
-      handleServiceException(e);
+      handleException(e, srcObject.getKey());
     }
   }
 
   @Override
   public void purge(String prefix) throws IOException {
+    String key = "";
     try {
-      S3Object[] objects = s3Service.listObjects(bucket.getName(), prefix, null);
+      S3Object[] objects =
+          s3Service.listObjects(bucket.getName(), prefix, null);
       for (S3Object object : objects) {
-        s3Service.deleteObject(bucket, object.getKey());
+        key = object.getKey();
+        s3Service.deleteObject(bucket, key);
       }
     } catch (S3ServiceException e) {
-      handleS3ServiceException(e);
+      handleException(e, key);
     }
   }
 
@@ -390,39 +387,97 @@ class Jets3tNativeFileSystemStore implem
         sb.append(object.getKey()).append("\n");
       }
     } catch (S3ServiceException e) {
-      handleS3ServiceException(e);
+      handleException(e);
     }
     System.out.println(sb);
   }
 
-  private void handleServiceException(String key, ServiceException e) throws IOException {
-    if ("NoSuchKey".equals(e.getErrorCode())) {
-      throw new FileNotFoundException("Key '" + key + "' does not exist in S3");
-    } else {
-      handleServiceException(e);
-    }
+  /**
+   * Handle any service exception by translating it into an IOException
+   * @param e exception
+   * @throws IOException exception -always
+   */
+  private void handleException(Exception e) throws IOException {
+    throw processException(e, e, "");
   }
+  /**
+   * Handle any service exception by translating it into an IOException
+   * @param e exception
+   * @param key key sought from object store
 
-  private void handleS3ServiceException(S3ServiceException e) throws IOException {
-    if (e.getCause() instanceof IOException) {
-      throw (IOException) e.getCause();
-    }
-    else {
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("S3 Error code: " + e.getS3ErrorCode() + "; S3 Error message: " + e.getS3ErrorMessage());
-      }
-      throw new S3Exception(e);
-    }
+   * @throws IOException exception -always
+   */
+  private void handleException(Exception e, String key) throws IOException {
+    throw processException(e, e, key);
   }
 
-  private void handleServiceException(ServiceException e) throws IOException {
-    if (e.getCause() instanceof IOException) {
-      throw (IOException) e.getCause();
-    }
-    else {
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("Got ServiceException with Error code: " + e.getErrorCode() + ";and Error message: " + e.getErrorMessage());
-      }
+  /**
+   * Handle any service exception by translating it into an IOException
+   * @param thrown exception
+   * @param original original exception -thrown if no other translation could
+   * be made
+   * @param key key sought from object store or "" for undefined
+   * @return an exception to throw. If isProcessingCause==true this may be null.
+   */
+  private IOException processException(Throwable thrown, Throwable original,
+      String key) {
+    IOException result;
+    if (thrown.getCause() != null) {
+      // recurse down
+      result = processException(thrown.getCause(), original, key);
+    } else if (thrown instanceof HttpException) {
+      // nested HttpException - examine error code and react
+      HttpException httpException = (HttpException) thrown;
+      String responseMessage = httpException.getResponseMessage();
+      int responseCode = httpException.getResponseCode();
+      String bucketName = "s3n://" + bucket.getName();
+      String text = String.format("%s : %03d : %s",
+          bucketName,
+          responseCode,
+          responseMessage);
+      String filename = !key.isEmpty() ? (bucketName + "/" + key) : text;
+      IOException ioe;
+      switch (responseCode) {
+        case 404:
+          result = new FileNotFoundException(filename);
+          break;
+        case 416: // invalid range
+          result = new EOFException(FSExceptionMessages.CANNOT_SEEK_PAST_EOF
+                                    +": " + filename);
+          break;
+        case 403: //forbidden
+          result = new AccessControlException("Permission denied"
+                                    +": " + filename);
+          break;
+        default:
+          result = new IOException(text);
+      }
+      result.initCause(thrown);
+    } else if (thrown instanceof S3ServiceException) {
+      S3ServiceException se = (S3ServiceException) thrown;
+      LOG.debug(
+          "S3ServiceException: {}: {} : {}",
+          se.getS3ErrorCode(), se.getS3ErrorMessage(), se, se);
+      if ("InvalidRange".equals(se.getS3ErrorCode())) {
+        result = new EOFException(FSExceptionMessages.CANNOT_SEEK_PAST_EOF);
+      } else {
+        result = new S3Exception(se);
+      }
+    } else if (thrown instanceof ServiceException) {
+      ServiceException se = (ServiceException) thrown;
+      LOG.debug("S3ServiceException: {}: {} : {}",
+          se.getErrorCode(), se.toString(), se, se);
+      result = new S3Exception(se);
+    } else if (thrown instanceof IOException) {
+      result = (IOException) thrown;
+    } else {
+      // here there is no exception derived yet.
+      // this means no inner cause, and no translation made yet.
+      // convert the original to an IOException -rather than just the
+      // exception at the base of the tree
+      result = new S3Exception(original);
     }
+
+    return result;
   }
 }

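Editor's note: the effect of processException() is a single, consistent mapping from jets3t failures to Java exceptions: HTTP 404 becomes FileNotFoundException, 416 and InvalidRange become EOFException, 403 becomes AccessControlException, and anything else is rethrown as an IOException/S3Exception carrying the original cause. A caller-side sketch of what this looks like through the filesystem API, assuming the usual fs.s3n.awsAccessKeyId / fs.s3n.awsSecretAccessKey credentials are configured (bucket and key names are placeholders):

    import java.io.FileNotFoundException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class S3nMissingKeySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();   // credentials assumed present
        Path missing = new Path("s3n://example-bucket/no-such-key");
        FileSystem fs = missing.getFileSystem(conf);
        try {
          fs.open(missing).close();
        } catch (FileNotFoundException expected) {
          // missing objects surface as FileNotFoundException, not a raw S3 error
        }
      }
    }
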
Modified: hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java?rev=1607596&r1=1607595&r2=1607596&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java (original)
+++ hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java Thu Jul  3 12:04:50 2014
@@ -19,6 +19,7 @@
 package org.apache.hadoop.fs.s3native;
 
 import java.io.BufferedOutputStream;
+import java.io.EOFException;
 import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.FileOutputStream;
@@ -37,15 +38,16 @@ import java.util.Set;
 import java.util.TreeSet;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BufferedFSInputStream;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FSExceptionMessages;
 import org.apache.hadoop.fs.FSInputStream;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -55,6 +57,8 @@ import org.apache.hadoop.io.retry.RetryP
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.io.retry.RetryProxy;
 import org.apache.hadoop.util.Progressable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * <p>
@@ -81,8 +85,8 @@ import org.apache.hadoop.util.Progressab
 @InterfaceStability.Stable
 public class NativeS3FileSystem extends FileSystem {
   
-  public static final Log LOG = 
-    LogFactory.getLog(NativeS3FileSystem.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(NativeS3FileSystem.class);
   
   private static final String FOLDER_SUFFIX = "_$folder$";
   static final String PATH_DELIMITER = Path.SEPARATOR;
@@ -97,6 +101,7 @@ public class NativeS3FileSystem extends 
     private long pos = 0;
     
     public NativeS3FsInputStream(NativeFileSystemStore store, Statistics statistics, InputStream in, String key) {
+      Preconditions.checkNotNull(in, "Null input stream");
       this.store = store;
       this.statistics = statistics;
       this.in = in;
@@ -105,13 +110,20 @@ public class NativeS3FileSystem extends 
     
     @Override
     public synchronized int read() throws IOException {
-      int result = -1;
+      int result;
       try {
         result = in.read();
       } catch (IOException e) {
-        LOG.info("Received IOException while reading '" + key + "', attempting to reopen.");
-        seek(pos);
-        result = in.read();
+        LOG.info("Received IOException while reading '{}', attempting to reopen",
+            key);
+        LOG.debug("{}", e, e);
+        try {
+          seek(pos);
+          result = in.read();
+        } catch (EOFException eof) {
+          LOG.debug("EOF on input stream read: {}", eof, eof);
+          result = -1;
+        }
       } 
       if (result != -1) {
         pos++;
@@ -124,12 +136,17 @@ public class NativeS3FileSystem extends 
     @Override
     public synchronized int read(byte[] b, int off, int len)
       throws IOException {
-      
+      if (in == null) {
+        throw new EOFException("Cannot read closed stream");
+      }
       int result = -1;
       try {
         result = in.read(b, off, len);
+      } catch (EOFException eof) {
+        throw eof;
       } catch (IOException e) {
-        LOG.info("Received IOException while reading '" + key + "', attempting to reopen.");
+        LOG.info( "Received IOException while reading '{}'," +
+                  " attempting to reopen.", key);
         seek(pos);
         result = in.read(b, off, len);
       }
@@ -143,17 +160,53 @@ public class NativeS3FileSystem extends 
     }
 
     @Override
-    public void close() throws IOException {
-      in.close();
+    public synchronized void close() throws IOException {
+      closeInnerStream();
+    }
+
+    /**
+     * Close the inner stream if not null. Even if an exception
+     * is raised during the close, the field is set to null
+     * @throws IOException if raised by the close() operation.
+     */
+    private void closeInnerStream() throws IOException {
+      if (in != null) {
+        try {
+          in.close();
+        } finally {
+          in = null;
+        }
+      }
+    }
+
+    /**
+     * Update inner stream with a new stream and position
+     * @param newStream new stream -must not be null
+     * @param newpos new position
+     * @throws IOException IO exception on a failure to close the existing
+     * stream.
+     */
+    private synchronized void updateInnerStream(InputStream newStream, long newpos) throws IOException {
+      Preconditions.checkNotNull(newStream, "Null newstream argument");
+      closeInnerStream();
+      in = newStream;
+      this.pos = newpos;
     }
 
     @Override
-    public synchronized void seek(long pos) throws IOException {
-      in.close();
-      LOG.info("Opening key '" + key + "' for reading at position '" + pos + "'");
-      in = store.retrieve(key, pos);
-      this.pos = pos;
+    public synchronized void seek(long newpos) throws IOException {
+      if (newpos < 0) {
+        throw new EOFException(
+            FSExceptionMessages.NEGATIVE_SEEK);
+      }
+      if (pos != newpos) {
+        // the seek is attempting to move the current position
+        LOG.debug("Opening key '{}' for reading at position '{}", key, newpos);
+        InputStream newStream = store.retrieve(key, newpos);
+        updateInnerStream(newStream, newpos);
+      }
     }
+
     @Override
     public synchronized long getPos() throws IOException {
       return pos;
@@ -214,7 +267,7 @@ public class NativeS3FileSystem extends 
       }
 
       backupStream.close();
-      LOG.info("OutputStream for key '" + key + "' closed. Now beginning upload");
+      LOG.info("OutputStream for key '{}' closed. Now beginning upload", key);
       
       try {
         byte[] md5Hash = digest == null ? null : digest.digest();
@@ -226,7 +279,7 @@ public class NativeS3FileSystem extends 
         super.close();
         closed = true;
       } 
-      LOG.info("OutputStream for key '" + key + "' upload complete");
+      LOG.info("OutputStream for key '{}' upload complete", key);
     }
 
     @Override
@@ -339,7 +392,7 @@ public class NativeS3FileSystem extends 
       Progressable progress) throws IOException {
 
     if (exists(f) && !overwrite) {
-      throw new IOException("File already exists:"+f);
+      throw new FileAlreadyExistsException("File already exists: " + f);
     }
     
     if(LOG.isDebugEnabled()) {
@@ -367,7 +420,7 @@ public class NativeS3FileSystem extends 
     String key = pathToKey(absolutePath);
     if (status.isDirectory()) {
       if (!recurse && listStatus(f).length > 0) {
-        throw new IOException("Can not delete " + f + " at is a not empty directory and recurse option is false");
+        throw new IOException("Can not delete " + f + " as is a not empty directory and recurse option is false");
       }
 
       createParent(f);
@@ -538,7 +591,7 @@ public class NativeS3FileSystem extends 
     try {
       FileStatus fileStatus = getFileStatus(f);
       if (fileStatus.isFile()) {
-        throw new IOException(String.format(
+        throw new FileAlreadyExistsException(String.format(
             "Can't make directory for path '%s' since it is a file.", f));
 
       }
@@ -556,7 +609,7 @@ public class NativeS3FileSystem extends 
   public FSDataInputStream open(Path f, int bufferSize) throws IOException {
     FileStatus fs = getFileStatus(f); // will throw if the file doesn't exist
     if (fs.isDirectory()) {
-      throw new IOException("'" + f + "' is a directory");
+      throw new FileNotFoundException("'" + f + "' is a directory");
     }
     LOG.info("Opening '" + f + "' for reading");
     Path absolutePath = makeAbsolute(f);

Added: hadoop/common/trunk/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/extending.md
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/extending.md?rev=1607596&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/extending.md (added)
+++ hadoop/common/trunk/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/extending.md Thu Jul  3 12:04:50 2014
@@ -0,0 +1,95 @@
+<!---
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+# Extending the File System specification and its tests
+
+The FileSystem specification is incomplete. It doesn't cover all operations, or
+even all interfaces and classes, in the FileSystem APIs. There may
+be some minor issues with those that it does cover, such
+as corner cases, failure modes, and other unexpected outcomes. It may also be that
+a standard FileSystem significantly diverges from the specification, and
+it is felt that this needs to be documented and coped with in tests.
+
+Finally, the FileSystem classes and methods are not fixed forever.
+They may be extended with new operations on existing classes, as well as
+potentially entirely new classes and interfaces.
+
+Accordingly, do not view this specification as a complete static document,
+any more than the rest of the Hadoop code.
+
+1. View it as a live document to accompany the reference implementation (HDFS),
+and the tests used to validate filesystems.
+1. Don't be afraid to extend or correct it.
+1. If you are proposing enhancements to the FileSystem APIs, you should extend the
+specification to match.
+
+## How to update this specification
+
+1. Although the FileSystem and FileContext APIs are found in the `hadoop-common`
+codebase, the HDFS team has ownership of them. Work with that team on the hdfs-dev mailing list.
+
+1. Create JIRA issues in the `HADOOP` project, component `fs`, to cover changes
+in the APIs and/or specification.
+
+1. Code changes will of course require tests. Ideally, changes to the specification
+itself are accompanied by new tests.
+
+1. If the change involves operations that already have an `Abstract*ContractTest`,
+add new test methods to the class and verify that they work on filesystem-specific
+tests that subclass it (a sketch of such a test method follows this list). That
+includes the object stores as well as the local and HDFS filesystems.
+
+1. If the changes add a new operation, add a new abstract test class
+with the same contract-driven architecture as the existing one, and an implementation
+subclass for all filesystems that support the operation.
+
+1. Add test methods to verify that invalid preconditions result in the expected
+failures.
+
+1. Add test methods to verify that valid preconditions result in the expected
+final state of the filesystem. Testing as little as possible per test aids
+in tracking down problems.
+
+1. If possible, add tests to show concurrency expectations.
+
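+As an illustration of adding a test method to an existing contract test class, here is a
+sketch. It is not a definitive template: it assumes the `getFileSystem()` and `path()`
+helpers and the JUnit assertions available to `AbstractFSContractTestBase` subclasses.
+
+    // Hypothetical test method added to an existing contract test class,
+    // e.g. the delete contract test hierarchy.
+    // Assumed imports: org.junit.Test, org.apache.hadoop.fs.{FileSystem, Path},
+    // static org.junit.Assert.assertFalse
+    @Test
+    public void testDeleteMissingFileReturnsFalse() throws Throwable {
+      FileSystem fs = getFileSystem();
+      Path missing = path("testDeleteMissingFileReturnsFalse/no-such-file");
+      // precondition: the path is absent
+      assertFalse("path should not exist", fs.exists(missing));
+      // expected outcome: no exception is raised and the result is false
+      assertFalse("delete of a missing path should return false",
+          fs.delete(missing, false));
+    }
+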
+If a FileSystem fails a newly added test, then it may be because:
+
+* The specification is wrong.
+* The test is wrong.
+* The test is looking for the wrong exception (i.e. it is too strict).
+* The specification and tests are correct, and it is the filesystem that is not
+consistent with expectations.
+
+HDFS has to be treated as correct in its behavior.
+If the test and specification do not match this behavior, then the specification
+needs to be updated. Even so, there may be cases where the FS could be changed:
+
+1. The exception raised is a generic `IOException`, when a more informative
+subclass, such as `EOFException`, can be raised.
+1. The FileSystem does not fail correctly when passed an invalid set of arguments.
+This MAY be correctable, though it must be done cautiously.
+
+If the mismatch is in LocalFileSystem, then it probably can't be corrected, as
+this is the native filesystem as accessed via the Java IO APIs.
+
+For other FileSystems, their behaviour MAY be updated to more accurately reflect
+the behavior of HDFS and/or LocalFileSystem. For most operations this is straightforward,
+though the semantics of `rename()` are complicated enough that it is not clear
+that HDFS is the correct reference.
+
+If a test fails and it is felt that it is an unfixable FileSystem-specific issue, then
+a new contract option to allow for different interpretations of the results should
+be added to the `ContractOptions` interface, the test modified to react to the
+presence/absence of the option, and the XML contract files for the standard
+FileSystems updated to indicate when a feature/failure mode is present.

Added: hadoop/common/trunk/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md?rev=1607596&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md (added)
+++ hadoop/common/trunk/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md Thu Jul  3 12:04:50 2014
@@ -0,0 +1,802 @@
+<!---
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+
+<!--  ============================================================= -->
+<!--  CLASS: FileSystem -->
+<!--  ============================================================= -->
+
+# class `org.apache.hadoop.fs.FileSystem`
+
+The abstract `FileSystem` class is the original class to access Hadoop filesystems;
+non-abstract subclasses exist for all Hadoop-supported filesystems.
+
+All operations of this interface that take a Path MUST support relative paths.
+In such a case, they must be resolved relative to the working directory
+defined by `setWorkingDirectory()`.
+
+For all clients, therefore, we also add the notion of a state component PWD:
+this represents the present working directory of the client. Changes to this
+state are not reflected in the filesystem itself: they are unique to the instance
+of the client.
+
+**Implementation Note**: the static `FileSystem get(URI uri, Configuration conf)` method MAY return
+a pre-existing instance of a filesystem client class&mdash;a class that may also be in use in other threads. The implementations of `FileSystem` which ship with Apache Hadoop *do not make any attempt to synchronize access to the working directory field*.
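+
+The following non-normative sketch illustrates the consequence; the URI, paths and
+configuration values are illustrative assumptions only:
+
+    // Assumed imports: java.net.URI, org.apache.hadoop.conf.Configuration,
+    // org.apache.hadoop.fs.{FileStatus, FileSystem, Path}
+    Configuration conf = new Configuration();
+    // get() may hand back an instance already cached and shared with other threads
+    FileSystem fs = FileSystem.get(URI.create("hdfs://namenode/"), conf);
+    // this change is visible to every other user of the shared instance, and
+    // access to the working directory field is not synchronized
+    fs.setWorkingDirectory(new Path("/users/alice"));
+    // a relative path is resolved against the working directory set above
+    FileStatus status = fs.getFileStatus(new Path("data/input.csv"));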
+
+## Invariants
+
+All the requirements of a valid FileSystem are considered implicit preconditions and postconditions:
+all operations on a valid FileSystem MUST result in a new FileSystem that is also valid.
+
+
+## Predicates and other state access operations
+
+
+### `boolean exists(Path p)`
+
+
+    def exists(FS, p) = p in paths(FS)
+
+
+### `boolean isDirectory(Path p)`
+
+    def isDirectory(FS, p)= p in directories(FS)
+
+
+### `boolean isFile(Path p)`
+
+
+    def isFile(FS, p) = p in files(FS)
+
+###  `boolean isSymlink(Path p)`
+
+
+    def isSymlink(FS, p) = p in symlinks(FS)
+
+
+### `FileStatus getFileStatus(Path p)`
+
+Get the status of a path.
+
+#### Preconditions
+
+
+    if not exists(FS, p) : raise FileNotFoundException
+
+#### Postconditions
+
+
+    result = stat: FileStatus where:
+        if isFile(FS, p) :
+            stat.length = len(FS.Files[p])
+            stat.isdir = False
+        elif isDir(FS, p) :
+            stat.length = 0
+            stat.isdir = True
+        elif isSymlink(FS, p) :
+            stat.length = 0
+            stat.isdir = False
+            stat.symlink = FS.Symlinks[p]
+
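+A non-normative usage sketch (the path is an arbitrary example) of branching on the
+returned status:
+
+    // Assumed imports: org.apache.hadoop.conf.Configuration,
+    // org.apache.hadoop.fs.{FileStatus, FileSystem, Path}
+    FileSystem fs = FileSystem.get(new Configuration());
+    Path p = new Path("/work/dataset.csv");
+    // raises FileNotFoundException if the path does not exist
+    FileStatus stat = fs.getFileStatus(p);
+    if (stat.isFile()) {
+      System.out.println(p + " is a file of length " + stat.getLen());
+    } else if (stat.isDirectory()) {
+      System.out.println(p + " is a directory");
+    } else if (stat.isSymlink()) {
+      System.out.println(p + " is a symlink");
+    }
+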
+### `Path getHomeDirectory()`
+
+The function `getHomeDirectory` returns the home directory for the FileSystem
+and the current user account.
+
+For some FileSystems, the path is `["/", "users", System.getProperty("user.name")]`.
+
+However, for HDFS, the username is derived from the credentials used to authenticate the client with HDFS. This
+may differ from the local user account name.
+
+**It is the responsibility of the FileSystem to determine the actual home directory
+of the caller.**
+
+
+#### Preconditions
+
+
+#### Postconditions
+
+    result = p where valid-path(FS, p)
+
+There is no requirement that the path exists at the time the method was called,
+or, if it exists, that it points to a directory. However, code tends to assume
+that `not isFile(FS, getHomeDirectory())` holds to the extent that follow-on
+code may fail.
+
+#### Implementation Notes
+
+* The FTPFileSystem queries this value from the remote filesystem and may
+fail with a RuntimeException or subclass thereof if there is a connectivity
+problem. The time to execute the operation is not bounded.
+
+### `FileSystem.listStatus(Path, PathFilter)`
+
+A `PathFilter` `f` is a predicate function that returns true iff the path `p`
+meets the filter's conditions.
+
+#### Preconditions
+
+Path must exist:
+
+    if not exists(FS, p) : raise FileNotFoundException
+
+#### Postconditions
+
+
+    if isFile(FS, p) and f(p) :
+        result = [getFileStatus(p)]
+
+    elif isFile(FS, p) and not f(p) :
+        result = []
+
+    elif isDir(FS, p):
+        result = [getFileStatus(c) for c in children(FS, p) where f(c) == True]
+
+
+**Implicit invariant**: the contents of a `FileStatus` of a child retrieved
+via `listStatus()` are equal to those from a call of `getFileStatus()`
+to the same path:
+
+    forall fs in listStatus(Path) :
+      fs == getFileStatus(fs.path)
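+
+For illustration only (the directory and filter are assumptions, not part of the
+contract), a filtered listing might look like:
+
+    // Assumed imports: org.apache.hadoop.conf.Configuration,
+    // org.apache.hadoop.fs.{FileStatus, FileSystem, Path, PathFilter}
+    FileSystem fs = FileSystem.get(new Configuration());
+    Path dir = new Path("/work/output");
+    // predicate: accept only the part-* files of a job's output
+    PathFilter partFiles = new PathFilter() {
+      @Override
+      public boolean accept(Path path) {
+        return path.getName().startsWith("part-");
+      }
+    };
+    for (FileStatus st : fs.listStatus(dir, partFiles)) {
+      // each entry should equal getFileStatus(st.getPath()) at that instant
+      System.out.println(st.getPath() + " " + st.getLen());
+    }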
+
+
+### Atomicity and Consistency
+
+By the time the `listStatus()` operation returns to the caller, there
+is no guarantee that the information contained in the response is current.
+The details MAY be out of date, including the contents of any directory, the
+attributes of any files, and the existence of the path supplied.
+
+The state of a directory MAY change during the evaluation
+process. This may be reflected in a listing that is split between the pre-
+and post-update FileSystem states.
+
+
+* After an entry at path `P` is created, and before any other
+ changes are made to the FileSystem, `listStatus(P)` MUST
+ find the file and return its status.
+
+* After an entry at path `P` is deleted, `listStatus(P)`  MUST
+ raise a `FileNotFoundException`.
+
+* After an entry at path `P` is created, and before any other
+ changes are made to the FileSystem, the result of `listStatus(parent(P))` SHOULD
+ include the value of `getFileStatus(P)`.
+
+* After an entry at path `P` is deleted, and before any other
+ changes are made to the FileSystem, the result of `listStatus(parent(P))` SHOULD
+ NOT include the value of `getFileStatus(P)`.
+
+This is not a theoretical possibility; it is observable in HDFS when a
+directory contains many thousands of files.
+
+Consider a directory "d" with the contents:
+
+	a
+	part-0000001
+	part-0000002
+	...
+	part-9999999
+
+
+If the number of files is such that HDFS returns a partial listing in each
+response, then, if a listing `listStatus("d")` takes place concurrently with the operation
+`rename("d/a","d/z")`, the result may be one of:
+
+	[a, part-0000001, ... , part-9999999]
+	[part-0000001, ... , part-9999999, z]
+
+	[a, part-0000001, ... , part-9999999, z]
+	[part-0000001, ... , part-9999999]
+
+While this situation is likely to be a rare occurrence, it MAY happen. In HDFS
+these inconsistent views are only likely when listing a directory with many children.
+
+Other filesystems may have stronger consistency guarantees, or return inconsistent
+data more readily.
+
+### `List[BlockLocation] getFileBlockLocations(FileStatus f, int s, int l)`
+
+#### Preconditions
+
+    if s < 0 or l < 0 : raise {HadoopIllegalArgumentException, InvalidArgumentException}
+
+* HDFS throws `HadoopIllegalArgumentException` for an invalid offset
+or length; this extends `IllegalArgumentException`.
+
+#### Postconditions
+
+If the filesystem is location aware, it must return the list
+of block locations where the data in the range `[s:s+l]` can be found.
+
+
+    if f == null :
+        result = null
+    elif f.getLen() <= s :
+        result = []
+    else result = [ locations(FS, b) for all b in blocks(FS, p, s, s+l)]
+
+where
+
+      def locations(FS, b) = a list of all locations of a block in the filesystem
+
+      def blocks(FS, p, s, s + l) = a list of the blocks containing data(FS, path)[s:s+l]
+
+
+Note that as `length(FS, f)` is defined as 0 if `isDir(FS, f)`, the result
+of `getFileBlockLocations()` on a directory is [].
+
+
+If the filesystem is not location aware, it SHOULD return
+
+      [
+        BlockLocation(["localhost:50010"],
+                  ["localhost"],
+                  ["/default/localhost"],
+                  0, f.getLen())
+      ]
+
+
+*A bug in Hadoop 1.0.3 means that a topology path of the same number
+of elements as the cluster topology MUST be provided, hence Filesystems SHOULD
+return that `"/default/localhost"` path.*
+
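+A non-normative sketch of how callers typically consume the result; the path is an
+arbitrary example:
+
+    // Assumed imports: org.apache.hadoop.conf.Configuration,
+    // org.apache.hadoop.fs.{BlockLocation, FileStatus, FileSystem, Path}
+    FileSystem fs = FileSystem.get(new Configuration());
+    FileStatus stat = fs.getFileStatus(new Path("/work/data.seq"));
+    // ask for the locations of every byte of the file
+    BlockLocation[] locations =
+        fs.getFileBlockLocations(stat, 0, stat.getLen());
+    for (BlockLocation block : locations) {
+      System.out.println("offset " + block.getOffset()
+          + " length " + block.getLength()
+          + " hosts " + java.util.Arrays.toString(block.getHosts()));
+    }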
+
+###  `getFileBlockLocations(Path P, int S, int L)`
+
+#### Preconditions
+
+
+    if p == null : raise NullPointerException
+    if not exists(FS, p) : raise FileNotFoundException
+
+
+#### Postconditions
+
+    result = getFileBlockLocations(getFileStatus(P), S, L)
+
+
+###  `getDefaultBlockSize()`
+
+#### Preconditions
+
+#### Postconditions
+
+    result = integer >= 0
+
+Although there is no defined minimum value for this result, as it
+is used to partition work during job submission, a block size
+that is too small will result in either too many jobs being submitted
+for efficient work, or the `JobSubmissionClient` running out of memory.
+
+
+Any FileSystem that does not actually break files into blocks SHOULD
+return a number for this that results in efficient processing.
+A FileSystem MAY make this user-configurable (the S3 and Swift filesystem clients do this).
+
+###  `getDefaultBlockSize(Path P)`
+
+#### Preconditions
+
+
+#### Postconditions
+
+
+    result = integer  >= 0
+
+The outcome of this operation is usually identical to `getDefaultBlockSize()`,
+with no checks for the existence of the given path.
+
+Filesystems that support mount points may have different default values for
+different paths, in which case the specific default value for the destination path
+SHOULD be returned.
+
+
+###  `getBlockSize(Path P)`
+
+#### Preconditions
+
+    if not exists(FS, p) :  raise FileNotFoundException
+
+
+#### Postconditions
+
+
+    result == getFileStatus(P).getBlockSize()
+
+The outcome of this operation MUST be identical to that contained in
+the `FileStatus` returned from `getFileStatus(P)`.
+
+
+## State Changing Operations
+
+### `boolean mkdirs(Path p, FsPermission permission )`
+
+Create a directory and all its parents.
+
+#### Preconditions
+
+
+     if exists(FS, p) and not isDir(FS, p) :
+         raise [ParentNotDirectoryException, FileAlreadyExistsException, IOException]
+
+
+#### Postconditions
+
+
+    FS' where FS'.Directories' = FS.Directories + [p] + ancestors(FS, p)
+    result = True
+
+
+The exclusivity requirement of a FileSystem's directories,
+files and symbolic links must hold.
+
+The probe for the existence and type of a path and directory creation MUST be
+atomic. The combined operation, including `mkdirs(parent(p))`, MAY be atomic.
+
+The return value is always true&mdash;even if a new directory is not created
+ (this is defined in HDFS).
+
+#### Implementation Notes: Local FileSystem
+
+The local FileSystem does not raise an exception if `mkdirs(p)` is invoked
+on a path that exists and is a file. Instead the operation returns false.
+
+    if isFile(FS, p):
+       FS' = FS
+       result = False
+
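+A brief, illustrative sketch of typical use; the directory name is an assumption:
+
+    // Assumed imports: org.apache.hadoop.conf.Configuration,
+    // org.apache.hadoop.fs.{FileSystem, Path}
+    FileSystem fs = FileSystem.get(new Configuration());
+    Path dir = new Path("/work/logs/2014/07");
+    // creates the directory and any missing ancestors
+    boolean made = fs.mkdirs(dir);
+    // HDFS: "made" is true even if the directory already existed;
+    // the local filesystem returns false if the path is an existing file,
+    // where HDFS would instead raise an exception
+    System.out.println("mkdirs returned " + made);
+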
+### `FSDataOutputStream create(Path, ...)`
+
+
+    FSDataOutputStream create(Path p,
+          FsPermission permission,
+          boolean overwrite,
+          int bufferSize,
+          short replication,
+          long blockSize,
+          Progressable progress) throws IOException;
+
+
+#### Preconditions
+
+The file must not exist for a no-overwrite create:
+
+    if not overwrite and isFile(FS, p) : raise FileAlreadyExistsException
+
+Writing to or overwriting a directory must fail.
+
+    if isDir(FS, p) : raise {FileAlreadyExistsException, FileNotFoundException, IOException}
+
+
+FileSystems may reject the request for other
+reasons, such as the FS being read-only  (HDFS),
+the block size being below the minimum permitted (HDFS),
+the replication count being out of range (HDFS),
+quotas on namespace or filesystem being exceeded, reserved
+names, etc. All rejections SHOULD be `IOException` or a subclass thereof
+and MAY be a `RuntimeException` or subclass. For instance, HDFS may raise an `InvalidPathException`.
+
+#### Postconditions
+
+    FS' where :
+       FS'.Files'[p] == []
+       ancestors(p) is-subset-of FS'.Directories'
+
+    result = FSDataOutputStream
+
+The updated (valid) FileSystem must contain all the parent directories of the path, as created by `mkdirs(parent(p))`.
+
+The result is `FSDataOutputStream`, which through its operations may generate new filesystem states with updated values of
+`FS.Files[p]`.
+
+#### Implementation Notes
+
+* Some implementations separate the check for the file existing
+ from the
+ actual creation. This means the operation is NOT atomic: it is possible for
+ clients creating files with `overwrite==false` to fail if the file is created
+ by another client between the two tests.
+
+* S3N, Swift and potentially other Object Stores do not currently change the FS state
+until the output stream `close()` operation is completed.
+This MAY be a bug, as it allows >1 client to create a file with `overwrite==false`,
+ and potentially confuse file/directory logic.
+
+* The Local FileSystem raises a `FileNotFoundException` when trying to create a file over
+a directory, hence it is listed as an exception that MAY be raised when
+this precondition fails.
+
+* Not covered: symlinks. The resolved path of the symlink is used as the final path argument to the `create()` operation.
+
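+A usage sketch, illustrative only; note that because of the non-atomicity described
+above, the `overwrite` flag is not a reliable exclusion mechanism on every filesystem:
+
+    // Assumed imports: org.apache.hadoop.conf.Configuration,
+    // org.apache.hadoop.fs.{FSDataOutputStream, FileAlreadyExistsException, FileSystem, Path}
+    FileSystem fs = FileSystem.get(new Configuration());
+    Path file = new Path("/work/results/summary.txt");
+    try {
+      // overwrite == false: expect FileAlreadyExistsException if the file exists
+      FSDataOutputStream out = fs.create(file, false);
+      try {
+        out.write("results".getBytes("UTF-8"));
+      } finally {
+        // object stores may not publish the file until close() returns
+        out.close();
+      }
+    } catch (FileAlreadyExistsException e) {
+      System.err.println("Destination already exists: " + file);
+    }
+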
+### `FSDataOutputStream append(Path p, int bufferSize, Progressable progress)`
+
+Implementations MAY throw `UnsupportedOperationException`.
+
+#### Preconditions
+
+    if not exists(FS, p) : raise FileNotFoundException
+
+    if not isFile(FS, p) : raise [FileNotFoundException, IOException]
+
+#### Postconditions
+
+    FS
+    result = FSDataOutputStream
+
+Return: `FSDataOutputStream`, which can update the entry `FS.Files[p]`
+by appending data to the existing list.
+
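+Since append is optional, defensive client code along the following lines is common;
+this is an illustrative sketch, not a required pattern:
+
+    // Assumed imports: java.io.IOException, org.apache.hadoop.conf.Configuration,
+    // org.apache.hadoop.fs.{FSDataOutputStream, FileSystem, Path}
+    FileSystem fs = FileSystem.get(new Configuration());
+    Path log = new Path("/work/logs/events.log");
+    try {
+      FSDataOutputStream out = fs.append(log);
+      try {
+        out.write("event\n".getBytes("UTF-8"));
+      } finally {
+        out.close();
+      }
+    } catch (UnsupportedOperationException e) {
+      // the filesystem does not implement append at all
+    } catch (IOException e) {
+      // raised if the path is missing, is a directory, or append is rejected
+    }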
+
+### `FSDataInputStream open(Path f, int bufferSize)`
+
+Implementations MAY throw `UnsupportedOperationException`.
+
+#### Preconditions
+
+    if not isFile(FS, p) : raise [FileNotFoundException, IOException]
+
+This is a critical precondition. Implementations of some FileSystems (e.g.
+Object stores) could shortcut one round trip by postponing their HTTP GET
+operation until the first `read()` on the returned `FSDataInputStream`.
+However, much client code does depend on the existence check being performed
+at the time of the `open()` operation. Implementations MUST check for the
+presence of the file at the time of creation. This does not imply that
+the file and its data are still present at the time of the following `read()`
+or any successor operations.
+
+#### Postconditions
+
+    result = FSDataInputStream(0, FS.Files[p])
+
+The result provides access to the byte array defined by `FS.Files[p]`; whether that
+access is to the contents at the time the `open()` operation was invoked,
+or whether and how it may pick up changes to that data in later states of FS is
+an implementation detail.
+
+The result MUST be the same for local and remote callers of the operation.
+
+
+#### HDFS implementation notes
+
+1. HDFS MAY throw `UnresolvedPathException` when attempting to traverse
+symbolic links
+
+1. HDFS throws `IOException("Cannot open filename " + src)` if the path
+exists in the metadata, but no copies of any of its blocks can be located;
+a `FileNotFoundException` would seem more accurate and useful.
+
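+A minimal, non-normative read sketch showing the existence check at `open()` time
+followed by the reads; the path is an arbitrary example:
+
+    // Assumed imports: org.apache.hadoop.conf.Configuration,
+    // org.apache.hadoop.fs.{FSDataInputStream, FileSystem, Path}
+    FileSystem fs = FileSystem.get(new Configuration());
+    Path file = new Path("/work/data.bin");
+    // raises FileNotFoundException (or another IOException) if the path
+    // is absent or is a directory
+    FSDataInputStream in = fs.open(file);
+    try {
+      byte[] buffer = new byte[4096];
+      int read;
+      while ((read = in.read(buffer)) > 0) {
+        // process buffer[0..read)
+      }
+    } finally {
+      in.close();
+    }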
+
+### `FileSystem.delete(Path P, boolean recursive)`
+
+#### Preconditions
+
+A directory with children and `recursive == false` cannot be deleted:
+
+    if isDir(FS, p) and not recursive and (children(FS, p) != {}) : raise IOException
+
+
+#### Postconditions
+
+
+##### Nonexistent path
+
+If the file does not exist, the FS state does not change:
+
+    if not exists(FS, p):
+        FS' = FS
+        result = False
+
+The result SHOULD be `False`, indicating that no file was deleted.
+
+
+##### Simple File
+
+
+A path referring to a file is removed; the return value is `True`.
+
+    if isFile(FS, p) :
+        FS' = (FS.Directories, FS.Files - [p], FS.Symlinks)
+        result = True
+
+
+##### Empty root directory
+
+Deleting an empty root does not change the filesystem state
+and may return true or false.
+
+    if isDir(FS, p) and isRoot(p) and children(FS, p) == {} :
+        FS' = FS
+        result = (undetermined)
+
+There is no consistent return code from an attempt to delete the root directory.
+
+##### Empty (non-root) directory
+
+Deleting an empty directory that is not root will remove the path from the FS and
+return true.
+
+    if isDir(FS, p) and not isRoot(p) and children(FS, p) == {} :
+        FS' = (FS.Directories - [p], FS.Files, FS.Symlinks)
+        result = True
+
+
+##### Recursive delete of root directory
+
+Deleting a root path with children and `recursive==True`
+ can do one of two things.
+
+The POSIX model assumes that if the user has
+the correct permissions to delete everything,
+they are free to do so (resulting in an empty filesystem).
+
+    if isDir(FS, p) and isRoot(p) and recursive :
+        FS' = ({["/"]}, {}, {}, {})
+        result = True
+
+In contrast, HDFS never permits the deletion of the root of a filesystem; the
+filesystem can be taken offline and reformatted if an empty
+filesystem is desired.
+
+    if isDir(FS, p) and isRoot(p) and recursive :
+        FS' = FS
+        result = False
+
+##### Recursive delete of non-root directory
+
+Deleting a non-root path with children and `recursive==true`
+removes the path and all descendants
+
+    if isDir(FS, p) and not isRoot(p) and recursive :
+        FS' where:
+            not isDir(FS', p)
+            and forall d in descendants(FS, p):
+                not isDir(FS', d)
+                not isFile(FS', d)
+                not isSymlink(FS', d)
+        result = True
+
+#### Atomicity
+
+* Deleting a file MUST be an atomic action.
+
+* Deleting an empty directory MUST be an atomic action.
+
+* A recursive delete of a directory tree MUST be atomic.
+
+#### Implementation Notes
+
+* S3N, Swift, FTP and potentially other non-traditional FileSystems
+implement `delete()` as a recursive listing and per-file delete operation.
+This can break the expectations of client applications, and means that
+they cannot be used as drop-in replacements for HDFS.
+
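+An illustrative sketch of a defensive recursive delete; the path is an arbitrary example:
+
+    // Assumed imports: org.apache.hadoop.conf.Configuration,
+    // org.apache.hadoop.fs.{FileSystem, Path}
+    FileSystem fs = FileSystem.get(new Configuration());
+    Path tempDir = new Path("/work/tmp/job-0001");
+    // recursive == true: removes the directory and all descendants;
+    // returns false if the path was already absent
+    boolean deleted = fs.delete(tempDir, true);
+    if (!deleted && fs.exists(tempDir)) {
+      // e.g. attempting to delete the root directory on HDFS
+      System.err.println("Could not delete " + tempDir);
+    }
+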
+<!--  ============================================================= -->
+<!--  METHOD: rename() -->
+<!--  ============================================================= -->
+
+
+### `FileSystem.rename(Path src, Path d)`
+
+In terms of its specification, `rename()` is one of the most complex operations within a filesystem.
+
+In terms of its implementation, it is the one with the most ambiguity regarding when to return false
+versus raising an exception.
+
+Rename includes the calculation of the destination path.
+If the destination exists and is a directory, the final destination
+of the rename becomes the destination + the filename of the source path.
+
+    let dest = if (isDir(FS, d) and d != src) :
+            d + [filename(src)]
+        else :
+            d
+
+#### Preconditions
+
+All checks on the destination path MUST take place after the final `dest` path
+has been calculated.
+
+Source `src` must exist:
+
+    exists(FS, src) else raise FileNotFoundException
+
+
+`dest` cannot be a descendant of `src`:
+
+    if isDescendant(FS, src, dest) : raise IOException
+
+This implicitly covers the special case of `isRoot(FS, src)`.
+
+`dest` must be root, or have a parent that exists:
+
+    isRoot(FS, dest) or exists(FS, parent(dest)) else raise IOException
+
+The parent path of a destination must not be a file:
+
+    if isFile(FS, parent(dest)) : raise IOException
+
+This implicitly covers all the ancestors of the parent.
+
+There must not be an existing file at the end of the destination path:
+
+    if isFile(FS, dest) : raise [FileAlreadyExistsException, IOException]
+
+
+#### Postconditions
+
+
+##### Renaming a directory onto itself
+
+Renaming a directory onto itself is a no-op; the return value is not specified.
+
+In POSIX the result is `False`;  in HDFS the result is `True`.
+
+    if isDir(FS, src) and src == dest :
+        FS' = FS
+        result = (undefined)
+
+
+##### Renaming a file to self
+
+Renaming a file to itself is a no-op; the result is `True`.
+
+     if isFile(FS, src) and src == dest :
+         FS' = FS
+         result = True
+
+
+##### Renaming a file onto a nonexistent path
+
+Renaming a file where the final destination path does not exist moves the file there;
+ if the supplied destination was an existing directory, the filename element of the source path is retained.
+
+    if isFile(FS, src) and src != dest:
+        FS' where:
+            not exists(FS', src)
+            and exists(FS', dest)
+            and data(FS', dest) == data(FS, src)
+        result = True
+
+
+
+##### Renaming a directory onto a directory
+
+If `src` is a directory then all its children will then exist under `dest`, while the path
+`src` and its descendants will no longer exist. The names of the paths under
+`dest` will match those under `src`, as will the contents:
+
+    if isDir(FS, src) and isDir(FS, dest) and src != dest :
+        FS' where:
+            not exists(FS', src)
+            and dest in FS'.Directories
+            and forall c in descendants(FS, src) :
+                not exists(FS', c)
+            and forall c in descendants(FS, src) where isDir(FS, c):
+                isDir(FS', dest + childElements(src, c))
+            and forall c in descendants(FS, src) where not isDir(FS, c):
+                data(FS', dest + childElements(src, c)) == data(FS, c)
+        result = True
+
+##### Renaming into a path where the parent path does not exist
+
+      not exists(FS, parent(dest))
+
+There is no consistent behavior here.
+
+*HDFS*
+
+The outcome is no change to FileSystem state, with a return value of false.
+
+    FS' = FS; result = False
+
+*Local Filesystem, S3N*
+
+The outcome is the same as a normal rename, with the additional (implicit) feature
+that the parent directories of the destination also exist.
+
+    exists(FS', parent(dest))
+
+*Other Filesystems (including Swift)*
+
+Other filesystems strictly reject the operation, raising a `FileNotFoundException`.
+
+##### Concurrency requirements
+
+* The core operation of `rename()`&mdash;moving one entry in the filesystem to
+another&mdash;MUST be atomic. Some applications rely on this as a way to coordinate access to data.
+
+* Some FileSystem implementations perform checks on the destination
+FileSystem before and after the rename. One example of this is `ChecksumFileSystem`, which
+provides checksummed access to local data. The entire sequence is not guaranteed to be atomic.
+
+##### Implementation Notes
+
+**Files open for reading, writing or appending**
+
+The behavior of `rename()` on an open file is unspecified: whether it is
+allowed, and what happens to later attempts to read from or write to the open stream, are both undefined.
+
+**Renaming a directory onto itself**
+
+The return code of renaming a directory onto itself is unspecified.
+
+**Destination exists and is a file**
+
+Renaming a file atop an existing file is specified as failing, raising an exception.
+
+* Local FileSystem : the rename succeeds; the destination file is replaced by the source file.
+
+* HDFS : The rename fails and no exception is raised. Instead the method call simply returns false.
+
+**Missing source file**
+
+If the source file `src` does not exist, `FileNotFoundException` should be raised.
+
+HDFS fails without raising an exception; `rename()` merely returns false.
+
+    FS' = FS
+    result = false
+
+The behavior of HDFS here should not be considered a feature to replicate.
+`FileContext` explicitly changed the behavior to raise an exception, and the retrofitting of that action
+to the `DFSFileSystem` implementation is an ongoing matter for debate.
+
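+Because of this false-versus-exception divergence, portable callers usually check the
+return value and probe the source explicitly; an illustrative sketch:
+
+    // Assumed imports: java.io.FileNotFoundException, java.io.IOException,
+    // org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.{FileSystem, Path}
+    FileSystem fs = FileSystem.get(new Configuration());
+    Path src = new Path("/work/_temporary/part-00000");
+    Path dest = new Path("/work/part-00000");
+    // HDFS may return false instead of raising an exception on failure
+    if (!fs.rename(src, dest)) {
+      if (!fs.exists(src)) {
+        throw new FileNotFoundException("Source does not exist: " + src);
+      }
+      throw new IOException("Failed to rename " + src + " to " + dest);
+    }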
+
+### `concat(Path p, Path sources[])`
+
+Joins multiple source files together into a single file. This
+is a little-used operation currently implemented only by HDFS.
+
+Implementations MAY throw `UnsupportedOperationException`
+
+#### Preconditions
+
+    if not exists(FS, p) : raise FileNotFoundException
+
+    if sources==[] : raise IllegalArgumentException
+
+All sources MUST be in the same directory:
+
+    for s in sources: if parent(s) != parent(p) : raise IllegalArgumentException
+
+All block sizes must match that of the target:
+
+    for s in sources: getBlockSize(FS, s) == getBlockSize(FS, p)
+
+No duplicate paths:
+
+    not (exists p1, p2 in (sources + [p]) where p1 == p2)
+
+HDFS: All source files except the final one MUST be a complete block:
+
+    for s in (sources[0:length(sources)-1] + [p]):
+      (length(FS, s) mod getBlockSize(FS, p)) == 0
+
+
+#### Postconditions
+
+
+    FS' where:
+     (data(FS', p) = data(FS, p) + data(FS, sources[0]) + ... + data(FS, sources[length(sources)-1]))
+     and for s in sources: not exists(FS', s)
+
+
+HDFS's restrictions may be an implementation detail of how it implements
+`concat`: by changing the inode references to join them together in
+a sequence. As no other filesystem in the Hadoop core codebase
+implements this method, there is no way to distinguish implementation detail
+from specification.
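+
+A hedged usage sketch follows; the paths are illustrative and, per the preconditions
+above, all sources sit in the same directory as the target:
+
+    // Assumed imports: org.apache.hadoop.conf.Configuration,
+    // org.apache.hadoop.fs.{FileSystem, Path}
+    FileSystem fs = FileSystem.get(new Configuration());
+    Path target = new Path("/work/merged/part-00000");
+    Path[] sources = {
+        new Path("/work/merged/part-00001"),
+        new Path("/work/merged/part-00002")
+    };
+    try {
+      // the sources are appended to the target and then removed
+      fs.concat(target, sources);
+    } catch (UnsupportedOperationException e) {
+      // most filesystems other than HDFS do not implement concat
+    }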