Posted to common-commits@hadoop.apache.org by st...@apache.org on 2009/11/28 20:53:40 UTC

svn commit: r885142 [3/6] - in /hadoop/common/branches/HADOOP-6194: ./ .eclipse.templates/ bin/ ivy/ lib/jdiff/ src/ src/contrib/ src/contrib/ec2/ src/docs/ src/docs/src/documentation/ src/docs/src/documentation/content/xdocs/ src/docs/src/documentatio...

Modified: hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/FSDataOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/FSDataOutputStream.java?rev=885142&r1=885141&r2=885142&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/FSDataOutputStream.java (original)
+++ hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/FSDataOutputStream.java Sat Nov 28 19:53:33 2009
@@ -91,10 +91,29 @@
     return wrappedStream;
   }
 
-  /** {@inheritDoc} */
+  @Override  // Syncable
+  @Deprecated
   public void sync() throws IOException {
     if (wrappedStream instanceof Syncable) {
       ((Syncable)wrappedStream).sync();
     }
   }
+  
+  @Override  // Syncable
+  public void hflush() throws IOException {
+    if (wrappedStream instanceof Syncable) {
+      ((Syncable)wrappedStream).hflush();
+    } else {
+      wrappedStream.flush();
+    }
+  }
+  
+  @Override  // Syncable
+  public void hsync() throws IOException {
+    if (wrappedStream instanceof Syncable) {
+      ((Syncable)wrappedStream).hsync();
+    } else {
+      wrappedStream.flush();
+    }
+  }
 }
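
A minimal sketch of how a client might drive the new calls (the path and
configuration are illustrative, inside a method declared to throw IOException):

    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    FSDataOutputStream out = fs.create(new Path("/tmp/records"));
    out.writeBytes("record 1\n");
    out.hflush();  // new readers can now see the data
    out.hsync();   // data is pushed toward the disk devices where supported
    out.close();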

Modified: hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/FSInputChecker.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/FSInputChecker.java?rev=885142&r1=885141&r2=885142&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/FSInputChecker.java (original)
+++ hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/FSInputChecker.java Sat Nov 28 19:53:33 2009
@@ -296,12 +296,12 @@
   
   @Override
   public synchronized long getPos() throws IOException {
-    return chunkPos-(count-pos);
+    return chunkPos-Math.max(0L, count - pos);
   }
 
   @Override
   public synchronized int available() throws IOException {
-    return count-pos;
+    return Math.max(0, count - pos);
   }
   
   /**
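
The Math.max guard keeps the public contracts intact when a short or failed
read leaves pos ahead of count; a contrived illustration (values hypothetical):

    int count = 0, pos = 512;              // plausible state after a failed read
    int before = count - pos;              // -512: broke the available() >= 0 contract
    int after  = Math.max(0, count - pos); // 0, as the contract requires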

Modified: hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/FileStatus.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/FileStatus.java?rev=885142&r1=885141&r2=885142&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/FileStatus.java (original)
+++ hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/FileStatus.java Sat Nov 28 19:53:33 2009
@@ -148,6 +148,10 @@
   public Path getPath() {
     return path;
   }
+  
+  public void setPath(final Path p) {
+    path = p;
+  }
 
   /* These are provided so that these values could be loaded lazily 
    * by a filesystem (e.g. local file system).

Modified: hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/FileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/FileSystem.java?rev=885142&r1=885141&r2=885142&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/FileSystem.java (original)
+++ hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/FileSystem.java Sat Nov 28 19:53:33 2009
@@ -23,7 +23,6 @@
 import java.net.URI;
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Collection;
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -42,7 +41,10 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.Options.CreateOpts;
+import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Progressable;
@@ -67,7 +69,10 @@
  * implementation is DistributedFileSystem.
  *****************************************************************/
 public abstract class FileSystem extends Configured implements Closeable {
-  public static final String FS_DEFAULT_NAME_KEY = "fs.default.name";
+  public static final String FS_DEFAULT_NAME_KEY = 
+                   CommonConfigurationKeys.FS_DEFAULT_NAME_KEY;
+  public static final String DEFAULT_FS = 
+                   CommonConfigurationKeys.FS_DEFAULT_NAME_DEFAULT;
 
   public static final Log LOG = LogFactory.getLog(FileSystem.class);
 
@@ -103,7 +108,7 @@
    * @return the uri of the default filesystem
    */
   public static URI getDefaultUri(Configuration conf) {
-    return URI.create(fixName(conf.get(FS_DEFAULT_NAME_KEY, "file:///")));
+    return URI.create(fixName(conf.get(FS_DEFAULT_NAME_KEY, DEFAULT_FS)));
   }
 
   /** Set the default filesystem URI in a configuration.
@@ -181,6 +186,11 @@
         return get(defaultUri, conf);              // return default
       }
     }
+    
+    String disableCacheName = String.format("fs.%s.impl.disable.cache", scheme);
+    if (conf.getBoolean(disableCacheName, false)) {
+      return createFileSystem(uri, conf);
+    }
 
     return CACHE.get(uri, conf);
  }
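
This hook lets callers opt a scheme out of the FileSystem cache purely via
configuration; a sketch (the hdfs scheme and namenode URI are illustrative):

    Configuration conf = new Configuration();
    conf.setBoolean("fs.hdfs.impl.disable.cache", true);
    FileSystem fs1 = FileSystem.get(URI.create("hdfs://namenode:8020/"), conf);
    FileSystem fs2 = FileSystem.get(URI.create("hdfs://namenode:8020/"), conf);
    // fs1 != fs2: each get() builds a fresh instance, so closing one
    // no longer invalidates the other.
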
@@ -239,7 +249,7 @@
   /** Make sure that a path specifies a FileSystem. */
   public Path makeQualified(Path path) {
     checkPath(path);
-    return path.makeQualified(this);
+    return path.makeQualified(this.getUri(), this.getWorkingDirectory());
   }
     
   /** create a file with the provided permission
@@ -358,8 +368,42 @@
     String[] host = { "localhost" };
     return new BlockLocation[] { new BlockLocation(name, host, 0, file.getLen()) };
   }
+ 
+
+  /**
+   * Return an array containing hostnames, offset and size of
+   * portions of the given file.  For a nonexistent
+   * file or region, null is returned.
+   *
+   * This call is most helpful with DFS, where it returns
+   * hostnames of machines that contain the given file.
+   *
+   * This base implementation simply returns an element containing 'localhost'.
+   */
+  public BlockLocation[] getFileBlockLocations(Path p, 
+      long start, long len) throws IOException {
+    if (p == null) {
+      throw new NullPointerException();
+    }
+    FileStatus file = getFileStatus(p);
+    return getFileBlockLocations(file, start, len);
+  }
   
   /**
+   * Return a set of server default configuration values
+   * @return server default configuration values
+   * @throws IOException
+   */
+  public FsServerDefaults getServerDefaults() throws IOException {
+    Configuration conf = getConf();
+    return new FsServerDefaults(getDefaultBlockSize(), 
+        conf.getInt("io.bytes.per.checksum", 512), 
+        64 * 1024, 
+        getDefaultReplication(), 
+        conf.getInt("io.file.buffer.size", 4096));
+  }
+
+  /**
    * Opens an FSDataInputStream at the indicated Path.
    * @param f the file name to open
    * @param bufferSize the size of the buffer to be used.
@@ -536,7 +580,7 @@
    * Opens an FSDataOutputStream at the indicated Path with write-progress
    * reporting.
    * @param f the file name to open.
-   * @param permission
+   * @param permission - applied against umask
    * @param flag determines the semantic of this create.
    * @param bufferSize the size of the buffer to be used.
    * @param replication required block replication for the file.
@@ -550,6 +594,183 @@
       EnumSet<CreateFlag> flag, int bufferSize, short replication, long blockSize,
       Progressable progress) throws IOException ;
   
+  /*
+   * This create has been added to support the FileContext that processes
+   * the permission with umask before calling this method.
+   * This is a temporary method added to support the transition from
+   * FileSystem to FileContext for user applications.
+   */
+  @Deprecated
+  protected FSDataOutputStream primitiveCreate(Path f,
+     FsPermission absolutePermission, EnumSet<CreateFlag> flag, int bufferSize,
+     short replication, long blockSize, Progressable progress,
+     int bytesPerChecksum) throws IOException {
+    
+    // The default implementation assumes that neither permissions nor
+    // bytesPerChecksum matter, so calling the regular create is good enough.
+    // FileSystems that implement permissions should override this.
+    
+    return this.create(f, absolutePermission, flag, bufferSize, replication,
+        blockSize, progress);
+  }
+  
+  
+  /*
+   * This create has been added to support the FileContext that passes
+   * an absolute permission (i.e. umask was already applied).
+   * This is a temporary method added to support the transition from
+   * FileSystem to FileContext for user applications.
+   */
+  @Deprecated
+  protected FSDataOutputStream primitiveCreate(final Path f,
+      final EnumSet<CreateFlag> createFlag,
+      CreateOpts... opts) throws IOException {
+    checkPath(f);
+    int bufferSize = -1;
+    short replication = -1;
+    long blockSize = -1;
+    int bytesPerChecksum = -1;
+    FsPermission permission = null;
+    Progressable progress = null;
+    Boolean createParent = null;
+ 
+    for (CreateOpts iOpt : opts) {
+      if (CreateOpts.BlockSize.class.isInstance(iOpt)) {
+        if (blockSize != -1) {
+          throw new IllegalArgumentException("multiple varargs of same kind");
+        }
+        blockSize = ((CreateOpts.BlockSize) iOpt).getValue();
+      } else if (CreateOpts.BufferSize.class.isInstance(iOpt)) {
+        if (bufferSize != -1) {
+          throw new IllegalArgumentException("multiple varargs of same kind");
+        }
+        bufferSize = ((CreateOpts.BufferSize) iOpt).getValue();
+      } else if (CreateOpts.ReplicationFactor.class.isInstance(iOpt)) {
+        if (replication != -1) {
+          throw new IllegalArgumentException("multiple varargs of same kind");
+        }
+        replication = ((CreateOpts.ReplicationFactor) iOpt).getValue();
+      } else if (CreateOpts.BytesPerChecksum.class.isInstance(iOpt)) {
+        if (bytesPerChecksum != -1) {
+          throw new IllegalArgumentException("multiple varargs of same kind");
+        }
+        bytesPerChecksum = ((CreateOpts.BytesPerChecksum) iOpt).getValue();
+      } else if (CreateOpts.Perms.class.isInstance(iOpt)) {
+        if (permission != null) {
+          throw new IllegalArgumentException("multiple varargs of same kind");
+        }
+        permission = ((CreateOpts.Perms) iOpt).getValue();
+      } else if (CreateOpts.Progress.class.isInstance(iOpt)) {
+        if (progress != null) {
+          throw new IllegalArgumentException("multiple varargs of same kind");
+        }
+        progress = ((CreateOpts.Progress) iOpt).getValue();
+      } else if (CreateOpts.CreateParent.class.isInstance(iOpt)) {
+        if (createParent != null) {
+          throw new IllegalArgumentException("multiple varargs of same kind");
+        }
+        createParent = ((CreateOpts.CreateParent) iOpt).getValue();
+      } else {
+        throw new IllegalArgumentException("Unknown CreateOpts of type " +
+            iOpt.getClass().getName());
+      }
+    }
+    if (blockSize % bytesPerChecksum != 0) {
+      throw new IllegalArgumentException(
+          "blockSize should be a multiple of bytesPerChecksum");
+    }
+    
+    FsServerDefaults ssDef = getServerDefaults();
+    
+    if (blockSize == -1) {
+      blockSize = ssDef.getBlockSize();
+    }
+    if (bufferSize == -1) {
+      bufferSize = ssDef.getFileBufferSize();
+    }
+    if (replication == -1) {
+      replication = ssDef.getReplication();
+    }
+    if (permission == null) {
+      permission = FsPermission.getDefault();
+    }
+    if (createParent == null) {
+      createParent = false;
+    }
+    
+    // The default implementation assumes that neither permissions nor
+    // bytesPerChecksum matter, so calling the regular create is good enough.
+    // FileSystems that implement permissions should override this.
+
+    if (!createParent) { // parent must exist.
+      // since this.create makes parent dirs automatically
+      // we must throw exception if parent does not exist.
+      final FileStatus stat = getFileStatus(f.getParent());
+      if (stat == null) {
+        throw new FileNotFoundException("Missing parent:" + f);
+      }
+      if (!stat.isDir()) {
+        throw new ParentNotDirectoryException("parent is not a dir: " + f);
+      }
+      // parent does exist - go ahead with create of file.
+    }
+    return this.create(f, permission, createFlag, bufferSize, replication,
+        blockSize, progress);
+  }
+  
+
+  /**
+   * This version of the mkdirs method assumes that the permission is absolute.
+   * It has been added to support the FileContext that processes the permission
+   * with umask before calling this method.
+   * This is a temporary method added to support the transition from FileSystem
+   * to FileContext for user applications.
+   */
+  @Deprecated
+  protected boolean primitiveMkdir(Path f, FsPermission absolutePermission)
+    throws IOException {
+    // Default impl is to assume that permissions do not matter and hence
+    // calling the regular mkdirs is good enough.
+    // FSs that implement permissions should override this.
+   return this.mkdirs(f, absolutePermission);
+  }
+
+
+  /**
+   * This version of the mkdirs method assumes that the permission is absolute.
+   * It has been added to support the FileContext that processes the permission
+   * with umask before calling this method.
+   * This is a temporary method added to support the transition from FileSystem
+   * to FileContext for user applications.
+   */
+  @Deprecated
+  protected void primitiveMkdir(Path f, FsPermission absolutePermission, 
+                    boolean createParent)
+    throws IOException {
+    
+    if (!createParent) { // parent must exist.
+      // since this.mkdirs makes parent dirs automatically
+      // we must throw exception if parent does not exist.
+      final FileStatus stat = getFileStatus(f.getParent());
+      if (stat == null) {
+        throw new FileNotFoundException("Missing parent:" + f);
+      }
+      if (!stat.isDir()) {
+        throw new ParentNotDirectoryException("parent is not a dir");
+      }
+      // parent does exist - go ahead with mkdir of leaf
+    }
+    // Default impl is to assume that permissions do not matter and hence
+    // calling the regular mkdirs is good enough.
+    // FSs that implement permissions should override this.
+    if (!this.mkdirs(f, absolutePermission)) {
+      throw new IOException("mkdir of "+ f + " failed");
+    }
+  }
+
 
   /**
    * Creates the given Path as a brand-new zero-length file.  If
@@ -611,9 +832,98 @@
   /**
    * Renames Path src to Path dst.  Can take place on local fs
    * or remote DFS.
+   * @throws IOException on failure
+   * @return true if rename is successful
    */
   public abstract boolean rename(Path src, Path dst) throws IOException;
-    
+
+  /**
+   * Renames Path src to Path dst
+   * <ul>
+   * <li>Fails if src is a file and dst is a directory.
+   * <li>Fails if src is a directory and dst is a file.
+   * <li>Fails if the parent of dst does not exist or is a file.
+   * </ul>
+   * <p>
+   * If OVERWRITE option is not passed as an argument, rename fails
+   * if the dst already exists.
+   * <p>
+   * If OVERWRITE option is passed as an argument, rename overwrites
+   * the dst if it is a file or an empty directory. Rename fails if dst is
+   * a non-empty directory.
+   * <p>
+   * Note that atomicity of rename is dependent on the file system
+   * implementation. Please refer to the file system documentation for
+   * details. This default implementation is not atomic.
+   * <p>
+   * This method is deprecated since it is a temporary method added to 
+   * support the transition from FileSystem to FileContext for user 
+   * applications.
+   * 
+   * @param src path to be renamed
+   * @param dst new path after rename
+   * @throws IOException on failure
+   */
+  @Deprecated
+  protected void rename(final Path src, final Path dst,
+      final Rename... options) throws IOException {
+    // Default implementation
+    final FileStatus srcStatus = getFileStatus(src);
+    if (srcStatus == null) {
+      throw new FileNotFoundException("rename source " + src + " not found.");
+    }
+
+    boolean overwrite = false;
+    if (null != options) {
+      for (Rename option : options) {
+        if (option == Rename.OVERWRITE) {
+          overwrite = true;
+        }
+      }
+    }
+
+    FileStatus dstStatus;
+    try {
+      dstStatus = getFileStatus(dst);
+    } catch (IOException e) {
+      dstStatus = null;
+    }
+    if (dstStatus != null) {
+      if (srcStatus.isDir() != dstStatus.isDir()) {
+        throw new IOException("Source " + src + " Destination " + dst
+            + " both should be either file or directory");
+      }
+      if (!overwrite) {
+        throw new FileAlreadyExistsException("rename destination " + dst
+            + " already exists.");
+      }
+      // Delete the destination that is a file or an empty directory
+      if (dstStatus.isDir()) {
+        FileStatus[] list = listStatus(dst);
+        if (list != null && list.length != 0) {
+          throw new IOException(
+              "rename cannot overwrite non empty destination directory " + dst);
+        }
+      }
+      delete(dst, false);
+    } else {
+      final Path parent = dst.getParent();
+      final FileStatus parentStatus = getFileStatus(parent);
+      if (parentStatus == null) {
+        throw new FileNotFoundException("rename destination parent " + parent
+            + " not found.");
+      }
+      if (!parentStatus.isDir()) {
+        throw new ParentNotDirectoryException("rename destination parent " + parent
+            + " is a file.");
+      }
+    }
+    if (!rename(src, dst)) {
+      throw new IOException("rename from " + src + " to " + dst + " failed.");
+    }
+  }
+  
   /** Delete a file.
    *
    * @param f the path to delete.
@@ -1123,8 +1433,8 @@
    * The default implementation returns "/user/$USER/".
    */
   public Path getHomeDirectory() {
-    return new Path("/user/"+System.getProperty("user.name"))
-      .makeQualified(this);
+    return this.makeQualified(
+        new Path("/user/"+System.getProperty("user.name")));
   }
 
 
@@ -1141,6 +1451,23 @@
    * @return the directory pathname
    */
   public abstract Path getWorkingDirectory();
+  
+  
+  /**
+   * Note: with the new FileContext class, getWorkingDirectory()
+   * will be removed.
+   * The working directory is implemented in FileContext.
+   * 
+   * Some file systems like LocalFileSystem have an initial workingDir
+   * that we use as the starting workingDir. For other file systems
+   * like HDFS there is no built-in notion of an initial workingDir.
+   * 
+   * @return the built-in initial workingDir if the file system has one;
+   * otherwise null.
+   */
+  protected Path getInitialWorkingDirectory() {
+    return null;
+  }
 
   /**
    * Call {@link #mkdirs(Path, FsPermission)} with default permission.
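
The CreateOpts varargs form accepts at most one option of each kind and fills
the rest from getServerDefaults(); a hypothetical call from a subclass (path,
flags and values are illustrative):

    FSDataOutputStream out = primitiveCreate(
        new Path("/user/alice/data"),
        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
        CreateOpts.blockSize(128L * 1024 * 1024),
        CreateOpts.repFac((short) 3));
    // Unspecified opts (buffer size, permission, ...) take server defaults;
    // passing the same kind twice throws IllegalArgumentException.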

Modified: hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/FileUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/FileUtil.java?rev=885142&r1=885141&r2=885142&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/FileUtil.java (original)
+++ hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/FileUtil.java Sat Nov 28 19:53:33 2009
@@ -628,14 +628,18 @@
      * Retrieves the number of links to the specified file.
      */
     public static int getLinkCount(File fileName) throws IOException {
+      if (!fileName.exists()) {
+        throw new FileNotFoundException(fileName + " not found.");
+      }
+
       int len = getLinkCountCommand.length;
       String[] cmd = new String[len + 1];
       for (int i = 0; i < len; i++) {
         cmd[i] = getLinkCountCommand[i];
       }
       cmd[len] = fileName.toString();
-      String inpMsg = "";
-      String errMsg = "";
+      String inpMsg = null;
+      String errMsg = null;
       int exitValue = -1;
       BufferedReader in = null;
       BufferedReader err = null;
@@ -647,14 +651,11 @@
         in = new BufferedReader(new InputStreamReader(
                                     process.getInputStream()));
         inpMsg = in.readLine();
-        if (inpMsg == null)  inpMsg = "";
-        
         err = new BufferedReader(new InputStreamReader(
                                      process.getErrorStream()));
         errMsg = err.readLine();
-        if (errMsg == null)  errMsg = "";
-        if (exitValue != 0) {
-          throw new IOException(inpMsg + errMsg);
+        if (inpMsg == null || exitValue != 0) {
+          throw createIOException(fileName, inpMsg, errMsg, exitValue, null);
         }
         if (getOSType() == OSType.OS_TYPE_SOLARIS) {
           String[] result = inpMsg.split("\\s+");
@@ -663,13 +664,9 @@
           return Integer.parseInt(inpMsg);
         }
       } catch (NumberFormatException e) {
-        throw new IOException(StringUtils.stringifyException(e) + 
-                              inpMsg + errMsg +
-                              " on file:" + fileName);
+        throw createIOException(fileName, inpMsg, errMsg, exitValue, e);
       } catch (InterruptedException e) {
-        throw new IOException(StringUtils.stringifyException(e) + 
-                              inpMsg + errMsg +
-                              " on file:" + fileName);
+        throw createIOException(fileName, inpMsg, errMsg, exitValue, e);
       } finally {
         process.destroy();
         if (in != null) in.close();
@@ -678,6 +675,16 @@
     }
   }
 
+  /** Create an IOException for failing to get link count. */
+  static private IOException createIOException(File f, String message,
+      String error, int exitvalue, Exception cause) {
+    final String s = "Failed to get link count on file " + f
+        + ": message=" + message
+        + "; error=" + error
+        + "; exit value=" + exitvalue;
+    return cause == null? new IOException(s): new IOException(s, cause);
+  }
+
   /**
    * Create a soft link between a src and destination
    * only on a local disk. HDFS does not support this

Modified: hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/FilterFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/FilterFileSystem.java?rev=885142&r1=885141&r2=885142&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/FilterFileSystem.java (original)
+++ hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/FilterFileSystem.java Sat Nov 28 19:53:33 2009
@@ -167,7 +167,11 @@
   public Path getWorkingDirectory() {
     return fs.getWorkingDirectory();
   }
-
+  
+  protected Path getInitialWorkingDirectory() {
+    return fs.getInitialWorkingDirectory();
+  }
+  
   /** {@inheritDoc} */
   @Override
   public FsStatus getStatus(Path p) throws IOException {
@@ -276,4 +280,19 @@
       ) throws IOException {
     fs.setPermission(p, permission);
   }
+
+  @Override
+  protected FSDataOutputStream primitiveCreate(Path f,
+      FsPermission absolutePermission, EnumSet<CreateFlag> flag,
+      int bufferSize, short replication, long blockSize, Progressable progress, int bytesPerChecksum)
+      throws IOException {
+    return fs.primitiveCreate(f, absolutePermission, flag,
+        bufferSize, replication, blockSize, progress, bytesPerChecksum);
+  }
+
+  @Override
+  protected boolean primitiveMkdir(Path f, FsPermission absolutePermission)
+      throws IOException {
+    return fs.primitiveMkdir(f, absolutePermission);
+  }
 }

Modified: hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/FsShell.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/FsShell.java?rev=885142&r1=885141&r2=885142&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/FsShell.java (original)
+++ hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/FsShell.java Sat Nov 28 19:53:33 2009
@@ -367,11 +367,13 @@
     DataOutputBuffer outbuf;
 
     public TextRecordInputStream(FileStatus f) throws IOException {
-      r = new SequenceFile.Reader(fs, f.getPath(), getConf());
-      key = ReflectionUtils.newInstance(r.getKeyClass().asSubclass(WritableComparable.class),
-                                        getConf());
-      val = ReflectionUtils.newInstance(r.getValueClass().asSubclass(Writable.class),
-                                        getConf());
+      final Path fpath = f.getPath();
+      final Configuration lconf = getConf();
+      r = new SequenceFile.Reader(fpath.getFileSystem(lconf), fpath, lconf);
+      key = ReflectionUtils.newInstance(
+          r.getKeyClass().asSubclass(WritableComparable.class), lconf);
+      val = ReflectionUtils.newInstance(
+          r.getValueClass().asSubclass(Writable.class), lconf);
       inbuf = new DataInputBuffer();
       outbuf = new DataOutputBuffer();
     }
@@ -1130,10 +1132,20 @@
     }
     
     if(!skipTrash) {
-      Trash trashTmp = new Trash(srcFs, getConf());
-      if (trashTmp.moveToTrash(src)) {
-        System.out.println("Moved to trash: " + src);
-        return;
+      try {
+        Trash trashTmp = new Trash(srcFs, getConf());
+        if (trashTmp.moveToTrash(src)) {
+          System.out.println("Moved to trash: " + src);
+          return;
+        }
+      } catch (IOException e) {
+        Exception cause = (Exception) e.getCause();
+        String msg = "";
+        if(cause != null) {
+          msg = cause.getLocalizedMessage();
+        }
+        System.err.println("Problem with Trash: " + msg
+            + ". Consider using -skipTrash option");
+        throw e;
       }
     }
     
@@ -1327,7 +1339,7 @@
     String summary = "hadoop fs is the command to execute fs commands. " +
       "The full syntax is: \n\n" +
       "hadoop fs [-fs <local | file system URI>] [-conf <configuration file>]\n\t" +
-      "[-D <property=value>] [-ls <path>] [-lsr <path>] [-df [<path>]] [-du <path>]\n\t" + 
+      "[-D <property=value>] [-ls <path>] [-lsr <path>] [-df [<path>]] [-du [-s] [-h] <path>]\n\t" +
       "[-dus <path>] [-mv <src> <dst>] [-cp <src> <dst>] [-rm [-skipTrash] <src>]\n\t" + 
       "[-rmr [-skipTrash] <src>] [-put <localsrc> ... <dst>] [-copyFromLocal <localsrc> ... <dst>]\n\t" +
       "[-moveFromLocal <localsrc> ... <dst>] [" + 
@@ -1377,17 +1389,20 @@
       "\t\tIf the filesystem has multiple partitions, and no path to a particular partition\n"+
       "\t\tis specified, then the status of the root partitions will be shown.\n";
 
-    String du = "-du <path>: \tShow the amount of space, in bytes, used by the files that \n" +
-      "\t\tmatch the specified file pattern.  Equivalent to the unix\n" + 
-      "\t\tcommand \"du -sb <path>/*\" in case of a directory, \n" +
-      "\t\tand to \"du -b <path>\" in case of a file.\n" +
+    String du = "-du [-s] [-h] <path>: \tShow the amount of space, in bytes, used by the files that \n" +
+      "\t\tmatch the specified file pattern. The following flags are optional:\n" +
+      "\t\t  -s   Rather than showing the size of each individual file that\n" +
+      "\t\t       matches the pattern, shows the total (summary) size.\n" +
+      "\t\t  -h   Formats the sizes of files in a human-readable fashion\n" +
+      "\t\t       rather than a number of bytes.\n" +
+      "\n" + 
+      "\t\tNote that, even without the -s option, this only shows size summaries\n" +
+      "\t\tone level deep into a directory.\n" +
       "\t\tThe output is in the form \n" + 
-      "\t\t\tname(full path) size (in bytes)\n"; 
+      "\t\t\tsize\tname(full path)\n"; 
 
     String dus = "-dus <path>: \tShow the amount of space, in bytes, used by the files that \n" +
-      "\t\tmatch the specified file pattern.  Equivalent to the unix\n" + 
-      "\t\tcommand \"du -sb\"  The output is in the form \n" + 
-      "\t\t\tname(full path) size (in bytes)\n"; 
+      "\t\tmatch the specified file pattern. This is equivalent to -du -s above.\n";
     
     String mv = "-mv <src> <dst>:   Move files that match the specified file pattern <src>\n" +
       "\t\tto a destination <dst>.  When moving multiple files, the \n" +
@@ -1556,7 +1571,7 @@
       System.out.println(chown);
     } else if ("chgrp".equals(cmd)) {
       System.out.println(chgrp);
-    } else if (Count.matches(cmd)) {
+    } else if (Count.NAME.equals(cmd)) {
       System.out.println(Count.DESCRIPTION);
     } else if ("help".equals(cmd)) {
       System.out.println(help);
@@ -1732,7 +1747,7 @@
       System.err.println("           [-ls <path>]");
       System.err.println("           [-lsr <path>]");
       System.err.println("           [-df [<path>]]");
-      System.err.println("           [-du <path>]");
+      System.err.println("           [-du [-s] [-h] <path>]");
       System.err.println("           [-dus <path>]");
       System.err.println("           [" + Count.USAGE + "]");
       System.err.println("           [-mv <src> <dst>]");
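
The extended du now mirrors the Unix flags; a sketch of invoking it
programmatically (the path is illustrative):

    // Equivalent to: hadoop fs -du -s -h /user/alice
    int rc = ToolRunner.run(new FsShell(),
        new String[] {"-du", "-s", "-h", "/user/alice"});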

Modified: hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/FsShellPermissions.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/FsShellPermissions.java?rev=885142&r1=885141&r2=885142&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/FsShellPermissions.java (original)
+++ hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/FsShellPermissions.java Sat Nov 28 19:53:33 2009
@@ -23,6 +23,7 @@
 
 import org.apache.hadoop.fs.FsShell.CmdHandler;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.ChmodParser;
 
 
 /**
@@ -39,168 +40,33 @@
    * also enforce octal mode specifications of either 3 digits without a sticky
    * bit setting or four digits with a sticky bit setting.
    */
-  private static Pattern chmodNormalPattern =
-   Pattern.compile("\\G\\s*([ugoa]*)([+=-]+)([rwxXt]+)([,\\s]*)\\s*");
-  private static Pattern chmodOctalPattern =
-            Pattern.compile("^\\s*[+]?([01]?)([0-7]{3})\\s*$");
 
   static String CHMOD_USAGE =
                             "-chmod [-R] <MODE[,MODE]... | OCTALMODE> PATH...";
 
+  private static ChmodParser pp;
+  
   private static class ChmodHandler extends CmdHandler {
 
-    private short userMode;
-    private short groupMode;
-    private short othersMode;
-    private short stickyMode;
-    private char userType = '+';
-    private char groupType = '+';
-    private char othersType = '+';
-    private char stickyBitType = '+';
-
-    private void applyNormalPattern(String modeStr, Matcher matcher)
-                                    throws IOException {
-      // Are there multiple permissions stored in one chmod?
-      boolean commaSeperated = false;
-
-      for(int i=0; i < 1 || matcher.end() < modeStr.length(); i++) {
-        if (i>0 && (!commaSeperated || !matcher.find())) {
-          patternError(modeStr);
-        }
-
-        /* groups : 1 : [ugoa]*
-         *          2 : [+-=]
-         *          3 : [rwxXt]+
-         *          4 : [,\s]*
-         */
-
-        String str = matcher.group(2);
-        char type = str.charAt(str.length() - 1);
-
-        boolean user, group, others, stickyBit;
-        user = group = others = stickyBit = false;
-
-        for(char c : matcher.group(1).toCharArray()) {
-          switch (c) {
-          case 'u' : user = true; break;
-          case 'g' : group = true; break;
-          case 'o' : others = true; break;
-          case 'a' : break;
-          default  : throw new RuntimeException("Unexpected");          
-          }
-        }
-
-        if (!(user || group || others)) { // same as specifying 'a'
-          user = group = others = true;
-        }
-
-        short mode = 0;
-
-        for(char c : matcher.group(3).toCharArray()) {
-          switch (c) {
-          case 'r' : mode |= 4; break;
-          case 'w' : mode |= 2; break;
-          case 'x' : mode |= 1; break;
-          case 'X' : mode |= 8; break;
-          case 't' : stickyBit = true; break;
-          default  : throw new RuntimeException("Unexpected");
-          }
-        }
-
-        if ( user ) {
-          userMode = mode;
-          userType = type;
-        }
-
-        if ( group ) {
-          groupMode = mode;
-          groupType = type;
-        }
-
-        if ( others ) {
-          othersMode = mode;
-          othersType = type;
-          
-          stickyMode = (short) (stickyBit ? 1 : 0);
-          stickyBitType = type;
-        }
-
-        commaSeperated = matcher.group(4).contains(",");
-      }
-    }
-
-    private void applyOctalPattern(String modeStr, Matcher matcher) {
-      userType = groupType = othersType = '=';
-
-      // Check if sticky bit is specified
-      String sb = matcher.group(1);
-      if(!sb.isEmpty()) {
-        stickyMode = Short.valueOf(sb.substring(0, 1));
-        stickyBitType = '=';
-      }
-
-      String str = matcher.group(2);
-      userMode = Short.valueOf(str.substring(0, 1));
-      groupMode = Short.valueOf(str.substring(1, 2));
-      othersMode = Short.valueOf(str.substring(2, 3));      
-    }
-
-    private void patternError(String mode) throws IOException {
-      throw new IOException("chmod : mode '" + mode + 
-                            "' does not match the expected pattern.");      
-    }
-
     ChmodHandler(FileSystem fs, String modeStr) throws IOException {
       super("chmod", fs);
-      Matcher matcher = null;
-
-      if ((matcher = chmodNormalPattern.matcher(modeStr)).find()) {
-        applyNormalPattern(modeStr, matcher);
-      } else if ((matcher = chmodOctalPattern.matcher(modeStr)).matches()) {
-        applyOctalPattern(modeStr, matcher);
-      } else {
-        patternError(modeStr);
+      try {
+        pp = new ChmodParser(modeStr);
+      } catch(IllegalArgumentException iea) {
+        patternError(iea.getMessage());
       }
     }
 
-    private int applyChmod(char type, int mode, int existing, boolean exeOk) {
-      boolean capX = false;
-
-      if ((mode&8) != 0) { // convert X to x;
-        capX = true;
-        mode &= ~8;
-        mode |= 1;
-      }
-
-      switch (type) {
-      case '+' : mode = mode | existing; break;
-      case '-' : mode = (~mode) & existing; break;
-      case '=' : break;
-      default  : throw new RuntimeException("Unexpected");      
-      }
-
-      // if X is specified add 'x' only if exeOk or x was already set.
-      if (capX && !exeOk && (mode&1) != 0 && (existing&1) == 0) {
-        mode &= ~1; // remove x
-      }
-
-      return mode;
+    private void patternError(String mode) throws IOException {
+      throw new IOException("chmod : mode '" + mode +
+          "' does not match the expected pattern.");
     }
-
+    
     @Override
     public void run(FileStatus file, FileSystem srcFs) throws IOException {
-      FsPermission perms = file.getPermission();
-      int existing = perms.toShort();
-      boolean exeOk = file.isDir() || (existing & 0111) != 0;
-      int newperms = ( applyChmod(stickyBitType, stickyMode,
-                             (existing>>>9), false) << 9 |
-                       applyChmod(userType, userMode,
-                             (existing>>>6)&7, exeOk) << 6 |
-                       applyChmod(groupType, groupMode,
-                             (existing>>>3)&7, exeOk) << 3 |
-                       applyChmod(othersType, othersMode, existing&7, exeOk));
+      int newperms = pp.applyNewPermission(file);
 
-      if (existing != newperms) {
+      if (file.getPermission().toShort() != newperms) {
         try {
           srcFs.setPermission(file.getPath(), 
                                 new FsPermission((short)newperms));
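
With the parsing factored out, ChmodParser can also be used directly; a sketch
(the mode string and path are illustrative, fs is an initialized FileSystem):

    ChmodParser parser = new ChmodParser("u+rwx,go-w"); // IllegalArgumentException on bad modes
    FileStatus stat = fs.getFileStatus(new Path("/user/alice/file"));
    short newPerms = parser.applyNewPermission(stat);
    if (stat.getPermission().toShort() != newPerms) {
      fs.setPermission(stat.getPath(), new FsPermission(newPerms));
    }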

Modified: hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/HarFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/HarFileSystem.java?rev=885142&r1=885141&r2=885142&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/HarFileSystem.java (original)
+++ hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/HarFileSystem.java Sat Nov 28 19:53:33 2009
@@ -302,19 +302,8 @@
     }
 
     URI tmpURI = fsPath.toUri();
-    fsPath = new Path(tmpURI.getPath());
     //change this to Har uri 
-    URI tmp = null;
-    try {
-      tmp = new URI(uri.getScheme(), harAuth, fsPath.toString(),
-                    tmpURI.getQuery(), tmpURI.getFragment());
-    } catch(URISyntaxException ue) {
-      LOG.error("Error in URI ", ue);
-    }
-    if (tmp != null) {
-      return new Path(tmp.toString());
-    }
-    return null;
+    return new Path(uri.getScheme(), harAuth, tmpURI.getPath());
   }
   
   /**
@@ -426,12 +415,13 @@
       // do nothing just a read.
     }
     FSDataInputStream aIn = fs.open(archiveIndex);
-    LineReader aLin = new LineReader(aIn, getConf());
+    LineReader aLin;
     String retStr = null;
     // now start reading the real index file
-     read = 0;
     for (Store s: stores) {
+      read = 0;
       aIn.seek(s.begin);
+      aLin = new LineReader(aIn, getConf());
       while (read + s.begin < s.end) {
         int tmp = aLin.readLine(line);
         read += tmp;

Modified: hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/Path.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/Path.java?rev=885142&r1=885141&r2=885142&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/Path.java (original)
+++ hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/Path.java Sat Nov 28 19:53:33 2009
@@ -63,13 +63,13 @@
     if (!(parentPath.equals("/") || parentPath.equals("")))
       try {
         parentUri = new URI(parentUri.getScheme(), parentUri.getAuthority(),
-                            parentUri.getPath()+"/", null, null);
+                      parentUri.getPath()+"/", null, parentUri.getFragment());
       } catch (URISyntaxException e) {
         throw new IllegalArgumentException(e);
       }
     URI resolved = parentUri.resolve(child.uri);
     initialize(resolved.getScheme(), resolved.getAuthority(),
-               normalizePath(resolved.getPath()));
+               normalizePath(resolved.getPath()), resolved.getFragment());
   }
 
   private void checkPathArg( String path ) {
@@ -123,18 +123,26 @@
     // uri path is the rest of the string -- query & fragment not supported
     String path = pathString.substring(start, pathString.length());
 
-    initialize(scheme, authority, path);
+    initialize(scheme, authority, path, null);
   }
 
+  /**
+   * Construct a path from a URI
+   */
+  public Path(URI aUri) {
+    uri = aUri;
+  }
+  
   /** Construct a Path from components. */
   public Path(String scheme, String authority, String path) {
     checkPathArg( path );
-    initialize(scheme, authority, path);
+    initialize(scheme, authority, path, null);
   }
 
-  private void initialize(String scheme, String authority, String path) {
+  private void initialize(String scheme, String authority, String path,
+      String fragment) {
     try {
-      this.uri = new URI(scheme, authority, normalizePath(path), null, null)
+      this.uri = new URI(scheme, authority, normalizePath(path), null, fragment)
         .normalize();
     } catch (URISyntaxException e) {
       throw new IllegalArgumentException(e);
@@ -175,10 +183,23 @@
     return FileSystem.get(this.toUri(), conf);
   }
 
-  /** True if the directory of this path is absolute. */
-  public boolean isAbsolute() {
+  /**
+   *  True if the path component (i.e. directory) of this URI is absolute.
+   */
+  public boolean isUriPathAbsolute() {
     int start = hasWindowsDrive(uri.getPath(), true) ? 3 : 0;
     return uri.getPath().startsWith(SEPARATOR, start);
+   }
+  
+  /**
+   * True if the directory of this path is absolute.
+   * 
+   * There is some ambiguity here. An absolute path is a slash-relative
+   * name without a scheme or an authority.
+   * So either this method was incorrectly named or its
+   * implementation is incorrect.
+   */
+  public boolean isAbsolute() {
+    return isUriPathAbsolute();
   }
 
   /** Returns the final component of this path.*/
@@ -233,6 +254,10 @@
         path = path.substring(1);                 // remove slash before drive
       buffer.append(path);
     }
+    if (uri.getFragment() != null) {
+      buffer.append("#");
+      buffer.append(uri.getFragment());
+    }
     return buffer.toString();
   }
 
@@ -265,34 +290,54 @@
     return depth;
   }
 
-  /** Returns a qualified path object. */
+  
+  /**
+   * Returns a qualified path object.
+   * 
+   * @deprecated use {@link #makeQualified(URI, Path)} instead
+   */
+  @Deprecated
   public Path makeQualified(FileSystem fs) {
+    return makeQualified(fs.getUri(), fs.getWorkingDirectory());
+  }
+  
+  
+  /** Returns a qualified path object. */
+  public Path makeQualified(URI defaultUri, Path workingDir) {
     Path path = this;
     if (!isAbsolute()) {
-      path = new Path(fs.getWorkingDirectory(), this);
+      path = new Path(workingDir, this);
     }
 
     URI pathUri = path.toUri();
-    URI fsUri = fs.getUri();
       
     String scheme = pathUri.getScheme();
     String authority = pathUri.getAuthority();
+    String fragment = pathUri.getFragment();
 
     if (scheme != null &&
-        (authority != null || fsUri.getAuthority() == null))
+        (authority != null || defaultUri.getAuthority() == null))
       return path;
 
     if (scheme == null) {
-      scheme = fsUri.getScheme();
+      scheme = defaultUri.getScheme();
     }
 
     if (authority == null) {
-      authority = fsUri.getAuthority();
+      authority = defaultUri.getAuthority();
       if (authority == null) {
         authority = "";
       }
     }
-
-    return new Path(scheme+":"+"//"+authority + pathUri.getPath());
+    
+    URI newUri = null;
+    try {
+      newUri = new URI(scheme, authority , 
+        normalizePath(pathUri.getPath()), null, fragment);
+    } catch (URISyntaxException e) {
+      throw new IllegalArgumentException(e);
+    }
+    return new Path(newUri);
   }
 }
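
A sketch of the new two-argument qualification (the URI and working directory
are illustrative):

    URI defaultUri = URI.create("hdfs://namenode:8020");
    Path workingDir = new Path("/user/alice");
    Path q = new Path("reports/jan").makeQualified(defaultUri, workingDir);
    // q.toString() -> "hdfs://namenode:8020/user/alice/reports/jan"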

Modified: hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/RawLocalFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/RawLocalFileSystem.java?rev=885142&r1=885141&r2=885142&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/RawLocalFileSystem.java (original)
+++ hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/RawLocalFileSystem.java Sat Nov 28 19:53:33 2009
@@ -46,7 +46,7 @@
   private Path workingDir;
   
   public RawLocalFileSystem() {
-    workingDir = new Path(System.getProperty("user.dir")).makeQualified(this);
+    workingDir = getInitialWorkingDirectory();
   }
   
   /** Convert a path to a File. */
@@ -96,10 +96,10 @@
   }
 
   /*******************************************************
-   * For open()'s FSInputStream
+   * For open()'s FSInputStream.
    *******************************************************/
   class LocalFSFileInputStream extends FSInputStream {
-    FileInputStream fis;
+    private FileInputStream fis;
     private long position;
 
     public LocalFSFileInputStream(Path f) throws IOException {
@@ -180,8 +180,8 @@
   /*********************************************************
    * For create()'s FSOutputStream.
    *********************************************************/
-  class LocalFSFileOutputStream extends OutputStream implements Syncable {
-    FileOutputStream fos;
+  class LocalFSFileOutputStream extends OutputStream {
+    private FileOutputStream fos;
     
     private LocalFSFileOutputStream(Path f, boolean append) throws IOException {
       this.fos = new FileOutputStream(pathToFile(f), append);
@@ -207,13 +207,8 @@
         throw new FSError(e);                // assume native fs error
       }
     }
-
-    /** {@inheritDoc} */
-    public void sync() throws IOException {
-      fos.getFD().sync();      
-    }
   }
-  
+
   /** {@inheritDoc} */
   public FSDataOutputStream append(Path f, int bufferSize,
       Progressable progress) throws IOException {
@@ -229,7 +224,7 @@
 
   /** {@inheritDoc} */
   public FSDataOutputStream create(Path f, boolean overwrite, int bufferSize,
-                                   short replication, long blockSize, Progressable progress)
+    short replication, long blockSize, Progressable progress)
     throws IOException {
     if (exists(f) && !overwrite) {
       throw new IOException("File already exists:"+f);
@@ -245,23 +240,38 @@
   /** {@inheritDoc} */
   @Override
   public FSDataOutputStream create(Path f, FsPermission permission,
-      EnumSet<CreateFlag> flag, int bufferSize, short replication, long blockSize,
-      Progressable progress) throws IOException {
+    EnumSet<CreateFlag> flag, int bufferSize, short replication, long blockSize,
+    Progressable progress) throws IOException {
+    return primitiveCreate(f,
+        permission.applyUMask(FsPermission.getUMask(getConf())), flag,
+        bufferSize, replication, blockSize, progress, -1);
     
-      if(flag.contains(CreateFlag.APPEND)){
-        if (!exists(f)){
-          if(flag.contains(CreateFlag.CREATE))
-            return create(f, false, bufferSize, replication, blockSize, progress);
+  }
+  
+
+  @Override
+  protected FSDataOutputStream primitiveCreate(Path f,
+      FsPermission absolutePermission, EnumSet<CreateFlag> flag,
+      int bufferSize, short replication, long blockSize, Progressable progress,
+      int bytesPerChecksum) throws IOException {
+    
+    if(flag.contains(CreateFlag.APPEND)){
+      if (!exists(f)){
+        if(flag.contains(CreateFlag.CREATE)) {
+          return create(f, false, bufferSize, replication, blockSize, null);
         }
-        return append(f, bufferSize, progress);
+      }
+      return append(f, bufferSize, null);
     }
-   
-    FSDataOutputStream out = create(f,
-        flag.contains(CreateFlag.OVERWRITE), bufferSize, replication, blockSize, progress);
-    setPermission(f, permission);
+ 
+    FSDataOutputStream out = create(f, flag.contains(CreateFlag.OVERWRITE),
+                                 bufferSize, replication, blockSize, progress);
+    setPermission(f, absolutePermission);
     return out;
   }
-  
+
   public boolean rename(Path src, Path dst) throws IOException {
     if (pathToFile(src).renameTo(pathToFile(dst))) {
       return true;
@@ -289,7 +299,7 @@
     }
     if (localf.isFile()) {
       return new FileStatus[] {
-          new RawLocalFileStatus(localf, getDefaultBlockSize(), this) };
+        new RawLocalFileStatus(localf, getDefaultBlockSize(), this) };
     }
 
     String[] names = localf.list();
@@ -308,8 +318,18 @@
    * treat existence as an error.
    */
   public boolean mkdirs(Path f) throws IOException {
+    if(f == null) {
+      throw new IllegalArgumentException("mkdirs path arg is null");
+    }
     Path parent = f.getParent();
     File p2f = pathToFile(f);
+    if(parent != null) {
+      File parent2f = pathToFile(parent);
+      if(parent2f != null && parent2f.exists() && !parent2f.isDirectory()) {
+        throw new FileAlreadyExistsException("Parent path is not a directory: " 
+            + parent);
+      }
+    }
     return (parent == null || mkdirs(parent)) &&
       (p2f.mkdir() || p2f.isDirectory());
   }
@@ -318,13 +338,25 @@
   @Override
   public boolean mkdirs(Path f, FsPermission permission) throws IOException {
     boolean b = mkdirs(f);
-    setPermission(f, permission);
+    if(b) {
+      setPermission(f, permission);
+    }
+    return b;
+  }
+  
+
+  @Override
+  protected boolean primitiveMkdir(Path f, FsPermission absolutePermission)
+    throws IOException {
+    boolean b = mkdirs(f);
+    setPermission(f, absolutePermission);
     return b;
   }
   
+  
   @Override
   public Path getHomeDirectory() {
-    return new Path(System.getProperty("user.home")).makeQualified(this);
+    return this.makeQualified(new Path(System.getProperty("user.home")));
   }
 
   /**
@@ -339,6 +371,11 @@
   public Path getWorkingDirectory() {
     return workingDir;
   }
+  
+  @Override
+  protected Path getInitialWorkingDirectory() {
+    return this.makeQualified(new Path(System.getProperty("user.dir")));
+  }
 
   /** {@inheritDoc} */
   @Override
@@ -380,7 +417,7 @@
     if (path.exists()) {
       return new RawLocalFileStatus(pathToFile(f), getDefaultBlockSize(), this);
     } else {
-      throw new FileNotFoundException( "File " + f + " does not exist.");
+      throw new FileNotFoundException("File " + f + " does not exist.");
     }
   }
 
@@ -395,7 +432,7 @@
     
     RawLocalFileStatus(File f, long defaultBlockSize, FileSystem fs) {
       super(f.length(), f.isDirectory(), 1, defaultBlockSize,
-            f.lastModified(), new Path(f.getPath()).makeQualified(fs));
+            f.lastModified(), fs.makeQualified(new Path(f.getPath())));
     }
     
     @Override
@@ -471,8 +508,8 @@
    * Use the command chown to set owner.
    */
   @Override
-  public void setOwner(Path p, String username, String groupname
-      ) throws IOException {
+  public void setOwner(Path p, String username, String groupname)
+    throws IOException {
     if (username == null && groupname == null) {
       throw new IOException("username == null && groupname == null");
     }
@@ -490,8 +527,8 @@
    * Use the command chmod to set permission.
    */
   @Override
-  public void setPermission(Path p, FsPermission permission
-      ) throws IOException {
+  public void setPermission(Path p, FsPermission permission)
+    throws IOException {
     execCommand(pathToFile(p), Shell.SET_PERMISSION_COMMAND,
         String.format("%05o", permission.toShort()));
   }
@@ -503,4 +540,5 @@
     String output = Shell.execCommand(args);
     return output;
   }
+
 }
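
The new parent check makes the local mkdirs fail fast instead of quietly
returning false; a sketch (the paths are illustrative):

    FileSystem lfs = FileSystem.getLocal(new Configuration());
    Path file = new Path("/tmp/afile");
    lfs.create(file, true).close();           // an ordinary file
    try {
      lfs.mkdirs(new Path(file, "child"));    // parent exists but is a file
    } catch (IOException e) {
      // FileAlreadyExistsException: "Parent path is not a directory: /tmp/afile"
    }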

Modified: hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/Syncable.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/Syncable.java?rev=885142&r1=885141&r2=885142&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/Syncable.java (original)
+++ hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/Syncable.java Sat Nov 28 19:53:33 2009
@@ -20,11 +20,23 @@
 
 import java.io.IOException;
 
-/** This interface declare the sync() operation. */
+/** This interface declares the flush and sync operations. */
 public interface Syncable {
   /**
-   * Synchronize all buffer with the underlying devices.
-   * @throws IOException
+   * @deprecated As of HADOOP 0.21.0, replaced by hflush
+   * @see #hflush()
    */
-  public void sync() throws IOException;
+  @Deprecated
+  public void sync() throws IOException;
+  
+  /** Flush out the data in the client's user buffer. After the return of
+   * this call, new readers will see the data.
+   * @throws IOException if any error occurs
+   */
+  public void hflush() throws IOException;
+  
+  /** Similar to POSIX fsync: flush out the data in the client's user buffer
+   * all the way to the disk device (though the disk may keep it in its cache).
+   * @throws IOException if an error occurs
+   */
+  public void hsync() throws IOException;
 }
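
A minimal sketch of what an implementation of the widened contract might look
like (purely illustrative, not part of this commit):

    class LocalSyncable extends java.io.FileOutputStream implements Syncable {
      LocalSyncable(String name) throws IOException { super(name); }
      @Deprecated
      public void sync() throws IOException { hflush(); }  // old name forwards
      public void hflush() throws IOException { flush(); } // visible to new readers
      public void hsync() throws IOException {             // pushed to the device
        flush();
        getFD().sync();
      }
    }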

Modified: hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/Trash.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/Trash.java?rev=885142&r1=885141&r2=885142&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/Trash.java (original)
+++ hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/Trash.java Sat Nov 28 19:53:33 2009
@@ -123,12 +123,13 @@
     for (int i = 0; i < 2; i++) {
       try {
         if (!fs.mkdirs(baseTrashPath, PERMISSION)) {      // create current
-          LOG.warn("Can't create trash directory: "+baseTrashPath);
+          LOG.warn("Can't create(mkdir) trash directory: "+baseTrashPath);
           return false;
         }
       } catch (IOException e) {
         LOG.warn("Can't create trash directory: "+baseTrashPath);
-        return false;
+        cause = e;
+        break;
       }
       try {
         //

Modified: hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/permission/FsPermission.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/permission/FsPermission.java?rev=885142&r1=885141&r2=885142&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/permission/FsPermission.java (original)
+++ hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/permission/FsPermission.java Sat Nov 28 19:53:33 2009
@@ -17,17 +17,24 @@
  */
 package org.apache.hadoop.fs.permission;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.*;
-
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableFactories;
+import org.apache.hadoop.io.WritableFactory;
+
 /**
  * A class for file/directory permissions.
  */
 public class FsPermission implements Writable {
+  private static final Log LOG = LogFactory.getLog(FsPermission.class);
+  
   static final WritableFactory FACTORY = new WritableFactory() {
     public Writable newInstance() { return new FsPermission(); }
   };
@@ -175,16 +182,29 @@
         otheraction.and(umask.otheraction.not()));
   }
 
-  /** umask property label */
-  public static final String UMASK_LABEL = "dfs.umask";
-  public static final int DEFAULT_UMASK = 0022;
+  /** Deprecated umask property label; this key may be removed in version 0.23. */
+  public static final String DEPRECATED_UMASK_LABEL = "dfs.umask"; 
+  public static final String UMASK_LABEL = 
+                  CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY;
+  public static final int DEFAULT_UMASK = 
+                  CommonConfigurationKeys.FS_PERMISSIONS_UMASK_DEFAULT;
 
   /** Get the user file creation mask (umask) */
   public static FsPermission getUMask(Configuration conf) {
     int umask = DEFAULT_UMASK;
-    if (conf != null) {
-      umask = conf.getInt(UMASK_LABEL, DEFAULT_UMASK);
+    
+    // Attempt to pull the value from the configuration, trying the new key
+    // first and then falling back to the deprecated key.
+    if(conf != null) {
+      String confUmask = conf.get(UMASK_LABEL);
+      if(confUmask != null) { // UMASK_LABEL is set
+        if(conf.deprecatedKeyWasSet(DEPRECATED_UMASK_LABEL)) 
+          umask = Integer.parseInt(confUmask); // Evaluate as decimal value
+        else
+          umask = new UmaskParser(confUmask).getUMask();
+      } 
     }
+    
     return new FsPermission((short)umask);
   }
 

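A hedged usage sketch of the new lookup. The umask string under the new key is
handed to UmaskParser (which accepting octal strings like "022" is an
assumption here), while values reaching getUMask only via the deprecated
"dfs.umask" key are still read, but as a decimal integer:

  Configuration conf = new Configuration();
  conf.set(FsPermission.UMASK_LABEL, "022");           // new key, via UmaskParser
  FsPermission umask = FsPermission.getUMask(conf);

  // Deprecated-key values are evaluated as *decimal*, e.g. 18 == 022 octal.
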
Modified: hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/s3/Jets3tFileSystemStore.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/s3/Jets3tFileSystemStore.java?rev=885142&r1=885141&r2=885142&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/s3/Jets3tFileSystemStore.java (original)
+++ hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/s3/Jets3tFileSystemStore.java Sat Nov 28 19:53:33 2009
@@ -98,7 +98,10 @@
     }
     bucket = new S3Bucket(uri.getHost());
 
-    this.bufferSize = conf.getInt("io.file.buffer.size", 4096);
+    this.bufferSize = conf.getInt(
+                       S3FileSystemConfigKeys.S3_STREAM_BUFFER_SIZE_KEY,
+                       S3FileSystemConfigKeys.S3_STREAM_BUFFER_SIZE_DEFAULT
+                      );
   }
 
   public String getVersion() throws IOException {

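A short sketch of overriding the new buffer-size key from client code (the
constant's string value is an assumption; check S3FileSystemConfigKeys in your
tree):

  Configuration conf = new Configuration();
  // Raise the S3 stream buffer size from the default to 64 KB
  conf.setInt(S3FileSystemConfigKeys.S3_STREAM_BUFFER_SIZE_KEY, 64 * 1024);
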
Modified: hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java?rev=885142&r1=885141&r2=885142&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java (original)
+++ hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java Sat Nov 28 19:53:33 2009
@@ -455,6 +455,7 @@
     } while (priorLastKey != null);
     
     if (status.isEmpty() &&
+        key.length() > 0 &&
         store.retrieveMetadata(key + FOLDER_SUFFIX) == null) {
       throw new FileNotFoundException("File " + f + " does not exist.");
     }

Modified: hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/http/FilterInitializer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/http/FilterInitializer.java?rev=885142&r1=885141&r2=885142&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/http/FilterInitializer.java (original)
+++ hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/http/FilterInitializer.java Sat Nov 28 19:53:33 2009
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.http;
 
+import org.apache.hadoop.conf.Configuration;
+
 /**
  * Initialize a javax.servlet.Filter. 
  */
@@ -24,6 +26,7 @@
   /**
    * Initialize a Filter to a FilterContainer.
    * @param container The filter container
+   * @param conf Configuration for run-time parameters
    */
-  abstract void initFilter(FilterContainer container);
+  public abstract void initFilter(FilterContainer container, Configuration conf);
 }
\ No newline at end of file

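Since initFilter now receives the Configuration, initializers can read their
run-time parameters directly. A hedged sketch of an initializer written
against the new signature (ExampleFilter and the parameter names are
hypothetical):

  import java.util.HashMap;
  import java.util.Map;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.http.FilterContainer;
  import org.apache.hadoop.http.FilterInitializer;

  public class ExampleFilterInitializer extends FilterInitializer {
    @Override
    public void initFilter(FilterContainer container, Configuration conf) {
      Map<String, String> params = new HashMap<String, String>();
      // run-time parameters can now be read from the passed-in Configuration
      params.put("example.param", conf.get("hadoop.http.example.param", ""));
      container.addFilter("example", ExampleFilter.class.getName(), params);
    }
  }
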
Modified: hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/http/HttpServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/http/HttpServer.java?rev=885142&r1=885141&r2=885142&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/http/HttpServer.java (original)
+++ hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/http/HttpServer.java Sat Nov 28 19:53:33 2009
@@ -23,14 +23,20 @@
 import java.net.InetSocketAddress;
 import java.net.URL;
 import java.util.ArrayList;
+import java.util.Enumeration;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.nio.channels.ServerSocketChannel;
 
+import javax.servlet.Filter;
+import javax.servlet.FilterChain;
+import javax.servlet.FilterConfig;
 import javax.servlet.ServletException;
+import javax.servlet.ServletRequest;
+import javax.servlet.ServletResponse;
 import javax.servlet.http.HttpServlet;
 import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletRequestWrapper;
 import javax.servlet.http.HttpServletResponse;
 
 import org.apache.commons.logging.Log;
@@ -117,10 +123,11 @@
 
     addDefaultApps(contexts, appDir);
 
+    addGlobalFilter("safety", QuotingInputFilter.class.getName(), null);
     final FilterInitializer[] initializers = getFilterInitializers(conf); 
     if (initializers != null) {
       for(FilterInitializer c : initializers) {
-        c.initFilter(this);
+        c.initFilter(this, conf);
       }
     }
     addDefaultServlets();
@@ -535,10 +542,103 @@
     public void doGet(HttpServletRequest request, HttpServletResponse response)
       throws ServletException, IOException {
       
-      PrintWriter out = new PrintWriter(response.getOutputStream());
+      PrintWriter out = new PrintWriter
+                    (HtmlQuoting.quoteOutputStream(response.getOutputStream()));
       ReflectionUtils.printThreadInfo(out, "");
       out.close();
       ReflectionUtils.logThreadInfo(LOG, "jsp requested", 1);      
     }
   }
+  
+  /**
+   * A Servlet input filter that quotes all HTML-active characters in the
+   * parameter names and values. The goal is to quote the characters to make
+   * all of the servlets resistant to cross-site scripting attacks.
+   */
+  public static class QuotingInputFilter implements Filter {
+
+    public static class RequestQuoter extends HttpServletRequestWrapper {
+      private final HttpServletRequest rawRequest;
+      public RequestQuoter(HttpServletRequest rawRequest) {
+        super(rawRequest);
+        this.rawRequest = rawRequest;
+      }
+      
+      /**
+       * Return the set of parameter names, quoting each name.
+       */
+      @SuppressWarnings("unchecked")
+      @Override
+      public Enumeration<String> getParameterNames() {
+        return new Enumeration<String>() {
+          private Enumeration<String> rawIterator = 
+            rawRequest.getParameterNames();
+          @Override
+          public boolean hasMoreElements() {
+            return rawIterator.hasMoreElements();
+          }
+
+          @Override
+          public String nextElement() {
+            return HtmlQuoting.quoteHtmlChars(rawIterator.nextElement());
+          }
+        };
+      }
+      
+      /**
+       * Unquote the name and quote the value.
+       */
+      @Override
+      public String getParameter(String name) {
+        return HtmlQuoting.quoteHtmlChars(rawRequest.getParameter
+                                     (HtmlQuoting.unquoteHtmlChars(name)));
+      }
+      
+      @Override
+      public String[] getParameterValues(String name) {
+        String unquoteName = HtmlQuoting.unquoteHtmlChars(name);
+        String[] unquoteValue = rawRequest.getParameterValues(unquoteName);
+        String[] result = new String[unquoteValue.length];
+        for(int i=0; i < result.length; ++i) {
+          result[i] = HtmlQuoting.quoteHtmlChars(unquoteValue[i]);
+        }
+        return result;
+      }
+
+      @SuppressWarnings("unchecked")
+      @Override
+      public Map<String, String[]> getParameterMap() {
+        Map<String, String[]> result = new HashMap<String,String[]>();
+        Map<String, String[]> raw = rawRequest.getParameterMap();
+        for (Map.Entry<String,String[]> item: raw.entrySet()) {
+          String[] rawValue = item.getValue();
+          String[] cookedValue = new String[rawValue.length];
+          for(int i=0; i< rawValue.length; ++i) {
+            cookedValue[i] = HtmlQuoting.quoteHtmlChars(rawValue[i]);
+          }
+          result.put(HtmlQuoting.quoteHtmlChars(item.getKey()), cookedValue);
+        }
+        return result;
+      }
+    }
+
+    @Override
+    public void init(FilterConfig config) throws ServletException {
+    }
+
+    @Override
+    public void destroy() {
+    }
+
+    @Override
+    public void doFilter(ServletRequest request, 
+                         ServletResponse response,
+                         FilterChain chain
+                         ) throws IOException, ServletException {
+      HttpServletRequestWrapper quoted = 
+        new RequestQuoter((HttpServletRequest) request);
+      chain.doFilter(quoted, response);
+    }
+
+  }
 }

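The QuotingInputFilter above wraps every request so that parameter names and
values are HTML-quoted before any servlet sees them. A hedged illustration of
the transformation it applies (the exact escape sequences produced by
HtmlQuoting are an assumption):

  String raw  = "<script>alert('x')</script>";
  String safe = HtmlQuoting.quoteHtmlChars(raw);
  // safe is along the lines of
  // "&lt;script&gt;alert(&apos;x&apos;)&lt;/script&gt;" and can be echoed
  // back into a page without being interpreted as markup.
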
Modified: hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/io/DefaultStringifier.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/io/DefaultStringifier.java?rev=885142&r1=885141&r2=885142&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/io/DefaultStringifier.java (original)
+++ hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/io/DefaultStringifier.java Sat Nov 28 19:53:33 2009
@@ -21,20 +21,21 @@
 import java.io.IOException;
 import java.nio.charset.UnsupportedCharsetException;
 import java.util.ArrayList;
+import java.util.Map;
 
 import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.serializer.Deserializer;
-import org.apache.hadoop.io.serializer.Serialization;
+import org.apache.hadoop.io.serializer.DeserializerBase;
+import org.apache.hadoop.io.serializer.SerializationBase;
 import org.apache.hadoop.io.serializer.SerializationFactory;
-import org.apache.hadoop.io.serializer.Serializer;
+import org.apache.hadoop.io.serializer.SerializerBase;
 import org.apache.hadoop.util.GenericsUtil;
 
 /**
  * DefaultStringifier is the default implementation of the {@link Stringifier}
  * interface which stringifies the objects using base64 encoding of the
- * serialized version of the objects. The {@link Serializer} and
- * {@link Deserializer} are obtained from the {@link SerializationFactory}.
+ * serialized version of the objects. The {@link SerializerBase} and
+ * {@link DeserializerBase} are obtained from the {@link SerializationFactory}.
  * <br>
  * DefaultStringifier offers convenience methods to store/load objects to/from
  * the configuration.
@@ -45,9 +46,9 @@
 
   private static final String SEPARATOR = ",";
 
-  private Serializer<T> serializer;
+  private SerializerBase<T> serializer;
 
-  private Deserializer<T> deserializer;
+  private DeserializerBase<T> deserializer;
 
   private DataInputBuffer inBuf;
 
@@ -56,8 +57,9 @@
   public DefaultStringifier(Configuration conf, Class<T> c) {
 
     SerializationFactory factory = new SerializationFactory(conf);
-    this.serializer = factory.getSerializer(c);
-    this.deserializer = factory.getDeserializer(c);
+    Map<String, String> metadata = SerializationBase.getMetadataFromClass(c);
+    this.serializer = factory.getSerializer(metadata);
+    this.deserializer = factory.getDeserializer(metadata);
     this.inBuf = new DataInputBuffer();
     this.outBuf = new DataOutputBuffer();
     try {
@@ -102,7 +104,7 @@
    * @param item the object to be stored
    * @param keyName the name of the key to use
    * @throws IOException : forwards Exceptions from the underlying 
-   * {@link Serialization} classes. 
+   * {@link SerializationBase} classes. 
    */
   public static <K> void store(Configuration conf, K item, String keyName)
   throws IOException {
@@ -122,7 +124,7 @@
    * @param itemClass the class of the item
    * @return restored object
    * @throws IOException : forwards Exceptions from the underlying 
-   * {@link Serialization} classes.
+   * {@link SerializationBase} classes.
    */
   public static <K> K load(Configuration conf, String keyName,
       Class<K> itemClass) throws IOException {
@@ -145,7 +147,7 @@
    * @param keyName the name of the key to use
    * @throws IndexOutOfBoundsException if the items array is empty
    * @throws IOException : forwards Exceptions from the underlying 
-   * {@link Serialization} classes.         
+   * {@link SerializationBase} classes.         
    */
   public static <K> void storeArray(Configuration conf, K[] items,
       String keyName) throws IOException {
@@ -173,7 +175,7 @@
    * @param itemClass the class of the item
    * @return restored object
    * @throws IOException : forwards Exceptions from the underlying 
-   * {@link Serialization} classes.
+   * {@link SerializationBase} classes.
    */
   public static <K> K[] loadArray(Configuration conf, String keyName,
       Class<K> itemClass) throws IOException {

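A hedged usage sketch of the store/load convenience methods whose signatures
appear above (the configuration key name is illustrative):

  Configuration conf = new Configuration();
  DefaultStringifier.store(conf, new Text("hello"), "example.stored.item");
  Text restored = DefaultStringifier.load(conf, "example.stored.item",
                                          Text.class);
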
Modified: hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/io/SequenceFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/io/SequenceFile.java?rev=885142&r1=885141&r2=885142&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/io/SequenceFile.java (original)
+++ hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/io/SequenceFile.java Sat Nov 28 19:53:33 2009
@@ -33,9 +33,10 @@
 import org.apache.hadoop.io.compress.DefaultCodec;
 import org.apache.hadoop.io.compress.GzipCodec;
 import org.apache.hadoop.io.compress.zlib.ZlibFactory;
-import org.apache.hadoop.io.serializer.Deserializer;
+import org.apache.hadoop.io.serializer.DeserializerBase;
+import org.apache.hadoop.io.serializer.SerializationBase;
+import org.apache.hadoop.io.serializer.SerializerBase;
 import org.apache.hadoop.io.serializer.SerializationFactory;
-import org.apache.hadoop.io.serializer.Serializer;
 import org.apache.hadoop.conf.*;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.Progress;
@@ -705,6 +706,14 @@
       return new TreeMap<Text, Text>(this.theMetadata);
     }
     
+    public Map<String, String> getMetadataAsStringMap() {
+      Map<String, String> map = new HashMap<String, String>();
+      for (Map.Entry<Text, Text> entry : theMetadata.entrySet()) {
+        map.put(entry.getKey().toString(), entry.getValue().toString());
+      }
+      return map;
+    }
+    
     public void write(DataOutput out) throws IOException {
       out.writeInt(this.theMetadata.size());
       Iterator<Map.Entry<Text, Text>> iter =
@@ -801,9 +810,9 @@
     Metadata metadata = null;
     Compressor compressor = null;
     
-    protected Serializer keySerializer;
-    protected Serializer uncompressedValSerializer;
-    protected Serializer compressedValSerializer;
+    protected SerializerBase keySerializer;
+    protected SerializerBase uncompressedValSerializer;
+    protected SerializerBase compressedValSerializer;
     
     // Insert a globally unique 16-byte value every few entries, so that one
     // can seek into the middle of a file and then synchronize with record
@@ -914,9 +923,10 @@
       this.codec = codec;
       this.metadata = metadata;
       SerializationFactory serializationFactory = new SerializationFactory(conf);
-      this.keySerializer = serializationFactory.getSerializer(keyClass);
+      this.keySerializer = getSerializer(serializationFactory, keyClass, metadata);
       this.keySerializer.open(buffer);
-      this.uncompressedValSerializer = serializationFactory.getSerializer(valClass);
+      this.uncompressedValSerializer = getSerializer(serializationFactory,
+        valClass, metadata);
       this.uncompressedValSerializer.open(buffer);
       if (this.codec != null) {
         ReflectionUtils.setConf(this.codec, this.conf);
@@ -924,11 +934,20 @@
         this.deflateFilter = this.codec.createOutputStream(buffer, compressor);
         this.deflateOut = 
           new DataOutputStream(new BufferedOutputStream(deflateFilter));
-        this.compressedValSerializer = serializationFactory.getSerializer(valClass);
+        this.compressedValSerializer = getSerializer(serializationFactory,
+          valClass, metadata);
         this.compressedValSerializer.open(deflateOut);
       }
     }
     
+    @SuppressWarnings("unchecked")
+    private SerializerBase getSerializer(SerializationFactory sf, Class c,
+        Metadata metadata) {
+      Map<String, String> stringMetadata = metadata.getMetadataAsStringMap();
+      stringMetadata.put(SerializationBase.CLASS_KEY, c.getName());
+      return sf.getSerializer(stringMetadata);
+    }
+    
     /** Returns the class of keys in this file. */
     public Class getKeyClass() { return keyClass; }
 
@@ -1378,6 +1397,7 @@
     private byte[] syncCheck = new byte[SYNC_HASH_SIZE];
     private boolean syncSeen;
 
+    private long headerEnd;
     private long end;
     private int keyLength;
     private int recordLength;
@@ -1412,8 +1432,8 @@
     private DataInputStream valIn = null;
     private Decompressor valDecompressor = null;
     
-    private Deserializer keyDeserializer;
-    private Deserializer valDeserializer;
+    private DeserializerBase keyDeserializer;
+    private DeserializerBase valDeserializer;
 
     /** Open the named file. */
     public Reader(FileSystem fs, Path file, Configuration conf)
@@ -1527,6 +1547,7 @@
       
       if (version > 1) {                          // if version > 1
         in.readFully(sync);                       // read sync bytes
+        headerEnd = in.getPos();                  // record end of header
       }
       
       // Initialize... *not* if we are constructing a temporary Reader
@@ -1563,21 +1584,24 @@
         SerializationFactory serializationFactory =
           new SerializationFactory(conf);
         this.keyDeserializer =
-          getDeserializer(serializationFactory, getKeyClass());
+          getDeserializer(serializationFactory, getKeyClass(), metadata);
         if (!blockCompressed) {
           this.keyDeserializer.open(valBuffer);
         } else {
           this.keyDeserializer.open(keyIn);
         }
         this.valDeserializer =
-          getDeserializer(serializationFactory, getValueClass());
+          getDeserializer(serializationFactory, getValueClass(), metadata);
         this.valDeserializer.open(valIn);
       }
     }
     
     @SuppressWarnings("unchecked")
-    private Deserializer getDeserializer(SerializationFactory sf, Class c) {
-      return sf.getDeserializer(c);
+    private DeserializerBase getDeserializer(SerializationFactory sf, Class c,
+        Metadata metadata) {
+      Map<String, String> stringMetadata = metadata.getMetadataAsStringMap();
+      stringMetadata.put(SerializationBase.CLASS_KEY, c.getName());
+      return sf.getDeserializer(stringMetadata);
     }
     
     /** Close the file. */
@@ -2188,6 +2212,14 @@
         return;
       }
 
+      if (position < headerEnd) {
+        // seek directly to first record
+        in.seek(headerEnd);
+        // note the sync marker "seen" in the header
+        syncSeen = true;
+        return;
+      }
+
       try {
         seek(position+4);                         // skip escape
         in.readFully(syncCheck);

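The headerEnd bookkeeping above means a sync() to a position inside the header
now jumps straight to the first record instead of scanning for a sync marker
that cannot precede the header. A hedged sketch of the resulting reader
behavior, using the deprecated (fs, file, conf) constructor shown in this diff
(key/value types are illustrative):

  SequenceFile.Reader reader = new SequenceFile.Reader(fs, file, conf);
  Text key = new Text();
  Text value = new Text();
  reader.sync(0L);                  // seeks to headerEnd; syncSeen is set
  while (reader.next(key, value)) {
    // records are read from the first one onward
  }
  reader.close();
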
Modified: hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/io/WritableUtils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/io/WritableUtils.java?rev=885142&r1=885141&r2=885142&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/io/WritableUtils.java (original)
+++ hadoop/common/branches/HADOOP-6194/src/java/org/apache/hadoop/io/WritableUtils.java Sat Nov 28 19:53:33 2009
@@ -415,4 +415,29 @@
     }
     return out.getData();
   }
+
+  /**
+   * Read a string, but check it for sanity. The format consists of a vint
+   * followed by the given number of bytes.
+   * @param in the stream to read from
+   * @param maxLength the largest acceptable length of the encoded string
+   * @return the bytes as a string
+   * @throws IOException if reading from the DataInput fails
+   * @throws IllegalArgumentException if the encoded byte size for string 
+   *         is negative or larger than maxLength. Only the vint is read.
+   */
+  public static String readStringSafely(DataInput in,
+                                        int maxLength
+                                        ) throws IOException, 
+                                                 IllegalArgumentException {
+    int length = readVInt(in);
+    if (length < 0 || length > maxLength) {
+      throw new IllegalArgumentException("Encoded byte size for String was " + length + 
+                                         ", which is outside of 0.." +
+                                         maxLength + " range.");
+    }
+    byte [] bytes = new byte[length];
+    in.readFully(bytes, 0, length);
+    return Text.decode(bytes);
+  }
 }
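
A short usage sketch of readStringSafely (java.io imports elided; the stream
source and the 1 MB cap are illustrative):

  DataInput in = new DataInputStream(new FileInputStream("part-00000"));
  // Rejects a negative or oversized vint length before allocating the buffer
  String s = WritableUtils.readStringSafely(in, 1024 * 1024);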