Posted to common-commits@hadoop.apache.org by sz...@apache.org on 2012/10/19 04:27:38 UTC

svn commit: r1399950 [5/17] - in /hadoop/common/branches/HDFS-2802/hadoop-common-project: hadoop-annotations/ hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/ hadoop-auth-examples/ hadoop-auth/ hadoop-auth/src/main/java/org/apac...

Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java Fri Oct 19 02:25:55 2012
@@ -24,6 +24,7 @@ import java.net.URI;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -45,18 +46,23 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.io.MultipleIOException;
+import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.ShutdownHookManager;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /****************************************************************
  * An abstract base class for a fairly generic filesystem.  It
  * may be implemented as a distributed filesystem, or as a "local"
@@ -142,6 +148,7 @@ public abstract class FileSystem extends
     UserGroupInformation ugi =
         UserGroupInformation.getBestUGI(ticketCachePath, user);
     return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
+      @Override
       public FileSystem run() throws IOException {
         return get(uri, conf);
       }
@@ -222,15 +229,25 @@ public abstract class FileSystem extends
 
   /**
    * Get a canonical service name for this file system.  The token cache is
-   * the only user of this value, and uses it to lookup this filesystem's
-   * service tokens.  The token cache will not attempt to acquire tokens if the
-   * service is null.
+   * the only user of the canonical service name, and uses it to look up this
+   * filesystem's service tokens.
+   * If the file system provides a token of its own then it must have a
+   * canonical name; otherwise the canonical name can be null.
+   * 
+   * Default Impl: If the file system has child file systems
+   * (such as an embedded file system) then it is assumed that the fs has no
+   * tokens of its own and hence returns a null name; otherwise a service
+   * name is built using the URI and port.
+   * 
    * @return a service string that uniquely identifies this file system, null
    *         if the filesystem does not implement tokens
    * @see SecurityUtil#buildDTServiceName(URI, int) 
    */
+  @InterfaceAudience.LimitedPrivate({ "HDFS", "MapReduce" })
   public String getCanonicalServiceName() {
-    return SecurityUtil.buildDTServiceName(getUri(), getDefaultPort());
+    return (getChildFileSystems() == null)
+      ? SecurityUtil.buildDTServiceName(getUri(), getDefaultPort())
+      : null;
   }
 
   /** @deprecated call #getUri() instead.*/
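
A note on the new contract: the service name is now tied to getChildFileSystems().
A filesystem that exposes children is assumed to hold no tokens of its own, so a
token cache can use a sketch like the following (hypothetical helper, not part of
this commit) to decide whether to ask a filesystem for tokens:

    // Hypothetical helper: non-null service name => the fs manages its own token.
    static boolean mayHaveOwnToken(FileSystem fs) {
      return fs.getCanonicalServiceName() != null;
    }
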
@@ -280,11 +297,11 @@ public abstract class FileSystem extends
     String scheme = uri.getScheme();
     String authority = uri.getAuthority();
 
-    if (scheme == null) {                       // no scheme: use default FS
+    if (scheme == null && authority == null) {     // use default FS
       return get(conf);
     }
 
-    if (authority == null) {                       // no authority
+    if (scheme != null && authority == null) {     // no authority
       URI defaultUri = getDefaultUri(conf);
       if (scheme.equals(defaultUri.getScheme())    // if scheme matches default
           && defaultUri.getAuthority() != null) {  // & default has authority
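
For illustration, a sketch of the two resolution paths after this change (URIs
are illustrative; Configuration and URI imports as in this file):

    // Sketch: a path-only URI still resolves to the configured default fs.
    static void resolveExamples(Configuration conf) throws IOException {
      FileSystem byDefault = FileSystem.get(URI.create("/just/a/path"), conf);
      // An authority without a scheme is no longer treated as the default fs;
      // it now flows through the normal scheme/cache resolution below.
      FileSystem byAuthority =
          FileSystem.get(URI.create("//nn.example.com:8020/x"), conf);
    }
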
@@ -317,6 +334,7 @@ public abstract class FileSystem extends
     UserGroupInformation ugi =
         UserGroupInformation.getBestUGI(ticketCachePath, user);
     return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
+      @Override
       public FileSystem run() throws IOException {
         return newInstance(uri,conf); 
       }
@@ -396,68 +414,95 @@ public abstract class FileSystem extends
   }
     
   /**
-   * Deprecated  - use @link {@link #getDelegationTokens(String)}
    * Get a new delegation token for this file system.
+   * This is an internal method that should have been declared protected
+   * but wasn't historically.
+   * Callers should use {@link #addDelegationTokens(String, Credentials)}
+   * 
    * @param renewer the account name that is allowed to renew the token.
    * @return a new delegation token
    * @throws IOException
    */
-  @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
-  @Deprecated
+  @InterfaceAudience.Private()
   public Token<?> getDelegationToken(String renewer) throws IOException {
     return null;
   }
   
   /**
-   * Get one or more delegation tokens associated with the filesystem. Normally
-   * a file system returns a single delegation token. A file system that manages
-   * multiple file systems underneath, could return set of delegation tokens for
-   * all the file systems it manages.
+   * Obtain all delegation tokens used by this FileSystem that are not
+   * already present in the given Credentials.  Existing tokens will neither
+   * be verified as valid nor checked against the given renewer.  Missing
+   * tokens will be acquired and added to the given Credentials.
+   * 
+   * Default Impl: works for a simple fs with its own token
+   * and also for an embedded fs whose tokens are those of its
+   * child file systems (i.e. the embedded fs has no tokens of its
+   * own).
    * 
-   * @param renewer the account name that is allowed to renew the token.
+   * @param renewer the user allowed to renew the delegation tokens
+   * @param credentials cache in which to add new delegation tokens
    * @return list of new delegation tokens
-   *    If delegation tokens not supported then return a list of size zero.
    * @throws IOException
    */
-  @InterfaceAudience.LimitedPrivate( { "HDFS", "MapReduce" })
-  public List<Token<?>> getDelegationTokens(String renewer) throws IOException {
-    return new ArrayList<Token<?>>(0);
+  @InterfaceAudience.LimitedPrivate({ "HDFS", "MapReduce" })
+  public Token<?>[] addDelegationTokens(
+      final String renewer, Credentials credentials) throws IOException {
+    if (credentials == null) {
+      credentials = new Credentials();
+    }
+    final List<Token<?>> tokens = new ArrayList<Token<?>>();
+    collectDelegationTokens(renewer, credentials, tokens);
+    return tokens.toArray(new Token<?>[tokens.size()]);
   }
   
   /**
-   * @see #getDelegationTokens(String)
-   * This is similar to getDelegationTokens, with the added restriction that if
-   * a token is already present in the passed Credentials object - that token
-   * is returned instead of a new delegation token. 
-   * 
-   * If the token is found to be cached in the Credentials object, this API does
-   * not verify the token validity or the passed in renewer. 
-   * 
-   * 
-   * @param renewer the account name that is allowed to renew the token.
-   * @param credentials a Credentials object containing already knowing 
-   *   delegationTokens.
-   * @return a list of delegation tokens.
+   * Recursively obtain the tokens for this FileSystem and all descendant
+   * FileSystems as determined by getChildFileSystems().
+   * @param renewer the user allowed to renew the delegation tokens
+   * @param credentials cache in which to add the new delegation tokens
+   * @param tokens list in which to add acquired tokens
    * @throws IOException
    */
-  @InterfaceAudience.LimitedPrivate({ "HDFS", "MapReduce" })
-  public List<Token<?>> getDelegationTokens(String renewer,
-      Credentials credentials) throws IOException {
-    List<Token<?>> allTokens = getDelegationTokens(renewer);
-    List<Token<?>> newTokens = new ArrayList<Token<?>>();
-    if (allTokens != null) {
-      for (Token<?> token : allTokens) {
-        Token<?> knownToken = credentials.getToken(token.getService());
-        if (knownToken == null) {
-          newTokens.add(token);
-        } else {
-          newTokens.add(knownToken);
+  private void collectDelegationTokens(final String renewer,
+                                       final Credentials credentials,
+                                       final List<Token<?>> tokens)
+                                           throws IOException {
+    final String serviceName = getCanonicalServiceName();
+    // Collect the token of this filesystem and then those of its embedded children
+    if (serviceName != null) { // fs has token, grab it
+      final Text service = new Text(serviceName);
+      Token<?> token = credentials.getToken(service);
+      if (token == null) {
+        token = getDelegationToken(renewer);
+        if (token != null) {
+          tokens.add(token);
+          credentials.addToken(service, token);
         }
       }
     }
-    return newTokens;
+    // Now collect the tokens from the children
+    final FileSystem[] children = getChildFileSystems();
+    if (children != null) {
+      for (final FileSystem fs : children) {
+        fs.collectDelegationTokens(renewer, credentials, tokens);
+      }
+    }
   }
 
+  /**
+   * Get all the immediate child FileSystems embedded in this FileSystem.
+   * It does not recurse into grandchildren.  If a FileSystem
+   * has multiple child FileSystems, then it should return a unique list
+   * of those FileSystems.  Default is to return null to signify no children.
+   * 
+   * @return FileSystems used by this FileSystem
+   */
+  @InterfaceAudience.LimitedPrivate({ "HDFS" })
+  @VisibleForTesting
+  public FileSystem[] getChildFileSystems() {
+    return null;
+  }
+  
   /** create a file with the provided permission
    * The permission of the file is set to be the provided permission as in
    * setPermission, not permission&~umask
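
A usage sketch of the replacement API (the renewer string is illustrative):
callers pass a Credentials cache and receive only the newly acquired tokens.

    // Sketch: collect tokens for a job, skipping any already in the cache.
    static void collectJobTokens(FileSystem fs, Credentials creds)
        throws IOException {
      Token<?>[] fresh = fs.addDelegationTokens("yarn", creds); // renewer illustrative
      for (Token<?> t : fresh) {
        System.out.println("acquired token for " + t.getService());
      }
    }
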
@@ -572,7 +617,7 @@ public abstract class FileSystem extends
       throw new IllegalArgumentException("Invalid start or len parameter");
     }
 
-    if (file.getLen() < start) {
+    if (file.getLen() <= start) {
       return new BlockLocation[0];
 
     }
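
With < tightened to <=, a request that starts exactly at the file length
(including any request against a zero-length file) now yields an empty array.
A sketch of the boundary case:

    // Sketch: start == file length is answered with no block locations.
    static boolean noBlocksPastEof(FileSystem fs, Path p) throws IOException {
      FileStatus stat = fs.getFileStatus(p);
      BlockLocation[] locs = fs.getFileBlockLocations(stat, stat.getLen(), 1);
      return locs.length == 0; // true after this change
    }
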
@@ -616,11 +661,17 @@ public abstract class FileSystem extends
   @Deprecated
   public FsServerDefaults getServerDefaults() throws IOException {
     Configuration conf = getConf();
+    // CRC32 is chosen as the default because it is available in all
+    // releases that support checksums.
+    // The client trash configuration is ignored.
     return new FsServerDefaults(getDefaultBlockSize(), 
         conf.getInt("io.bytes.per.checksum", 512), 
         64 * 1024, 
         getDefaultReplication(),
-        conf.getInt("io.file.buffer.size", 4096));
+        conf.getInt("io.file.buffer.size", 4096),
+        false,
+        CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT,
+        DataChecksum.Type.CRC32);
   }
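
A sketch of what a client observes through the deprecated no-argument overload
(the checksum type reflects the CRC32 default chosen above):

    // Sketch: the locally built defaults now carry a checksum type.
    static DataChecksum.Type defaultChecksum(FileSystem fs) throws IOException {
      FsServerDefaults d = fs.getServerDefaults(); // deprecated overload
      return d.getChecksumType();                  // CRC32 with these defaults
    }
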
 
   /**
@@ -846,11 +897,40 @@ public abstract class FileSystem extends
       short replication,
       long blockSize,
       Progressable progress) throws IOException {
-    // only DFS support this
-    return create(f, permission, flags.contains(CreateFlag.OVERWRITE), bufferSize, replication, blockSize, progress);
+    return create(f, permission, flags, bufferSize, replication,
+        blockSize, progress, null);
   }
   
-  
+  /**
+   * Create an FSDataOutputStream at the indicated Path with a custom
+   * checksum option.
+   * @param f the file name to open
+   * @param permission
+   * @param flags {@link CreateFlag}s to use for this stream.
+   * @param bufferSize the size of the buffer to be used.
+   * @param replication required block replication for the file.
+   * @param blockSize
+   * @param progress
+   * @param checksumOpt checksum parameter. If null, the values
+   *        found in conf will be used.
+   * @throws IOException
+   * @see #setPermission(Path, FsPermission)
+   */
+  public FSDataOutputStream create(Path f,
+      FsPermission permission,
+      EnumSet<CreateFlag> flags,
+      int bufferSize,
+      short replication,
+      long blockSize,
+      Progressable progress,
+      ChecksumOpt checksumOpt) throws IOException {
+    // Checksum options are ignored by default. File systems that
+    // implement checksums need to override this method. Full
+    // support is currently only available in DFS.
+    return create(f, permission, flags.contains(CreateFlag.OVERWRITE), 
+        bufferSize, replication, blockSize, progress);
+  }
+
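
A hedged sketch of calling the new overload; the path, replication, and block
size are illustrative, and filesystems without checksum support ignore the
option as described above:

    // Sketch: request CRC32C with 512 bytes per checksum at create time.
    static FSDataOutputStream createWithChecksum(FileSystem fs)
        throws IOException {
      ChecksumOpt opt = new ChecksumOpt(DataChecksum.Type.CRC32C, 512);
      return fs.create(new Path("/tmp/example"),       // illustrative path
          FsPermission.getDefault(),
          EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
          4096, (short) 3, 128 * 1024 * 1024L, null, opt);
    }
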
  /*
    * This create has been added to support the FileContext that processes
    * the permission
@@ -862,7 +942,7 @@ public abstract class FileSystem extends
   protected FSDataOutputStream primitiveCreate(Path f,
      FsPermission absolutePermission, EnumSet<CreateFlag> flag, int bufferSize,
      short replication, long blockSize, Progressable progress,
-     int bytesPerChecksum) throws IOException {
+     ChecksumOpt checksumOpt) throws IOException {
 
     boolean pathExists = exists(f);
     CreateFlag.validate(f, pathExists, flag);
@@ -1214,6 +1294,16 @@ public abstract class FileSystem extends
     }
     return true;
   }
+  
+  /**
+   * Cancel the scheduled deletion of the path when the FileSystem is closed.
+   * @param f the path whose scheduled deletion is to be cancelled
+   * @return true if the path was previously marked for deletion on exit
+   */
+  public boolean cancelDeleteOnExit(Path f) {
+    synchronized (deleteOnExit) {
+      return deleteOnExit.remove(f);
+    }
+  }
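
A sketch pairing the new method with its existing counterpart (the path is
illustrative):

    // Sketch: mark a scratch path for deletion at close(), then rescind it.
    static void scratchLifecycle(FileSystem fs) throws IOException {
      Path tmp = new Path("/tmp/scratch");            // illustrative
      fs.deleteOnExit(tmp);                           // marked for deletion
      boolean wasMarked = fs.cancelDeleteOnExit(tmp); // true: mark removed
    }
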
 
   /**
    * Delete all files that were marked as delete-on-exit. This recursively
@@ -1224,7 +1314,9 @@ public abstract class FileSystem extends
       for (Iterator<Path> iter = deleteOnExit.iterator(); iter.hasNext();) {
         Path path = iter.next();
         try {
-          delete(path, true);
+          if (exists(path)) {
+            delete(path, true);
+          }
         }
         catch (IOException e) {
           LOG.info("Ignoring failure to deleteOnExit for path " + path);
@@ -1300,10 +1392,11 @@ public abstract class FileSystem extends
   }
 
   final private static PathFilter DEFAULT_FILTER = new PathFilter() {
-      public boolean accept(Path file) {
-        return true;
-      }     
-    };
+    @Override
+    public boolean accept(Path file) {
+      return true;
+    }
+  };
     
   /**
    * List the statuses of the files/directories in the given path if the path is
@@ -1467,128 +1560,128 @@ public abstract class FileSystem extends
   }
   
   /**
-   * Return an array of FileStatus objects whose path names match pathPattern
-   * and is accepted by the user-supplied path filter. Results are sorted by
-   * their path names.
-   * Return null if pathPattern has no glob and the path does not exist.
-   * Return an empty array if pathPattern has a glob and no path matches it. 
-   * 
-   * @param pathPattern
-   *          a regular expression specifying the path pattern
-   * @param filter
-   *          a user-supplied path filter
-   * @return an array of FileStatus objects
+   * Return an array of FileStatus objects whose path names match
+   * {@code pathPattern} and are accepted by the user-supplied path filter.
+   * Results are sorted by their path names.
+   * 
+   * @param pathPattern a regular expression specifying the path pattern
+   * @param filter a user-supplied path filter
+   * @return null if {@code pathPattern} has no glob and the path does not
+   *         exist; an empty array if {@code pathPattern} has a glob and no
+   *         path matches it; else an array of {@link FileStatus} objects
+   *         matching the pattern
    * @throws IOException if any I/O error occurs when fetching file status
    */
   public FileStatus[] globStatus(Path pathPattern, PathFilter filter)
       throws IOException {
     String filename = pathPattern.toUri().getPath();
+    List<FileStatus> allMatches = null;
+    
     List<String> filePatterns = GlobExpander.expand(filename);
-    if (filePatterns.size() == 1) {
-      return globStatusInternal(pathPattern, filter);
-    } else {
-      List<FileStatus> results = new ArrayList<FileStatus>();
-      for (String filePattern : filePatterns) {
-        FileStatus[] files = globStatusInternal(new Path(filePattern), filter);
-        for (FileStatus file : files) {
-          results.add(file);
+    for (String filePattern : filePatterns) {
+      Path path = new Path(filePattern.isEmpty() ? Path.CUR_DIR : filePattern);
+      List<FileStatus> matches = globStatusInternal(path, filter);
+      if (matches != null) {
+        if (allMatches == null) {
+          allMatches = matches;
+        } else {
+          allMatches.addAll(matches);
         }
       }
-      return results.toArray(new FileStatus[results.size()]);
     }
+    
+    FileStatus[] results = null;
+    if (allMatches != null) {
+      results = allMatches.toArray(new FileStatus[allMatches.size()]);
+    } else if (filePatterns.size() > 1) {
+      // no matches with multiple expansions is a non-matching glob 
+      results = new FileStatus[0];
+    }
+    return results;
   }
 
-  private FileStatus[] globStatusInternal(Path pathPattern, PathFilter filter)
-      throws IOException {
-    Path[] parents = new Path[1];
+  // Collections.sort() gripes because FileStatus's Comparable isn't parameterized...
+  @SuppressWarnings("unchecked") 
+  private List<FileStatus> globStatusInternal(Path pathPattern,
+      PathFilter filter) throws IOException {
+    boolean patternHasGlob = false;       // pathPattern has any globs
+    List<FileStatus> matches = new ArrayList<FileStatus>();
+
+    // determine starting point
     int level = 0;
-    String filename = pathPattern.toUri().getPath();
+    String baseDir = Path.CUR_DIR;
+    if (pathPattern.isAbsolute()) {
+      level = 1; // need to skip empty item at beginning of split list
+      baseDir = Path.SEPARATOR;
+    }
     
-    // path has only zero component
-    if ("".equals(filename) || Path.SEPARATOR.equals(filename)) {
-      return getFileStatus(new Path[]{pathPattern});
+    // parse components and determine if it's a glob
+    String[] components = null;
+    GlobFilter[] filters = null;
+    String filename = pathPattern.toUri().getPath();
+    if (!filename.isEmpty() && !Path.SEPARATOR.equals(filename)) {
+      components = filename.split(Path.SEPARATOR);
+      filters = new GlobFilter[components.length];
+      for (int i=level; i < components.length; i++) {
+        filters[i] = new GlobFilter(components[i]);
+        patternHasGlob |= filters[i].hasPattern();
+      }
+      if (!patternHasGlob) {
+        baseDir = unquotePathComponent(filename);
+        components = null; // shortcut straight to the filter check
+      }
     }
-
-    // path has at least one component
-    String[] components = filename.split(Path.SEPARATOR);
-    // get the first component
-    if (pathPattern.isAbsolute()) {
-      parents[0] = new Path(Path.SEPARATOR);
-      level = 1;
-    } else {
-      parents[0] = new Path(Path.CUR_DIR);
+    
+    // seed the parent directory path, return if it doesn't exist
+    try {
+      matches.add(getFileStatus(new Path(baseDir)));
+    } catch (FileNotFoundException e) {
+      return patternHasGlob ? matches : null;
     }
-
-    // glob the paths that match the parent path, i.e., [0, components.length-1]
-    boolean[] hasGlob = new boolean[]{false};
-    Path[] parentPaths = globPathsLevel(parents, components, level, hasGlob);
-    FileStatus[] results;
-    if (parentPaths == null || parentPaths.length == 0) {
-      results = null;
-    } else {
-      // Now work on the last component of the path
-      GlobFilter fp = new GlobFilter(components[components.length - 1], filter);
-      if (fp.hasPattern()) { // last component has a pattern
-        // list parent directories and then glob the results
-        results = listStatus(parentPaths, fp);
-        hasGlob[0] = true;
-      } else { // last component does not have a pattern
-        // remove the quoting of metachars in a non-regexp expansion
-        String name = unquotePathComponent(components[components.length - 1]);
-        // get all the path names
-        ArrayList<Path> filteredPaths = new ArrayList<Path>(parentPaths.length);
-        for (int i = 0; i < parentPaths.length; i++) {
-          parentPaths[i] = new Path(parentPaths[i], name);
-          if (fp.accept(parentPaths[i])) {
-            filteredPaths.add(parentPaths[i]);
+    
+    // skip if there are no components other than the basedir
+    if (components != null) {
+      // iterate through each path component
+      for (int i=level; (i < components.length) && !matches.isEmpty(); i++) {
+        List<FileStatus> children = new ArrayList<FileStatus>();
+        for (FileStatus match : matches) {
+          // don't look for children in a file matched by a glob
+          if (!match.isDirectory()) {
+            continue;
+          }
+          try {
+            if (filters[i].hasPattern()) {
+              // get all children matching the filter
+              FileStatus[] statuses = listStatus(match.getPath(), filters[i]);
+              children.addAll(Arrays.asList(statuses));
+            } else {
+              // the component does not have a pattern
+              String component = unquotePathComponent(components[i]);
+              Path child = new Path(match.getPath(), component);
+              children.add(getFileStatus(child));
+            }
+          } catch (FileNotFoundException e) {
+            // don't care
           }
         }
-        // get all their statuses
-        results = getFileStatus(
-            filteredPaths.toArray(new Path[filteredPaths.size()]));
+        matches = children;
       }
     }
-
-    // Decide if the pathPattern contains a glob or not
-    if (results == null) {
-      if (hasGlob[0]) {
-        results = new FileStatus[0];
-      }
-    } else {
-      if (results.length == 0 ) {
-        if (!hasGlob[0]) {
-          results = null;
+    // remove anything that didn't match the filter
+    if (!matches.isEmpty()) {
+      Iterator<FileStatus> iter = matches.iterator();
+      while (iter.hasNext()) {
+        if (!filter.accept(iter.next().getPath())) {
+          iter.remove();
         }
-      } else {
-        Arrays.sort(results);
       }
     }
-    return results;
-  }
-
-  /*
-   * For a path of N components, return a list of paths that match the
-   * components [<code>level</code>, <code>N-1</code>].
-   */
-  private Path[] globPathsLevel(Path[] parents, String[] filePattern,
-      int level, boolean[] hasGlob) throws IOException {
-    if (level == filePattern.length - 1)
-      return parents;
-    if (parents == null || parents.length == 0) {
-      return null;
-    }
-    GlobFilter fp = new GlobFilter(filePattern[level]);
-    if (fp.hasPattern()) {
-      parents = FileUtil.stat2Paths(listStatus(parents, fp));
-      hasGlob[0] = true;
-    } else { // the component does not have a pattern
-      // remove the quoting of metachars in a non-regexp expansion
-      String name = unquotePathComponent(filePattern[level]);
-      for (int i = 0; i < parents.length; i++) {
-        parents[i] = new Path(parents[i], name);
-      }
+    // no final paths; if there were any globs, return the empty list
+    if (matches.isEmpty()) {
+      return patternHasGlob ? matches : null;
     }
-    return globPathsLevel(parents, filePattern, level + 1, hasGlob);
+    Collections.sort(matches);
+    return matches;
   }
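
The rewritten matcher keeps the old three-way return contract; a sketch
distinguishing the cases (patterns are illustrative):

    // Sketch: the three possible outcomes of globStatus.
    static void globOutcomes(FileSystem fs) throws IOException {
      FileStatus[] a = fs.globStatus(new Path("/no/such/file"));  // null
      FileStatus[] b = fs.globStatus(new Path("/no/such/dir/*")); // empty array
      FileStatus[] c = fs.globStatus(new Path("/tmp/*"));         // matches, sorted
    }
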
 
   /**
@@ -1959,6 +2052,7 @@ public abstract class FileSystem extends
    * No more filesystem operations are needed.  Will
    * release any held locks.
    */
+  @Override
   public void close() throws IOException {
     // delete all files that were marked as delete-on-exit.
     processDeleteOnExit();
@@ -2064,30 +2158,6 @@ public abstract class FileSystem extends
   }
 
   /**
-   * Return a list of file status objects that corresponds to the list of paths
-   * excluding those non-existent paths.
-   * 
-   * @param paths
-   *          the list of paths we want information from
-   * @return a list of FileStatus objects
-   * @throws IOException
-   *           see specific implementation
-   */
-  private FileStatus[] getFileStatus(Path[] paths) throws IOException {
-    if (paths == null) {
-      return null;
-    }
-    ArrayList<FileStatus> results = new ArrayList<FileStatus>(paths.length);
-    for (int i = 0; i < paths.length; i++) {
-      try {
-        results.add(getFileStatus(paths[i]));
-      } catch (FileNotFoundException e) { // do nothing
-      }
-    }
-    return results.toArray(new FileStatus[results.size()]);
-  }
-  
-  /**
    * Returns a status object describing the use and capacity of the
    * file system. If the file system has multiple partitions, the
    * use and capacity of the root partition is reflected.
@@ -2296,6 +2366,7 @@ public abstract class FileSystem extends
     }
 
     private class ClientFinalizer implements Runnable {
+      @Override
       public synchronized void run() {
         try {
           closeAll(true);
@@ -2350,7 +2421,7 @@ public abstract class FileSystem extends
         this.ugi = UserGroupInformation.getCurrentUser();
       }
 
-      /** {@inheritDoc} */
+      @Override
       public int hashCode() {
         return (scheme + authority).hashCode() + ugi.hashCode() + (int)unique;
       }
@@ -2359,7 +2430,7 @@ public abstract class FileSystem extends
         return a == b || (a != null && a.equals(b));        
       }
 
-      /** {@inheritDoc} */
+      @Override
       public boolean equals(Object obj) {
         if (obj == this) {
           return true;
@@ -2374,7 +2445,7 @@ public abstract class FileSystem extends
         return false;        
       }
 
-      /** {@inheritDoc} */
+      @Override
       public String toString() {
         return "("+ugi.toString() + ")@" + scheme + "://" + authority;        
       }
@@ -2487,6 +2558,7 @@ public abstract class FileSystem extends
       return writeOps.get();
     }
 
+    @Override
     public String toString() {
       return bytesRead + " bytes read, " + bytesWritten + " bytes written, "
           + readOps + " read ops, " + largeReadOps + " large read ops, "

Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java Fri Oct 19 02:25:55 2012
@@ -414,9 +414,11 @@ public class FileUtil {
     String getResult() throws IOException {
       return result;
     }
+    @Override
     protected String[] getExecString() {
       return command;
     }
+    @Override
     protected void parseExecResult(BufferedReader lines) throws IOException {
       String line = lines.readLine();
       if (line == null) {

Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java Fri Oct 19 02:25:55 2012
@@ -22,15 +22,12 @@ import java.io.*;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.EnumSet;
-import java.util.List;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.ContentSummary;
-import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.util.Progressable;
 
 /****************************************************************
@@ -79,6 +76,7 @@ public class FilterFileSystem extends Fi
    *   for this FileSystem
    * @param conf the configuration
    */
+  @Override
   public void initialize(URI name, Configuration conf) throws IOException {
     super.initialize(name, conf);
     // this is less than ideal, but existing filesystems sometimes neglect
@@ -93,6 +91,7 @@ public class FilterFileSystem extends Fi
   }
 
   /** Returns a URI whose scheme and authority identify this FileSystem.*/
+  @Override
   public URI getUri() {
     return fs.getUri();
   }
@@ -107,6 +106,7 @@ public class FilterFileSystem extends Fi
   }
   
   /** Make sure that a path specifies a FileSystem. */
+  @Override
   public Path makeQualified(Path path) {
     Path fqPath = fs.makeQualified(path);
     // swap in our scheme if the filtered fs is using a different scheme
@@ -128,10 +128,12 @@ public class FilterFileSystem extends Fi
   ///////////////////////////////////////////////////////////////
 
   /** Check that a Path belongs to this FileSystem. */
+  @Override
   protected void checkPath(Path path) {
     fs.checkPath(path);
   }
 
+  @Override
   public BlockLocation[] getFileBlockLocations(FileStatus file, long start,
     long len) throws IOException {
       return fs.getFileBlockLocations(file, start, len);
@@ -146,17 +148,17 @@ public class FilterFileSystem extends Fi
    * @param f the file name to open
    * @param bufferSize the size of the buffer to be used.
    */
+  @Override
   public FSDataInputStream open(Path f, int bufferSize) throws IOException {
     return fs.open(f, bufferSize);
   }
 
-  /** {@inheritDoc} */
+  @Override
   public FSDataOutputStream append(Path f, int bufferSize,
       Progressable progress) throws IOException {
     return fs.append(f, bufferSize, progress);
   }
 
-  /** {@inheritDoc} */
   @Override
   public FSDataOutputStream create(Path f, FsPermission permission,
       boolean overwrite, int bufferSize, short replication, long blockSize,
@@ -174,6 +176,7 @@ public class FilterFileSystem extends Fi
    * @return true if successful;
    *         false if file does not exist or is a directory
    */
+  @Override
   public boolean setReplication(Path src, short replication) throws IOException {
     return fs.setReplication(src, replication);
   }
@@ -182,40 +185,23 @@ public class FilterFileSystem extends Fi
    * Renames Path src to Path dst.  Can take place on local fs
    * or remote DFS.
    */
+  @Override
   public boolean rename(Path src, Path dst) throws IOException {
     return fs.rename(src, dst);
   }
   
   /** Delete a file */
+  @Override
   public boolean delete(Path f, boolean recursive) throws IOException {
     return fs.delete(f, recursive);
   }
   
-  /**
-   * Mark a path to be deleted when FileSystem is closed.
-   * When the JVM shuts down,
-   * all FileSystem objects will be closed automatically.
-   * Then,
-   * the marked path will be deleted as a result of closing the FileSystem.
-   *
-   * The path has to exist in the file system.
-   * 
-   * @param f the path to delete.
-   * @return  true if deleteOnExit is successful, otherwise false.
-   * @throws IOException
-   */
-  public boolean deleteOnExit(Path f) throws IOException {
-    return fs.deleteOnExit(f);
-  }    
-
   /** List files in a directory. */
+  @Override
   public FileStatus[] listStatus(Path f) throws IOException {
     return fs.listStatus(f);
   }
 
-  /**
-   * {@inheritDoc}
-   */
   @Override
   public RemoteIterator<Path> listCorruptFileBlocks(Path path)
     throws IOException {
@@ -223,11 +209,13 @@ public class FilterFileSystem extends Fi
   }
 
   /** List files and its block locations in a directory. */
+  @Override
   public RemoteIterator<LocatedFileStatus> listLocatedStatus(Path f)
   throws IOException {
     return fs.listLocatedStatus(f);
   }
   
+  @Override
   public Path getHomeDirectory() {
     return fs.getHomeDirectory();
   }
@@ -239,6 +227,7 @@ public class FilterFileSystem extends Fi
    * 
    * @param newDir
    */
+  @Override
   public void setWorkingDirectory(Path newDir) {
     fs.setWorkingDirectory(newDir);
   }
@@ -248,21 +237,21 @@ public class FilterFileSystem extends Fi
    * 
    * @return the directory pathname
    */
+  @Override
   public Path getWorkingDirectory() {
     return fs.getWorkingDirectory();
   }
   
+  @Override
   protected Path getInitialWorkingDirectory() {
     return fs.getInitialWorkingDirectory();
   }
   
-  /** {@inheritDoc} */
   @Override
   public FsStatus getStatus(Path p) throws IOException {
     return fs.getStatus(p);
   }
   
-  /** {@inheritDoc} */
   @Override
   public boolean mkdirs(Path f, FsPermission permission) throws IOException {
     return fs.mkdirs(f, permission);
@@ -274,6 +263,7 @@ public class FilterFileSystem extends Fi
    * the given dst name.
    * delSrc indicates if the source should be removed
    */
+  @Override
   public void copyFromLocalFile(boolean delSrc, Path src, Path dst)
     throws IOException {
     fs.copyFromLocalFile(delSrc, src, dst);
@@ -284,6 +274,7 @@ public class FilterFileSystem extends Fi
    * the given dst name.
    * delSrc indicates if the source should be removed
    */
+  @Override
   public void copyFromLocalFile(boolean delSrc, boolean overwrite, 
                                 Path[] srcs, Path dst)
     throws IOException {
@@ -295,6 +286,7 @@ public class FilterFileSystem extends Fi
    * the given dst name.
    * delSrc indicates if the source should be removed
    */
+  @Override
   public void copyFromLocalFile(boolean delSrc, boolean overwrite, 
                                 Path src, Path dst)
     throws IOException {
@@ -306,6 +298,7 @@ public class FilterFileSystem extends Fi
    * Copy it from FS control to the local dst name.
    * delSrc indicates if the src will be removed or not.
    */   
+  @Override
   public void copyToLocalFile(boolean delSrc, Path src, Path dst)
     throws IOException {
     fs.copyToLocalFile(delSrc, src, dst);
@@ -317,6 +310,7 @@ public class FilterFileSystem extends Fi
    * file.  If the FS is local, we write directly into the target.  If
    * the FS is remote, we write into the tmp local area.
    */
+  @Override
   public Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile)
     throws IOException {
     return fs.startLocalOutput(fsOutputFile, tmpLocalFile);
@@ -328,12 +322,14 @@ public class FilterFileSystem extends Fi
    * FS will copy the contents of tmpLocalFile to the correct target at
    * fsOutputFile.
    */
+  @Override
   public void completeLocalOutput(Path fsOutputFile, Path tmpLocalFile)
     throws IOException {
     fs.completeLocalOutput(fsOutputFile, tmpLocalFile);
   }
 
   /** Return the total size of all files in the filesystem.*/
+  @Override
   public long getUsed() throws IOException{
     return fs.getUsed();
   }
@@ -377,23 +373,24 @@ public class FilterFileSystem extends Fi
   /**
    * Get file status.
    */
+  @Override
   public FileStatus getFileStatus(Path f) throws IOException {
     return fs.getFileStatus(f);
   }
 
-  /** {@inheritDoc} */
+  @Override
   public FileChecksum getFileChecksum(Path f) throws IOException {
     return fs.getFileChecksum(f);
   }
   
-  /** {@inheritDoc} */
+  @Override
   public void setVerifyChecksum(boolean verifyChecksum) {
     fs.setVerifyChecksum(verifyChecksum);
   }
   
   @Override
   public void setWriteChecksum(boolean writeChecksum) {
-    fs.setVerifyChecksum(writeChecksum);
+    fs.setWriteChecksum(writeChecksum);
   }
 
   @Override
@@ -407,21 +404,18 @@ public class FilterFileSystem extends Fi
     fs.close();
   }
 
-  /** {@inheritDoc} */
   @Override
   public void setOwner(Path p, String username, String groupname
       ) throws IOException {
     fs.setOwner(p, username, groupname);
   }
 
-  /** {@inheritDoc} */
   @Override
   public void setTimes(Path p, long mtime, long atime
       ) throws IOException {
     fs.setTimes(p, mtime, atime);
   }
 
-  /** {@inheritDoc} */
   @Override
   public void setPermission(Path p, FsPermission permission
       ) throws IOException {
@@ -431,10 +425,11 @@ public class FilterFileSystem extends Fi
   @Override
   protected FSDataOutputStream primitiveCreate(Path f,
       FsPermission absolutePermission, EnumSet<CreateFlag> flag,
-      int bufferSize, short replication, long blockSize, Progressable progress, int bytesPerChecksum)
+      int bufferSize, short replication, long blockSize,
+      Progressable progress, ChecksumOpt checksumOpt)
       throws IOException {
     return fs.primitiveCreate(f, absolutePermission, flag,
-        bufferSize, replication, blockSize, progress, bytesPerChecksum);
+        bufferSize, replication, blockSize, progress, checksumOpt);
   }
 
   @Override
@@ -445,25 +440,7 @@ public class FilterFileSystem extends Fi
   }
   
   @Override // FileSystem
-  public String getCanonicalServiceName() {
-    return fs.getCanonicalServiceName();
-  }
-  
-  @Override // FileSystem
-  @SuppressWarnings("deprecation")
-  public Token<?> getDelegationToken(String renewer) throws IOException {
-    return fs.getDelegationToken(renewer);
-  }
-  
-  @Override // FileSystem
-  public List<Token<?>> getDelegationTokens(String renewer) throws IOException {
-    return fs.getDelegationTokens(renewer);
-  }
-  
-  @Override
-  // FileSystem
-  public List<Token<?>> getDelegationTokens(String renewer,
-      Credentials credentials) throws IOException {
-    return fs.getDelegationTokens(renewer, credentials);
+  public FileSystem[] getChildFileSystems() {
+    return new FileSystem[]{fs};
   }
 }
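
Replacing the per-method token overrides with getChildFileSystems() lets the
base class's collectDelegationTokens() recurse into the wrapped filesystem; a
sketch of the effect (renewer illustrative; Credentials/Token imports assumed):

    // Sketch: tokens of the wrapped fs are gathered via recursion into
    // getChildFileSystems() rather than via delegated token methods.
    static void filteredTokens(FilterFileSystem ffs, Credentials creds)
        throws IOException {
      Token<?>[] tokens = ffs.addDelegationTokens("yarn", creds);
    }
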

Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java Fri Oct 19 02:25:55 2012
@@ -27,6 +27,7 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.FileSystem.Statistics;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Progressable;
@@ -81,11 +82,11 @@ public abstract class FilterFs extends A
   public FSDataOutputStream createInternal(Path f,
     EnumSet<CreateFlag> flag, FsPermission absolutePermission, int bufferSize,
     short replication, long blockSize, Progressable progress,
-    int bytesPerChecksum, boolean createParent) 
+    ChecksumOpt checksumOpt, boolean createParent) 
       throws IOException, UnresolvedLinkException {
     checkPath(f);
     return myFs.createInternal(f, flag, absolutePermission, bufferSize,
-        replication, blockSize, progress, bytesPerChecksum, createParent);
+        replication, blockSize, progress, checksumOpt, createParent);
   }
 
   @Override
@@ -173,9 +174,6 @@ public abstract class FilterFs extends A
     return myFs.listStatus(f);
   }
 
-  /**
-   * {@inheritDoc}
-   */
   @Override
   public RemoteIterator<Path> listCorruptFileBlocks(Path path)
     throws IOException {

Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsServerDefaults.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsServerDefaults.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsServerDefaults.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsServerDefaults.java Fri Oct 19 02:25:55 2012
@@ -26,6 +26,8 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableFactories;
 import org.apache.hadoop.io.WritableFactory;
+import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.util.DataChecksum;
 
 /****************************************************
  * Provides server default configuration values to clients.
@@ -37,6 +39,7 @@ public class FsServerDefaults implements
 
   static { // register a ctor
     WritableFactories.setFactory(FsServerDefaults.class, new WritableFactory() {
+      @Override
       public Writable newInstance() {
         return new FsServerDefaults();
       }
@@ -48,17 +51,25 @@ public class FsServerDefaults implements
   private int writePacketSize;
   private short replication;
   private int fileBufferSize;
+  private boolean encryptDataTransfer;
+  private long trashInterval;
+  private DataChecksum.Type checksumType;
 
   public FsServerDefaults() {
   }
 
   public FsServerDefaults(long blockSize, int bytesPerChecksum,
-      int writePacketSize, short replication, int fileBufferSize) {
+      int writePacketSize, short replication, int fileBufferSize,
+      boolean encryptDataTransfer, long trashInterval,
+      DataChecksum.Type checksumType) {
     this.blockSize = blockSize;
     this.bytesPerChecksum = bytesPerChecksum;
     this.writePacketSize = writePacketSize;
     this.replication = replication;
     this.fileBufferSize = fileBufferSize;
+    this.encryptDataTransfer = encryptDataTransfer;
+    this.trashInterval = trashInterval;
+    this.checksumType = checksumType;
   }
 
   public long getBlockSize() {
@@ -80,10 +91,23 @@ public class FsServerDefaults implements
   public int getFileBufferSize() {
     return fileBufferSize;
   }
+  
+  public boolean getEncryptDataTransfer() {
+    return encryptDataTransfer;
+  }
+
+  public long getTrashInterval() {
+    return trashInterval;
+  }
+
+  public DataChecksum.Type getChecksumType() {
+    return checksumType;
+  }
 
   // /////////////////////////////////////////
   // Writable
   // /////////////////////////////////////////
+  @Override
   @InterfaceAudience.Private
   public void write(DataOutput out) throws IOException {
     out.writeLong(blockSize);
@@ -91,8 +115,10 @@ public class FsServerDefaults implements
     out.writeInt(writePacketSize);
     out.writeShort(replication);
     out.writeInt(fileBufferSize);
+    WritableUtils.writeEnum(out, checksumType);
   }
 
+  @Override
   @InterfaceAudience.Private
   public void readFields(DataInput in) throws IOException {
     blockSize = in.readLong();
@@ -100,5 +126,6 @@ public class FsServerDefaults implements
     writePacketSize = in.readInt();
     replication = in.readShort();
     fileBufferSize = in.readInt();
+    checksumType = WritableUtils.readEnum(in, DataChecksum.Type.class);
   }
 }
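
A minimal round-trip sketch of the extended Writable; note that only
checksumType joins the wire format here, while encryptDataTransfer and
trashInterval stay local-only (buffer classes from org.apache.hadoop.io; all
values illustrative):

    // Sketch: serialize and restore the defaults.
    static FsServerDefaults roundTrip() throws IOException {
      FsServerDefaults d = new FsServerDefaults(128 * 1024 * 1024L, 512,
          64 * 1024, (short) 3, 4096, false, 0, DataChecksum.Type.CRC32);
      DataOutputBuffer out = new DataOutputBuffer();
      d.write(out);
      DataInputBuffer in = new DataInputBuffer();
      in.reset(out.getData(), out.getLength());
      FsServerDefaults copy = new FsServerDefaults();
      copy.readFields(in); // encrypt/trash fields revert to their defaults
      return copy;
    }
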

Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java Fri Oct 19 02:25:55 2012
@@ -236,6 +236,7 @@ public class FsShell extends Configured 
   /**
    * run
    */
+  @Override
   public int run(String argv[]) throws Exception {
     // initialize FsShell
     init();

Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsStatus.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsStatus.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsStatus.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsStatus.java Fri Oct 19 02:25:55 2012
@@ -60,12 +60,14 @@ public class FsStatus implements Writabl
   //////////////////////////////////////////////////
   // Writable
   //////////////////////////////////////////////////
+  @Override
   public void write(DataOutput out) throws IOException {
     out.writeLong(capacity);
     out.writeLong(used);
     out.writeLong(remaining);
   }
 
+  @Override
   public void readFields(DataInput in) throws IOException {
     capacity = in.readLong();
     used = in.readLong();

Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlConnection.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlConnection.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlConnection.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlConnection.java Fri Oct 19 02:25:55 2012
@@ -53,7 +53,6 @@ class FsUrlConnection extends URLConnect
     }
   }
 
-  /* @inheritDoc */
   @Override
   public InputStream getInputStream() throws IOException {
     if (is == null) {

Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlStreamHandlerFactory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlStreamHandlerFactory.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlStreamHandlerFactory.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlStreamHandlerFactory.java Fri Oct 19 02:25:55 2012
@@ -59,6 +59,7 @@ public class FsUrlStreamHandlerFactory i
     this.handler = new FsUrlStreamHandler(this.conf);
   }
 
+  @Override
   public java.net.URLStreamHandler createURLStreamHandler(String protocol) {
     if (!protocols.containsKey(protocol)) {
       boolean known = true;

Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobExpander.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobExpander.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobExpander.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobExpander.java Fri Oct 19 02:25:55 2012
@@ -39,12 +39,26 @@ class GlobExpander {
   }
   
   /**
-   * Expand globs in the given <code>filePattern</code> into a collection of 
-   * file patterns so that in the expanded set no file pattern has a
-   * slash character ("/") in a curly bracket pair.
+   * Expand globs in the given <code>filePattern</code> into a collection of
+   * file patterns so that in the expanded set no file pattern has a slash
+   * character ("/") in a curly bracket pair.
+   * <p>
+   * Some examples of how the filePattern is expanded:<br>
+   * <pre>
+   * <b>
+   * filePattern         - Expanded file pattern </b>
+   * {a/b}               - a/b
+   * /}{a/b}             - /}a/b
+   * p{a/b,c/d}s         - pa/bs, pc/ds
+   * {a/b,c/d,{e,f}}     - a/b, c/d, {e,f}
+   * {a/b,c/d}{e,f}      - a/b{e,f}, c/d{e,f}
+   * {a,b}/{b,{c/d,e/f}} - {a,b}/b, {a,b}/c/d, {a,b}/e/f
+   * {a,b}/{c/\d}        - {a,b}/c/d
+   * </pre>
+   * 
    * @param filePattern
    * @return expanded file patterns
-   * @throws IOException 
+   * @throws IOException
    */
   public static List<String> expand(String filePattern) throws IOException {
     List<String> fullyExpanded = new ArrayList<String>();
@@ -65,7 +79,7 @@ class GlobExpander {
   /**
    * Expand the leftmost outer curly bracket pair containing a
    * slash character ("/") in <code>filePattern</code>.
-   * @param filePattern
+   * @param filePatternWithOffset
    * @return expanded file patterns
    * @throws IOException 
    */

Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobFilter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobFilter.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobFilter.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobFilter.java Fri Oct 19 02:25:55 2012
@@ -31,6 +31,7 @@ import org.apache.hadoop.classification.
 @InterfaceStability.Evolving
 public class GlobFilter implements PathFilter {
   private final static PathFilter DEFAULT_FILTER = new PathFilter() {
+      @Override
       public boolean accept(Path file) {
         return true;
       }
@@ -75,6 +76,7 @@ public class GlobFilter implements PathF
     return pattern.hasWildcard();
   }
 
+  @Override
   public boolean accept(Path path) {
     return pattern.matches(path.getName()) && userFilter.accept(path);
   }

Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java Fri Oct 19 02:25:55 2012
@@ -24,11 +24,11 @@ import java.net.URI;
 import java.net.URISyntaxException;
 import java.net.URLDecoder;
 import java.util.ArrayList;
-import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
 import java.util.HashMap;
+import java.util.concurrent.ConcurrentHashMap;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -52,7 +52,8 @@ import org.apache.hadoop.util.Progressab
 public class HarFileSystem extends FilterFileSystem {
   public static final int VERSION = 3;
 
-  private static final Map<URI, HarMetaData> harMetaCache = new HashMap<URI, HarMetaData>();
+  private static final Map<URI, HarMetaData> harMetaCache =
+      new ConcurrentHashMap<URI, HarMetaData>();
 
   // uri representation of this Har filesystem
   private URI uri;
@@ -105,6 +106,7 @@ public class HarFileSystem extends Filte
    * har:///archivepath. This assumes the underlying filesystem
    * to be used in case not specified.
    */
+  @Override
   public void initialize(URI name, Configuration conf) throws IOException {
     // decode the name
     URI underLyingURI = decodeHarURI(name, conf);
@@ -246,6 +248,7 @@ public class HarFileSystem extends Filte
   /**
    * return the top level archive.
    */
+  @Override
   public Path getWorkingDirectory() {
     return new Path(uri.toString());
   }
@@ -635,6 +638,7 @@ public class HarFileSystem extends Filte
   /**
    * @return null since no checksum algorithm is implemented.
    */
+  @Override
   public FileChecksum getFileChecksum(Path f) {
     return null;
   }
@@ -667,6 +671,7 @@ public class HarFileSystem extends Filte
     throw new IOException("Har: Create not allowed");
   }
   
+  @Override
   public FSDataOutputStream create(Path f,
       FsPermission permission,
       boolean overwrite,
@@ -734,10 +739,12 @@ public class HarFileSystem extends Filte
   /**
    * return the top level archive path.
    */
+  @Override
   public Path getHomeDirectory() {
     return new Path(uri.toString());
   }
   
+  @Override
   public void setWorkingDirectory(Path newDir) {
     //does nothing.
   }
@@ -745,6 +752,7 @@ public class HarFileSystem extends Filte
   /**
    * not implemented.
    */
+  @Override
   public boolean mkdirs(Path f, FsPermission permission) throws IOException {
     throw new IOException("Har: mkdirs not allowed");
   }
@@ -752,6 +760,7 @@ public class HarFileSystem extends Filte
   /**
    * not implemented.
    */
+  @Override
   public void copyFromLocalFile(boolean delSrc, Path src, Path dst) throws 
         IOException {
     throw new IOException("Har: copyfromlocalfile not allowed");
@@ -760,6 +769,7 @@ public class HarFileSystem extends Filte
   /**
    * copies the file in the har filesystem to a local file.
    */
+  @Override
   public void copyToLocalFile(boolean delSrc, Path src, Path dst) 
     throws IOException {
     FileUtil.copy(this, src, getLocal(getConf()), dst, false, getConf());
@@ -768,6 +778,7 @@ public class HarFileSystem extends Filte
   /**
    * not implemented.
    */
+  @Override
   public Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile) 
     throws IOException {
     throw new IOException("Har: startLocalOutput not allowed");
@@ -776,6 +787,7 @@ public class HarFileSystem extends Filte
   /**
    * not implemented.
    */
+  @Override
   public void completeLocalOutput(Path fsOutputFile, Path tmpLocalFile) 
     throws IOException {
     throw new IOException("Har: completeLocalOutput not allowed");
@@ -784,6 +796,7 @@ public class HarFileSystem extends Filte
   /**
    * not implemented.
    */
+  @Override
   public void setOwner(Path p, String username, String groupname)
     throws IOException {
     throw new IOException("Har: setowner not allowed");
@@ -792,6 +805,7 @@ public class HarFileSystem extends Filte
   /**
    * Not implemented.
    */
+  @Override
   public void setPermission(Path p, FsPermission permisssion) 
     throws IOException {
     throw new IOException("Har: setPermission not allowed");
@@ -824,6 +838,7 @@ public class HarFileSystem extends Filte
         this.end = start + length;
       }
       
+      @Override
       public synchronized int available() throws IOException {
         long remaining = end - underLyingStream.getPos();
         if (remaining > (long)Integer.MAX_VALUE) {
@@ -832,6 +847,7 @@ public class HarFileSystem extends Filte
         return (int) remaining;
       }
       
+      @Override
       public synchronized  void close() throws IOException {
         underLyingStream.close();
         super.close();
@@ -846,15 +862,18 @@ public class HarFileSystem extends Filte
       /**
        * reset is not implemented
        */
+      @Override
       public void reset() throws IOException {
         throw new IOException("reset not implemented.");
       }
       
+      @Override
       public synchronized int read() throws IOException {
         int ret = read(oneBytebuff, 0, 1);
         return (ret <= 0) ? -1: (oneBytebuff[0] & 0xff);
       }
       
+      @Override
       public synchronized int read(byte[] b) throws IOException {
         int ret = read(b, 0, b.length);
         if (ret != -1) {
@@ -866,6 +885,7 @@ public class HarFileSystem extends Filte
       /**
        * 
        */
+      @Override
       public synchronized int read(byte[] b, int offset, int len) 
         throws IOException {
         int newlen = len;
@@ -881,6 +901,7 @@ public class HarFileSystem extends Filte
         return ret;
       }
       
+      @Override
       public synchronized long skip(long n) throws IOException {
         long tmpN = n;
         if (tmpN > 0) {
@@ -894,10 +915,12 @@ public class HarFileSystem extends Filte
         return (tmpN < 0)? -1 : 0;
       }
       
+      @Override
       public synchronized long getPos() throws IOException {
         return (position - start);
       }
       
+      @Override
       public synchronized void seek(long pos) throws IOException {
         if (pos < 0 || (start + pos > end)) {
           throw new IOException("Failed to seek: EOF");
@@ -906,6 +929,7 @@ public class HarFileSystem extends Filte
         underLyingStream.seek(position);
       }
 
+      @Override
       public boolean seekToNewSource(long targetPos) throws IOException {
         //do not need to implement this
         // hdfs in itself does seektonewsource 
@@ -916,6 +940,7 @@ public class HarFileSystem extends Filte
       /**
        * implementing position readable. 
        */
+      @Override
       public int read(long pos, byte[] b, int offset, int length) 
       throws IOException {
         int nlength = length;
@@ -928,6 +953,7 @@ public class HarFileSystem extends Filte
       /**
        * position readable again.
        */
+      @Override
       public void readFully(long pos, byte[] b, int offset, int length) 
       throws IOException {
         if (start + length + pos > end) {
@@ -936,6 +962,7 @@ public class HarFileSystem extends Filte
         underLyingStream.readFully(pos + start, b, offset, length);
       }
       
+      @Override
       public void readFully(long pos, byte[] b) throws IOException {
           readFully(pos, b, 0, b.length);
       }
@@ -1055,7 +1082,7 @@ public class HarFileSystem extends Filte
       FileStatus archiveStat = fs.getFileStatus(archiveIndexPath);
       archiveIndexTimestamp = archiveStat.getModificationTime();
       LineReader aLin;
-      String retStr = null;
+
       // now start reading the real index file
       for (Store s: stores) {
         read = 0;

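For illustration, a minimal standalone sketch of why the harMetaCache
switch from HashMap to ConcurrentHashMap matters: the cache is static, so
HarFileSystem instances initialized on different threads share it. All
names below are made up, and HarMetaData is reduced to a String; the real
code uses plain get()/put() rather than putIfAbsent().

    import java.net.URI;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    public class MetaCacheSketch {
      // Shared across instances, like HarFileSystem.harMetaCache; a plain
      // HashMap can corrupt itself under concurrent put() calls.
      private static final ConcurrentMap<URI, String> cache =
          new ConcurrentHashMap<URI, String>();

      static String lookup(URI uri) {
        String meta = cache.get(uri);
        if (meta == null) {
          // putIfAbsent keeps the first value if two threads race here.
          String fresh = "metadata for " + uri;
          String prev = cache.putIfAbsent(uri, fresh);
          meta = (prev == null) ? fresh : prev;
        }
        return meta;
      }

      public static void main(String[] args) throws InterruptedException {
        final URI har = URI.create("har://hdfs-host/user/archive.har");
        Runnable r = new Runnable() {
          @Override
          public void run() { System.out.println(lookup(har)); }
        };
        Thread t1 = new Thread(r);
        Thread t2 = new Thread(r);
        t1.start(); t2.start();
        t1.join(); t2.join();
      }
    }
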
Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java Fri Oct 19 02:25:55 2012
@@ -265,6 +265,9 @@ public class LocalDirAllocator {
     private synchronized void confChanged(Configuration conf) 
         throws IOException {
       String newLocalDirs = conf.get(contextCfgItemName);
+      if (null == newLocalDirs) {
+        throw new IOException(contextCfgItemName + " not configured");
+      }
       if (!newLocalDirs.equals(savedLocalDirs)) {
         localDirs = StringUtils.getTrimmedStrings(newLocalDirs);
         localFS = FileSystem.getLocal(conf);

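The added null check turns an unset context key into a descriptive
IOException instead of the NullPointerException that
newLocalDirs.equals(savedLocalDirs) would otherwise raise. A rough sketch
of the fail-fast pattern, with Configuration.get() replaced by a
system-property lookup and all names invented:

    import java.io.IOException;

    public class ConfGuardSketch {
      // Stand-in for Configuration.get(key); returns null when unset.
      static String get(String key) {
        return System.getProperty(key);
      }

      static String[] localDirs(String key) throws IOException {
        String dirs = get(key);
        if (null == dirs) {
          // Fail fast with a descriptive message rather than letting a
          // later equals() call blow up with a NullPointerException.
          throw new IOException(key + " not configured");
        }
        return dirs.split(",");
      }

      public static void main(String[] args) {
        try {
          localDirs("test.local.dir");
        } catch (IOException e) {
          System.out.println("caught: " + e.getMessage());
        }
      }
    }
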
Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java Fri Oct 19 02:25:55 2012
@@ -91,6 +91,7 @@ public class LocalFileSystem extends Che
    * Moves files to a bad file directory on the same device, so that their
    * storage will not be reused.
    */
+  @Override
   public boolean reportChecksumFailure(Path p, FSDataInputStream in,
                                        long inPos,
                                        FSDataInputStream sums, long sumsPos) {

Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java Fri Oct 19 02:25:55 2012
@@ -94,6 +94,7 @@ public class LocatedFileStatus extends F
   * @throws ClassCastException if the specified object is not of 
    *         type FileStatus
    */
+  @Override
   public int compareTo(Object o) {
     return super.compareTo(o);
   }
@@ -102,6 +103,7 @@ public class LocatedFileStatus extends F
    * @param   o the object to be compared.
   * @return  true if the two file statuses have the same path name; false if not.
    */
+  @Override
   public boolean equals(Object o) {
     return super.equals(o);
   }
@@ -112,6 +114,7 @@ public class LocatedFileStatus extends F
    *
    * @return  a hash code value for the path name.
    */
+  @Override
   public int hashCode() {
     return super.hashCode();
   }

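Most of the churn in this commit is the addition of @Override on methods
that already override a superclass method. The annotation changes no
behavior, but it turns an accidental overload, the classic
equals()/hashCode()/compareTo() mistake, into a compile error. A tiny
made-up illustration:

    public class OverrideSketch {
      static class Base {
        public boolean matches(Object o) { return o == this; }
      }

      static class Child extends Base {
        @Override
        public boolean matches(Object o) { return true; } // valid override
        // Changing the parameter type to String would no longer override
        // Base.matches(Object); with @Override present, javac rejects it
        // instead of silently creating an overload.
      }

      public static void main(String[] args) {
        Base b = new Child();
        System.out.println(b.matches("x")); // true: Child's version runs
      }
    }
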
Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MD5MD5CRC32FileChecksum.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MD5MD5CRC32FileChecksum.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MD5MD5CRC32FileChecksum.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MD5MD5CRC32FileChecksum.java Fri Oct 19 02:25:55 2012
@@ -23,12 +23,17 @@ import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.util.DataChecksum;
 import org.xml.sax.Attributes;
 import org.xml.sax.SAXException;
 import org.znerd.xmlenc.XMLOutputter;
 
+import org.apache.hadoop.fs.MD5MD5CRC32CastagnoliFileChecksum;
+import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum;
+
 /** MD5 of MD5 of CRC32. */
 @InterfaceAudience.LimitedPrivate({"HDFS"})
 @InterfaceStability.Unstable
@@ -52,27 +57,49 @@ public class MD5MD5CRC32FileChecksum ext
     this.md5 = md5;
   }
   
-  /** {@inheritDoc} */ 
+  @Override
   public String getAlgorithmName() {
-    return "MD5-of-" + crcPerBlock + "MD5-of-" + bytesPerCRC + "CRC32";
+    return "MD5-of-" + crcPerBlock + "MD5-of-" + bytesPerCRC +
+        getCrcType().name();
   }
 
-  /** {@inheritDoc} */ 
-  public int getLength() {return LENGTH;}
+  public static DataChecksum.Type getCrcTypeFromAlgorithmName(String algorithm)
+      throws IOException {
+    if (algorithm.endsWith(DataChecksum.Type.CRC32.name())) {
+      return DataChecksum.Type.CRC32;
+    } else if (algorithm.endsWith(DataChecksum.Type.CRC32C.name())) {
+      return DataChecksum.Type.CRC32C;
+    }
 
-  /** {@inheritDoc} */ 
+    throw new IOException("Unknown checksum type in " + algorithm);
+  }
+ 
+  @Override
+  public int getLength() {return LENGTH;}
+ 
+  @Override
   public byte[] getBytes() {
     return WritableUtils.toByteArray(this);
   }
 
-  /** {@inheritDoc} */ 
+  /** returns the CRC type */
+  public DataChecksum.Type getCrcType() {
+    // default to the one that is understood by all releases.
+    return DataChecksum.Type.CRC32;
+  }
+
+  public ChecksumOpt getChecksumOpt() {
+    return new ChecksumOpt(getCrcType(), bytesPerCRC);
+  }
+
+  @Override
   public void readFields(DataInput in) throws IOException {
     bytesPerCRC = in.readInt();
     crcPerBlock = in.readLong();
     md5 = MD5Hash.read(in);
   }
-
-  /** {@inheritDoc} */ 
+ 
+  @Override
   public void write(DataOutput out) throws IOException {
     out.writeInt(bytesPerCRC);
     out.writeLong(crcPerBlock);
@@ -86,6 +113,7 @@ public class MD5MD5CRC32FileChecksum ext
     if (that != null) {
       xml.attribute("bytesPerCRC", "" + that.bytesPerCRC);
       xml.attribute("crcPerBlock", "" + that.crcPerBlock);
+      xml.attribute("crcType", ""+ that.getCrcType().name());
       xml.attribute("md5", "" + that.md5);
     }
     xml.endTag();
@@ -97,21 +125,45 @@ public class MD5MD5CRC32FileChecksum ext
     final String bytesPerCRC = attrs.getValue("bytesPerCRC");
     final String crcPerBlock = attrs.getValue("crcPerBlock");
     final String md5 = attrs.getValue("md5");
+    String crcType = attrs.getValue("crcType");
+    DataChecksum.Type finalCrcType;
     if (bytesPerCRC == null || crcPerBlock == null || md5 == null) {
       return null;
     }
 
     try {
-      return new MD5MD5CRC32FileChecksum(Integer.valueOf(bytesPerCRC),
-          Integer.valueOf(crcPerBlock), new MD5Hash(md5));
-    } catch(Exception e) {
+      // old versions don't support crcType.
+      if (crcType == null || crcType.equals("")) {
+        finalCrcType = DataChecksum.Type.CRC32;
+      } else {
+        finalCrcType = DataChecksum.Type.valueOf(crcType);
+      }
+
+      switch (finalCrcType) {
+        case CRC32:
+          return new MD5MD5CRC32GzipFileChecksum(
+              Integer.valueOf(bytesPerCRC),
+              Integer.valueOf(crcPerBlock),
+              new MD5Hash(md5));
+        case CRC32C:
+          return new MD5MD5CRC32CastagnoliFileChecksum(
+              Integer.valueOf(bytesPerCRC),
+              Integer.valueOf(crcPerBlock),
+              new MD5Hash(md5));
+        default:
+          // we should never get here since finalCrcType either holds a
+          // valid type or valueOf() has already thrown an exception.
+          return null;
+      }
+    } catch (Exception e) {
       throw new SAXException("Invalid attributes: bytesPerCRC=" + bytesPerCRC
-          + ", crcPerBlock=" + crcPerBlock + ", md5=" + md5, e);
+          + ", crcPerBlock=" + crcPerBlock + ", crcType=" + crcType 
+          + ", md5=" + md5, e);
     }
   }
-
-  /** {@inheritDoc} */ 
+ 
+  @Override
   public String toString() {
     return getAlgorithmName() + ":" + md5;
   }
-}
\ No newline at end of file
+}

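The new getCrcTypeFromAlgorithmName() recovers the checksum type from the
suffix that getAlgorithmName() now appends. A self-contained sketch of
that round trip; the enum below is a stand-in for DataChecksum.Type:

    import java.io.IOException;

    public class CrcNameSketch {
      enum Type { CRC32, CRC32C } // stand-in for DataChecksum.Type

      // Mirrors the endsWith() dispatch; the check order is safe because
      // neither name is a suffix of the other (a string ending in
      // "CRC32C" does not end in "CRC32").
      static Type crcTypeFromAlgorithmName(String algorithm)
          throws IOException {
        if (algorithm.endsWith(Type.CRC32.name())) {
          return Type.CRC32;
        } else if (algorithm.endsWith(Type.CRC32C.name())) {
          return Type.CRC32C;
        }
        throw new IOException("Unknown checksum type in " + algorithm);
      }

      public static void main(String[] args) throws IOException {
        System.out.println(
            crcTypeFromAlgorithmName("MD5-of-128MD5-of-512CRC32"));
        System.out.println(
            crcTypeFromAlgorithmName("MD5-of-128MD5-of-512CRC32C"));
      }
    }
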
Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java Fri Oct 19 02:25:55 2012
@@ -20,6 +20,7 @@ package org.apache.hadoop.fs;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Progressable;
 
 /**
@@ -46,6 +47,10 @@ public final class Options {
     public static BytesPerChecksum bytesPerChecksum(short crc) {
       return new BytesPerChecksum(crc);
     }
+    public static ChecksumParam checksumParam(
+        ChecksumOpt csumOpt) {
+      return new ChecksumParam(csumOpt);
+    }
     public static Perms perms(FsPermission perm) {
       return new Perms(perm);
     }
@@ -91,7 +96,8 @@ public final class Options {
       }
       public int getValue() { return bufferSize; }
     }
-    
+
+    /** This is not needed if ChecksumParam is specified. **/
     public static class BytesPerChecksum extends CreateOpts {
       private final int bytesPerChecksum;
       protected BytesPerChecksum(short bpc) { 
@@ -103,6 +109,14 @@ public final class Options {
       }
       public int getValue() { return bytesPerChecksum; }
     }
+
+    public static class ChecksumParam extends CreateOpts {
+      private final ChecksumOpt checksumOpt;
+      protected ChecksumParam(ChecksumOpt csumOpt) {
+        checksumOpt = csumOpt;
+      }
+      public ChecksumOpt getValue() { return checksumOpt; }
+    }
     
     public static class Perms extends CreateOpts {
       private final FsPermission permissions;
@@ -206,4 +220,116 @@ public final class Options {
       return code;
     }
   }
+
+  /**
+   * This is used in FileSystem and FileContext to specify checksum options.
+   */
+  public static class ChecksumOpt {
+    private final int crcBlockSize;
+    private final DataChecksum.Type crcType;
+
+    /**
+     * Create an uninitialized one
+     */
+    public ChecksumOpt() {
+      crcBlockSize = -1;
+      crcType = DataChecksum.Type.DEFAULT;
+    }
+
+    /**
+     * Normal ctor
+     * @param type checksum type
+     * @param size bytes per checksum
+     */
+    public ChecksumOpt(DataChecksum.Type type, int size) {
+      crcBlockSize = size;
+      crcType = type;
+    }
+
+    public int getBytesPerChecksum() {
+      return crcBlockSize;
+    }
+
+    public DataChecksum.Type getChecksumType() {
+      return crcType;
+    }
+
+    /**
+     * Create a ChecksumOpt that disables checksum
+     */
+    public static ChecksumOpt createDisabled() {
+      return new ChecksumOpt(DataChecksum.Type.NULL, -1);
+    }
+
+    /**
+     * A helper method for processing user input and default value to 
+     * create a combined checksum option. This is a bit complicated because
+     * bytesPerChecksum is kept for backward compatibility.
+     *
+     * @param defaultOpt Default checksum option
+     * @param userOpt User-specified checksum option. Ignored if null.
+     * @param userBytesPerChecksum User-specified bytesPerChecksum.
+     *                Ignored if < 0.
+     */
+    public static ChecksumOpt processChecksumOpt(ChecksumOpt defaultOpt, 
+        ChecksumOpt userOpt, int userBytesPerChecksum) {
+      // The following is done to avoid unnecessary creation of new objects.
+      // tri-state variable: 0 default, 1 userBytesPerChecksum, 2 userOpt
+      short whichSize;
+      // true default, false userOpt
+      boolean useDefaultType;
+      
+      //  bytesPerChecksum - order of preference
+      //    user specified value in bytesPerChecksum
+      //    user specified value in checksumOpt
+      //    default.
+      if (userBytesPerChecksum > 0) {
+        whichSize = 1; // userBytesPerChecksum
+      } else if (userOpt != null && userOpt.getBytesPerChecksum() > 0) {
+        whichSize = 2; // userOpt
+      } else {
+        whichSize = 0; // default
+      }
+
+      // checksum type - order of preference
+      //   user specified value in checksumOpt
+      //   default.
+      if (userOpt != null &&
+            userOpt.getChecksumType() != DataChecksum.Type.DEFAULT) {
+        useDefaultType = false;
+      } else {
+        useDefaultType = true;
+      }
+
+      // Short-circuit the common and easy cases
+      if (whichSize == 0 && useDefaultType) {
+        return defaultOpt;
+      } else if (whichSize == 2 && !useDefaultType) {
+        return userOpt;
+      }
+
+      // Take care of the rest of combinations
+      DataChecksum.Type type = useDefaultType ? defaultOpt.getChecksumType() :
+          userOpt.getChecksumType();
+      if (whichSize == 0) {
+        return new ChecksumOpt(type, defaultOpt.getBytesPerChecksum());
+      } else if (whichSize == 1) {
+        return new ChecksumOpt(type, userBytesPerChecksum);
+      } else {
+        return new ChecksumOpt(type, userOpt.getBytesPerChecksum());
+      }
+    }
+
+    /**
+     * A helper method for processing user input and default value to 
+     * create a combined checksum option. 
+     *
+     * @param defaultOpt Default checksum option
+     * @param userOpt User-specified checksum option
+     */
+    public static ChecksumOpt processChecksumOpt(ChecksumOpt defaultOpt,
+        ChecksumOpt userOpt) {
+      return processChecksumOpt(defaultOpt, userOpt, -1);
+    }
+  }
 }

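The precedence rules in ChecksumOpt.processChecksumOpt() are easier to see
without the object-reuse bookkeeping. A minimal standalone sketch, where
Opt and Type are stand-ins for ChecksumOpt and DataChecksum.Type; unlike
the real method, it always allocates a new object:

    public class ChecksumOptSketch {
      enum Type { DEFAULT, NULL, CRC32, CRC32C }

      static final class Opt {
        final Type type;
        final int bytesPerChecksum;
        Opt(Type t, int b) { type = t; bytesPerChecksum = b; }
        @Override
        public String toString() { return type + "/" + bytesPerChecksum; }
      }

      // Same precedence as processChecksumOpt(): an explicit
      // userBytesPerChecksum beats the user opt's size, which beats the
      // default; a non-DEFAULT user type beats the default type.
      static Opt process(Opt def, Opt user, int userBpc) {
        int size = (userBpc > 0) ? userBpc
            : (user != null && user.bytesPerChecksum > 0)
                ? user.bytesPerChecksum
                : def.bytesPerChecksum;
        Type type = (user != null && user.type != Type.DEFAULT)
            ? user.type : def.type;
        return new Opt(type, size);
      }

      public static void main(String[] args) {
        Opt def = new Opt(Type.CRC32, 512);
        System.out.println(process(def, null, -1));                  // CRC32/512
        System.out.println(process(def, new Opt(Type.CRC32C, -1), -1));  // CRC32C/512
        System.out.println(process(def, new Opt(Type.DEFAULT, 1024), 256)); // CRC32/256
      }
    }
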
Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java Fri Oct 19 02:25:55 2012
@@ -68,7 +68,7 @@ public class Path implements Comparable 
     // Add a slash to parent's path so resolution is compatible with URI's
     URI parentUri = parent.uri;
     String parentPath = parentUri.getPath();
-    if (!(parentPath.equals("/") || parentPath.equals(""))) {
+    if (!(parentPath.equals("/") || parentPath.isEmpty())) {
       try {
         parentUri = new URI(parentUri.getScheme(), parentUri.getAuthority(),
                       parentUri.getPath()+"/", null, parentUri.getFragment());
@@ -139,7 +139,7 @@ public class Path implements Comparable 
    * Construct a path from a URI
    */
   public Path(URI aUri) {
-    uri = aUri;
+    uri = aUri.normalize();
   }
   
   /** Construct a Path from components. */
@@ -261,6 +261,7 @@ public class Path implements Comparable 
     return new Path(getParent(), getName()+suffix);
   }
 
+  @Override
   public String toString() {
     // we can't use uri.toString(), which escapes everything, because we want
     // illegal characters unescaped in the string, for glob processing, etc.
@@ -289,6 +290,7 @@ public class Path implements Comparable 
     return buffer.toString();
   }
 
+  @Override
   public boolean equals(Object o) {
     if (!(o instanceof Path)) {
       return false;
@@ -297,10 +299,12 @@ public class Path implements Comparable 
     return this.uri.equals(that.uri);
   }
 
+  @Override
   public int hashCode() {
     return uri.hashCode();
   }
 
+  @Override
   public int compareTo(Object o) {
     Path that = (Path)o;
     return this.uri.compareTo(that.uri);

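The one-line change to Path(URI) means a Path built from a URI is
normalized up front, so redundant "." and ".." segments collapse at
construction time. Plain java.net.URI shows the effect the constructor
now relies on:

    import java.net.URI;

    public class PathNormalizeSketch {
      public static void main(String[] args) {
        URI raw = URI.create("hdfs://host/a/b/../c/./d");
        // normalize() is what the patched Path(URI) constructor applies.
        System.out.println(raw.normalize()); // hdfs://host/a/c/d
      }
    }
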
Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java Fri Oct 19 02:25:55 2012
@@ -26,6 +26,7 @@ import java.io.FileNotFoundException;
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.OutputStream;
+import java.io.FileDescriptor;
 import java.net.URI;
 import java.nio.ByteBuffer;
 import java.util.Arrays;
@@ -71,8 +72,10 @@ public class RawLocalFileSystem extends 
     return new File(path.toUri().getPath());
   }
 
+  @Override
   public URI getUri() { return NAME; }
   
+  @Override
   public void initialize(URI uri, Configuration conf) throws IOException {
     super.initialize(uri, conf);
     setConf(conf);
@@ -83,6 +86,7 @@ public class RawLocalFileSystem extends 
       super(f);
     }
     
+    @Override
     public int read() throws IOException {
       int result = super.read();
       if (result != -1) {
@@ -91,6 +95,7 @@ public class RawLocalFileSystem extends 
       return result;
     }
     
+    @Override
     public int read(byte[] data) throws IOException {
       int result = super.read(data);
       if (result != -1) {
@@ -99,6 +104,7 @@ public class RawLocalFileSystem extends 
       return result;
     }
     
+    @Override
     public int read(byte[] data, int offset, int length) throws IOException {
       int result = super.read(data, offset, length);
       if (result != -1) {
@@ -111,7 +117,7 @@ public class RawLocalFileSystem extends 
   /*******************************************************
    * For open()'s FSInputStream.
    *******************************************************/
-  class LocalFSFileInputStream extends FSInputStream {
+  class LocalFSFileInputStream extends FSInputStream implements HasFileDescriptor {
     private FileInputStream fis;
     private long position;
 
@@ -119,15 +125,18 @@ public class RawLocalFileSystem extends 
       this.fis = new TrackingFileInputStream(pathToFile(f));
     }
     
+    @Override
     public void seek(long pos) throws IOException {
       fis.getChannel().position(pos);
       this.position = pos;
     }
     
+    @Override
     public long getPos() throws IOException {
       return this.position;
     }
     
+    @Override
     public boolean seekToNewSource(long targetPos) throws IOException {
       return false;
     }
@@ -135,11 +144,14 @@ public class RawLocalFileSystem extends 
     /*
      * Just forward to the fis
      */
+    @Override
     public int available() throws IOException { return fis.available(); }
+    @Override
     public void close() throws IOException { fis.close(); }
     @Override
     public boolean markSupported() { return false; }
     
+    @Override
     public int read() throws IOException {
       try {
         int value = fis.read();
@@ -152,6 +164,7 @@ public class RawLocalFileSystem extends 
       }
     }
     
+    @Override
     public int read(byte[] b, int off, int len) throws IOException {
       try {
         int value = fis.read(b, off, len);
@@ -164,6 +177,7 @@ public class RawLocalFileSystem extends 
       }
     }
     
+    @Override
     public int read(long position, byte[] b, int off, int len)
       throws IOException {
       ByteBuffer bb = ByteBuffer.wrap(b, off, len);
@@ -174,6 +188,7 @@ public class RawLocalFileSystem extends 
       }
     }
     
+    @Override
     public long skip(long n) throws IOException {
       long value = fis.skip(n);
       if (value > 0) {
@@ -181,8 +196,14 @@ public class RawLocalFileSystem extends 
       }
       return value;
     }
+
+    @Override
+    public FileDescriptor getFileDescriptor() throws IOException {
+      return fis.getFD();
+    }
   }
   
+  @Override
   public FSDataInputStream open(Path f, int bufferSize) throws IOException {
     if (!exists(f)) {
       throw new FileNotFoundException(f.toString());
@@ -204,8 +225,11 @@ public class RawLocalFileSystem extends 
     /*
      * Just forward to the fos
      */
+    @Override
     public void close() throws IOException { fos.close(); }
+    @Override
     public void flush() throws IOException { fos.flush(); }
+    @Override
     public void write(byte[] b, int off, int len) throws IOException {
       try {
         fos.write(b, off, len);
@@ -214,6 +238,7 @@ public class RawLocalFileSystem extends 
       }
     }
     
+    @Override
     public void write(int b) throws IOException {
       try {
         fos.write(b);
@@ -223,7 +248,7 @@ public class RawLocalFileSystem extends 
     }
   }
 
-  /** {@inheritDoc} */
+  @Override
   public FSDataOutputStream append(Path f, int bufferSize,
       Progressable progress) throws IOException {
     if (!exists(f)) {
@@ -236,7 +261,6 @@ public class RawLocalFileSystem extends 
         new LocalFSFileOutputStream(f, true), bufferSize), statistics);
   }
 
-  /** {@inheritDoc} */
   @Override
   public FSDataOutputStream create(Path f, boolean overwrite, int bufferSize,
     short replication, long blockSize, Progressable progress)
@@ -258,7 +282,6 @@ public class RawLocalFileSystem extends 
         new LocalFSFileOutputStream(f, false), bufferSize), statistics);
   }
 
-  /** {@inheritDoc} */
   @Override
   public FSDataOutputStream create(Path f, FsPermission permission,
     boolean overwrite, int bufferSize, short replication, long blockSize,
@@ -270,7 +293,6 @@ public class RawLocalFileSystem extends 
     return out;
   }
 
-  /** {@inheritDoc} */
   @Override
   public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
       boolean overwrite,
@@ -282,6 +304,7 @@ public class RawLocalFileSystem extends 
     return out;
   }
 
+  @Override
   public boolean rename(Path src, Path dst) throws IOException {
     if (pathToFile(src).renameTo(pathToFile(dst))) {
       return true;
@@ -296,6 +319,7 @@ public class RawLocalFileSystem extends 
    * @return true if the file or directory and all its contents were deleted
    * @throws IOException if p is non-empty and recursive is false 
    */
+  @Override
   public boolean delete(Path p, boolean recursive) throws IOException {
     File f = pathToFile(p);
     if (f.isFile()) {
@@ -313,6 +337,7 @@ public class RawLocalFileSystem extends 
    * (<b>Note</b>: Returned list is not sorted in any given order,
    * due to reliance on Java's {@link File#list()} API.)
    */
+  @Override
   public FileStatus[] listStatus(Path f) throws IOException {
     File localf = pathToFile(f);
     FileStatus[] results;
@@ -350,6 +375,7 @@ public class RawLocalFileSystem extends 
    * Creates the specified directory hierarchy. Does not
    * treat existence as an error.
    */
+  @Override
   public boolean mkdirs(Path f) throws IOException {
     if(f == null) {
       throw new IllegalArgumentException("mkdirs path arg is null");
@@ -367,7 +393,6 @@ public class RawLocalFileSystem extends 
       (p2f.mkdir() || p2f.isDirectory());
   }
 
-  /** {@inheritDoc} */
   @Override
   public boolean mkdirs(Path f, FsPermission permission) throws IOException {
     boolean b = mkdirs(f);
@@ -412,7 +437,6 @@ public class RawLocalFileSystem extends 
     return this.makeQualified(new Path(System.getProperty("user.dir")));
   }
 
-  /** {@inheritDoc} */
   @Override
   public FsStatus getStatus(Path p) throws IOException {
     File partition = pathToFile(p == null ? new Path("/") : p);
@@ -424,29 +448,35 @@ public class RawLocalFileSystem extends 
   }
   
   // In the case of the local filesystem, we can just rename the file.
+  @Override
   public void moveFromLocalFile(Path src, Path dst) throws IOException {
     rename(src, dst);
   }
   
   // We can write output directly to the final location
+  @Override
   public Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile)
     throws IOException {
     return fsOutputFile;
   }
   
   // It's in the right place - nothing to do.
+  @Override
   public void completeLocalOutput(Path fsWorkingFile, Path tmpLocalFile)
     throws IOException {
   }
   
+  @Override
   public void close() throws IOException {
     super.close();
   }
   
+  @Override
   public String toString() {
     return "LocalFS";
   }
   
+  @Override
   public FileStatus getFileStatus(Path f) throws IOException {
     File path = pathToFile(f);
     if (path.exists()) {
@@ -462,7 +492,7 @@ public class RawLocalFileSystem extends 
     * owner.equals("").
      */
     private boolean isPermissionLoaded() {
-      return !super.getOwner().equals(""); 
+      return !super.getOwner().isEmpty(); 
     }
     
     RawLocalFileStatus(File f, long defaultBlockSize, FileSystem fs) {

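LocalFSFileInputStream now implements the new HasFileDescriptor interface
by forwarding to FileInputStream.getFD(). A minimal sketch of the same
delegation pattern, with a made-up HasFd interface standing in for
HasFileDescriptor:

    import java.io.File;
    import java.io.FileDescriptor;
    import java.io.FileInputStream;
    import java.io.IOException;

    public class FdSketch {
      interface HasFd {
        FileDescriptor getFileDescriptor() throws IOException;
      }

      static class Wrapper implements HasFd {
        private final FileInputStream fis;
        Wrapper(FileInputStream fis) { this.fis = fis; }
        @Override
        public FileDescriptor getFileDescriptor() throws IOException {
          return fis.getFD();
        }
      }

      public static void main(String[] args) throws IOException {
        File f = File.createTempFile("fd-sketch", ".tmp");
        f.deleteOnExit();
        FileInputStream fis = new FileInputStream(f);
        try {
          // Callers that can see the descriptor may hand it to native
          // code (e.g. read-ahead hints) without reopening the file.
          System.out.println(new Wrapper(fis).getFileDescriptor().valid());
        } finally {
          fis.close();
        }
      }
    }
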
Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java Fri Oct 19 02:25:55 2012
@@ -68,8 +68,26 @@ public class Trash extends Configured {
   public static boolean moveToAppropriateTrash(FileSystem fs, Path p,
       Configuration conf) throws IOException {
     Path fullyResolvedPath = fs.resolvePath(p);
-    Trash trash = new Trash(FileSystem.get(fullyResolvedPath.toUri(), conf), conf);
-    boolean success =  trash.moveToTrash(fullyResolvedPath);
+    FileSystem fullyResolvedFs =
+        FileSystem.get(fullyResolvedPath.toUri(), conf);
+    // If the trash interval is configured server side then clobber this
+    // configuration so that we always respect the server configuration.
+    try {
+      long trashInterval = fullyResolvedFs.getServerDefaults(
+          fullyResolvedPath).getTrashInterval();
+      if (0 != trashInterval) {
+        Configuration confCopy = new Configuration(conf);
+        confCopy.setLong(CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY,
+            trashInterval);
+        conf = confCopy;
+      }
+    } catch (Exception e) {
+      // If we cannot determine that trash is enabled server side then
+      // bail rather than potentially deleting a file when trash is enabled.
+      throw new IOException("Failed to get server trash configuration", e);
+    }
+    Trash trash = new Trash(fullyResolvedFs, conf);
+    boolean success = trash.moveToTrash(fullyResolvedPath);
     if (success) {
       System.out.println("Moved: '" + p + "' to trash at: " +
           trash.getCurrentTrashDir() );
@@ -117,9 +135,4 @@ public class Trash extends Configured {
   public Runnable getEmptier() throws IOException {
     return trashPolicy.getEmptier();
   }
-
-  /** Run an emptier.*/
-  public static void main(String[] args) throws Exception {
-    new Trash(new Configuration()).getEmptier().run();
-  }
 }
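
The moveToAppropriateTrash() change gives a non-zero server-side trash
interval precedence over the client's configuration. A toy sketch of just
that precedence rule; pickTrashInterval() is invented, and the real code
reads the server value via getServerDefaults(path).getTrashInterval():

    public class TrashIntervalSketch {
      // 0 means "not configured server side": fall back to the client value.
      static long pickTrashInterval(long serverInterval, long clientInterval) {
        return (serverInterval != 0) ? serverInterval : clientInterval;
      }

      public static void main(String[] args) {
        System.out.println(pickTrashInterval(1440, 60)); // 1440: server wins
        System.out.println(pickTrashInterval(0, 60));    // 60: client fallback
      }
    }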