Posted to hdfs-commits@hadoop.apache.org by su...@apache.org on 2013/04/20 22:22:24 UTC

svn commit: r1470225 - in /hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/ src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ src/main/java/org/apache/hadoop/hdfs/server/datanode/ src/main/java/org/apache/h...

Author: suresh
Date: Sat Apr 20 20:22:21 2013
New Revision: 1470225

URL: http://svn.apache.org/r1470225
Log:
Merge trunk to HDFS-2802 branch. This involves fixing many conflicts with HDFS-4434.

Modified:
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeId.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/   (props changed)
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/   (props changed)
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/   (props changed)
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/   (props changed)
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/   (props changed)
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java

Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1469644-1470194

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1470225&r1=1470224&r2=1470225&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Sat Apr 20 20:22:21 2013
@@ -6,6 +6,8 @@ Trunk (Unreleased)
 
     HDFS-3034. Remove the deprecated DFSOutputStream.sync() method.  (szetszwo)
 
+    HDFS-4434. Provide a mapping from INodeId to INode. (suresh)
+
   NEW FEATURES
 
     HDFS-3125. Add JournalService to enable Journal Daemon. (suresh)
@@ -2558,6 +2560,9 @@ Release 0.23.8 - UNRELEASED
 
     HDFS-4477. Secondary namenode may retain old tokens (daryn via kihwal)
 
+    HDFS-4699. TestPipelinesFailover#testPipelineRecoveryStress fails
+    sporadically (Chris Nauroth via kihwal)
+
 Release 0.23.7 - UNRELEASED
 
   INCOMPATIBLE CHANGES

Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1469644-1470194

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java?rev=1470225&r1=1470224&r2=1470225&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java Sat Apr 20 20:22:21 2013
@@ -67,7 +67,10 @@ class BlocksMap {
 
 
   void close() {
-    blocks.clear();
+    if (blocks != null) {
+      blocks.clear();
+      blocks = null;
+    }
   }
 
   BlockCollection getBlockCollection(Block b) {

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1470225&r1=1470224&r2=1470225&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Sat Apr 20 20:22:21 2013
@@ -1286,7 +1286,10 @@ public class DataNode extends Configured
     LOG.warn("checkDiskError: exception: ", e);
     if (e instanceof SocketException || e instanceof SocketTimeoutException
     	  || e instanceof ClosedByInterruptException 
-    	  || e.getMessage().startsWith("Broken pipe")) {
+    	  || e.getMessage().startsWith("An established connection was aborted")
+    	  || e.getMessage().startsWith("Broken pipe")
+    	  || e.getMessage().startsWith("Connection reset")
+    	  || e.getMessage().contains("java.nio.channels.SocketChannel")) {
       LOG.info("Not checking disk as checkDiskError was called on a network" +
       		" related exception");	
       return;

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1470225&r1=1470224&r2=1470225&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Sat Apr 20 20:22:21 2013
@@ -29,6 +29,7 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.Condition;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
+import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
@@ -60,8 +61,10 @@ import org.apache.hadoop.hdfs.server.blo
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
+import org.apache.hadoop.hdfs.server.namenode.Content.CountsMap;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount;
+import org.apache.hadoop.hdfs.server.namenode.Quota.Counts;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
@@ -70,6 +73,8 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotException;
 import org.apache.hadoop.hdfs.util.ByteArray;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
+import org.apache.hadoop.hdfs.util.GSet;
+import org.apache.hadoop.hdfs.util.LightWeightGSet;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
@@ -86,7 +91,7 @@ import com.google.common.base.Preconditi
 public class FSDirectory implements Closeable {
   private static INodeDirectoryWithQuota createRoot(FSNamesystem namesystem) {
     final INodeDirectoryWithQuota r = new INodeDirectoryWithQuota(
-        namesystem.allocateNewInodeId(),
+        INodeId.ROOT_INODE_ID,
         INodeDirectory.ROOT_NAME,
         namesystem.createFsOwnerPermissions(new FsPermission((short) 0755)));
     final INodeDirectorySnapshottable s = new INodeDirectorySnapshottable(r);
@@ -94,6 +99,16 @@ public class FSDirectory implements Clos
     return s;
   }
 
+  @VisibleForTesting
+  static boolean CHECK_RESERVED_FILE_NAMES = true;
+  public final static String DOT_RESERVED_STRING = ".reserved";
+  public final static String DOT_RESERVED_PATH_PREFIX = Path.SEPARATOR
+      + DOT_RESERVED_STRING;
+  public final static byte[] DOT_RESERVED = 
+      DFSUtil.string2Bytes(DOT_RESERVED_STRING);
+  public final static String DOT_INODES_STRING = ".inodes";
+  public final static byte[] DOT_INODES = 
+      DFSUtil.string2Bytes(DOT_INODES_STRING);
   INodeDirectoryWithQuota rootDir;
   FSImage fsImage;  
   private final FSNamesystem namesystem;
@@ -101,6 +116,7 @@ public class FSDirectory implements Clos
   private final int maxComponentLength;
   private final int maxDirItems;
   private final int lsLimit;  // max list limit
+  private GSet<INode, INode> inodeMap; // Synchronized by dirLock
 
   // lock to protect the directory and BlockMap
   private ReentrantReadWriteLock dirLock;
@@ -141,6 +157,7 @@ public class FSDirectory implements Clos
     this.dirLock = new ReentrantReadWriteLock(true); // fair
     this.cond = dirLock.writeLock().newCondition();
     rootDir = createRoot(ns);
+    inodeMap = initInodeMap(rootDir);
     this.fsImage = fsImage;
     int configuredLimit = conf.getInt(
         DFSConfigKeys.DFS_LIST_LIMIT, DFSConfigKeys.DFS_LIST_LIMIT_DEFAULT);
@@ -163,6 +180,16 @@ public class FSDirectory implements Clos
     nameCache = new NameCache<ByteArray>(threshold);
     namesystem = ns;
   }
+  
+  @VisibleForTesting
+  static LightWeightGSet<INode, INode> initInodeMap(INodeDirectory rootDir) {
+    // Compute the map capacity by allocating 1% of total memory
+    int capacity = LightWeightGSet.computeCapacity(1, "INodeMap");
+    LightWeightGSet<INode, INode> map = new LightWeightGSet<INode, INode>(
+        capacity);
+    map.put(rootDir);
+    return map;
+  }
     
   private FSNamesystem getFSNamesystem() {
     return namesystem;
@@ -272,9 +299,8 @@ public class FSDirectory implements Clos
     if (!mkdirs(parent.toString(), permissions, true, modTime)) {
       return null;
     }
-    long id = namesystem.allocateNewInodeId();
     INodeFileUnderConstruction newNode = new INodeFileUnderConstruction(
-                                 id,
+                                 namesystem.allocateNewInodeId(),
                                  permissions,replication,
                                  preferredBlockSize, modTime, clientName, 
                                  clientMachine, clientNode);
@@ -1329,6 +1355,7 @@ public class FSDirectory implements Clos
     // collect block
     if (!targetNode.isInLatestSnapshot(latestSnapshot)) {
       targetNode.destroyAndCollectBlocks(collectedBlocks);
+      removeAllFromInodesFromMap(targetNode);
     } else {
       Quota.Counts counts = targetNode.cleanSubtree(null, latestSnapshot,
           collectedBlocks);
@@ -1393,6 +1420,7 @@ public class FSDirectory implements Clos
     Preconditions.checkState(hasWriteLock());
 
     oldnode.getParent().replaceChild(oldnode, newnode);
+    inodeMap.put(newnode);
     oldnode.clear();
 
     /* Currently oldnode and newnode are assumed to contain the same
@@ -1909,6 +1937,15 @@ public class FSDirectory implements Clos
     }
   }
   
+  private INode getFromINodeMap(INode inode) {
+    readLock();
+    try {
+      return inodeMap.get(inode);
+    } finally {
+      readUnlock();
+    }
+  }
+  
   /**
    * Add the given child to the namespace.
    * @param src The full path name of the child node.
@@ -2082,6 +2119,17 @@ public class FSDirectory implements Clos
   private boolean addChild(INodesInPath iip, int pos,
       INode child, boolean checkQuota) throws QuotaExceededException {
     final INode[] inodes = iip.getINodes();
+    // Disallow creation of /.reserved. This may be attempted when loading an
+    // editlog/fsimage during upgrade, since /.reserved was a valid name in
+    // older releases. This may also be called when a user tries to create a
+    // file or directory named /.reserved.
+    if (pos == 1 && inodes[0] == rootDir && isReservedName(child)) {
+      throw new HadoopIllegalArgumentException(
+          "File name \"" + child.getLocalName() + "\" is reserved and cannot "
+              + "be created. If this is during upgrade change the name of the "
+              + "existing file or directory to another name before upgrading "
+              + "to the new release.");
+    }
     // The filesystem limits are not really quotas, so this check may appear
     // odd. It's because a rename operation deletes the src, tries to add
     // to the dest, if that fails, re-adds the src from whence it came.
@@ -2106,6 +2154,7 @@ public class FSDirectory implements Clos
     } else {
       // update parent node
       iip.setINode(pos - 1, child.getParent());
+      inodeMap.put(child);
     }
     return added;
   }
@@ -2135,9 +2184,10 @@ public class FSDirectory implements Clos
     if (!parent.removeChild(last, latestSnapshot)) {
       return -1;
     }
-
+    inodeMap.remove(last);
     if (parent != last.getParent()) {
       // parent is changed
+      inodeMap.put(last.getParent());
       iip.setINode(-2, last.getParent());
     }
     
@@ -2180,6 +2230,29 @@ public class FSDirectory implements Clos
       readUnlock();
     }
   }
+
+  /** This method is always called with writeLock held */
+  final void addToInodeMapUnprotected(INode inode) {
+    inodeMap.put(inode);
+  }
+  
+  /** This method is always called with writeLock held */
+  private final void removeFromInodeMap(INode inode) {
+    inodeMap.remove(inode);
+  }
+  
+  /** Remove all the inodes under given inode from the map */
+  private void removeAllFromInodesFromMap(INode inode) {
+    removeFromInodeMap(inode);
+    if (!inode.isDirectory()) {
+      return;
+    }
+    INodeDirectory dir = (INodeDirectory) inode;
+    for (INode child : dir.getChildrenList(null)) {
+      removeAllFromInodesFromMap(child);
+    }
+    dir.clearChildren();
+  }
   
   /**
    * See {@link ClientProtocol#setQuota(String, long, long)} for the contract.
@@ -2286,7 +2359,7 @@ public class FSDirectory implements Clos
     boolean status = false;
     writeLock();
     try {
-      status = unprotectedSetTimes(src, inode, mtime, atime, force, latest);
+      status = unprotectedSetTimes(inode, mtime, atime, force, latest);
     } finally {
       writeUnlock();
     }
@@ -2299,11 +2372,11 @@ public class FSDirectory implements Clos
       throws UnresolvedLinkException, QuotaExceededException {
     assert hasWriteLock();
     final INodesInPath i = getLastINodeInPath(src); 
-    return unprotectedSetTimes(src, i.getLastINode(), mtime, atime, force,
+    return unprotectedSetTimes(i.getLastINode(), mtime, atime, force,
         i.getLatestSnapshot());
   }
 
-  private boolean unprotectedSetTimes(String src, INode inode, long mtime,
+  private boolean unprotectedSetTimes(INode inode, long mtime,
       long atime, boolean force, Snapshot latest) throws QuotaExceededException {
     assert hasWriteLock();
     boolean status = false;
@@ -2496,5 +2569,128 @@ public class FSDirectory implements Clos
   
   void shutdown() {
     nameCache.reset();
+    inodeMap.clear();
+    inodeMap = null;
+  }
+  
+  @VisibleForTesting
+  INode getInode(long id) {
+    INode inode = new INodeWithAdditionalFields(id, null, new PermissionStatus(
+        "", "", new FsPermission((short) 0)), 0, 0) {
+      
+      @Override
+      INode recordModification(Snapshot latest) throws QuotaExceededException {
+        return null;
+      }
+      
+      @Override
+      public void destroyAndCollectBlocks(BlocksMapUpdateInfo collectedBlocks) {
+        // Nothing to do
+      }
+      
+      @Override
+      public Counts computeQuotaUsage(Counts counts, boolean useCache) {
+        return null;
+      }
+      
+      @Override
+      public Content.Counts computeContentSummary(Content.Counts counts) {
+        return null;
+      }
+      
+      @Override
+      public CountsMap computeContentSummary(CountsMap countsMap) {
+        return null;
+      }
+      
+      @Override
+      public Counts cleanSubtree(Snapshot snapshot, Snapshot prior,
+          BlocksMapUpdateInfo collectedBlocks) throws QuotaExceededException {
+        return null;
+      }
+    };
+      
+    return getFromINodeMap(inode);
+  }
+  
+  /**
+   * Given an INode, get all the path components leading to it from the root.
+   * If an INode corresponding to C in /A/B/C is given, the returned
+   * path components will be {root, A, B, C}.
+   */
+  static byte[][] getPathComponents(INode inode) {
+    List<byte[]> components = new ArrayList<byte[]>();
+    components.add(0, inode.getLocalNameBytes());
+    while(inode.getParent() != null) {
+      components.add(0, inode.getParent().getLocalNameBytes());
+      inode = inode.getParent();
+    }
+    return components.toArray(new byte[components.size()][]);
+  }
+  
+  /**
+   * @return path components for reserved path, else null.
+   */
+  static byte[][] getPathComponentsForReservedPath(String src) {
+    return !isReservedName(src) ? null : INode.getPathComponents(src);
+  }
+  
+  /**
+   * Resolve the path of /.reserved/.inodes/<inodeid>/... to a regular path
+   * 
+   * @param src path that is being processed
+   * @param pathComponents path components corresponding to the path
+   * @param fsd FSDirectory
+   * @return if the path indicates an inode, return the path after replacing
+   *         the prefix up to <inodeid> with the corresponding path of the
+   *         inode; else the path in {@code src} as is.
+   * @throws FileNotFoundException if inodeid is invalid
+   */
+  static String resolvePath(String src, byte[][] pathComponents, FSDirectory fsd)
+      throws FileNotFoundException {
+    if (pathComponents == null || pathComponents.length <= 3) {
+      return src;
+    }
+    // Not /.reserved/.inodes
+    if (!Arrays.equals(DOT_RESERVED, pathComponents[1])
+        || !Arrays.equals(DOT_INODES, pathComponents[2])) { // Not .inodes path
+      return src;
+    }
+    final String inodeId = DFSUtil.bytes2String(pathComponents[3]);
+    long id = 0;
+    try {
+      id = Long.valueOf(inodeId);
+    } catch (NumberFormatException e) {
+      throw new FileNotFoundException(
+          "File for given inode path does not exist: " + src);
+    }
+    if (id == INodeId.ROOT_INODE_ID && pathComponents.length == 4) {
+      return Path.SEPARATOR;
+    }
+    StringBuilder path = id == INodeId.ROOT_INODE_ID ? new StringBuilder()
+        : new StringBuilder(fsd.getInode(id).getFullPathName());
+    for (int i = 4; i < pathComponents.length; i++) {
+      path.append(Path.SEPARATOR).append(DFSUtil.bytes2String(pathComponents[i]));
+    }
+    if (NameNode.LOG.isDebugEnabled()) {
+      NameNode.LOG.debug("Resolved path is " + path);
+    }
+    return path.toString();
+  }
+  
+  @VisibleForTesting
+  int getInodeMapSize() {
+    return inodeMap.size();
+  }
+  
+  /** Check if a given inode name is reserved */
+  public static boolean isReservedName(INode inode) {
+    return CHECK_RESERVED_FILE_NAMES
+        && Arrays.equals(inode.getLocalNameBytes(), DOT_RESERVED);
+  }
+  
+  /** Check if a given path is reserved */
+  public static boolean isReservedName(String src) {
+    return src.startsWith(DOT_RESERVED_PATH_PREFIX);
   }
 }
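
For illustration, the resolution rule implemented by resolvePath() above can be sketched as a small standalone program. The helper name, the lambda-based lookup, and the inode id 16387 below are stand-ins for this example only (FSDirectory.getInode() does the real lookup, and the real method also special-cases the root inode id, omitted here):

  import java.util.function.LongFunction;

  public class InodePathSketch {
    // /.reserved/.inodes/<inodeid>[/rest...] -> <full path of inode>[/rest...]
    static String resolve(String src, LongFunction<String> inodeToFullPath) {
      String[] parts = src.split("/"); // parts[0] is "" for a leading "/"
      if (parts.length < 4 || !".reserved".equals(parts[1])
          || !".inodes".equals(parts[2])) {
        return src; // not an inode path; return unchanged
      }
      long id = Long.parseLong(parts[3]);
      StringBuilder path = new StringBuilder(inodeToFullPath.apply(id));
      for (int i = 4; i < parts.length; i++) {
        path.append('/').append(parts[i]);
      }
      return path.toString();
    }

    public static void main(String[] args) {
      // Assume inode 16387 is the directory /user/alice (made-up mapping).
      System.out.println(resolve("/.reserved/.inodes/16387/data.txt",
          id -> "/user/alice")); // prints /user/alice/data.txt
      System.out.println(resolve("/plain/path", id -> "")); // unchanged
    }
  }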

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java?rev=1470225&r1=1470224&r2=1470225&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java Sat Apr 20 20:22:21 2013
@@ -38,6 +38,7 @@ import java.util.Map;
 import java.util.Map.Entry;
 
 import org.apache.commons.logging.Log;
+import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -526,6 +527,13 @@ public class FSImageFormat {
    * modification time update and space count update are not needed.
    */
   private void addToParent(INodeDirectory parent, INode child) {
+    FSDirectory fsDir = namesystem.dir;
+    if (parent == fsDir.rootDir && FSDirectory.isReservedName(child)) {
+        throw new HadoopIllegalArgumentException("File name \""
+            + child.getLocalName() + "\" is reserved. Please "
+            + "change the name of the existing file or directory to another "
+            + "name before upgrading to this release.");
+    }
     // NOTE: This does not update space counts for parents
     if (!parent.addChild(child)) {
       return;

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1470225&r1=1470224&r2=1470225&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Sat Apr 20 20:22:21 2013
@@ -912,7 +912,7 @@ public class FSNamesystem implements Nam
     }
   }
   
-  
+  @Override
   public void checkOperation(OperationCategory op) throws StandbyException {
     if (haContext != null) {
       // null in some unit tests
@@ -1217,12 +1217,14 @@ public class FSNamesystem implements Nam
     HdfsFileStatus resultingStat = null;
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.WRITE);
+    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
       if (isInSafeMode()) {
         throw new SafeModeException("Cannot set permission for " + src, safeMode);
       }
+      src = FSDirectory.resolvePath(src, pathComponents, dir);
       checkOwner(pc, src);
       dir.setPermission(src, permission);
       resultingStat = getAuditFileInfo(src, false);
@@ -1254,12 +1256,14 @@ public class FSNamesystem implements Nam
     HdfsFileStatus resultingStat = null;
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.WRITE);
+    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
       if (isInSafeMode()) {
         throw new SafeModeException("Cannot set owner for " + src, safeMode);
       }
+      src = FSDirectory.resolvePath(src, pathComponents, dir);
       checkOwner(pc, src);
       if (!pc.isSuperUser()) {
         if (username != null && !pc.getUser().equals(username)) {
@@ -1355,6 +1359,7 @@ public class FSNamesystem implements Nam
       throws FileNotFoundException,
       UnresolvedLinkException, IOException {
     FSPermissionChecker pc = getPermissionChecker();
+    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     for (int attempt = 0; attempt < 2; attempt++) {
       boolean isReadOp = (attempt == 0);
       if (isReadOp) { // first attempt is with readlock
@@ -1364,6 +1369,7 @@ public class FSNamesystem implements Nam
         checkOperation(OperationCategory.WRITE);
         writeLock(); // writelock is needed to set accesstime
       }
+      src = FSDirectory.resolvePath(src, pathComponents, dir);
       try {
         if (isReadOp) {
           checkOperation(OperationCategory.READ);
@@ -1413,6 +1419,8 @@ public class FSNamesystem implements Nam
    * Moves all the blocks from srcs and appends them to trg
   * To avoid rollbacks we will verify validity of ALL of the args
    * before we start actual move.
+   * 
+   * This does not support ".inodes" relative paths.
    * @param target
    * @param srcs
    * @throws IOException
@@ -1603,12 +1611,14 @@ public class FSNamesystem implements Nam
     HdfsFileStatus resultingStat = null;
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.WRITE);
+    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
       if (isInSafeMode()) {
         throw new SafeModeException("Cannot set times " + src, safeMode);
       }
+      src = FSDirectory.resolvePath(src, pathComponents, dir);
 
       // Write access is required to set access and modification times
       if (isPermissionEnabled) {
@@ -1635,7 +1645,10 @@ public class FSNamesystem implements Nam
       PermissionStatus dirPerms, boolean createParent) 
       throws IOException, UnresolvedLinkException {
     if (!DFSUtil.isValidName(link)) {
-      throw new InvalidPathException("Invalid file name: " + link);
+      throw new InvalidPathException("Invalid link name: " + link);
+    }
+    if (FSDirectory.isReservedName(target)) {
+      throw new InvalidPathException("Invalid target name: " + target);
     }
     try {
       createSymlinkInt(target, link, dirPerms, createParent);
@@ -1655,12 +1668,14 @@ public class FSNamesystem implements Nam
     HdfsFileStatus resultingStat = null;
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.WRITE);
+    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(link);
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
       if (isInSafeMode()) {
         throw new SafeModeException("Cannot create symlink " + link, safeMode);
       }
+      link = FSDirectory.resolvePath(link, pathComponents, dir);
       if (!createParent) {
         verifyParentDir(link);
       }
@@ -1707,18 +1722,20 @@ public class FSNamesystem implements Nam
     }
   }
 
-  private boolean setReplicationInt(final String src, final short replication)
+  private boolean setReplicationInt(String src, final short replication)
       throws IOException {
     blockManager.verifyReplication(src, replication, null);
     final boolean isFile;
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.WRITE);
+    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
       if (isInSafeMode()) {
         throw new SafeModeException("Cannot set replication for " + src, safeMode);
       }
+      src = FSDirectory.resolvePath(src, pathComponents, dir);
       if (isPermissionEnabled) {
         checkPathAccess(pc, src, FsAction.WRITE);
       }
@@ -1744,9 +1761,11 @@ public class FSNamesystem implements Nam
       throws IOException, UnresolvedLinkException {
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.READ);
+    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(filename);
     readLock();
     try {
       checkOperation(OperationCategory.READ);
+      filename = FSDirectory.resolvePath(filename, pathComponents, dir);
       if (isPermissionEnabled) {
         checkTraverse(pc, filename);
       }
@@ -1819,8 +1838,14 @@ public class FSNamesystem implements Nam
     final HdfsFileStatus stat;
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.WRITE);
+    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     writeLock();
     try {
+      checkOperation(OperationCategory.WRITE);
+      if (isInSafeMode()) {
+        throw new SafeModeException("Cannot create file" + src, safeMode);
+      }
+      src = FSDirectory.resolvePath(src, pathComponents, dir);
       startFileInternal(pc, src, permissions, holder, clientMachine, flag,
           createParent, replication, blockSize);
       stat = dir.getFileInfo(src, false);
@@ -1863,10 +1888,6 @@ public class FSNamesystem implements Nam
       AccessControlException, UnresolvedLinkException, FileNotFoundException,
       ParentNotDirectoryException, IOException {
     assert hasWriteLock();
-    checkOperation(OperationCategory.WRITE);
-    if (isInSafeMode()) {
-      throw new SafeModeException("Cannot create file" + src, safeMode);
-    }
     // Verify that the destination does not exist as a directory already.
     final INodesInPath iip = dir.getINodesInPath4Write(src);
     final INode inode = iip.getLastINode();
@@ -2003,6 +2024,7 @@ public class FSNamesystem implements Nam
     boolean skipSync = false;
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.WRITE);
+    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
@@ -2010,6 +2032,7 @@ public class FSNamesystem implements Nam
         throw new SafeModeException(
             "Cannot recover the lease of " + src, safeMode);
       }
+      src = FSDirectory.resolvePath(src, pathComponents, dir);
       final INodeFile inode = INodeFile.valueOf(dir.getINode(src), src);
       if (!inode.isUnderConstruction()) {
         return true;
@@ -2127,6 +2150,11 @@ public class FSNamesystem implements Nam
       throws AccessControlException, SafeModeException,
       FileAlreadyExistsException, FileNotFoundException,
       ParentNotDirectoryException, IOException {
+    if (NameNode.stateChangeLog.isDebugEnabled()) {
+      NameNode.stateChangeLog.debug("DIR* NameSystem.appendFile: src=" + src
+          + ", holder=" + holder
+          + ", clientMachine=" + clientMachine);
+    }
     boolean skipSync = false;
     if (!supportAppends) {
       throw new UnsupportedOperationException(
@@ -2145,8 +2173,14 @@ public class FSNamesystem implements Nam
     LocatedBlock lb = null;
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.WRITE);
+    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     writeLock();
     try {
+      checkOperation(OperationCategory.WRITE);
+      if (isInSafeMode()) {
+        throw new SafeModeException("Cannot append to file" + src, safeMode);
+      }
+      src = FSDirectory.resolvePath(src, pathComponents, dir);
       lb = startFileInternal(pc, src, null, holder, clientMachine, 
                         EnumSet.of(CreateFlag.APPEND), 
                         false, blockManager.maxReplication, 0);
@@ -2210,9 +2244,11 @@ public class FSNamesystem implements Nam
 
     // Part I. Analyze the state of the file with respect to the input data.
     checkOperation(OperationCategory.READ);
+    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     readLock();
     try {
       checkOperation(OperationCategory.READ);
+      src = FSDirectory.resolvePath(src, pathComponents, dir);
       LocatedBlock[] onRetryBlock = new LocatedBlock[1];
       final INode[] inodes = analyzeFileState(
           src, fileId, clientName, previous, onRetryBlock).getINodes();
@@ -2384,7 +2420,7 @@ public class FSNamesystem implements Nam
   }
 
   /** @see NameNode#getAdditionalDatanode(String, ExtendedBlock, DatanodeInfo[], DatanodeInfo[], int, String) */
-  LocatedBlock getAdditionalDatanode(final String src, final ExtendedBlock blk,
+  LocatedBlock getAdditionalDatanode(String src, final ExtendedBlock blk,
       final DatanodeInfo[] existings,  final HashMap<Node, Node> excludes,
       final int numAdditionalNodes, final String clientName
       ) throws IOException {
@@ -2395,6 +2431,7 @@ public class FSNamesystem implements Nam
     final long preferredblocksize;
     final List<DatanodeDescriptor> chosen;
     checkOperation(OperationCategory.READ);
+    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     readLock();
     try {
       checkOperation(OperationCategory.READ);
@@ -2403,6 +2440,7 @@ public class FSNamesystem implements Nam
         throw new SafeModeException("Cannot add datanode; src=" + src
             + ", blk=" + blk, safeMode);
       }
+      src = FSDirectory.resolvePath(src, pathComponents, dir);
 
       //check lease
       final INodeFileUnderConstruction file = checkLease(src, clientName);
@@ -2442,6 +2480,7 @@ public class FSNamesystem implements Nam
           + "of file " + src);
     }
     checkOperation(OperationCategory.WRITE);
+    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
@@ -2449,6 +2488,8 @@ public class FSNamesystem implements Nam
         throw new SafeModeException("Cannot abandon block " + b +
                                     " for fle" + src, safeMode);
       }
+      src = FSDirectory.resolvePath(src, pathComponents, dir);
+
       //
       // Remove the block from the pending creates list
       //
@@ -2520,10 +2561,16 @@ public class FSNamesystem implements Nam
     checkBlock(last);
     boolean success = false;
     checkOperation(OperationCategory.WRITE);
+    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     writeLock();
     try {
-      success = completeFileInternal(src, holder,
-          ExtendedBlock.getLocalBlock(last));
+      checkOperation(OperationCategory.WRITE);
+      if (isInSafeMode()) {
+        throw new SafeModeException("Cannot complete file " + src, safeMode);
+      }
+      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      success = completeFileInternal(src, holder, 
+        ExtendedBlock.getLocalBlock(last));
     } finally {
       writeUnlock();
     }
@@ -2537,11 +2584,6 @@ public class FSNamesystem implements Nam
       String holder, Block last) throws SafeModeException,
       UnresolvedLinkException, IOException {
     assert hasWriteLock();
-    checkOperation(OperationCategory.WRITE);
-    if (isInSafeMode()) {
-      throw new SafeModeException("Cannot complete file " + src, safeMode);
-    }
-
     final INodesInPath iip = dir.getLastINodeInPath(src);
     final INodeFileUnderConstruction pendingFile;
     try {
@@ -2687,10 +2729,19 @@ public class FSNamesystem implements Nam
     }
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.WRITE);
+    byte[][] srcComponents = FSDirectory.getPathComponentsForReservedPath(src);
+    byte[][] dstComponents = FSDirectory.getPathComponentsForReservedPath(dst);
     boolean status = false;
     HdfsFileStatus resultingStat = null;
     writeLock();
     try {
+      checkOperation(OperationCategory.WRITE);
+      if (isInSafeMode()) {
+        throw new SafeModeException("Cannot rename " + src, safeMode);
+      }
+      src = FSDirectory.resolvePath(src, srcComponents, dir);
+      dst = FSDirectory.resolvePath(dst, dstComponents, dir);
+      checkOperation(OperationCategory.WRITE);
       status = renameToInternal(pc, src, dst);
       if (status) {
         resultingStat = getAuditFileInfo(dst, false);
@@ -2710,10 +2761,6 @@ public class FSNamesystem implements Nam
   private boolean renameToInternal(FSPermissionChecker pc, String src, String dst)
     throws IOException, UnresolvedLinkException {
     assert hasWriteLock();
-      checkOperation(OperationCategory.WRITE);
-    if (isInSafeMode()) {
-      throw new SafeModeException("Cannot rename " + src, safeMode);
-    }
     if (isPermissionEnabled) {
       //We should not be doing this.  This is move() not renameTo().
       //but for now,
@@ -2744,9 +2791,17 @@ public class FSNamesystem implements Nam
     }
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.WRITE);
+    byte[][] srcComponents = FSDirectory.getPathComponentsForReservedPath(src);
+    byte[][] dstComponents = FSDirectory.getPathComponentsForReservedPath(dst);
     HdfsFileStatus resultingStat = null;
     writeLock();
     try {
+      checkOperation(OperationCategory.WRITE);
+      if (isInSafeMode()) {
+        throw new SafeModeException("Cannot rename " + src, safeMode);
+      }
+      src = FSDirectory.resolvePath(src, srcComponents, dir);
+      dst = FSDirectory.resolvePath(dst, dstComponents, dir);
       renameToInternal(pc, src, dst, options);
       resultingStat = getAuditFileInfo(dst, false);
     } finally {
@@ -2765,10 +2820,6 @@ public class FSNamesystem implements Nam
   private void renameToInternal(FSPermissionChecker pc, String src, String dst,
       Options.Rename... options) throws IOException {
     assert hasWriteLock();
-    checkOperation(OperationCategory.WRITE);
-    if (isInSafeMode()) {
-      throw new SafeModeException("Cannot rename " + src, safeMode);
-    }
     if (isPermissionEnabled) {
       checkParentAccess(pc, src, FsAction.WRITE);
       checkAncestorAccess(pc, dst, FsAction.WRITE);
@@ -2829,12 +2880,14 @@ public class FSNamesystem implements Nam
     BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.WRITE);
+    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
       if (isInSafeMode()) {
         throw new SafeModeException("Cannot delete " + src, safeMode);
       }
+      src = FSDirectory.resolvePath(src, pathComponents, dir);
       if (!recursive && dir.isNonEmptyDirectory(src)) {
         throw new IOException(src + " is non empty");
       }
@@ -2961,9 +3014,14 @@ public class FSNamesystem implements Nam
     HdfsFileStatus stat = null;
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.READ);
+    if (!DFSUtil.isValidName(src)) {
+      throw new InvalidPathException("Invalid file name: " + src);
+    }
+    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     readLock();
     try {
       checkOperation(OperationCategory.READ);
+      src = FSDirectory.resolvePath(src, pathComponents, dir);
       if (isPermissionEnabled) {
         checkTraverse(pc, src);
       }
@@ -3028,10 +3086,16 @@ public class FSNamesystem implements Nam
     }
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.WRITE);
+    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     HdfsFileStatus resultingStat = null;
     boolean status = false;
     writeLock();
     try {
+      checkOperation(OperationCategory.WRITE);   
+      if (isInSafeMode()) {
+        throw new SafeModeException("Cannot create directory " + src, safeMode);
+      }
+      src = FSDirectory.resolvePath(src, pathComponents, dir);
       status = mkdirsInternal(pc, src, permissions, createParent);
       if (status) {
         resultingStat = dir.getFileInfo(src, false);
@@ -3053,10 +3117,6 @@ public class FSNamesystem implements Nam
       PermissionStatus permissions, boolean createParent) 
       throws IOException, UnresolvedLinkException {
     assert hasWriteLock();
-    checkOperation(OperationCategory.WRITE);   
-    if (isInSafeMode()) {
-      throw new SafeModeException("Cannot create directory " + src, safeMode);
-    }
     if (isPermissionEnabled) {
       checkTraverse(pc, src);
     }
@@ -3087,9 +3147,11 @@ public class FSNamesystem implements Nam
       FileNotFoundException, UnresolvedLinkException, StandbyException {
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.READ);
+    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     readLock();
     try {
       checkOperation(OperationCategory.READ);
+      src = FSDirectory.resolvePath(src, pathComponents, dir);
       if (isPermissionEnabled) {
         checkPermission(pc, src, false, null, null, null, FsAction.READ_EXECUTE);
       }
@@ -3103,6 +3165,8 @@ public class FSNamesystem implements Nam
    * Set the namespace quota and diskspace quota for a directory.
    * See {@link ClientProtocol#setQuota(String, long, long)} for the 
    * contract.
+   * 
+   * Note: This does not support ".inodes" relative paths.
    */
   void setQuota(String path, long nsQuota, long dsQuota) 
       throws IOException, UnresolvedLinkException {
@@ -3132,12 +3196,14 @@ public class FSNamesystem implements Nam
       throws IOException, UnresolvedLinkException {
     NameNode.stateChangeLog.info("BLOCK* fsync: " + src + " for " + clientName);
     checkOperation(OperationCategory.WRITE);
+    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
       if (isInSafeMode()) {
         throw new SafeModeException("Cannot fsync file " + src, safeMode);
       }
+      src = FSDirectory.resolvePath(src, pathComponents, dir);
       INodeFileUnderConstruction pendingFile  = checkLease(src, clientName);
       if (lastBlockLength > 0) {
         pendingFile.updateLengthOfLastBlock(lastBlockLength);
@@ -3488,9 +3554,11 @@ public class FSNamesystem implements Nam
     DirectoryListing dl;
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.READ);
+    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     readLock();
     try {
       checkOperation(OperationCategory.READ);
+      src = FSDirectory.resolvePath(src, pathComponents, dir);
 
       if (isPermissionEnabled) {
         if (dir.isDir(src)) {
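
A pattern repeats across the FSNamesystem changes above: each path-based operation computes the reserved-path components before taking the namesystem lock, then resolves any /.reserved/.inodes path to a regular path under the lock; several methods also move their checkOperation() and safe-mode checks out of the *Internal helpers into the locked section. A compilable skeleton of that idiom, with stand-ins for the lock and the FSDirectory calls (nothing here is the real API):

  import java.util.concurrent.locks.ReentrantReadWriteLock;

  class ReservedPathIdiom {
    private final ReentrantReadWriteLock fsLock = new ReentrantReadWriteLock(true);

    // Stand-in for FSDirectory.getPathComponentsForReservedPath(src).
    private byte[][] componentsIfReserved(String src) {
      return src.startsWith("/.reserved/") ? new byte[0][] : null;
    }

    // Stand-in for FSDirectory.resolvePath(src, pathComponents, dir).
    private String resolve(String src, byte[][] components) {
      return components == null ? src : "/resolved/path"; // placeholder
    }

    void writeOperation(String src) {
      // 1. Cheap string work happens outside the lock.
      byte[][] pathComponents = componentsIfReserved(src);
      fsLock.writeLock().lock();
      try {
        // 2. checkOperation() and the safe-mode check run under the lock.
        // 3. Any /.reserved/.inodes path is resolved to a regular path.
        src = resolve(src, pathComponents);
        // 4. Permission checks and the mutation then use the resolved src.
      } finally {
        fsLock.writeLock().unlock();
      }
    }
  }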

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java?rev=1470225&r1=1470224&r2=1470225&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java Sat Apr 20 20:22:21 2013
@@ -21,7 +21,6 @@ import java.io.PrintStream;
 import java.io.PrintWriter;
 import java.io.StringWriter;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.List;
 
 import org.apache.commons.logging.Log;
@@ -39,6 +38,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.Diff;
+import org.apache.hadoop.hdfs.util.LightWeightGSet.LinkedElement;
 import org.apache.hadoop.util.StringUtils;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -50,7 +50,7 @@ import com.google.common.base.Preconditi
  * directory inodes.
  */
 @InterfaceAudience.Private
-public abstract class INode implements Diff.Element<byte[]> {
+public abstract class INode implements Diff.Element<byte[]>, LinkedElement {
   public static final Log LOG = LogFactory.getLog(INode.class);
 
   /** parent is either an {@link INodeDirectory} or an {@link INodeReference}.*/
@@ -108,6 +108,7 @@ public abstract class INode implements D
    * @return group name
    */
   abstract String getGroupName(Snapshot snapshot);
+  protected LinkedElement next = null;
 
   /** The same as getGroupName(null). */
   public final String getGroupName() {
@@ -612,13 +613,13 @@ public abstract class INode implements D
     if (that == null || !(that instanceof INode)) {
       return false;
     }
-    return Arrays.equals(this.getLocalNameBytes(),
-        ((INode)that).getLocalNameBytes());
+    return getId() == ((INode) that).getId();
   }
 
   @Override
   public final int hashCode() {
-    return Arrays.hashCode(getLocalNameBytes());
+    long id = getId();
+    return (int) (id ^ (id >>> 32));
   }
   
   /**
@@ -698,4 +699,14 @@ public abstract class INode implements D
       toDeleteList.clear();
     }
   }
+  
+  @Override
+  public void setNext(LinkedElement next) {
+    this.next = next;
+  }
+  
+  @Override
+  public LinkedElement getNext() {
+    return next;
+  }
 }
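
With the equality change above, INode identity is the inode id rather than the local name bytes, which is what allows FSDirectory.getInode(long) to probe the inode map with a throwaway INode that carries only the target id. The hashCode() fold is the standard long-to-int mix used by java.lang.Long; a quick standalone check (the id value is arbitrary):

  public class IdHashCheck {
    public static void main(String[] args) {
      long id = 16386L; // arbitrary inode id
      int folded = (int) (id ^ (id >>> 32)); // the fold in INode.hashCode()
      // Long.hashCode() is specified as exactly this computation.
      System.out.println(folded == Long.valueOf(id).hashCode()); // true
    }
  }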

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java?rev=1470225&r1=1470224&r2=1470225&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java Sat Apr 20 20:22:21 2013
@@ -474,7 +474,10 @@ public class INodeDirectory extends INod
 
   /** Set the children list to null. */
   public void clearChildren() {
-    this.children = null;
+    if (children != null) {
+      this.children.clear();
+      this.children = null;
+    }
   }
 
   @Override

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeId.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeId.java?rev=1470225&r1=1470224&r2=1470225&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeId.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeId.java Sat Apr 20 20:22:21 2013
@@ -31,9 +31,11 @@ import org.apache.hadoop.util.Sequential
 @InterfaceAudience.Private
 public class INodeId extends SequentialNumber {
   /**
-   * The last reserved inode id. 
+   * The last reserved inode id. Inode IDs are allocated starting from
+   * LAST_RESERVED_ID + 1.
    */
-  public static final long LAST_RESERVED_ID = 1000L;
+  public static final long LAST_RESERVED_ID = 2 << 14 - 1;
+  public static final long ROOT_INODE_ID = LAST_RESERVED_ID + 1;
 
   /**
    * The inode id validation of lease check will be skipped when the request
@@ -55,6 +57,6 @@ public class INodeId extends SequentialN
   }
   
   INodeId() {
-    super(LAST_RESERVED_ID);
+    super(ROOT_INODE_ID);
   }
 }
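
One subtlety in the INodeId change: in Java, the shift operator binds looser than subtraction, so 2 << 14 - 1 parses as 2 << (14 - 1). A quick check of the constant values this yields (assuming the expression is taken exactly as committed):

  public class InodeIdValues {
    public static void main(String[] args) {
      long lastReservedId = 2 << 14 - 1; // parsed as 2 << 13
      long rootInodeId = lastReservedId + 1;
      System.out.println(lastReservedId); // 16384
      System.out.println(rootInodeId);    // 16385
    }
  }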

Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1469644-1470194

Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1469644-1470194

Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1469644-1470194

Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1469644-1470194

Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1469644-1470194

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java?rev=1470225&r1=1470224&r2=1470225&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java Sat Apr 20 20:22:21 2013
@@ -20,17 +20,28 @@ package org.apache.hadoop.hdfs.server.na
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.fail;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileSystemTestHelper;
+import org.apache.hadoop.fs.InvalidPathException;
+import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Options;
+import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathIsNotDirectoryException;
+import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -38,10 +49,17 @@ import org.apache.hadoop.hdfs.DFSTestUti
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.junit.Test;
+import org.mockito.Mockito;
 
 public class TestINodeFile {
+  public static final Log LOG = LogFactory.getLog(TestINodeFile.class);
 
   static final short BLOCKBITS = 48;
   static final long BLKSIZE_MAXVALUE = ~(0xffffL << BLOCKBITS);
@@ -293,6 +311,7 @@ public class TestINodeFile {
         INodeDirectory.valueOf(from, path);
         fail();
       } catch(PathIsNotDirectoryException e) {
+        // Expected
       }
     }
 
@@ -314,7 +333,8 @@ public class TestINodeFile {
       try {
         INodeDirectory.valueOf(from, path);
         fail();
-      } catch(PathIsNotDirectoryException e) {
+      } catch(PathIsNotDirectoryException expected) {
+        // expected
       }
     }
 
@@ -345,13 +365,10 @@ public class TestINodeFile {
   }
 
   /**
-   * Verify root always has inode id 1001 and new formated fsimage has last
-   * allocated inode id 1000. Validate correct lastInodeId is persisted.
-   * @throws IOException
+   * This test verifies inode ID counter and inode map functionality.
    */
   @Test
   public void testInodeId() throws IOException {
-
     Configuration conf = new Configuration();
     conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
         DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT);
@@ -361,55 +378,83 @@ public class TestINodeFile {
       cluster.waitActive();
 
       FSNamesystem fsn = cluster.getNamesystem();
-      assertTrue(fsn.getLastInodeId() == 1001);
+      long lastId = fsn.getLastInodeId();
+
+      // Ensure root has the correct inode ID
+      // Last inode ID should be root inode ID and inode map size should be 1
+      int inodeCount = 1;
+      long expectedLastInodeId = INodeId.ROOT_INODE_ID;
+      assertEquals(fsn.dir.rootDir.getId(), INodeId.ROOT_INODE_ID);
+      assertEquals(expectedLastInodeId, lastId);
+      assertEquals(inodeCount, fsn.dir.getInodeMapSize());
 
-      // Create one directory and the last inode id should increase to 1002
+      // Create a directory
+      // Last inode ID and inode map size should increase by 1
       FileSystem fs = cluster.getFileSystem();
       Path path = new Path("/test1");
       assertTrue(fs.mkdirs(path));
-      assertTrue(fsn.getLastInodeId() == 1002);
+      assertEquals(++expectedLastInodeId, fsn.getLastInodeId());
+      assertEquals(++inodeCount, fsn.dir.getInodeMapSize());
 
-      int fileLen = 1024;
-      Path filePath = new Path("/test1/file");
-      DFSTestUtil.createFile(fs, filePath, fileLen, (short) 1, 0);
-      assertTrue(fsn.getLastInodeId() == 1003);
+      // Create a file
+      // Last inode ID and inode map size should increase by 1
+      NamenodeProtocols nnrpc = cluster.getNameNodeRpc();
+      DFSTestUtil.createFile(fs, new Path("/test1/file"), 1024, (short) 1, 0);
+      assertEquals(++expectedLastInodeId, fsn.getLastInodeId());
+      assertEquals(++inodeCount, fsn.dir.getInodeMapSize());
+      
+      // Ensure right inode ID is returned in file status
+      HdfsFileStatus fileStatus = nnrpc.getFileInfo("/test1/file");
+      assertEquals(expectedLastInodeId, fileStatus.getFileId());
 
-      // Rename doesn't increase inode id
+      // Rename a directory
+      // Last inode ID and inode map size should not change
       Path renamedPath = new Path("/test2");
-      fs.rename(path, renamedPath);
-      assertTrue(fsn.getLastInodeId() == 1003);
+      assertTrue(fs.rename(path, renamedPath));
+      assertEquals(expectedLastInodeId, fsn.getLastInodeId());
+      assertEquals(inodeCount, fsn.dir.getInodeMapSize());
+      
+      // Delete test2/file and test2 and ensure inode map size decreases
+      assertTrue(fs.delete(renamedPath, true));
+      inodeCount -= 2;
+      assertEquals(inodeCount, fsn.dir.getInodeMapSize());
 
-      cluster.restartNameNode();
-      cluster.waitActive();
       // Make sure empty editlog can be handled
       cluster.restartNameNode();
       cluster.waitActive();
       fsn = cluster.getNamesystem();
-      assertTrue(fsn.getLastInodeId() == 1003);
+      assertEquals(expectedLastInodeId, fsn.getLastInodeId());
+      assertEquals(inodeCount, fsn.dir.getInodeMapSize());
+
+      // Create two inodes test2 and test2/file2
+      DFSTestUtil.createFile(fs, new Path("/test2/file2"), 1024, (short) 1, 0);
+      expectedLastInodeId += 2;
+      inodeCount += 2;
+      assertEquals(expectedLastInodeId, fsn.getLastInodeId());
+      assertEquals(inodeCount, fsn.dir.getInodeMapSize());
 
-      DFSTestUtil.createFile(fs, new Path("/test2/file2"), fileLen, (short) 1,
-          0);
-      long id = fsn.getLastInodeId();
-      assertTrue(id == 1004);
-      fs.delete(new Path("/test2"), true);
-      // create a file under construction
+      // Create /test3 and /test3/file; /test3/file is a file under
+      // construction
       FSDataOutputStream outStream = fs.create(new Path("/test3/file"));
       assertTrue(outStream != null);
-      assertTrue(fsn.getLastInodeId() == 1006);
+      expectedLastInodeId += 2;
+      inodeCount += 2;
+      assertEquals(expectedLastInodeId, fsn.getLastInodeId());
+      assertEquals(inodeCount, fsn.dir.getInodeMapSize());
 
-      // Apply editlogs to fsimage, test fsimage with inodeUnderConstruction can
-      // be handled
+      // Apply editlogs to fsimage, ensure inodeUnderConstruction is handled
       fsn.enterSafeMode(false);
       fsn.saveNamespace();
       fsn.leaveSafeMode();
 
       outStream.close();
 
-      // The lastInodeId in fsimage should remain 1006 after reboot
+      // The lastInodeId in fsimage should remain the same after reboot
       cluster.restartNameNode();
       cluster.waitActive();
       fsn = cluster.getNamesystem();
-      assertTrue(fsn.getLastInodeId() == 1006);
+      assertEquals(expectedLastInodeId, fsn.getLastInodeId());
+      assertEquals(inodeCount, fsn.dir.getInodeMapSize());
     } finally {
       if (cluster != null) {
         cluster.shutdown();
@@ -419,7 +464,6 @@ public class TestINodeFile {
 
   @Test
   public void testWriteToRenamedFile() throws IOException {
-
     Configuration conf = new Configuration();
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
         .build();
@@ -451,9 +495,367 @@ public class TestINodeFile {
     } catch (Exception e) {
       /* Ignore */
     } finally {
+      cluster.shutdown();
+    }
+  }
+  
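+  // Builds an inode-based path of the form
+  // "/.reserved/.inodes/<inodeId>/<remainingPath>", e.g. (with an
+  // illustrative inode ID) "/.reserved/.inodes/16386/test1".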
+  private Path getInodePath(long inodeId, String remainingPath) {
+    StringBuilder b = new StringBuilder();
+    b.append(Path.SEPARATOR).append(FSDirectory.DOT_RESERVED_STRING)
+        .append(Path.SEPARATOR).append(FSDirectory.DOT_INODES_STRING)
+        .append(Path.SEPARATOR).append(inodeId).append(Path.SEPARATOR)
+        .append(remainingPath);
+    Path p = new Path(b.toString());
+    LOG.info("Inode path is " + p);
+    return p;
+  }
+  
+  /**
+   * Tests for addressing files using /.reserved/.inodes/<inodeID> in file system
+   * operations.
+   */
+  @Test
+  public void testInodeIdBasedPaths() throws Exception {
+    Configuration conf = new Configuration();
+    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
+        DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT);
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+      cluster.waitActive();
+      DistributedFileSystem fs = cluster.getFileSystem();
+      NamenodeProtocols nnRpc = cluster.getNameNodeRpc();
+      
+      // FileSystem#mkdirs "/testInodeIdBasedPaths"
+      Path baseDir = getInodePath(INodeId.ROOT_INODE_ID, "testInodeIdBasedPaths");
+      Path baseDirRegPath = new Path("/testInodeIdBasedPaths");
+      assertTrue(fs.mkdirs(baseDir));
+      assertTrue(fs.exists(baseDir));
+      long baseDirFileId = nnRpc.getFileInfo(baseDir.toString()).getFileId();
+      
+      // FileSystem#create file and FileSystem#close
+      Path testFileInodePath = getInodePath(baseDirFileId, "test1");
+      Path testFileRegularPath = new Path(baseDir, "test1");
+      final int testFileBlockSize = 1024;
+      FileSystemTestHelper.createFile(fs, testFileInodePath, 1, testFileBlockSize);
+      assertTrue(fs.exists(testFileInodePath));
+      
+      // FileSystem#setPermission
+      FsPermission perm = new FsPermission((short)0666);
+      fs.setPermission(testFileInodePath, perm);
+      
+      // FileSystem#getFileStatus and FileSystem#getPermission
+      FileStatus fileStatus = fs.getFileStatus(testFileInodePath);
+      assertEquals(perm, fileStatus.getPermission());
+      
+      // FileSystem#setOwner
+      fs.setOwner(testFileInodePath, fileStatus.getOwner(), fileStatus.getGroup());
+      
+      // FileSystem#setTimes
+      fs.setTimes(testFileInodePath, 0, 0);
+      fileStatus = fs.getFileStatus(testFileInodePath);
+      assertEquals(0, fileStatus.getModificationTime());
+      assertEquals(0, fileStatus.getAccessTime());
+      
+      // FileSystem#setReplication
+      fs.setReplication(testFileInodePath, (short)3);
+      fileStatus = fs.getFileStatus(testFileInodePath);
+      assertEquals(3, fileStatus.getReplication());
+      fs.setReplication(testFileInodePath, (short)1);
+      
+      // ClientProtocol#getPreferredBlockSize
+      assertEquals(testFileBlockSize,
+          nnRpc.getPreferredBlockSize(testFileInodePath.toString()));
+      
+      // symbolic link related tests
+      
+      // Reserved path is not allowed as a target
+      String invalidTarget = new Path(baseDir, "invalidTarget").toString();
+      String link = new Path(baseDir, "link").toString();
+      testInvalidSymlinkTarget(nnRpc, invalidTarget, link);
+      
+      // Test creating a link using reserved inode path
+      String validTarget = "/validtarget";
+      testValidSymlinkTarget(nnRpc, validTarget, link);
+      
+      // FileSystem#append
+      fs.append(testFileInodePath);
+      
+      // DistributedFileSystem#recoverLease
+      fs.recoverLease(testFileInodePath);
+      
+      // Namenode#getBlockLocations
+      LocatedBlocks l1 = nnRpc.getBlockLocations(testFileInodePath.toString(),
+          0, Long.MAX_VALUE);
+      LocatedBlocks l2 = nnRpc.getBlockLocations(testFileRegularPath.toString(),
+          0, Long.MAX_VALUE);
+      checkEquals(l1, l2);
+      
+      // FileSystem#rename - both variants
+      Path renameDst = getInodePath(baseDirFileId, "test2");
+      fileStatus = fs.getFileStatus(testFileInodePath);
+      // Rename variant 1: rename and rename back
+      fs.rename(testFileInodePath, renameDst);
+      fs.rename(renameDst, testFileInodePath);
+      assertEquals(fileStatus, fs.getFileStatus(testFileInodePath));
+      
+      // Rename variant 2: rename and rename back
+      fs.rename(testFileInodePath, renameDst, Rename.OVERWRITE);
+      fs.rename(renameDst, testFileInodePath, Rename.OVERWRITE);
+      assertEquals(fileStatus, fs.getFileStatus(testFileInodePath));
+      
+      // FileSystem#getContentSummary
+      assertEquals(fs.getContentSummary(testFileRegularPath).toString(),
+          fs.getContentSummary(testFileInodePath).toString());
+      
+      // FileSystem#listFiles
+      checkEquals(fs.listFiles(baseDirRegPath, false),
+          fs.listFiles(baseDir, false));
+      
+      // FileSystem#delete
+      fs.delete(testFileInodePath, true);
+      assertFalse(fs.exists(testFileInodePath));
+    } finally {
       if (cluster != null) {
         cluster.shutdown();
       }
     }
   }
+  
+  private void testInvalidSymlinkTarget(NamenodeProtocols nnRpc,
+      String invalidTarget, String link) throws IOException {
+    try {
+      FsPermission perm = FsPermission.createImmutable((short)0755);
+      nnRpc.createSymlink(invalidTarget, link, perm, false);
+      fail("Symbolic link creation of target " + invalidTarget + " should fail");
+    } catch (InvalidPathException expected) {
+      // Expected
+    }
+  }
+
+  private void testValidSymlinkTarget(NamenodeProtocols nnRpc, String target,
+      String link) throws IOException {
+    FsPermission perm = FsPermission.createImmutable((short)0755);
+    nnRpc.createSymlink(target, link, perm, false);
+    assertEquals(target, nnRpc.getLinkTarget(link));
+  }
+  
+  private static void checkEquals(LocatedBlocks l1, LocatedBlocks l2) {
+    List<LocatedBlock> list1 = l1.getLocatedBlocks();
+    List<LocatedBlock> list2 = l2.getLocatedBlocks();
+    assertEquals(list1.size(), list2.size());
+    
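+    // Compare only the block identity and size; the ordering of replica
+    // locations is not guaranteed to be stable across calls, so locations
+    // are not compared.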
+    for (int i = 0; i < list1.size(); i++) {
+      LocatedBlock b1 = list1.get(i);
+      LocatedBlock b2 = list2.get(i);
+      assertEquals(b1.getBlock(), b2.getBlock());
+      assertEquals(b1.getBlockSize(), b2.getBlockSize());
+    }
+  }
+
+  private static void checkEquals(RemoteIterator<LocatedFileStatus> i1,
+      RemoteIterator<LocatedFileStatus> i2) throws IOException {
+    while (i1.hasNext()) {
+      assertTrue(i2.hasNext());
+      
+      // Compare all the fields but the path name, which is relative
+      // to the original path from listFiles.
+      LocatedFileStatus l1 = i1.next();
+      LocatedFileStatus l2 = i2.next();
+      assertEquals(l1.getAccessTime(), l2.getAccessTime());
+      assertEquals(l1.getBlockSize(), l2.getBlockSize());
+      assertEquals(l1.getGroup(), l2.getGroup());
+      assertEquals(l1.getLen(), l2.getLen());
+      assertEquals(l1.getModificationTime(), l2.getModificationTime());
+      assertEquals(l1.getOwner(), l2.getOwner());
+      assertEquals(l1.getPermission(), l2.getPermission());
+      assertEquals(l1.getReplication(), l2.getReplication());
+    }
+    assertFalse(i2.hasNext());
+  }
+  
+  /**
+   * Check /.reserved path is reserved and cannot be created.
+   */
+  @Test
+  public void testReservedFileNames() throws IOException {
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = null;
+    try {
+      // First start a cluster with the reserved file name check at its
+      // default (enabled)
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+      cluster.waitActive();
+      FileSystem fs = cluster.getFileSystem();
+      
+      // Creation of directory or file with reserved path names is disallowed
+      ensureReservedFileNamesCannotBeCreated(fs, "/.reserved", true);
+      ensureReservedFileNamesCannotBeCreated(fs, "/.reserved", false);
+      Path reservedPath = new Path("/.reserved");
+      
+      // Loading of fsimage or editlog with /.reserved directory should fail
+      // Mkdir "/.reserved reserved path with reserved path check turned off
+      FSDirectory.CHECK_RESERVED_FILE_NAMES = false;
+      fs.mkdirs(reservedPath);
+      assertTrue(fs.isDirectory(reservedPath));
+      ensureReservedFileNamesCannotBeLoaded(cluster);
+
+      // Loading of fsimage or editlog with /.reserved file should fail
+      // Create file "/.reserved reserved path with reserved path check turned off
+      FSDirectory.CHECK_RESERVED_FILE_NAMES = false;
+      ensureClusterRestartSucceeds(cluster);
+      fs.delete(reservedPath, true);
+      DFSTestUtil.createFile(fs, reservedPath, 10, (short)1, 0L);
+      assertFalse(fs.isDirectory(reservedPath));
+      ensureReservedFileNamesCannotBeLoaded(cluster);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+  
+  private void ensureReservedFileNamesCannotBeCreated(FileSystem fs, String name,
+      boolean isDir) {
+    // Creation of directory or file with reserved path names is disallowed
+    Path reservedPath = new Path(name);
+    try {
+      if (isDir) {
+        fs.mkdirs(reservedPath);
+      } else {
+        DFSTestUtil.createFile(fs, reservedPath, 10, (short) 1, 0L);
+      }
+      fail((isDir ? "mkdir" : "create file") + " should be disallowed");
+    } catch (Exception expected) {
+      // ignored
+    }
+  }
+  
+  private void ensureReservedFileNamesCannotBeLoaded(MiniDFSCluster cluster)
+      throws IOException {
+    // Turn on reserved file name checking. Loading of edits should fail
+    FSDirectory.CHECK_RESERVED_FILE_NAMES = true;
+    ensureClusterRestartFails(cluster);
+
+    // Turn off reserved file name checking and successfully load edits
+    FSDirectory.CHECK_RESERVED_FILE_NAMES = false;
+    ensureClusterRestartSucceeds(cluster);
+
+    // Turn on reserved file name checking. Loading of fsimage should fail
+    FSDirectory.CHECK_RESERVED_FILE_NAMES = true;
+    ensureClusterRestartFails(cluster);
+  }
+  
+  private void ensureClusterRestartFails(MiniDFSCluster cluster) {
+    try {
+      cluster.restartNameNode();
+      fail("Cluster should not have successfully started");
+    } catch (Exception expected) {
+      LOG.info("Expected exception thrown " + expected);
+    }
+    assertFalse(cluster.isClusterUp());
+  }
+  
+  private void ensureClusterRestartSucceeds(MiniDFSCluster cluster)
+      throws IOException {
+    cluster.restartNameNode();
+    cluster.waitActive();
+    assertTrue(cluster.isClusterUp());
+  }
+  
+  /**
+   * For a given path, build a tree of INodes and return the leaf node.
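+   * For example, "/a/b/c" builds root -> "a" -> "b" -> "c" and returns the
+   * INode for "c".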
+   */
+  private INode createTreeOfInodes(String path) throws QuotaExceededException {
+    byte[][] components = INode.getPathComponents(path);
+    FsPermission perm = FsPermission.createImmutable((short)0755);
+    PermissionStatus permstatus = PermissionStatus.createImmutable("", "", perm);
+    
+    long id = 0;
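+    // Start from a synthetic root directory (id 1, null name)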
+    INodeDirectory prev = new INodeDirectory(++id, null, permstatus, 0);
+    INodeDirectory dir = null;
+    for (byte[] component : components) {
+      if (component.length == 0) {
+        continue;
+      }
+      LOG.info("Adding component " + DFSUtil.bytes2String(component));
+      dir = new INodeDirectory(++id, component, permstatus, 0);
+      prev.addChild(dir, false, null);
+      prev = dir;
+    }
+    return dir; // Last Inode in the chain
+  }
+  
+  private static void checkEquals(byte[][] expected, byte[][] actual) {
+    assertEquals(expected.length, actual.length);
+    int i = 0;
+    for (byte[] e : expected) {
+      assertTrue(Arrays.equals(e, actual[i++]));
+    }
+  }
+  
+  /**
+   * Test for {@link FSDirectory#getPathComponents(INode)}
+   */
+  @Test
+  public void testGetPathFromInode() throws QuotaExceededException {
+    String path = "/a/b/c";
+    INode inode = createTreeOfInodes(path);
+    byte[][] expected = INode.getPathComponents(path);
+    byte[][] actual = FSDirectory.getPathComponents(inode);
+    checkEquals(expected, actual);
+  }
+  
+  /**
+   * Tests for {@link FSDirectory#resolvePath(String, byte[][], FSDirectory)}
+   */
+  @Test
+  public void testInodePath() throws IOException {
+    // For a non .inodes path the regular components are returned
+    String path = "/a/b/c";
+    INode inode = createTreeOfInodes(path);
+    // Make any inode ID lookup return the inode for "c" from /a/b/c
+    FSDirectory fsd = Mockito.mock(FSDirectory.class);
+    Mockito.doReturn(inode).when(fsd).getInode(Mockito.anyLong());
+    
+    // Null components
+    assertEquals("/test", FSDirectory.resolvePath("/test", null, fsd));
+    
+    // Tests for FSDirectory#resolvePath()
+    // Non inode regular path
+    byte[][] components = INode.getPathComponents(path);
+    String resolvedPath = FSDirectory.resolvePath(path, components, fsd);
+    assertEquals(path, resolvedPath);
+    
+    // Inode path with no trailing separator
+    components = INode.getPathComponents("/.reserved/.inodes/1");
+    resolvedPath = FSDirectory.resolvePath(path, components, fsd);
+    assertEquals(path, resolvedPath);
+    
+      // Inode path with trailing separator
+      components = INode.getPathComponents("/.reserved/.inodes/1/");
+      resolvedPath = FSDirectory.resolvePath(path, components, fsd);
+      assertEquals(path, resolvedPath);
+    
+    // Inode relative path
+    components = INode.getPathComponents("/.reserved/.inodes/1/d/e/f");
+    resolvedPath = FSDirectory.resolvePath(path, components, fsd);
+    assertEquals("/a/b/c/d/e/f", resolvedPath);
+    
+      // A path with just .inodes returns the path as is
+    String testPath = "/.reserved/.inodes";
+    components = INode.getPathComponents(testPath);
+    resolvedPath = FSDirectory.resolvePath(testPath, components, fsd);
+    assertEquals(testPath, resolvedPath);
+    
+    // Root inode path
+    testPath = "/.reserved/.inodes/" + INodeId.ROOT_INODE_ID;
+    components = INode.getPathComponents(testPath);
+    resolvedPath = FSDirectory.resolvePath(testPath, components, fsd);
+    assertEquals("/", resolvedPath);
+    
+    // An invalid inode path should remain unresolved
+    testPath = "/.invalid/.inodes/1";
+    components = INode.getPathComponents(testPath);
+    resolvedPath = FSDirectory.resolvePath(testPath, components, fsd);
+    assertEquals(testPath, resolvedPath);
+  }
 }
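
For reference, a minimal client-side sketch of the feature these tests
exercise; the inode ID 16386 and the default-FS configuration are
illustrative assumptions, not taken from the patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    Configuration conf = new Configuration();      // picks up fs.defaultFS
    FileSystem fs = FileSystem.get(conf);
    // Address the file by inode ID instead of by name; the NameNode resolves
    // this through its inode map, so it keeps working across renames.
    Path byInode = new Path("/.reserved/.inodes/16386");
    FileStatus status = fs.getFileStatus(byInode);
    System.out.println("length=" + status.getLen());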

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java?rev=1470225&r1=1470224&r2=1470225&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java Sat Apr 20 20:22:21 2013
@@ -422,6 +422,11 @@ public class TestPipelinesFailover {
     // Disable permissions so that another user can recover the lease.
     harness.conf.setBoolean(
         DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
+    // This test triggers rapid NN failovers.  The client retry policy uses an
+    // exponential backoff.  This can quickly lead to long sleep times and even
+    // timeout the whole test.  Cap the sleep time at 1s to prevent this.
+    harness.conf.setInt(DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY,
+        1000);
 
     final MiniDFSCluster cluster = harness.startCluster();
     try {
@@ -537,11 +542,10 @@ public class TestPipelinesFailover {
   }
   
   /**
-   * Try to cover the lease on the given file for up to 30
-   * seconds.
+   * Try to recover the lease on the given file for up to 60 seconds.
    * @param fsOtherUser the filesystem to use for the recoverLease call
    * @param testPath the path on which to run lease recovery
-   * @throws TimeoutException if lease recover does not succeed within 30
+   * @throws TimeoutException if lease recovery does not succeed within 60
    * seconds
    * @throws InterruptedException if the thread is interrupted
    */
@@ -564,7 +568,7 @@ public class TestPipelinesFailover {
           }
           return success;
         }
-      }, 1000, 30000);
+      }, 1000, 60000); // check every 1s, give up after 60s
     } catch (TimeoutException e) {
       throw new TimeoutException("Timed out recovering lease for " +
           testPath);