Posted to hdfs-commits@hadoop.apache.org by cm...@apache.org on 2014/08/20 01:50:25 UTC

svn commit: r1619012 [19/35] - in /hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project: hadoop-hdfs-httpfs/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/ hadoop...

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java Tue Aug 19 23:49:39 2014
@@ -32,6 +32,7 @@ import org.apache.hadoop.fs.permission.A
 import org.apache.hadoop.fs.permission.AclEntryType;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.util.ReadOnlyList;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
@@ -136,6 +137,7 @@ class FSPermissionChecker {
    * @param subAccess If path is a directory,
    * it is the access required of the path and all the sub-directories.
    * If path is not a directory, there is no effect.
+   * @param ignoreEmptyDir whether to ignore permission checking for empty directories
    * @param resolveLink whether to resolve the final path component if it is
    * a symlink
    * @throws AccessControlException
@@ -144,9 +146,9 @@ class FSPermissionChecker {
    * Guarded by {@link FSNamesystem#readLock()}
    * Caller of this method must hold that lock.
    */
-  void checkPermission(String path, INodeDirectory root, boolean doCheckOwner,
+  void checkPermission(String path, FSDirectory dir, boolean doCheckOwner,
       FsAction ancestorAccess, FsAction parentAccess, FsAction access,
-      FsAction subAccess, boolean resolveLink)
+      FsAction subAccess, boolean ignoreEmptyDir, boolean resolveLink)
       throws AccessControlException, UnresolvedLinkException {
     if (LOG.isDebugEnabled()) {
       LOG.debug("ACCESS CHECK: " + this
@@ -155,11 +157,12 @@ class FSPermissionChecker {
           + ", parentAccess=" + parentAccess
           + ", access=" + access
           + ", subAccess=" + subAccess
+          + ", ignoreEmptyDir=" + ignoreEmptyDir
           + ", resolveLink=" + resolveLink);
     }
     // check if (parentAccess != null) && file exists, then check sticky bit
     // If resolveLink, the check is performed on the link target.
-    final INodesInPath inodesInPath = root.getINodesInPath(path, resolveLink);
+    final INodesInPath inodesInPath = dir.getINodesInPath(path, resolveLink);
     final int snapshotId = inodesInPath.getPathSnapshotId();
     final INode[] inodes = inodesInPath.getINodes();
     int ancestorIndex = inodes.length - 2;
@@ -182,7 +185,7 @@ class FSPermissionChecker {
       check(last, snapshotId, access);
     }
     if (subAccess != null) {
-      checkSubAccess(last, snapshotId, subAccess);
+      checkSubAccess(last, snapshotId, subAccess, ignoreEmptyDir);
     }
     if (doCheckOwner) {
       checkOwner(last, snapshotId);
@@ -207,8 +210,8 @@ class FSPermissionChecker {
   }
 
   /** Guarded by {@link FSNamesystem#readLock()} */
-  private void checkSubAccess(INode inode, int snapshotId, FsAction access
-      ) throws AccessControlException {
+  private void checkSubAccess(INode inode, int snapshotId, FsAction access,
+      boolean ignoreEmptyDir) throws AccessControlException {
     if (inode == null || !inode.isDirectory()) {
       return;
     }
@@ -216,9 +219,12 @@ class FSPermissionChecker {
     Stack<INodeDirectory> directories = new Stack<INodeDirectory>();
     for(directories.push(inode.asDirectory()); !directories.isEmpty(); ) {
       INodeDirectory d = directories.pop();
-      check(d, snapshotId, access);
+      ReadOnlyList<INode> cList = d.getChildrenList(snapshotId);
+      if (!(cList.isEmpty() && ignoreEmptyDir)) {
+        check(d, snapshotId, access);
+      }
 
-      for(INode child : d.getChildrenList(snapshotId)) {
+      for(INode child : cList) {
         if (child.isDirectory()) {
           directories.push(child.asDirectory());
         }

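The FSPermissionChecker hunks above thread the new ignoreEmptyDir flag down into checkSubAccess: the iterative subtree walk now fetches each directory's child list once, reuses it both for the skip test and for pushing subdirectories, and skips the access check entirely for a childless directory when the flag is set. A self-contained sketch of that traversal, with illustrative Dir/check stand-ins rather than the HDFS INode types:

    import java.util.ArrayDeque;
    import java.util.Arrays;
    import java.util.Collections;
    import java.util.Deque;
    import java.util.List;

    class SubAccessSketch {
      static class Dir {
        final List<Dir> children;
        Dir(List<Dir> children) { this.children = children; }
      }

      /** Depth-first walk with an explicit stack, mirroring checkSubAccess. */
      static void checkSubAccess(Dir root, boolean ignoreEmptyDir) {
        Deque<Dir> directories = new ArrayDeque<>();
        directories.push(root);
        while (!directories.isEmpty()) {
          Dir d = directories.pop();
          List<Dir> cList = d.children;     // fetched once, reused below
          if (!(cList.isEmpty() && ignoreEmptyDir)) {
            check(d);                       // would throw AccessControlException
          }
          for (Dir child : cList) {
            directories.push(child);
          }
        }
      }

      static void check(Dir d) { /* permission test elided */ }

      public static void main(String[] args) {
        Dir empty = new Dir(Collections.<Dir>emptyList());
        Dir root = new Dir(Arrays.asList(empty));
        checkSubAccess(root, true);         // 'empty' is skipped, 'root' is checked
      }
    }
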
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java Tue Aug 19 23:49:39 2014
@@ -43,6 +43,8 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
+import org.apache.hadoop.io.nativeio.NativeIO;
+
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
@@ -69,6 +71,8 @@ public class FileJournalManager implemen
     NameNodeFile.EDITS.getName() + "_(\\d+)-(\\d+)");
   private static final Pattern EDITS_INPROGRESS_REGEX = Pattern.compile(
     NameNodeFile.EDITS_INPROGRESS.getName() + "_(\\d+)");
+  private static final Pattern EDITS_INPROGRESS_STALE_REGEX = Pattern.compile(
+      NameNodeFile.EDITS_INPROGRESS.getName() + "_(\\d+).*(\\S+)");
 
   private File currentInProgress = null;
 
@@ -132,10 +136,14 @@ public class FileJournalManager implemen
     Preconditions.checkState(!dstFile.exists(),
         "Can't finalize edits file " + inprogressFile + " since finalized file " +
         "already exists");
-    if (!inprogressFile.renameTo(dstFile)) {
+
+    try {
+      NativeIO.renameTo(inprogressFile, dstFile);
+    } catch (IOException e) {
       errorReporter.reportErrorOnFile(dstFile);
-      throw new IllegalStateException("Unable to finalize edits file " + inprogressFile);
+      throw new IllegalStateException("Unable to finalize edits file " + inprogressFile, e);
     }
+
     if (inprogressFile.equals(currentInProgress)) {
       currentInProgress = null;
     }
@@ -156,8 +164,7 @@ public class FileJournalManager implemen
       throws IOException {
     LOG.info("Purging logs older than " + minTxIdToKeep);
     File[] files = FileUtil.listFiles(sd.getCurrentDir());
-    List<EditLogFile> editLogs = 
-      FileJournalManager.matchEditLogs(files);
+    List<EditLogFile> editLogs = matchEditLogs(files, true);
     for (EditLogFile log : editLogs) {
       if (log.getFirstTxId() < minTxIdToKeep &&
           log.getLastTxId() < minTxIdToKeep) {
@@ -168,7 +175,7 @@ public class FileJournalManager implemen
 
   /**
    * Find all editlog segments starting at or above the given txid.
-   * @param fromTxId the txnid which to start looking
+   * @param firstTxId the transaction ID from which to start looking
    * @param inProgressOk whether or not to include the in-progress edit log 
    *        segment       
    * @return a list of remote edit logs
@@ -238,8 +245,13 @@ public class FileJournalManager implemen
   public static List<EditLogFile> matchEditLogs(File logDir) throws IOException {
     return matchEditLogs(FileUtil.listFiles(logDir));
   }
-  
+
   static List<EditLogFile> matchEditLogs(File[] filesInStorage) {
+    return matchEditLogs(filesInStorage, false);
+  }
+
+  private static List<EditLogFile> matchEditLogs(File[] filesInStorage,
+      boolean forPurging) {
     List<EditLogFile> ret = Lists.newArrayList();
     for (File f : filesInStorage) {
       String name = f.getName();
@@ -250,6 +262,7 @@ public class FileJournalManager implemen
           long startTxId = Long.parseLong(editsMatch.group(1));
           long endTxId = Long.parseLong(editsMatch.group(2));
           ret.add(new EditLogFile(f, startTxId, endTxId));
+          continue;
         } catch (NumberFormatException nfe) {
           LOG.error("Edits file " + f + " has improperly formatted " +
                     "transaction ID");
@@ -264,12 +277,30 @@ public class FileJournalManager implemen
           long startTxId = Long.parseLong(inProgressEditsMatch.group(1));
           ret.add(
               new EditLogFile(f, startTxId, HdfsConstants.INVALID_TXID, true));
+          continue;
         } catch (NumberFormatException nfe) {
           LOG.error("In-progress edits file " + f + " has improperly " +
                     "formatted transaction ID");
           // skip
         }
       }
+      if (forPurging) {
+        // Check for in-progress stale edits
+        Matcher staleInprogressEditsMatch = EDITS_INPROGRESS_STALE_REGEX
+            .matcher(name);
+        if (staleInprogressEditsMatch.matches()) {
+          try {
+            long startTxId = Long.valueOf(staleInprogressEditsMatch.group(1));
+            ret.add(new EditLogFile(f, startTxId, HdfsConstants.INVALID_TXID,
+                true));
+            continue;
+          } catch (NumberFormatException nfe) {
+            LOG.error("In-progress stale edits file " + f + " has improperly "
+                + "formatted transaction ID");
+            // skip
+          }
+        }
+      }
     }
     return ret;
   }
@@ -513,11 +544,16 @@ public class FileJournalManager implemen
       File src = file;
       File dst = new File(src.getParent(), src.getName() + newSuffix);
       // renameTo fails on Windows if the destination file already exists.
-      if (!src.renameTo(dst)) {
-        if (!dst.delete() || !src.renameTo(dst)) {
-          throw new IOException(
-            "Couldn't rename log " + src + " to " + dst);
+      try {
+        if (dst.exists()) {
+          if (!dst.delete()) {
+            throw new IOException("Couldn't delete " + dst);
+          }
         }
+        NativeIO.renameTo(src, dst);
+      } catch (IOException e) {
+        throw new IOException(
+            "Couldn't rename log " + src + " to " + dst, e);
       }
       file = dst;
     }

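Two independent improvements in the FileJournalManager hunks above. First, both rename sites drop the boolean-returning java.io.File#renameTo in favor of NativeIO.renameTo, which throws on failure, so the exception raised now carries the underlying cause instead of collapsing it into a bare false. The pattern, extracted into a sketch (only the NativeIO.renameTo(File, File) call already used in the diff is assumed):

    import java.io.File;
    import java.io.IOException;
    import org.apache.hadoop.io.nativeio.NativeIO;

    class RenameSketch {
      /**
       * renameTo on Windows fails if the destination exists, so remove it
       * first; a failure in either step surfaces with its root cause.
       */
      static void renameOrThrow(File src, File dst) throws IOException {
        try {
          if (dst.exists() && !dst.delete()) {
            throw new IOException("Couldn't delete " + dst);
          }
          NativeIO.renameTo(src, dst);
        } catch (IOException e) {
          throw new IOException("Couldn't rename log " + src + " to " + dst, e);
        }
      }
    }

Second, purging now calls matchEditLogs(files, true): the forPurging branch also matches in-progress edits files carrying a trailing suffix (for example one added by the rename-with-suffix path in the last hunk), so stale segments become purge candidates without ever being offered to normal log selection. The added continue statements keep a file from being matched by more than one pattern.
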
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileUnderConstructionFeature.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileUnderConstructionFeature.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileUnderConstructionFeature.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileUnderConstructionFeature.java Tue Aug 19 23:49:39 2014
@@ -32,15 +32,10 @@ import org.apache.hadoop.hdfs.server.nam
 public class FileUnderConstructionFeature implements INode.Feature {
   private String clientName; // lease holder
   private final String clientMachine;
-  // if client is a cluster node too.
-  private final DatanodeDescriptor clientNode;
 
-  public FileUnderConstructionFeature(final String clientName,
-      final String clientMachine,
-      final DatanodeDescriptor clientNode) {
+  public FileUnderConstructionFeature(final String clientName, final String clientMachine) {
     this.clientName = clientName;
     this.clientMachine = clientMachine;
-    this.clientNode = clientNode;
   }
 
   public String getClientName() {
@@ -55,10 +50,6 @@ public class FileUnderConstructionFeatur
     return clientMachine;
   }
 
-  public DatanodeDescriptor getClientNode() {
-    return clientNode;
-  }
-
   /**
    * Update the length for the last block
    *

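With the FileUnderConstructionFeature hunks above, the feature no longer caches the client's DatanodeDescriptor; only the lease holder and client machine are kept, and a descriptor is looked up where it is actually needed. Construction after this change (argument values are illustrative):

    // The DatanodeDescriptor parameter is gone from the constructor.
    FileUnderConstructionFeature uc = new FileUnderConstructionFeature(
        "DFSClient_NONMAPREDUCE_123_1",   // lease holder (illustrative)
        "client01.example.com");          // client machine (illustrative)
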
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java Tue Aug 19 23:49:39 2014
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.server.na
 import java.io.PrintStream;
 import java.io.PrintWriter;
 import java.io.StringWriter;
-import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.commons.logging.Log;
@@ -98,9 +97,9 @@ public abstract class INode implements I
   /** Set user */
   final INode setUser(String user, int latestSnapshotId)
       throws QuotaExceededException {
-    final INode nodeToUpdate = recordModification(latestSnapshotId);
-    nodeToUpdate.setUser(user);
-    return nodeToUpdate;
+    recordModification(latestSnapshotId);
+    setUser(user);
+    return this;
   }
   /**
    * @param snapshotId
@@ -123,9 +122,9 @@ public abstract class INode implements I
   /** Set group */
   final INode setGroup(String group, int latestSnapshotId)
       throws QuotaExceededException {
-    final INode nodeToUpdate = recordModification(latestSnapshotId);
-    nodeToUpdate.setGroup(group);
-    return nodeToUpdate;
+    recordModification(latestSnapshotId);
+    setGroup(group);
+    return this;
   }
 
   /**
@@ -149,9 +148,9 @@ public abstract class INode implements I
   /** Set the {@link FsPermission} of this {@link INode} */
   INode setPermission(FsPermission permission, int latestSnapshotId) 
       throws QuotaExceededException {
-    final INode nodeToUpdate = recordModification(latestSnapshotId);
-    nodeToUpdate.setPermission(permission);
-    return nodeToUpdate;
+    recordModification(latestSnapshotId);
+    setPermission(permission);
+    return this;
   }
 
   abstract AclFeature getAclFeature(int snapshotId);
@@ -165,18 +164,56 @@ public abstract class INode implements I
 
   final INode addAclFeature(AclFeature aclFeature, int latestSnapshotId)
       throws QuotaExceededException {
-    final INode nodeToUpdate = recordModification(latestSnapshotId);
-    nodeToUpdate.addAclFeature(aclFeature);
-    return nodeToUpdate;
+    recordModification(latestSnapshotId);
+    addAclFeature(aclFeature);
+    return this;
   }
 
   abstract void removeAclFeature();
 
   final INode removeAclFeature(int latestSnapshotId)
       throws QuotaExceededException {
-    final INode nodeToUpdate = recordModification(latestSnapshotId);
-    nodeToUpdate.removeAclFeature();
-    return nodeToUpdate;
+    recordModification(latestSnapshotId);
+    removeAclFeature();
+    return this;
+  }
+
+  /**
+   * @param snapshotId
+   *          if it is not {@link Snapshot#CURRENT_STATE_ID}, get the result
+   *          from the given snapshot; otherwise, get the result from the
+   *          current inode.
+   * @return XAttrFeature
+   */  
+  abstract XAttrFeature getXAttrFeature(int snapshotId);
+  
+  @Override
+  public final XAttrFeature getXAttrFeature() {
+    return getXAttrFeature(Snapshot.CURRENT_STATE_ID);
+  }
+  
+  /**
+   * Set <code>XAttrFeature</code> 
+   */
+  abstract void addXAttrFeature(XAttrFeature xAttrFeature);
+  
+  final INode addXAttrFeature(XAttrFeature xAttrFeature, int latestSnapshotId) 
+      throws QuotaExceededException {
+    recordModification(latestSnapshotId);
+    addXAttrFeature(xAttrFeature);
+    return this;
+  }
+  
+  /**
+   * Remove <code>XAttrFeature</code> 
+   */
+  abstract void removeXAttrFeature();
+  
+  final INode removeXAttrFeature(int latestSnapshotId)
+      throws QuotaExceededException {
+    recordModification(latestSnapshotId);
+    removeXAttrFeature();
+    return this;
   }
   
   /**
@@ -261,11 +298,8 @@ public abstract class INode implements I
    * @param latestSnapshotId The id of the latest snapshot that has been taken.
    *                         Note that it is {@link Snapshot#CURRENT_STATE_ID} 
    *                         if no snapshots have been taken.
-   * @return The current inode, which usually is the same object of this inode.
-   *         However, in some cases, this inode may be replaced with a new inode
-   *         for maintaining snapshots. The current inode is then the new inode.
    */
-  abstract INode recordModification(final int latestSnapshotId)
+  abstract void recordModification(final int latestSnapshotId)
       throws QuotaExceededException;
 
   /** Check whether it's a reference. */
@@ -615,9 +649,9 @@ public abstract class INode implements I
   /** Set the last modification time of inode. */
   public final INode setModificationTime(long modificationTime,
       int latestSnapshotId) throws QuotaExceededException {
-    final INode nodeToUpdate = recordModification(latestSnapshotId);
-    nodeToUpdate.setModificationTime(modificationTime);
-    return nodeToUpdate;
+    recordModification(latestSnapshotId);
+    setModificationTime(modificationTime);
+    return this;
   }
 
   /**
@@ -645,19 +679,19 @@ public abstract class INode implements I
    */
   public final INode setAccessTime(long accessTime, int latestSnapshotId)
       throws QuotaExceededException {
-    final INode nodeToUpdate = recordModification(latestSnapshotId);
-    nodeToUpdate.setAccessTime(accessTime);
-    return nodeToUpdate;
+    recordModification(latestSnapshotId);
+    setAccessTime(accessTime);
+    return this;
   }
 
 
   /**
-   * Breaks file path into components.
-   * @param path
-   * @return array of byte arrays each of which represents 
+   * Breaks {@code path} into components.
+   * @return array of byte arrays each of which represents
    * a single path component.
    */
-  static byte[][] getPathComponents(String path) {
+  @VisibleForTesting
+  public static byte[][] getPathComponents(String path) {
     return getPathComponents(getPathNames(path));
   }
 
@@ -673,8 +707,7 @@ public abstract class INode implements I
   }
 
   /**
-   * Splits an absolute path into an array of path components.
-   * @param path
+   * Splits an absolute {@code path} into an array of path components.
    * @throws AssertionError if the given path is invalid.
    * @return array of path components.
    */

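A pattern repeated through the INode.java hunks above: recordModification is now void, so every snapshot-aware setter does the diff-list bookkeeping first and then mutates this in place, always returning the same object. Reduced to a skeleton (illustrative, not the real class):

    abstract class SetterSketch {
      private long accessTime;

      /** Saves the pre-modification state into the snapshot diff list. */
      abstract void recordModification(int latestSnapshotId);

      final SetterSketch setAccessTime(long accessTime, int latestSnapshotId) {
        recordModification(latestSnapshotId); // bookkeeping first...
        this.accessTime = accessTime;         // ...then mutate in place
        return this;                          // the node is never replaced
      }
    }
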
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributes.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributes.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributes.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributes.java Tue Aug 19 23:49:39 2014
@@ -21,6 +21,7 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.server.namenode.INodeWithAdditionalFields.PermissionStatusFormat;
+import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;
 
 /**
  * The attributes of an inode.
@@ -50,6 +51,9 @@ public interface INodeAttributes {
 
   /** @return the ACL feature. */
   public AclFeature getAclFeature();
+  
+  /** @return the XAttrs feature. */
+  public XAttrFeature getXAttrFeature();
 
   /** @return the modification time. */
   public long getModificationTime();
@@ -64,14 +68,17 @@ public interface INodeAttributes {
     private final AclFeature aclFeature;
     private final long modificationTime;
     private final long accessTime;
+    private XAttrFeature xAttrFeature;
 
     SnapshotCopy(byte[] name, PermissionStatus permissions,
-        AclFeature aclFeature, long modificationTime, long accessTime) {
+        AclFeature aclFeature, long modificationTime, long accessTime, 
+        XAttrFeature xAttrFeature) {
       this.name = name;
       this.permission = PermissionStatusFormat.toLong(permissions);
       this.aclFeature = aclFeature;
       this.modificationTime = modificationTime;
       this.accessTime = accessTime;
+      this.xAttrFeature = xAttrFeature;
     }
 
     SnapshotCopy(INode inode) {
@@ -80,6 +87,7 @@ public interface INodeAttributes {
       this.aclFeature = inode.getAclFeature();
       this.modificationTime = inode.getModificationTime();
       this.accessTime = inode.getAccessTime();
+      this.xAttrFeature = inode.getXAttrFeature();
     }
 
     @Override
@@ -89,14 +97,12 @@ public interface INodeAttributes {
 
     @Override
     public final String getUserName() {
-      final int n = (int)PermissionStatusFormat.USER.retrieve(permission);
-      return SerialNumberManager.INSTANCE.getUser(n);
+      return PermissionStatusFormat.getUser(permission);
     }
 
     @Override
     public final String getGroupName() {
-      final int n = (int)PermissionStatusFormat.GROUP.retrieve(permission);
-      return SerialNumberManager.INSTANCE.getGroup(n);
+      return PermissionStatusFormat.getGroup(permission);
     }
 
     @Override
@@ -106,7 +112,7 @@ public interface INodeAttributes {
 
     @Override
     public final short getFsPermissionShort() {
-      return (short)PermissionStatusFormat.MODE.retrieve(permission);
+      return PermissionStatusFormat.getMode(permission);
     }
     
     @Override
@@ -128,5 +134,10 @@ public interface INodeAttributes {
     public final long getAccessTime() {
       return accessTime;
     }
+    
+    @Override
+    public final XAttrFeature getXAttrFeature() {
+      return xAttrFeature;
+    }
   }
 }

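The INodeAttributes changes mirror what was done earlier for ACLs: a SnapshotCopy now also captures the inode's XAttrFeature at copy time, so extended attributes remain visible through snapshots; the same xattr parameter is threaded through the directory and file attribute classes below. SnapshotCopy's constructors are package-private, so usage is namenode-internal; conceptually:

    // Inside org.apache.hadoop.hdfs.server.namenode:
    INodeAttributes.SnapshotCopy copy = new INodeAttributes.SnapshotCopy(inode);
    // The copy holds the feature reference as of copy time; installing a
    // new feature on the live inode later does not affect the copy.
    XAttrFeature atCopyTime = copy.getXAttrFeature();
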
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java Tue Aug 19 23:49:39 2014
@@ -26,15 +26,14 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.hadoop.fs.PathIsNotDirectoryException;
-import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
-import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
+import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectorySnapshottableFeature;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiffList;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.Diff.ListType;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
@@ -104,11 +103,6 @@ public class INodeDirectory extends INod
     return this;
   }
 
-  /** Is this a snapshottable directory? */
-  public boolean isSnapshottable() {
-    return false;
-  }
-
   void setQuota(long nsQuota, long dsQuota) {
     DirectoryWithQuotaFeature quota = getDirectoryWithQuotaFeature();
     if (quota != null) {
@@ -163,7 +157,7 @@ public class INodeDirectory extends INod
     return quota;
   }
 
-  private int searchChildren(byte[] name) {
+  int searchChildren(byte[] name) {
     return children == null? -1: Collections.binarySearch(children, name);
   }
   
@@ -188,7 +182,7 @@ public class INodeDirectory extends INod
   public final boolean isWithSnapshot() {
     return getDirectoryWithSnapshotFeature() != null;
   }
-  
+
   public DirectoryDiffList getDiffs() {
     DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
     return sf != null ? sf.getDiffs() : null;
@@ -206,50 +200,71 @@ public class INodeDirectory extends INod
     return super.toDetailString() + (sf == null ? "" : ", " + sf.getDiffs()); 
   }
 
-  /** Replace itself with an {@link INodeDirectorySnapshottable}. */
-  public INodeDirectorySnapshottable replaceSelf4INodeDirectorySnapshottable(
-      int latestSnapshotId, final INodeMap inodeMap)
-      throws QuotaExceededException {
-    Preconditions.checkState(!(this instanceof INodeDirectorySnapshottable),
-        "this is already an INodeDirectorySnapshottable, this=%s", this);
-    final INodeDirectorySnapshottable s = new INodeDirectorySnapshottable(this);
-    replaceSelf(s, inodeMap).getDirectoryWithSnapshotFeature().getDiffs()
-        .saveSelf2Snapshot(latestSnapshotId, s, this);
-    return s;
-  }
-
-  /** Replace itself with {@link INodeDirectory}. */
-  public INodeDirectory replaceSelf4INodeDirectory(final INodeMap inodeMap) {
-    Preconditions.checkState(getClass() != INodeDirectory.class,
-        "the class is already INodeDirectory, this=%s", this);
-    return replaceSelf(new INodeDirectory(this, true, this.getFeatures()),
-      inodeMap);
+  public DirectorySnapshottableFeature getDirectorySnapshottableFeature() {
+    return getFeature(DirectorySnapshottableFeature.class);
   }
 
-  /** Replace itself with the given directory. */
-  private final <N extends INodeDirectory> N replaceSelf(final N newDir,
-      final INodeMap inodeMap) {
-    final INodeReference ref = getParentReference();
-    if (ref != null) {
-      ref.setReferredINode(newDir);
-      if (inodeMap != null) {
-        inodeMap.put(newDir);
-      }
-    } else {
-      final INodeDirectory parent = getParent();
-      Preconditions.checkArgument(parent != null, "parent is null, this=%s", this);
-      parent.replaceChild(this, newDir, inodeMap);
+  public boolean isSnapshottable() {
+    return getDirectorySnapshottableFeature() != null;
+  }
+
+  public Snapshot getSnapshot(byte[] snapshotName) {
+    return getDirectorySnapshottableFeature().getSnapshot(snapshotName);
+  }
+
+  public void setSnapshotQuota(int snapshotQuota) {
+    getDirectorySnapshottableFeature().setSnapshotQuota(snapshotQuota);
+  }
+
+  public Snapshot addSnapshot(int id, String name) throws SnapshotException,
+      QuotaExceededException {
+    return getDirectorySnapshottableFeature().addSnapshot(this, id, name);
+  }
+
+  public Snapshot removeSnapshot(String snapshotName,
+      BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes)
+      throws SnapshotException {
+    return getDirectorySnapshottableFeature().removeSnapshot(this,
+        snapshotName, collectedBlocks, removedINodes);
+  }
+
+  public void renameSnapshot(String path, String oldName, String newName)
+      throws SnapshotException {
+    getDirectorySnapshottableFeature().renameSnapshot(path, oldName, newName);
+  }
+
+  /** add DirectorySnapshottableFeature */
+  public void addSnapshottableFeature() {
+    Preconditions.checkState(!isSnapshottable(),
+        "this is already snapshottable, this=%s", this);
+    DirectoryWithSnapshotFeature s = this.getDirectoryWithSnapshotFeature();
+    final DirectorySnapshottableFeature snapshottable =
+        new DirectorySnapshottableFeature(s);
+    if (s != null) {
+      this.removeFeature(s);
     }
-    clear();
-    return newDir;
+    this.addFeature(snapshottable);
   }
-  
+
+  /** remove DirectorySnapshottableFeature */
+  public void removeSnapshottableFeature() {
+    DirectorySnapshottableFeature s = getDirectorySnapshottableFeature();
+    Preconditions.checkState(s != null,
+        "The dir does not have snapshottable feature: this=%s", this);
+    this.removeFeature(s);
+    if (s.getDiffs().asList().size() > 0) {
+      // add a DirectoryWithSnapshotFeature back
+      DirectoryWithSnapshotFeature sf = new DirectoryWithSnapshotFeature(
+          s.getDiffs());
+      addFeature(sf);
+    }
+  }
+
   /** 
    * Replace the given child with a new child. Note that we no longer need to
    * replace a normal INodeDirectory or INodeFile with an
    * INodeDirectoryWithSnapshot or INodeFileUnderConstruction. The only case
-   * for child replacement is for {@link INodeDirectorySnapshottable} and 
-   * reference nodes.
+   * for child replacement is reference nodes.
    */
   public void replaceChild(INode oldChild, final INode newChild,
       final INodeMap inodeMap) {
@@ -303,7 +318,7 @@ public class INodeDirectory extends INod
   }
 
   @Override
-  public INodeDirectory recordModification(int latestSnapshotId) 
+  public void recordModification(int latestSnapshotId)
       throws QuotaExceededException {
     if (isInLatestSnapshot(latestSnapshotId)
         && !shouldRecordInSrcSnapshot(latestSnapshotId)) {
@@ -315,7 +330,6 @@ public class INodeDirectory extends INod
       // record self in the diff list if necessary
       sf.getDiffs().saveSelf2Snapshot(latestSnapshotId, this, null);
     }
-    return this;
   }
 
   /**
@@ -356,6 +370,29 @@ public class INodeDirectory extends INod
     
     return sf.getChild(this, name, snapshotId);
   }
+
+  /**
+   * Search for the given INode in the children list and the deleted lists of
+   * snapshots.
+   * @return {@link Snapshot#CURRENT_STATE_ID} if the inode is in the children
+   * list; {@link Snapshot#NO_SNAPSHOT_ID} if the inode is neither in the
+   * children list nor in any snapshot; otherwise the snapshot id of the
+   * corresponding snapshot diff list.
+   */
+  public int searchChild(INode inode) {
+    INode child = getChild(inode.getLocalNameBytes(), Snapshot.CURRENT_STATE_ID);
+    if (child != inode) {
+      // inode is not in parent's children list, thus inode must be in
+      // snapshot. identify the snapshot id and later add it into the path
+      DirectoryDiffList diffs = getDiffs();
+      if (diffs == null) {
+        return Snapshot.NO_SNAPSHOT_ID;
+      }
+      return diffs.findSnapshotDeleted(inode);
+    } else {
+      return Snapshot.CURRENT_STATE_ID;
+    }
+  }
   
   /**
    * @param snapshotId
@@ -380,53 +417,6 @@ public class INodeDirectory extends INod
         : ReadOnlyList.Util.asReadOnlyList(children);
   }
 
-  /** @return the {@link INodesInPath} containing only the last inode. */
-  INodesInPath getLastINodeInPath(String path, boolean resolveLink
-      ) throws UnresolvedLinkException {
-    return INodesInPath.resolve(this, getPathComponents(path), 1, resolveLink);
-  }
-
-  /** @return the {@link INodesInPath} containing all inodes in the path. */
-  INodesInPath getINodesInPath(String path, boolean resolveLink
-      ) throws UnresolvedLinkException {
-    final byte[][] components = getPathComponents(path);
-    return INodesInPath.resolve(this, components, components.length, resolveLink);
-  }
-
-  /** @return the last inode in the path. */
-  INode getNode(String path, boolean resolveLink) 
-    throws UnresolvedLinkException {
-    return getLastINodeInPath(path, resolveLink).getINode(0);
-  }
-
-  /**
-   * @return the INode of the last component in src, or null if the last
-   * component does not exist.
-   * @throws UnresolvedLinkException if symlink can't be resolved
-   * @throws SnapshotAccessControlException if path is in RO snapshot
-   */
-  INode getINode4Write(String src, boolean resolveLink)
-      throws UnresolvedLinkException, SnapshotAccessControlException {
-    return getINodesInPath4Write(src, resolveLink).getLastINode();
-  }
-
-  /**
-   * @return the INodesInPath of the components in src
-   * @throws UnresolvedLinkException if symlink can't be resolved
-   * @throws SnapshotAccessControlException if path is in RO snapshot
-   */
-  INodesInPath getINodesInPath4Write(String src, boolean resolveLink)
-      throws UnresolvedLinkException, SnapshotAccessControlException {
-    final byte[][] components = INode.getPathComponents(src);
-    INodesInPath inodesInPath = INodesInPath.resolve(this, components,
-        components.length, resolveLink);
-    if (inodesInPath.isSnapshot()) {
-      throw new SnapshotAccessControlException(
-          "Modification on a read-only snapshot is disallowed");
-    }
-    return inodesInPath;
-  }
-
   /**
    * Given a child's name, return the index of the next child
    *
@@ -788,7 +778,9 @@ public class INodeDirectory extends INod
   public boolean metadataEquals(INodeDirectoryAttributes other) {
     return other != null
         && getQuotaCounts().equals(other.getQuotaCounts())
-        && getPermissionLong() == other.getPermissionLong();
+        && getPermissionLong() == other.getPermissionLong()
+        && getAclFeature() == other.getAclFeature()
+        && getXAttrFeature() == other.getXAttrFeature();
   }
   
   /*
@@ -846,6 +838,11 @@ public class INodeDirectory extends INod
         };
       }
     });
+
+    final DirectorySnapshottableFeature s = getDirectorySnapshottableFeature();
+    if (s != null) {
+      s.dumpTreeRecursively(this, out, prefix, snapshot);
+    }
   }
 
   /**
@@ -854,7 +851,7 @@ public class INodeDirectory extends INod
    * @param subs The subtrees.
    */
   @VisibleForTesting
-  protected static void dumpTreeRecursively(PrintWriter out,
+  public static void dumpTreeRecursively(PrintWriter out,
       StringBuilder prefix, Iterable<SnapshotAndINode> subs) {
     if (subs != null) {
       for(final Iterator<SnapshotAndINode> i = subs.iterator(); i.hasNext();) {
@@ -867,7 +864,7 @@ public class INodeDirectory extends INod
   }
 
   /** A pair of Snapshot and INode objects. */
-  protected static class SnapshotAndINode {
+  public static class SnapshotAndINode {
     public final int snapshotId;
     public final INode inode;
 

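This file carries the commit's biggest structural change: "snapshottable" stops being a subclass (INodeDirectorySnapshottable) that the directory object was swapped for via replaceSelf, and becomes a DirectorySnapshottableFeature toggled on the same INodeDirectory instance. How calling code flips the capability after this change (namenode-internal; the method calls come from the hunks above, the wrapper is illustrative):

    class SnapshottableToggleSketch {
      // Within org.apache.hadoop.hdfs.server.namenode.
      static void setSnapshottable(INodeDirectory dir, boolean allow) {
        if (allow && !dir.isSnapshottable()) {
          dir.addSnapshottableFeature();    // absorbs any DirectoryWithSnapshotFeature
        } else if (!allow && dir.isSnapshottable()) {
          dir.removeSnapshottableFeature(); // re-adds a DirectoryWithSnapshotFeature
                                            // if snapshot diffs still exist
        }
        // Either way 'dir' keeps its identity: no replaceSelf(), no INodeMap
        // update, no child-pointer rewrite in the parent.
      }
    }

The removed path-resolution helpers (getINodesInPath and friends) now live elsewhere; note that FSPermissionChecker in the first diff already resolves paths through FSDirectory instead of the root INodeDirectory.
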
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java Tue Aug 19 23:49:39 2014
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.na
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;
 
 import com.google.common.base.Preconditions;
 
@@ -35,8 +36,9 @@ public interface INodeDirectoryAttribute
   public static class SnapshotCopy extends INodeAttributes.SnapshotCopy
       implements INodeDirectoryAttributes {
     public SnapshotCopy(byte[] name, PermissionStatus permissions,
-        AclFeature aclFeature, long modificationTime) {
-      super(name, permissions, aclFeature, modificationTime, 0L);
+        AclFeature aclFeature, long modificationTime, 
+        XAttrFeature xAttrsFeature) {
+      super(name, permissions, aclFeature, modificationTime, 0L, xAttrsFeature);
     }
 
     public SnapshotCopy(INodeDirectory dir) {
@@ -51,8 +53,10 @@ public interface INodeDirectoryAttribute
     @Override
     public boolean metadataEquals(INodeDirectoryAttributes other) {
       return other != null
-          && this.getQuotaCounts().equals(other.getQuotaCounts())
-          && getPermissionLong() == other.getPermissionLong();
+          && getQuotaCounts().equals(other.getQuotaCounts())
+          && getPermissionLong() == other.getPermissionLong()
+          && getAclFeature() == other.getAclFeature()
+          && getXAttrFeature() == other.getXAttrFeature();
     }
   }
 
@@ -63,8 +67,8 @@ public interface INodeDirectoryAttribute
 
     public CopyWithQuota(byte[] name, PermissionStatus permissions,
         AclFeature aclFeature, long modificationTime, long nsQuota,
-        long dsQuota) {
-      super(name, permissions, aclFeature, modificationTime);
+        long dsQuota, XAttrFeature xAttrsFeature) {
+      super(name, permissions, aclFeature, modificationTime, xAttrsFeature);
       this.nsQuota = nsQuota;
       this.dsQuota = dsQuota;
     }

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java Tue Aug 19 23:49:39 2014
@@ -33,13 +33,13 @@ import org.apache.hadoop.hdfs.protocol.Q
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.util.LongBitFormat;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
@@ -72,37 +72,29 @@ public class INodeFile extends INodeWith
   }
 
   /** Format: [16 bits for replication][48 bits for PreferredBlockSize] */
-  static class HeaderFormat {
-    /** Number of bits for Block size */
-    static final int BLOCKBITS = 48;
-    /** Header mask 64-bit representation */
-    static final long HEADERMASK = 0xffffL << BLOCKBITS;
-    static final long MAX_BLOCK_SIZE = ~HEADERMASK; 
-    
-    static short getReplication(long header) {
-      return (short) ((header & HEADERMASK) >> BLOCKBITS);
+  static enum HeaderFormat {
+    PREFERRED_BLOCK_SIZE(null, 48, 1),
+    REPLICATION(PREFERRED_BLOCK_SIZE.BITS, 16, 1);
+
+    private final LongBitFormat BITS;
+
+    private HeaderFormat(LongBitFormat previous, int length, long min) {
+      BITS = new LongBitFormat(name(), previous, length, min);
     }
 
-    static long combineReplication(long header, short replication) {
-      if (replication <= 0) {
-         throw new IllegalArgumentException(
-             "Unexpected value for the replication: " + replication);
-      }
-      return ((long)replication << BLOCKBITS) | (header & MAX_BLOCK_SIZE);
+    static short getReplication(long header) {
+      return (short)REPLICATION.BITS.retrieve(header);
     }
-    
+
     static long getPreferredBlockSize(long header) {
-      return header & MAX_BLOCK_SIZE;
+      return PREFERRED_BLOCK_SIZE.BITS.retrieve(header);
     }
 
-    static long combinePreferredBlockSize(long header, long blockSize) {
-      if (blockSize < 0) {
-         throw new IllegalArgumentException("Block size < 0: " + blockSize);
-      } else if (blockSize > MAX_BLOCK_SIZE) {
-        throw new IllegalArgumentException("Block size = " + blockSize
-            + " > MAX_BLOCK_SIZE = " + MAX_BLOCK_SIZE);
-     }
-      return (header & HEADERMASK) | (blockSize & MAX_BLOCK_SIZE);
+    static long toLong(long preferredBlockSize, short replication) {
+      long h = 0;
+      h = PREFERRED_BLOCK_SIZE.BITS.combine(preferredBlockSize, h);
+      h = REPLICATION.BITS.combine(replication, h);
+      return h;
     }
   }
 
@@ -114,8 +106,7 @@ public class INodeFile extends INodeWith
       long atime, BlockInfo[] blklist, short replication,
       long preferredBlockSize) {
     super(id, name, permissions, mtime, atime);
-    header = HeaderFormat.combineReplication(header, replication);
-    header = HeaderFormat.combinePreferredBlockSize(header, preferredBlockSize);
+    header = HeaderFormat.toLong(preferredBlockSize, replication);
     this.blocks = blklist;
   }
   
@@ -144,6 +135,15 @@ public class INodeFile extends INodeWith
     return this;
   }
 
+  @Override
+  public boolean metadataEquals(INodeFileAttributes other) {
+    return other != null
+        && getHeaderLong() == other.getHeaderLong()
+        && getPermissionLong() == other.getPermissionLong()
+        && getAclFeature() == other.getAclFeature()
+        && getXAttrFeature() == other.getXAttrFeature();
+  }
+
   /* Start of Under-Construction Feature */
 
   /**
@@ -161,12 +161,11 @@ public class INodeFile extends INodeWith
   }
 
   /** Convert this file to an {@link INodeFileUnderConstruction}. */
-  INodeFile toUnderConstruction(String clientName, String clientMachine,
-      DatanodeDescriptor clientNode) {
+  INodeFile toUnderConstruction(String clientName, String clientMachine) {
     Preconditions.checkState(!isUnderConstruction(),
         "file is already under construction");
     FileUnderConstructionFeature uc = new FileUnderConstructionFeature(
-        clientName, clientMachine, clientNode);
+        clientName, clientMachine);
     addFeature(uc);
     return this;
   }
@@ -285,7 +284,7 @@ public class INodeFile extends INodeWith
   }
 
   @Override
-  public INodeFile recordModification(final int latestSnapshotId) 
+  public void recordModification(final int latestSnapshotId)
       throws QuotaExceededException {
     if (isInLatestSnapshot(latestSnapshotId)
         && !shouldRecordInSrcSnapshot(latestSnapshotId)) {
@@ -297,7 +296,6 @@ public class INodeFile extends INodeWith
       // record self in the diff list if necessary
       sf.getDiffs().saveSelf2Snapshot(latestSnapshotId, this, null);
     }
-    return this;
   }
   
   public FileDiffList getDiffs() {
@@ -340,16 +338,15 @@ public class INodeFile extends INodeWith
 
   /** Set the replication factor of this file. */
   public final void setFileReplication(short replication) {
-    header = HeaderFormat.combineReplication(header, replication);
+    header = HeaderFormat.REPLICATION.BITS.combine(replication, header);
   }
 
   /** Set the replication factor of this file. */
   public final INodeFile setFileReplication(short replication,
-      int latestSnapshotId, final INodeMap inodeMap)
-      throws QuotaExceededException {
-    final INodeFile nodeToUpdate = recordModification(latestSnapshotId);
-    nodeToUpdate.setFileReplication(replication);
-    return nodeToUpdate;
+      int latestSnapshotId) throws QuotaExceededException {
+    recordModification(latestSnapshotId);
+    setFileReplication(replication);
+    return this;
   }
 
   /** @return preferred block size (in bytes) of the file. */
@@ -435,17 +432,19 @@ public class INodeFile extends INodeWith
           removedINodes, countDiffChange);
     }
     Quota.Counts counts = Quota.Counts.newInstance();
-    if (snapshot == CURRENT_STATE_ID && priorSnapshotId == NO_SNAPSHOT_ID) {
-      // this only happens when deleting the current file and the file is not
-      // in any snapshot
-      computeQuotaUsage(counts, false);
-      destroyAndCollectBlocks(collectedBlocks, removedINodes);
-    } else if (snapshot == CURRENT_STATE_ID && priorSnapshotId != NO_SNAPSHOT_ID) {
-      // when deleting the current file and the file is in snapshot, we should
-      // clean the 0-sized block if the file is UC
-      FileUnderConstructionFeature uc = getFileUnderConstructionFeature();
-      if (uc != null) {
-        uc.cleanZeroSizeBlock(this, collectedBlocks);
+    if (snapshot == CURRENT_STATE_ID) {
+      if (priorSnapshotId == NO_SNAPSHOT_ID) {
+        // this only happens when deleting the current file and the file is not
+        // in any snapshot
+        computeQuotaUsage(counts, false);
+        destroyAndCollectBlocks(collectedBlocks, removedINodes);
+      } else {
+        // when deleting the current file and the file is in snapshot, we should
+        // clean the 0-sized block if the file is UC
+        FileUnderConstructionFeature uc = getFileUnderConstructionFeature();
+        if (uc != null) {
+          uc.cleanZeroSizeBlock(this, collectedBlocks);
+        }
       }
     }
     return counts;

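The HeaderFormat rewrite above replaces hand-rolled masks and range checks with chained LongBitFormat fields; the layout is unchanged (low 48 bits preferred block size, high 16 bits replication, per the class comment), and the old explicit validation moves into LongBitFormat's per-field minimum (both fields declare min = 1). The same packing written out with plain shifts, as a self-contained check of the layout (illustrative; not the LongBitFormat API):

    class HeaderSketch {
      static final int BLOCK_SIZE_BITS = 48;
      static final long MAX_BLOCK_SIZE = (1L << BLOCK_SIZE_BITS) - 1;

      // [16 bits for replication][48 bits for PreferredBlockSize]
      static long toLong(long preferredBlockSize, short replication) {
        return ((long) replication << BLOCK_SIZE_BITS)
            | (preferredBlockSize & MAX_BLOCK_SIZE);
      }

      static short getReplication(long header) {
        return (short) (header >>> BLOCK_SIZE_BITS);
      }

      static long getPreferredBlockSize(long header) {
        return header & MAX_BLOCK_SIZE;
      }

      public static void main(String[] args) {
        long h = toLong(128L * 1024 * 1024, (short) 3);
        System.out.println(getReplication(h));        // 3
        System.out.println(getPreferredBlockSize(h)); // 134217728
      }
    }
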
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java Tue Aug 19 23:49:39 2014
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.na
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile.HeaderFormat;
+import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;
 
 /**
  * The attributes of a file.
@@ -35,6 +36,8 @@ public interface INodeFileAttributes ext
   /** @return the header as a long. */
   public long getHeaderLong();
 
+  public boolean metadataEquals(INodeFileAttributes other);
+
   /** A copy of the inode file attributes */
   public static class SnapshotCopy extends INodeAttributes.SnapshotCopy
       implements INodeFileAttributes {
@@ -42,11 +45,10 @@ public interface INodeFileAttributes ext
 
     public SnapshotCopy(byte[] name, PermissionStatus permissions,
         AclFeature aclFeature, long modificationTime, long accessTime,
-        short replication, long preferredBlockSize) {
-      super(name, permissions, aclFeature, modificationTime, accessTime);
-
-      final long h = HeaderFormat.combineReplication(0L, replication);
-      header = HeaderFormat.combinePreferredBlockSize(h, preferredBlockSize);
+        short replication, long preferredBlockSize, XAttrFeature xAttrsFeature) {
+      super(name, permissions, aclFeature, modificationTime, accessTime, 
+          xAttrsFeature);
+      header = HeaderFormat.toLong(preferredBlockSize, replication);
     }
 
     public SnapshotCopy(INodeFile file) {
@@ -68,5 +70,14 @@ public interface INodeFileAttributes ext
     public long getHeaderLong() {
       return header;
     }
+
+    @Override
+    public boolean metadataEquals(INodeFileAttributes other) {
+      return other != null
+          && getHeaderLong() == other.getHeaderLong()
+          && getPermissionLong() == other.getPermissionLong()
+          && getAclFeature() == other.getAclFeature()
+          && getXAttrFeature() == other.getXAttrFeature();
+    }
   }
 }

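Note that every new metadataEquals (INodeDirectory, INodeFile, and the SnapshotCopy above) compares the ACL and xattr features with ==, i.e. reference equality. That is sufficient because a snapshot copy stores the same feature object as the live inode (see the SnapshotCopy constructor earlier in this commit), and a metadata change installs a fresh feature instance, so an unequal reference is exactly the "metadata changed" signal. The invariant in isolation (generic Java, not the HDFS types):

    final class Feature {}                // stands in for AclFeature/XAttrFeature

    class RefEqualitySketch {
      Feature live = new Feature();       // feature currently on the inode
      final Feature snapshotCopy = live;  // a snapshot copy shares the reference

      boolean metadataEquals() {
        return snapshotCopy == live;      // true until a modification...
      }

      void modify() {
        live = new Feature();             // ...installs a fresh instance
      }

      public static void main(String[] args) {
        RefEqualitySketch s = new RefEqualitySketch();
        System.out.println(s.metadataEquals()); // true
        s.modify();
        System.out.println(s.metadataEquals()); // false
      }
    }
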
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java Tue Aug 19 23:49:39 2014
@@ -93,9 +93,8 @@ public class INodeMap {
         "", "", new FsPermission((short) 0)), 0, 0) {
       
       @Override
-      INode recordModification(int latestSnapshotId)
+      void recordModification(int latestSnapshotId)
           throws QuotaExceededException {
-        return null;
       }
       
       @Override

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java Tue Aug 19 23:49:39 2014
@@ -28,6 +28,7 @@ import org.apache.hadoop.fs.permission.P
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;
 
 import com.google.common.base.Preconditions;
 
@@ -39,7 +40,7 @@ import com.google.common.base.Preconditi
  * snapshots and it is renamed/moved to other locations.
  * 
  * For example,
- * (1) Support we have /abc/foo, say the inode of foo is inode(id=1000,name=foo)
+ * (1) Suppose we have /abc/foo, say the inode of foo is inode(id=1000,name=foo)
  * (2) create snapshot s0 for /abc
  * (3) mv /abc/foo /xyz/bar, i.e. inode(id=1000,name=...) is renamed from "foo"
  *     to "bar" and its parent becomes /xyz.
@@ -228,6 +229,21 @@ public abstract class INodeReference ext
   final void removeAclFeature() {
     referred.removeAclFeature();
   }
+  
+  @Override
+  final XAttrFeature getXAttrFeature(int snapshotId) {
+    return referred.getXAttrFeature(snapshotId);
+  }
+  
+  @Override
+  final void addXAttrFeature(XAttrFeature xAttrFeature) {
+    referred.addXAttrFeature(xAttrFeature);
+  }
+  
+  @Override
+  final void removeXAttrFeature() {
+    referred.removeXAttrFeature();
+  }
 
   @Override
   public final short getFsPermissionShort() {
@@ -271,11 +287,9 @@ public abstract class INodeReference ext
   }
 
   @Override
-  final INode recordModification(int latestSnapshotId)
+  final void recordModification(int latestSnapshotId)
       throws QuotaExceededException {
     referred.recordModification(latestSnapshotId);
-    // reference is never replaced 
-    return this;
   }
 
   @Override // used by WithCount
@@ -418,6 +432,30 @@ public abstract class INodeReference ext
         return withNameList.get(-i - 2);
       }
     }
+
+    /**
+     * @return the WithName/DstReference node contained in the given snapshot.
+     */
+    public INodeReference getParentRef(int snapshotId) {
+      int start = 0;
+      int end = withNameList.size() - 1;
+      while (start < end) {
+        int mid = start + (end - start) / 2;
+        int sid = withNameList.get(mid).lastSnapshotId; 
+        if (sid == snapshotId) {
+          return withNameList.get(mid);
+        } else if (sid < snapshotId) {
+          start = mid + 1;
+        } else {
+          end = mid;
+        }
+      }
+      if (withNameList.get(start).lastSnapshotId >= snapshotId) {
+        return withNameList.get(start);
+      } else {
+        return this.getParentReference();
+      }
+    }
   }
   
   /** A reference with a fixed name. */

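The new WithCount#getParentRef above is a lower-bound binary search over withNameList, which this class keeps sorted by lastSnapshotId: it returns the first WithName node whose lastSnapshotId is at least the requested snapshot id, and falls back to the plain parent reference when even the largest id is too small. The same search in isolation:

    import java.util.Arrays;
    import java.util.List;

    class LowerBoundSketch {
      /**
       * Index of the first element >= key, or -1 when every element is
       * smaller (mirrors the getParentRef loop; like the original, it
       * assumes a non-empty list).
       */
      static int lowerBound(List<Integer> sorted, int key) {
        int start = 0, end = sorted.size() - 1;
        while (start < end) {
          int mid = start + (end - start) / 2;
          int v = sorted.get(mid);
          if (v == key) {
            return mid;
          } else if (v < key) {
            start = mid + 1;
          } else {
            end = mid;
          }
        }
        return sorted.get(start) >= key ? start : -1;
      }

      public static void main(String[] args) {
        List<Integer> ids = Arrays.asList(2, 5, 9);
        System.out.println(lowerBound(ids, 5));  // 1 (exact match)
        System.out.println(lowerBound(ids, 6));  // 2 (first id >= 6 is 9)
        System.out.println(lowerBound(ids, 10)); // -1 (use the parent reference)
      }
    }
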
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java Tue Aug 19 23:49:39 2014
@@ -25,6 +25,8 @@ import org.apache.hadoop.fs.permission.P
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.server.namenode.AclFeature;
+import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;
 
 /**
  * An {@link INode} representing a symbolic link.
@@ -45,12 +47,11 @@ public class INodeSymlink extends INodeW
   }
 
   @Override
-  INode recordModification(int latestSnapshotId) throws QuotaExceededException {
+  void recordModification(int latestSnapshotId) throws QuotaExceededException {
     if (isInLatestSnapshot(latestSnapshotId)) {
       INodeDirectory parent = getParent();
       parent.saveChild2Snapshot(this, latestSnapshotId, new INodeSymlink(this));
     }
-    return this;
   }
 
   /** @return true unconditionally. */
@@ -110,4 +111,38 @@ public class INodeSymlink extends INodeW
     super.dumpTreeRecursively(out, prefix, snapshot);
     out.println();
   }
+
+  /**
+   * getAclFeature is not overridden because it is needed for resolving
+   * symlinks.
+  @Override
+  final AclFeature getAclFeature(int snapshotId) {
+    throw new UnsupportedOperationException("ACLs are not supported on symlinks");
+  }
+  */
+
+  @Override
+  public void removeAclFeature() {
+    throw new UnsupportedOperationException("ACLs are not supported on symlinks");
+  }
+
+  @Override
+  public void addAclFeature(AclFeature f) {
+    throw new UnsupportedOperationException("ACLs are not supported on symlinks");
+  }
+
+  @Override
+  final XAttrFeature getXAttrFeature(int snapshotId) {
+    throw new UnsupportedOperationException("XAttrs are not supported on symlinks");
+  }
+  
+  @Override
+  public void removeXAttrFeature() {
+    throw new UnsupportedOperationException("XAttrs are not supported on symlinks");
+  }
+  
+  @Override
+  public void addXAttrFeature(XAttrFeature f) {
+    throw new UnsupportedOperationException("XAttrs are not supported on symlinks");
+  }
 }

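Finally, the INodeSymlink overrides above make ACL and xattr mutations on a symlink fail fast with UnsupportedOperationException rather than silently accept state that could never be used; only getAclFeature is left unoverridden, and the commented-out override is kept in the source to document why (path resolution still calls it). The shape of the pattern:

    class SymlinkFeatureSketch {
      // Reads needed by resolution stay inherited; unsupported writes throw.
      public void addXAttrFeature(Object f) {
        throw new UnsupportedOperationException(
            "XAttrs are not supported on symlinks");
      }

      public void removeXAttrFeature() {
        throw new UnsupportedOperationException(
            "XAttrs are not supported on symlinks");
      }
    }
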
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java Tue Aug 19 23:49:39 2014
@@ -21,8 +21,8 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
-import org.apache.hadoop.hdfs.server.namenode.INode.Feature;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.util.LongBitFormat;
 import org.apache.hadoop.util.LightWeightGSet.LinkedElement;
 
 import com.google.common.base.Preconditions;
@@ -35,26 +35,28 @@ import com.google.common.base.Preconditi
 public abstract class INodeWithAdditionalFields extends INode
     implements LinkedElement {
   static enum PermissionStatusFormat {
-    MODE(0, 16),
-    GROUP(MODE.OFFSET + MODE.LENGTH, 25),
-    USER(GROUP.OFFSET + GROUP.LENGTH, 23);
+    MODE(null, 16),
+    GROUP(MODE.BITS, 25),
+    USER(GROUP.BITS, 23);
 
-    final int OFFSET;
-    final int LENGTH; //bit length
-    final long MASK;
+    final LongBitFormat BITS;
 
-    PermissionStatusFormat(int offset, int length) {
-      OFFSET = offset;
-      LENGTH = length;
-      MASK = ((-1L) >>> (64 - LENGTH)) << OFFSET;
+    private PermissionStatusFormat(LongBitFormat previous, int length) {
+      BITS = new LongBitFormat(name(), previous, length, 0);
     }
 
-    long retrieve(long record) {
-      return (record & MASK) >>> OFFSET;
+    static String getUser(long permission) {
+      final int n = (int)USER.BITS.retrieve(permission);
+      return SerialNumberManager.INSTANCE.getUser(n);
     }
 
-    long combine(long bits, long record) {
-      return (record & ~MASK) | (bits << OFFSET);
+    static String getGroup(long permission) {
+      final int n = (int)GROUP.BITS.retrieve(permission);
+      return SerialNumberManager.INSTANCE.getGroup(n);
+    }
+    
+    static short getMode(long permission) {
+      return (short)MODE.BITS.retrieve(permission);
     }
 
     /** Encode the {@link PermissionStatus} to a long. */
@@ -62,12 +64,12 @@ public abstract class INodeWithAdditiona
       long permission = 0L;
       final int user = SerialNumberManager.INSTANCE.getUserSerialNumber(
           ps.getUserName());
-      permission = USER.combine(user, permission);
+      permission = USER.BITS.combine(user, permission);
       final int group = SerialNumberManager.INSTANCE.getGroupSerialNumber(
           ps.getGroupName());
-      permission = GROUP.combine(group, permission);
+      permission = GROUP.BITS.combine(group, permission);
       final int mode = ps.getPermission().toShort();
-      permission = MODE.combine(mode, permission);
+      permission = MODE.BITS.combine(mode, permission);
       return permission;
     }
   }
@@ -161,7 +163,7 @@ public abstract class INodeWithAdditiona
   }
 
   private final void updatePermissionStatus(PermissionStatusFormat f, long n) {
-    this.permission = f.combine(n, permission);
+    this.permission = f.BITS.combine(n, permission);
   }
 
   @Override
@@ -169,9 +171,7 @@ public abstract class INodeWithAdditiona
     if (snapshotId != Snapshot.CURRENT_STATE_ID) {
       return getSnapshotINode(snapshotId).getUserName();
     }
-
-    int n = (int)PermissionStatusFormat.USER.retrieve(permission);
-    return SerialNumberManager.INSTANCE.getUser(n);
+    return PermissionStatusFormat.getUser(permission);
   }
 
   @Override
@@ -185,9 +185,7 @@ public abstract class INodeWithAdditiona
     if (snapshotId != Snapshot.CURRENT_STATE_ID) {
       return getSnapshotINode(snapshotId).getGroupName();
     }
-
-    int n = (int)PermissionStatusFormat.GROUP.retrieve(permission);
-    return SerialNumberManager.INSTANCE.getGroup(n);
+    return PermissionStatusFormat.getGroup(permission);
   }
 
   @Override
@@ -207,7 +205,7 @@ public abstract class INodeWithAdditiona
 
   @Override
   public final short getFsPermissionShort() {
-    return (short)PermissionStatusFormat.MODE.retrieve(permission);
+    return PermissionStatusFormat.getMode(permission);
   }
   @Override
   void setPermission(FsPermission permission) {
@@ -317,8 +315,9 @@ public abstract class INodeWithAdditiona
   }
 
   protected <T extends Feature> T getFeature(Class<? extends Feature> clazz) {
+    Preconditions.checkArgument(clazz != null);
     for (Feature f : features) {
-      if (f.getClass() == clazz) {
+      if (clazz.isAssignableFrom(f.getClass())) {
         @SuppressWarnings("unchecked")
         T ret = (T) f;
         return ret;
@@ -340,6 +339,30 @@ public abstract class INodeWithAdditiona
 
     addFeature(f);
   }
+  
+  @Override
+  XAttrFeature getXAttrFeature(int snapshotId) {
+    if (snapshotId != Snapshot.CURRENT_STATE_ID) {
+      return getSnapshotINode(snapshotId).getXAttrFeature();
+    }
+
+    return getFeature(XAttrFeature.class);
+  }
+  
+  @Override
+  public void removeXAttrFeature() {
+    XAttrFeature f = getXAttrFeature();
+    Preconditions.checkNotNull(f);
+    removeFeature(f);
+  }
+  
+  @Override
+  public void addXAttrFeature(XAttrFeature f) {
+    XAttrFeature f1 = getXAttrFeature();
+    Preconditions.checkState(f1 == null, "Duplicated XAttrFeature");
+    
+    addFeature(f);
+  }
 
   public final Feature[] getFeatures() {
     return features;

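The PermissionStatusFormat change above swaps hand-rolled OFFSET/LENGTH/MASK arithmetic for LongBitFormat, but the layout is unchanged: mode in the low 16 bits, a 25-bit group serial number above it, and a 23-bit user serial number in the top 23 bits (16 + 25 + 23 = 64). The demo below reproduces the retired mask/shift math to show what LongBitFormat now encapsulates; it is a sketch, not the LongBitFormat API:

    public class BitFormatDemo {
      static long combine(long record, long bits, int offset, int length) {
        long mask = ((-1L) >>> (64 - length)) << offset;
        return (record & ~mask) | (bits << offset);
      }

      static long retrieve(long record, int offset, int length) {
        long mask = ((-1L) >>> (64 - length)) << offset;
        return (record & mask) >>> offset;
      }

      public static void main(String[] args) {
        long permission = 0L;
        permission = combine(permission, 0755, 0, 16);  // MODE: 16 bits at 0
        permission = combine(permission, 42, 16, 25);   // GROUP: 25 bits at 16
        permission = combine(permission, 7, 41, 23);    // USER: 23 bits at 41
        System.out.println(Long.toOctalString(retrieve(permission, 0, 16))); // 755
        System.out.println(retrieve(permission, 16, 25));                    // 42
        System.out.println(retrieve(permission, 41, 23));                    // 7
      }
    }
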
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java Tue Aug 19 23:49:39 2014
@@ -27,7 +27,6 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 
 import com.google.common.base.Preconditions;
@@ -46,6 +45,28 @@ public class INodesInPath {
         : Arrays.equals(HdfsConstants.DOT_SNAPSHOT_DIR_BYTES, pathComponent);
   }
 
+  static INodesInPath fromINode(INode inode) {
+    int depth = 0, index;
+    INode tmp = inode;
+    while (tmp != null) {
+      depth++;
+      tmp = tmp.getParent();
+    }
+    final byte[][] path = new byte[depth][];
+    final INode[] inodes = new INode[depth];
+    final INodesInPath iip = new INodesInPath(path, depth);
+    tmp = inode;
+    index = depth;
+    while (tmp != null) {
+      index--;
+      path[index] = tmp.getKey();
+      inodes[index] = tmp;
+      tmp = tmp.getParent();
+    }
+    iip.setINodes(inodes);
+    return iip;
+  }
+
   /**
    * Given some components, create a path name.
    * @param components The path components
@@ -186,8 +207,7 @@ public class INodesInPath {
       final byte[] childName = components[count + 1];
       
       // check if the next byte[] in components is for ".snapshot"
-      if (isDotSnapshotDir(childName)
-          && isDir && dir instanceof INodeDirectorySnapshottable) {
+      if (isDotSnapshotDir(childName) && isDir && dir.isSnapshottable()) {
         // skip the ".snapshot" in components
         count++;
         index++;
@@ -200,8 +220,7 @@ public class INodesInPath {
           break;
         }
         // Resolve snapshot root
-        final Snapshot s = ((INodeDirectorySnapshottable)dir).getSnapshot(
-            components[count + 1]);
+        final Snapshot s = dir.getSnapshot(components[count + 1]);
         if (s == null) {
           //snapshot not found
           curNode = null;
@@ -341,6 +360,11 @@ public class INodesInPath {
   private void addNode(INode node) {
     inodes[numNonNull++] = node;
   }
+
+  private void setINodes(INode inodes[]) {
+    this.inodes = inodes;
+    this.numNonNull = this.inodes.length;
+  }
   
   void setINode(int i, INode inode) {
     inodes[i >= 0? i: inodes.length + i] = inode;
@@ -417,4 +441,4 @@ public class INodesInPath {
           + ", this=" + toString(false));
     }
   }
-}
\ No newline at end of file
+}

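The new fromINode factory builds an INodesInPath bottom-up with two passes over the parent chain: the first pass counts the depth, the second fills the path and inode arrays from the tail so that the root lands at index 0. A standalone sketch of the walk with illustrative types (Node, pathFromNode):

    public class ParentWalkDemo {
      static class Node {
        final String name;
        final Node parent;
        Node(String name, Node parent) { this.name = name; this.parent = parent; }
      }

      static String[] pathFromNode(Node node) {
        int depth = 0;
        for (Node t = node; t != null; t = t.parent) {
          depth++;                        // pass 1: measure the chain
        }
        String[] path = new String[depth];
        int index = depth;
        for (Node t = node; t != null; t = t.parent) {
          path[--index] = t.name;         // pass 2: fill from the tail
        }
        return path;                      // root at index 0
      }

      public static void main(String[] args) {
        Node root = new Node("", null);
        Node dir = new Node("user", root);
        Node file = new Node("data.txt", dir);
        System.out.println(String.join("/", pathFromNode(file)));  // /user/data.txt
      }
    }
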
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java Tue Aug 19 23:49:39 2014
@@ -82,7 +82,7 @@ public class ImageServlet extends HttpSe
   private static final String IMAGE_FILE_TYPE = "imageFile";
 
   private static final Set<Long> currentlyDownloadingCheckpoints =
-    Collections.<Long>synchronizedSet(new HashSet<Long>());
+    Collections.synchronizedSet(new HashSet<Long>());
   
   @Override
   public void doGet(final HttpServletRequest request,

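The ImageServlet change drops an explicit type witness that the compiler can infer on its own. A trivial demonstration that the shorter form type-checks identically:

    import java.util.Collections;
    import java.util.HashSet;
    import java.util.Set;

    public class InferenceDemo {
      public static void main(String[] args) {
        // Long is inferred from the HashSet<Long> argument; the explicit
        // Collections.<Long>synchronizedSet(...) witness adds nothing.
        Set<Long> ids = Collections.synchronizedSet(new HashSet<Long>());
        ids.add(1619012L);
        System.out.println(ids);
      }
    }
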
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java Tue Aug 19 23:49:39 2014
@@ -402,7 +402,6 @@ public class LeaseManager {
   /**
    * Get the list of inodes corresponding to valid leases.
    * @return list of inodes
-   * @throws UnresolvedLinkException
    */
   Map<String, INodeFile> getINodesUnderConstruction() {
     Map<String, INodeFile> inodes = new TreeMap<String, INodeFile>();

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LogsPurgeable.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LogsPurgeable.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LogsPurgeable.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LogsPurgeable.java Tue Aug 19 23:49:39 2014
@@ -42,7 +42,6 @@ interface LogsPurgeable {
    * 
    * @param fromTxId the first transaction id we want to read
    * @param inProgressOk whether or not in-progress streams should be returned
-   * @return a list of streams
    * @throws IOException if the underlying storage has an error or is otherwise
    * inaccessible
    */

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java Tue Aug 19 23:49:39 2014
@@ -77,7 +77,8 @@ public class NNStorage extends Storage i
     IMAGE_ROLLBACK("fsimage_rollback"),
     EDITS_NEW ("edits.new"), // from "old" pre-HDFS-1073 format
     EDITS_INPROGRESS ("edits_inprogress"),
-    EDITS_TMP ("edits_tmp");
+    EDITS_TMP ("edits_tmp"),
+    IMAGE_LEGACY_OIV ("fsimage_legacy_oiv");  // For pre-PB format
 
     private String fileName = null;
     private NameNodeFile(String name) { this.fileName = name; }
@@ -425,8 +426,7 @@ public class NNStorage extends Storage i
   
   /**
    * Write last checkpoint time into a separate file.
-   *
-   * @param sd
+   * @param sd storage directory
    * @throws IOException
    */
   void writeTransactionIdFile(StorageDirectory sd, long txid) throws IOException {
@@ -694,6 +694,10 @@ public class NNStorage extends Storage i
     return getNameNodeFileName(NameNodeFile.IMAGE_ROLLBACK, txid);
   }
 
+  public static String getLegacyOIVImageFileName(long txid) {
+    return getNameNodeFileName(NameNodeFile.IMAGE_LEGACY_OIV, txid);
+  }
+
   private static String getNameNodeFileName(NameNodeFile nnf, long txid) {
     return String.format("%s_%019d", nnf.getName(), txid);
   }
@@ -832,7 +836,7 @@ public class NNStorage extends Storage i
    */
   void processStartupOptionsForUpgrade(StartupOption startOpt, int layoutVersion)
       throws IOException {
-    if (startOpt == StartupOption.UPGRADE) {
+    if (startOpt == StartupOption.UPGRADE || startOpt == StartupOption.UPGRADEONLY) {
       // If upgrade from a release that does not support federation,
       // if clusterId is provided in the startupOptions use it.
       // Else generate a new cluster ID      

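getLegacyOIVImageFileName reuses the existing getNameNodeFileName scheme: the file-type prefix followed by the transaction id zero-padded to 19 digits, wide enough for any non-negative long, so lexicographic file-name order matches numeric txid order. A standalone sketch:

    public class ImageNameDemo {
      static String imageFileName(String prefix, long txid) {
        // Same pattern as getNameNodeFileName: prefix + txid padded to 19 digits.
        return String.format("%s_%019d", prefix, txid);
      }

      public static void main(String[] args) {
        System.out.println(imageFileName("fsimage_legacy_oiv", 12345L));
        // fsimage_legacy_oiv_0000000000000012345
      }
    }
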
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java Tue Aug 19 23:49:39 2014
@@ -18,11 +18,13 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.File;
+import java.io.FilenameFilter;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.EnumSet;
+import java.util.Iterator;
 import java.util.List;
 import java.util.TreeSet;
 
@@ -233,4 +235,58 @@ public class NNStorageRetentionManager {
       }      
     }
   }
+
+  /**
+   * Delete old OIV fsimages. Since the target dir is not a full-blown
+   * storage directory, we simply list and keep the latest ones. For the
+   * same reason, no storage inspector is used.
+   */
+  void purgeOldLegacyOIVImages(String dir, long txid) {
+    File oivImageDir = new File(dir);
+    final String oivImagePrefix = NameNodeFile.IMAGE_LEGACY_OIV.getName();
+    String filesInStorage[];
+
+    // Get the listing
+    filesInStorage = oivImageDir.list(new FilenameFilter() {
+      @Override
+      public boolean accept(File dir, String name) {
+        return name.matches(oivImagePrefix + "_(\\d+)");
+      }
+    });
+
+    // Check whether there is any work to do.
+    if (filesInStorage.length <= numCheckpointsToRetain) {
+      return;
+    }
+
+    // Create a sorted list of txids from the file names.
+    TreeSet<Long> sortedTxIds = new TreeSet<Long>();
+    for (String fName : filesInStorage) {
+      // Extract the transaction id from the file name.
+      long fTxId;
+      try {
+        fTxId = Long.parseLong(fName.substring(oivImagePrefix.length() + 1));
+      } catch (NumberFormatException nfe) {
+        // This should not happen since we have already filtered it.
+        // Log and continue.
+        LOG.warn("Invalid file name. Skipping " + fName);
+        continue;
+      }
+      sortedTxIds.add(Long.valueOf(fTxId));
+    }
+
+    int numFilesToDelete = sortedTxIds.size() - numCheckpointsToRetain;
+    Iterator<Long> iter = sortedTxIds.iterator();
+    while (numFilesToDelete > 0 && iter.hasNext()) {
+      long txIdVal = iter.next().longValue();
+      String fileName = NNStorage.getLegacyOIVImageFileName(txIdVal);
+      LOG.info("Deleting " + fileName);
+      File fileToDelete = new File(oivImageDir, fileName);
+      if (!fileToDelete.delete()) {
+        // deletion failed.
+        LOG.warn("Failed to delete image file: " + fileToDelete);
+      }
+      numFilesToDelete--;
+    }
+  }
 }

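purgeOldLegacyOIVImages boils down to a simple retention rule: parse a txid out of each matching file name, collect the txids in an ascending TreeSet, and delete from the low (oldest) end until only numCheckpointsToRetain remain. A compact sketch of that selection step, with deletion replaced by printing:

    import java.util.Arrays;
    import java.util.Iterator;
    import java.util.TreeSet;

    public class PurgeDemo {
      public static void main(String[] args) {
        TreeSet<Long> sortedTxIds =
            new TreeSet<Long>(Arrays.asList(10L, 40L, 20L, 30L));
        int numCheckpointsToRetain = 2;

        int numFilesToDelete = sortedTxIds.size() - numCheckpointsToRetain;
        Iterator<Long> iter = sortedTxIds.iterator();  // ascending: oldest first
        while (numFilesToDelete > 0 && iter.hasNext()) {
          System.out.println("would delete txid " + iter.next());  // 10, then 20
          numFilesToDelete--;
        }
      }
    }
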
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Tue Aug 19 23:49:39 2014
@@ -175,6 +175,8 @@ public class NameNode implements NameNod
     DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY,
     DFS_NAMENODE_HTTP_ADDRESS_KEY,
     DFS_NAMENODE_HTTPS_ADDRESS_KEY,
+    DFS_NAMENODE_HTTP_BIND_HOST_KEY,
+    DFS_NAMENODE_HTTPS_BIND_HOST_KEY,
     DFS_NAMENODE_KEYTAB_FILE_KEY,
     DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
     DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_KEY,
@@ -199,25 +201,31 @@ public class NameNode implements NameNod
   };
   
   private static final String USAGE = "Usage: java NameNode ["
-      + StartupOption.BACKUP.getName() + "] | ["
-      + StartupOption.CHECKPOINT.getName() + "] | ["
+      + StartupOption.BACKUP.getName() + "] | \n\t["
+      + StartupOption.CHECKPOINT.getName() + "] | \n\t["
       + StartupOption.FORMAT.getName() + " ["
       + StartupOption.CLUSTERID.getName() + " cid ] ["
       + StartupOption.FORCE.getName() + "] ["
-      + StartupOption.NONINTERACTIVE.getName() + "] ] | ["
+      + StartupOption.NONINTERACTIVE.getName() + "] ] | \n\t["
       + StartupOption.UPGRADE.getName() + 
         " [" + StartupOption.CLUSTERID.getName() + " cid]" +
-        " [" + StartupOption.RENAMERESERVED.getName() + "<k-v pairs>] ] | ["
-      + StartupOption.ROLLBACK.getName() + "] | ["
+        " [" + StartupOption.RENAMERESERVED.getName() + "<k-v pairs>] ] | \n\t["
+      + StartupOption.UPGRADEONLY.getName() + 
+        " [" + StartupOption.CLUSTERID.getName() + " cid]" +
+        " [" + StartupOption.RENAMERESERVED.getName() + "<k-v pairs>] ] | \n\t["
+      + StartupOption.ROLLBACK.getName() + "] | \n\t["
       + StartupOption.ROLLINGUPGRADE.getName() + " <"
       + RollingUpgradeStartupOption.DOWNGRADE.name().toLowerCase() + "|"
-      + RollingUpgradeStartupOption.ROLLBACK.name().toLowerCase() + "> ] | ["
-      + StartupOption.FINALIZE.getName() + "] | ["
-      + StartupOption.IMPORT.getName() + "] | ["
-      + StartupOption.INITIALIZESHAREDEDITS.getName() + "] | ["
-      + StartupOption.BOOTSTRAPSTANDBY.getName() + "] | ["
-      + StartupOption.RECOVER.getName() + " [ " + StartupOption.FORCE.getName()
-      + " ] ]";
+      + RollingUpgradeStartupOption.ROLLBACK.name().toLowerCase() + "> ] | \n\t["
+      + StartupOption.FINALIZE.getName() + "] | \n\t["
+      + StartupOption.IMPORT.getName() + "] | \n\t["
+      + StartupOption.INITIALIZESHAREDEDITS.getName() + "] | \n\t["
+      + StartupOption.BOOTSTRAPSTANDBY.getName() + "] | \n\t["
+      + StartupOption.RECOVER.getName() + " [ "
+      + StartupOption.FORCE.getName() + "] ] | \n\t["
+      + StartupOption.METADATAVERSION.getName() + " ] "
+      + " ]";
+
   
   public long getProtocolVersion(String protocol, 
                                  long clientVersion) throws IOException {
@@ -271,10 +279,11 @@ public class NameNode implements NameNod
   private JvmPauseMonitor pauseMonitor;
   private ObjectName nameNodeStatusBeanName;
   /**
-   * The service name of the delegation token issued by the namenode. It is
-   * the name service id in HA mode, or the rpc address in non-HA mode.
+   * The namenode address that clients will use to access this namenode
+   * or the name service. For HA configurations using logical URI, it
+   * will be the logical address.
    */
-  private String tokenServiceName;
+  private String clientNamenodeAddress;
   
   /** Format a new filesystem.  Destroys any filesystem that may already
    * exist at this location.  **/
@@ -317,7 +326,54 @@ public class NameNode implements NameNod
    *
    * @return The name service id in HA-mode, or the rpc address in non-HA mode
    */
-  public String getTokenServiceName() { return tokenServiceName; }
+  public String getTokenServiceName() {
+    return getClientNamenodeAddress();
+  }
+
+  /**
+   * Set the namenode address that will be used by clients to access this
+   * namenode or name service. This needs to be called before the config
+   * is overridden.
+   */
+  public void setClientNamenodeAddress(Configuration conf) {
+    String nnAddr = conf.get(FS_DEFAULT_NAME_KEY);
+    if (nnAddr == null) {
+      // default fs is not set.
+      clientNamenodeAddress = null;
+      return;
+    }
+
+    LOG.info(FS_DEFAULT_NAME_KEY + " is " + nnAddr);
+    URI nnUri = URI.create(nnAddr);
+
+    String nnHost = nnUri.getHost();
+    if (nnHost == null) {
+      clientNamenodeAddress = null;
+      return;
+    }
+
+    if (DFSUtil.getNameServiceIds(conf).contains(nnHost)) {
+      // host name is logical
+      clientNamenodeAddress = nnHost;
+    } else if (nnUri.getPort() > 0) {
+      // physical address with a valid port
+      clientNamenodeAddress = nnUri.getAuthority();
+    } else {
+      // the port is missing or 0. Figure out real bind address later.
+      clientNamenodeAddress = null;
+      return;
+    }
+    LOG.info("Clients are to use " + clientNamenodeAddress + " to access"
+        + " this namenode/service.");
+  }
+
+  /**
+   * Get the namenode address to be used by clients.
+   * @return nn address
+   */
+  public String getClientNamenodeAddress() {
+    return clientNamenodeAddress;
+  }
 
   public static InetSocketAddress getAddress(String address) {
     return NetUtils.createSocketAddr(address, DEFAULT_PORT);
@@ -356,8 +412,6 @@ public class NameNode implements NameNod
 
 
   /**
-   * TODO:FEDERATION
-   * @param filesystemURI
    * @return address of file system
    */
   public static InetSocketAddress getAddress(URI filesystemURI) {
@@ -446,6 +500,29 @@ public class NameNode implements NameNod
     return getHttpAddress(conf);
   }
 
+  /**
+   * HTTP server address for binding the endpoint. This method is
+   * for use by the NameNode and its derivatives. It may return
+   * a different address than the one that should be used by clients to
+   * connect to the NameNode. See
+   * {@link DFSConfigKeys#DFS_NAMENODE_HTTP_BIND_HOST_KEY}
+   *
+   * @param conf the configuration to read the bind host from
+   * @return the address the HTTP server should bind to
+   */
+  protected InetSocketAddress getHttpServerBindAddress(Configuration conf) {
+    InetSocketAddress bindAddress = getHttpServerAddress(conf);
+
+    // If DFS_NAMENODE_HTTP_BIND_HOST_KEY exists then it overrides the
+    // host name portion of DFS_NAMENODE_HTTP_ADDRESS_KEY.
+    final String bindHost = conf.getTrimmed(DFS_NAMENODE_HTTP_BIND_HOST_KEY);
+    if (bindHost != null && !bindHost.isEmpty()) {
+      bindAddress = new InetSocketAddress(bindHost, bindAddress.getPort());
+    }
+
+    return bindAddress;
+  }
+
   /** @return the NameNode HTTP address. */
   public static InetSocketAddress getHttpAddress(Configuration conf) {
     return  NetUtils.createSocketAddr(
@@ -512,9 +589,14 @@ public class NameNode implements NameNod
     loadNamesystem(conf);
 
     rpcServer = createRpcServer(conf);
-    final String nsId = getNameServiceId(conf);
-    tokenServiceName = HAUtil.isHAEnabled(conf, nsId) ? nsId : NetUtils
-            .getHostPortString(rpcServer.getRpcAddress());
+    if (clientNamenodeAddress == null) {
+      // This is expected for MiniDFSCluster. Set it now using 
+      // the RPC server's bind address.
+      clientNamenodeAddress = 
+          NetUtils.getHostPortString(rpcServer.getRpcAddress());
+      LOG.info("Clients are to use " + clientNamenodeAddress + " to access"
+          + " this namenode/service.");
+    }
     if (NamenodeRole.NAMENODE == role) {
       httpServer.setNameNodeAddress(getNameNodeAddress());
       httpServer.setFSImage(getFSImage());
@@ -522,7 +604,8 @@ public class NameNode implements NameNod
     
     pauseMonitor = new JvmPauseMonitor(conf);
     pauseMonitor.start();
-
+    metrics.getJvmMetrics().setPauseMonitor(pauseMonitor);
+    
     startCommonServices(conf);
   }
   
@@ -610,7 +693,7 @@ public class NameNode implements NameNod
   }
   
   private void startHttpServer(final Configuration conf) throws IOException {
-    httpServer = new NameNodeHttpServer(conf, this, getHttpServerAddress(conf));
+    httpServer = new NameNodeHttpServer(conf, this, getHttpServerBindAddress(conf));
     httpServer.start();
     httpServer.setStartupProgress(startupProgress);
   }
@@ -633,6 +716,7 @@ public class NameNode implements NameNod
    * <li>{@link StartupOption#BACKUP BACKUP} - start backup node</li>
    * <li>{@link StartupOption#CHECKPOINT CHECKPOINT} - start checkpoint node</li>
    * <li>{@link StartupOption#UPGRADE UPGRADE} - start the cluster  
+   * <li>{@link StartupOption#UPGRADEONLY UPGRADEONLY} - upgrade the cluster  
    * upgrade and create a snapshot of the current file system state</li> 
    * <li>{@link StartupOption#RECOVER RECOVERY} - recover name node
    * metadata</li>
@@ -660,6 +744,7 @@ public class NameNode implements NameNod
       throws IOException { 
     this.conf = conf;
     this.role = role;
+    setClientNamenodeAddress(conf);
     String nsId = getNameServiceId(conf);
     String namenodeId = HAUtil.getNameNodeId(conf, nsId);
     this.haEnabled = HAUtil.isHAEnabled(conf, nsId);
@@ -686,7 +771,8 @@ public class NameNode implements NameNod
   }
 
   protected HAState createHAState(StartupOption startOpt) {
-    if (!haEnabled || startOpt == StartupOption.UPGRADE) {
+    if (!haEnabled || startOpt == StartupOption.UPGRADE 
+        || startOpt == StartupOption.UPGRADEONLY) {
       return ACTIVE_STATE;
     } else {
       return STANDBY_STATE;
@@ -753,7 +839,7 @@ public class NameNode implements NameNod
   /** get FSImage */
   @VisibleForTesting
   public FSImage getFSImage() {
-    return namesystem.dir.fsImage;
+    return namesystem.getFSImage();
   }
 
   /**
@@ -800,8 +886,8 @@ public class NameNode implements NameNod
    * Interactively confirm that formatting is desired 
    * for each existing directory and format them.
    * 
-   * @param conf
-   * @param force
+   * @param conf configuration to use
+   * @param force if true, format regardless of whether dirs exist
    * @return true if formatting was aborted, false otherwise
    * @throws IOException
    */
@@ -1063,7 +1149,7 @@ public class NameNode implements NameNod
         return true;
       }
     }
-    nsys.dir.fsImage.doRollback(nsys);
+    nsys.getFSImage().doRollback(nsys);
     return false;
   }
 
@@ -1117,8 +1203,10 @@ public class NameNode implements NameNod
         startOpt = StartupOption.BACKUP;
       } else if (StartupOption.CHECKPOINT.getName().equalsIgnoreCase(cmd)) {
         startOpt = StartupOption.CHECKPOINT;
-      } else if (StartupOption.UPGRADE.getName().equalsIgnoreCase(cmd)) {
-        startOpt = StartupOption.UPGRADE;
+      } else if (StartupOption.UPGRADE.getName().equalsIgnoreCase(cmd)
+          || StartupOption.UPGRADEONLY.getName().equalsIgnoreCase(cmd)) {
+        startOpt = StartupOption.UPGRADE.getName().equalsIgnoreCase(cmd) ? 
+            StartupOption.UPGRADE : StartupOption.UPGRADEONLY;
         /* Can be followed by CLUSTERID with a required parameter or
          * RENAMERESERVED with an optional parameter
          */
@@ -1188,6 +1276,8 @@ public class NameNode implements NameNod
               "can't understand option \"" + args[i] + "\"");
           }
         }
+      } else if (StartupOption.METADATAVERSION.getName().equalsIgnoreCase(cmd)) {
+        startOpt = StartupOption.METADATAVERSION;
       } else {
         return null;
       }
@@ -1240,6 +1330,21 @@ public class NameNode implements NameNod
     }
   }
 
+  /**
+   * Verify that configured directories exist, then print the metadata versions
+   * of the software and the image.
+   *
+   * @param conf configuration to use
+   * @throws IOException
+   */
+  private static boolean printMetadataVersion(Configuration conf)
+    throws IOException {
+    final FSImage fsImage = new FSImage(conf);
+    final FSNamesystem fs = new FSNamesystem(conf, fsImage, false);
+    return fsImage.recoverTransitionRead(
+      StartupOption.METADATAVERSION, fs, null);
+  }
+
   public static NameNode createNameNode(String argv[], Configuration conf)
       throws IOException {
     LOG.info("createNameNode " + Arrays.asList(argv));
@@ -1304,6 +1409,17 @@ public class NameNode implements NameNod
         NameNode.doRecovery(startOpt, conf);
         return null;
       }
+      case METADATAVERSION: {
+        printMetadataVersion(conf);
+        terminate(0);
+        return null; // avoid javac warning
+      }
+      case UPGRADEONLY: {
+        DefaultMetricsSystem.initialize("NameNode");
+        new NameNode(conf);
+        terminate(0);
+        return null;
+      }
       default: {
         DefaultMetricsSystem.initialize("NameNode");
         return new NameNode(conf);
@@ -1600,7 +1716,11 @@ public class NameNode implements NameNod
   public boolean isStandbyState() {
     return (state.equals(STANDBY_STATE));
   }
-
+  
+  public boolean isActiveState() {
+    return (state.equals(ACTIVE_STATE));
+  }
+  
   /**
    * Check that a request to change this node's HA state is valid.
    * In particular, verifies that, if auto failover is enabled, non-forced