Posted to common-commits@hadoop.apache.org by ji...@apache.org on 2016/01/12 07:48:50 UTC

hadoop git commit: HDFS-9621. Consolidate FSDirStatAndListingOp#createFileStatus to let its INodesInPath parameter always include the target INode. Contributed by Jing Zhao.

Repository: hadoop
Updated Branches:
  refs/heads/branch-2 93b7ef8ae -> 313f03bfd


HDFS-9621. Consolidate FSDirStatAndListingOp#createFileStatus to let its INodesInPath parameter always include the target INode. Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/313f03bf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/313f03bf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/313f03bf

Branch: refs/heads/branch-2
Commit: 313f03bfdab32cf365bc3470c5f9b6928a24f099
Parents: 93b7ef8
Author: Jing Zhao <ji...@apache.org>
Authored: Mon Jan 11 22:48:36 2016 -0800
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Jan 11 22:48:36 2016 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +
 .../server/namenode/FSDirStatAndListingOp.java  | 77 +++++++++++---------
 .../hdfs/server/namenode/FSDirectory.java       |  6 +-
 .../hdfs/server/namenode/FSEditLogLoader.java   |  8 +-
 .../hdfs/server/namenode/INodesInPath.java      |  4 +-
 5 files changed, 53 insertions(+), 45 deletions(-)
----------------------------------------------------------------------
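
In short, the patch changes the createFileStatus overloads so that callers no longer pass the target INode alongside an INodesInPath: the INodesInPath is expected to already end at the target, and the method derives the node from it. A minimal, self-contained sketch of that calling convention, using simplified stand-in types rather than the real HDFS classes, might look like this:

  import java.util.Arrays;
  import java.util.List;

  // Simplified stand-ins for INode / INodesInPath; the names and fields are
  // illustrative only, not the actual HDFS implementation.
  class Node {
    final String name;
    Node(String name) { this.name = name; }
  }

  class ResolvedPath {
    private final List<Node> nodes;   // root .. target, target last
    ResolvedPath(List<Node> nodes) { this.nodes = nodes; }
    Node getLastNode() { return nodes.get(nodes.size() - 1); }
  }

  class StatusFactory {
    // Before the patch the node and the path were passed separately and could
    // disagree; afterwards only the resolved path is passed and the target is
    // always derived from it.
    static String createFileStatus(ResolvedPath path) {
      Node target = path.getLastNode();   // single source of truth
      return "status(" + target.name + ")";
    }

    public static void main(String[] args) {
      ResolvedPath p = new ResolvedPath(
          Arrays.asList(new Node("/"), new Node("user"), new Node("file")));
      System.out.println(createFileStatus(p));   // prints status(file)
    }
  }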


http://git-wip-us.apache.org/repos/asf/hadoop/blob/313f03bf/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f9adf52..9e32dc0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -43,6 +43,9 @@ Release 2.9.0 - UNRELEASED
 
   BUG FIXES
 
+    HDFS-9621. Consolidate FSDirStatAndListingOp#createFileStatus to let its
+    INodesInPath parameter always include the target INode. (jing9)
+
 Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/313f03bf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index c5c2fb4..099ab88 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -253,12 +253,14 @@ class FSDirStatAndListingOp {
           .BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
 
       if (!targetNode.isDirectory()) {
+        // return the file's status. note that the iip already includes the
+        // target INode
         INodeAttributes nodeAttrs = getINodeAttributes(
             fsd, src, HdfsFileStatus.EMPTY_NAME, targetNode,
             snapshot);
         return new DirectoryListing(
             new HdfsFileStatus[]{ createFileStatus(
-                fsd, HdfsFileStatus.EMPTY_NAME, targetNode, nodeAttrs,
+                fsd, HdfsFileStatus.EMPTY_NAME, nodeAttrs,
                 needLocation, parentStoragePolicy, snapshot, isRawPath, iip)
             }, 0);
       }
@@ -272,7 +274,7 @@ class FSDirStatAndListingOp {
       int locationBudget = fsd.getLsLimit();
       int listingCnt = 0;
       HdfsFileStatus listing[] = new HdfsFileStatus[numOfListing];
-      for (int i=0; i<numOfListing && locationBudget>0; i++) {
+      for (int i = 0; i < numOfListing && locationBudget > 0; i++) {
         INode cur = contents.get(startChild+i);
         byte curPolicy = isSuperUser && !cur.isSymlink()?
             cur.getLocalStoragePolicyID():
@@ -280,9 +282,11 @@ class FSDirStatAndListingOp {
         INodeAttributes nodeAttrs = getINodeAttributes(
             fsd, src, cur.getLocalNameBytes(), cur,
             snapshot);
-        listing[i] = createFileStatus(fsd, cur.getLocalNameBytes(),
-            cur, nodeAttrs, needLocation, getStoragePolicyID(curPolicy,
-                parentStoragePolicy), snapshot, isRawPath, iip);
+        final INodesInPath iipWithChild = INodesInPath.append(iip, cur,
+            cur.getLocalNameBytes());
+        listing[i] = createFileStatus(fsd, cur.getLocalNameBytes(), nodeAttrs,
+            needLocation, getStoragePolicyID(curPolicy, parentStoragePolicy),
+            snapshot, isRawPath, iipWithChild);
         listingCnt++;
         if (needLocation) {
             // Once we  hit lsLimit locations, stop.
@@ -337,8 +341,7 @@ class FSDirStatAndListingOp {
           fsd, src, sRoot.getLocalNameBytes(),
           node, Snapshot.CURRENT_STATE_ID);
       listing[i] = createFileStatus(
-          fsd, sRoot.getLocalNameBytes(),
-          sRoot, nodeAttrs,
+          fsd, sRoot.getLocalNameBytes(), nodeAttrs,
           HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED,
           Snapshot.CURRENT_STATE_ID, false,
           INodesInPath.fromINode(sRoot));
@@ -358,31 +361,31 @@ class FSDirStatAndListingOp {
 
   /** Get the file info for a specific file.
    * @param fsd FSDirectory
-   * @param src The string representation of the path to the file
+   * @param iip The path to the file; the target INode is included
    * @param isRawPath true if a /.reserved/raw pathname was passed by the user
    * @param includeStoragePolicy whether to include storage policy
    * @return object containing information regarding the file
    *         or null if file not found
    */
   static HdfsFileStatus getFileInfo(
-      FSDirectory fsd, String path, INodesInPath src, boolean isRawPath,
+      FSDirectory fsd, String path, INodesInPath iip, boolean isRawPath,
       boolean includeStoragePolicy)
       throws IOException {
     fsd.readLock();
     try {
-      final INode i = src.getLastINode();
-      if (i == null) {
+      final INode node = iip.getLastINode();
+      if (node == null) {
         return null;
       }
 
-      byte policyId = includeStoragePolicy && !i.isSymlink() ?
-          i.getStoragePolicyID() :
+      byte policyId = includeStoragePolicy && !node.isSymlink() ?
+          node.getStoragePolicyID() :
           HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
       INodeAttributes nodeAttrs = getINodeAttributes(fsd, path,
                                                      HdfsFileStatus.EMPTY_NAME,
-                                                     i, src.getPathSnapshotId());
-      return createFileStatus(fsd, HdfsFileStatus.EMPTY_NAME, i, nodeAttrs,
-                              policyId, src.getPathSnapshotId(), isRawPath, src);
+                                                     node, iip.getPathSnapshotId());
+      return createFileStatus(fsd, HdfsFileStatus.EMPTY_NAME, nodeAttrs,
+                              policyId, iip.getPathSnapshotId(), isRawPath, iip);
     } finally {
       fsd.readUnlock();
     }
@@ -419,51 +422,54 @@ class FSDirStatAndListingOp {
    *
    * @param fsd FSDirectory
    * @param path the local name
-   * @param node inode
    * @param needLocation if block locations need to be included or not
    * @param isRawPath true if this is being called on behalf of a path in
    *                  /.reserved/raw
+   * @param iip the INodesInPath containing the target INode and its ancestors
    * @return a file status
    * @throws java.io.IOException if any error occurs
    */
   private static HdfsFileStatus createFileStatus(
-      FSDirectory fsd, byte[] path, INode node, INodeAttributes nodeAttrs,
+      FSDirectory fsd, byte[] path, INodeAttributes nodeAttrs,
       boolean needLocation, byte storagePolicy, int snapshot, boolean isRawPath,
       INodesInPath iip)
       throws IOException {
     if (needLocation) {
-      return createLocatedFileStatus(fsd, path, node, nodeAttrs, storagePolicy,
+      return createLocatedFileStatus(fsd, path, nodeAttrs, storagePolicy,
                                      snapshot, isRawPath, iip);
     } else {
-      return createFileStatus(fsd, path, node, nodeAttrs, storagePolicy,
+      return createFileStatus(fsd, path, nodeAttrs, storagePolicy,
                               snapshot, isRawPath, iip);
     }
   }
 
   /**
-   * Create FileStatus by file INode
+   * Create FileStatus for a given INodeFile.
+   * @param iip The INodesInPath containing the INodeFile and its ancestors
    */
   static HdfsFileStatus createFileStatusForEditLog(
-      FSDirectory fsd, String fullPath, byte[] path, INode node,
+      FSDirectory fsd, String fullPath, byte[] path,
       byte storagePolicy, int snapshot, boolean isRawPath,
       INodesInPath iip) throws IOException {
     INodeAttributes nodeAttrs = getINodeAttributes(
-        fsd, fullPath, path, node, snapshot);
-    return createFileStatus(fsd, path, node, nodeAttrs,
-                            storagePolicy, snapshot, isRawPath, iip);
+        fsd, fullPath, path, iip.getLastINode(), snapshot);
+    return createFileStatus(fsd, path, nodeAttrs, storagePolicy,
+                            snapshot, isRawPath, iip);
   }
 
   /**
-   * Create FileStatus by file INode
+   * Create FileStatus for a given INode.
+   * @param iip the INodesInPath containing the target INode and its ancestors
    */
   static HdfsFileStatus createFileStatus(
-      FSDirectory fsd, byte[] path, INode node,
+      FSDirectory fsd, byte[] path,
       INodeAttributes nodeAttrs, byte storagePolicy, int snapshot,
       boolean isRawPath, INodesInPath iip) throws IOException {
     long size = 0;     // length is zero for directories
     short replication = 0;
     long blocksize = 0;
     final boolean isEncrypted;
+    final INode node = iip.getLastINode();
 
     final FileEncryptionInfo feInfo = isRawPath ? null : FSDirEncryptionZoneOp
         .getFileEncryptionInfo(fsd, node, snapshot, iip);
@@ -474,11 +480,9 @@ class FSDirStatAndListingOp {
       replication = fileNode.getFileReplication(snapshot);
       blocksize = fileNode.getPreferredBlockSize();
       isEncrypted = (feInfo != null)
-          || (isRawPath && FSDirEncryptionZoneOp.isInAnEZ(fsd,
-              INodesInPath.fromINode(node)));
+          || (isRawPath && FSDirEncryptionZoneOp.isInAnEZ(fsd, iip));
     } else {
-      isEncrypted = FSDirEncryptionZoneOp.isInAnEZ(fsd,
-          INodesInPath.fromINode(node));
+      isEncrypted = FSDirEncryptionZoneOp.isInAnEZ(fsd, iip);
     }
 
     int childrenNum = node.isDirectory() ?
@@ -509,9 +513,10 @@ class FSDirStatAndListingOp {
 
   /**
    * Create FileStatus with location info by file INode
+   * @param iip the INodesInPath containing the target INode and its ancestors
    */
   private static HdfsLocatedFileStatus createLocatedFileStatus(
-      FSDirectory fsd, byte[] path, INode node, INodeAttributes nodeAttrs,
+      FSDirectory fsd, byte[] path, INodeAttributes nodeAttrs,
       byte storagePolicy, int snapshot,
       boolean isRawPath, INodesInPath iip) throws IOException {
     assert fsd.hasReadLock();
@@ -520,6 +525,8 @@ class FSDirStatAndListingOp {
     long blocksize = 0;
     LocatedBlocks loc = null;
     final boolean isEncrypted;
+    final INode node = iip.getLastINode();
+
     final FileEncryptionInfo feInfo = isRawPath ? null : FSDirEncryptionZoneOp
         .getFileEncryptionInfo(fsd, node, snapshot, iip);
     if (node.isFile()) {
@@ -540,11 +547,9 @@ class FSDirStatAndListingOp {
         loc = new LocatedBlocks();
       }
       isEncrypted = (feInfo != null)
-          || (isRawPath && FSDirEncryptionZoneOp.isInAnEZ(fsd,
-              INodesInPath.fromINode(node)));
+          || (isRawPath && FSDirEncryptionZoneOp.isInAnEZ(fsd, iip));
     } else {
-      isEncrypted = FSDirEncryptionZoneOp.isInAnEZ(fsd,
-          INodesInPath.fromINode(node));
+      isEncrypted = FSDirEncryptionZoneOp.isInAnEZ(fsd, iip);
     }
     int childrenNum = node.isDirectory() ?
         node.asDirectory().getChildrenNum(snapshot) : 0;
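
Inside getListing, each child's status used to be built from the child INode plus the parent directory's INodesInPath; after this patch every child gets its own INodesInPath via INodesInPath.append, so createFileStatus (and the encryption-zone check it now performs directly on the iip) always receives a path that ends at the node being described. A rough sketch of that loop shape, again with simplified placeholder types rather than the real FSDirStatAndListingOp code:

  import java.util.ArrayList;
  import java.util.Arrays;
  import java.util.List;

  // Illustrative only: an "append" that copies the parent path and adds one
  // child name, mirroring how the listing loop extends the parent iip before
  // building each status.
  class ListingSketch {
    static List<String> append(List<String> parent, String child) {
      List<String> copy = new ArrayList<>(parent);
      copy.add(child);                 // parent path + child name
      return copy;                     // new object, parent left untouched
    }

    static String createFileStatus(List<String> pathWithTarget) {
      // the target is always the last element of the path it receives
      return "status(" + pathWithTarget.get(pathWithTarget.size() - 1) + ")";
    }

    public static void main(String[] args) {
      List<String> parent = Arrays.asList("", "user", "dir");
      String[] children = {"a.txt", "b.txt", "c.txt"};
      for (String child : children) {
        // one appended path per child, as in the patched listing loop
        System.out.println(createFileStatus(append(parent, child)));
      }
    }
  }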

http://git-wip-us.apache.org/repos/asf/hadoop/blob/313f03bf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index d885e2e..52af503 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -1665,11 +1665,11 @@ public class FSDirectory implements Closeable {
 
   INodeAttributes getAttributes(String fullPath, byte[] path,
       INode node, int snapshot) {
-    INodeAttributes nodeAttrs = node;
+    INodeAttributes nodeAttrs;
     if (attributeProvider != null) {
       nodeAttrs = node.getSnapshotINode(snapshot);
-      fullPath = fullPath + (fullPath.endsWith(Path.SEPARATOR) ? ""
-                                                               : Path.SEPARATOR)
+      fullPath = fullPath
+          + (fullPath.endsWith(Path.SEPARATOR) ? "" : Path.SEPARATOR)
           + DFSUtil.bytes2String(path);
       nodeAttrs = attributeProvider.getAttributes(fullPath, nodeAttrs);
     } else {
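
The FSDirectory change is a small cleanup: the redundant initial assignment to nodeAttrs is dropped and the full-path construction, which appends a separator only when the parent path does not already end with one, is reflowed. The separator handling amounts to the following kind of join (shown with a hypothetical helper and a hard-coded "/" instead of the real Path.SEPARATOR and DFSUtil helpers):

  // Hypothetical helper illustrating the separator handling in
  // FSDirectory#getAttributes: add "/" only if the parent doesn't end with it.
  class PathJoinSketch {
    static String join(String fullPath, String childName) {
      return fullPath + (fullPath.endsWith("/") ? "" : "/") + childName;
    }

    public static void main(String[] args) {
      System.out.println(join("/user/dir", "file"));   // /user/dir/file
      System.out.println(join("/", "file"));           // /file, no double slash
    }
  }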

http://git-wip-us.apache.org/repos/asf/hadoop/blob/313f03bf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index 71fdbb5..6b32c939 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -371,13 +371,14 @@ public class FSEditLogLoader {
             addCloseOp.atime, addCloseOp.blockSize, true,
             addCloseOp.clientName, addCloseOp.clientMachine,
             addCloseOp.storagePolicyId);
+        assert newFile != null;
         iip = INodesInPath.replace(iip, iip.length() - 1, newFile);
         fsNamesys.leaseManager.addLease(addCloseOp.clientName, newFile.getId());
 
         // add the op into retry cache if necessary
         if (toAddRetryCache) {
           HdfsFileStatus stat = FSDirStatAndListingOp.createFileStatusForEditLog(
-              fsNamesys.dir, path, HdfsFileStatus.EMPTY_NAME, newFile,
+              fsNamesys.dir, path, HdfsFileStatus.EMPTY_NAME,
               HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, Snapshot.CURRENT_STATE_ID,
               false, iip);
           fsNamesys.addCacheEntryWithPayload(addCloseOp.rpcClientId,
@@ -396,8 +397,7 @@ public class FSEditLogLoader {
           // add the op into retry cache if necessary
           if (toAddRetryCache) {
             HdfsFileStatus stat = FSDirStatAndListingOp.createFileStatusForEditLog(
-                fsNamesys.dir, path,
-                HdfsFileStatus.EMPTY_NAME, newFile,
+                fsNamesys.dir, path, HdfsFileStatus.EMPTY_NAME,
                 HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED,
                 Snapshot.CURRENT_STATE_ID, false, iip);
             fsNamesys.addCacheEntryWithPayload(addCloseOp.rpcClientId,
@@ -470,7 +470,7 @@ public class FSEditLogLoader {
         // add the op into retry cache if necessary
         if (toAddRetryCache) {
           HdfsFileStatus stat = FSDirStatAndListingOp.createFileStatusForEditLog(
-              fsNamesys.dir, path, HdfsFileStatus.EMPTY_NAME, file,
+              fsNamesys.dir, path, HdfsFileStatus.EMPTY_NAME,
               HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED,
               Snapshot.CURRENT_STATE_ID, false, iip);
           fsNamesys.addCacheEntryWithPayload(appendOp.rpcClientId,
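
In FSEditLogLoader the callers likewise stop passing the file INode explicitly; they rely on the preceding INodesInPath.replace(iip, iip.length() - 1, newFile) call, so the path handed to createFileStatusForEditLog already ends at the freshly added file (the new assert simply documents that newFile is non-null at that point). The replace-then-derive pattern looks roughly like this, with simplified placeholder types rather than the real loader:

  import java.util.ArrayList;
  import java.util.Arrays;
  import java.util.List;

  // Illustrative only: replace the last element of a copied path, then let the
  // status builder read the target from the path itself.
  class EditLogSketch {
    static List<String> replaceLast(List<String> path, String newTarget) {
      assert newTarget != null;                 // mirrors the added assert
      List<String> copy = new ArrayList<>(path);
      copy.set(copy.size() - 1, newTarget);     // swap in the new file
      return copy;
    }

    static String statusForEditLog(List<String> pathWithTarget) {
      return "status(" + pathWithTarget.get(pathWithTarget.size() - 1) + ")";
    }

    public static void main(String[] args) {
      List<String> iip = Arrays.asList("", "user", "oldTarget");
      List<String> updated = replaceLast(iip, "newFile");
      System.out.println(statusForEditLog(updated));   // status(newFile)
    }
  }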

http://git-wip-us.apache.org/repos/asf/hadoop/blob/313f03bf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
index 72ca6ff..1d540b7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
@@ -254,7 +254,7 @@ public class INodesInPath {
    */
   public static INodesInPath append(INodesInPath iip, INode child,
       byte[] childName) {
-    Preconditions.checkArgument(!iip.isSnapshot && iip.length() > 0);
+    Preconditions.checkArgument(iip.length() > 0);
     Preconditions.checkArgument(iip.getLastINode() != null && iip
         .getLastINode().isDirectory());
     INode[] inodes = new INode[iip.length() + 1];
@@ -263,7 +263,7 @@ public class INodesInPath {
     byte[][] path = new byte[iip.path.length + 1][];
     System.arraycopy(iip.path, 0, path, 0, path.length - 1);
     path[path.length - 1] = childName;
-    return new INodesInPath(inodes, path, false, iip.snapshotId);
+    return new INodesInPath(inodes, path, iip.isSnapshot, iip.snapshotId);
   }
 
   private final byte[][] path;
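
Finally, INodesInPath.append is relaxed so it can extend snapshot paths as well: the !iip.isSnapshot precondition is dropped, and the child path inherits the parent's isSnapshot flag (and continues to carry its snapshot id) instead of hard-coding isSnapshot to false. A compact model of that copy-and-extend behaviour, with invented field names rather than the actual class:

  import java.util.Arrays;

  // Illustrative model of an immutable path that appends one component while
  // carrying the parent's snapshot state forward unchanged.
  class AppendSketch {
    final String[] components;
    final boolean isSnapshot;
    final int snapshotId;

    AppendSketch(String[] components, boolean isSnapshot, int snapshotId) {
      this.components = components;
      this.isSnapshot = isSnapshot;
      this.snapshotId = snapshotId;
    }

    AppendSketch append(String child) {
      String[] extended = Arrays.copyOf(components, components.length + 1);
      extended[extended.length - 1] = child;
      // before the patch the flag was forced to false; now it is inherited
      return new AppendSketch(extended, isSnapshot, snapshotId);
    }

    public static void main(String[] args) {
      AppendSketch snapPath =
          new AppendSketch(new String[]{"", "dir", ".snapshot", "s1"}, true, 42);
      AppendSketch child = snapPath.append("file");
      System.out.println(child.isSnapshot + " " + child.snapshotId + " "
          + Arrays.toString(child.components));
    }
  }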