Posted to common-commits@hadoop.apache.org by sz...@apache.org on 2017/07/14 21:37:24 UTC

[1/2] hadoop git commit: Revert "HDFS-12130. Optimizing permission check for getContentSummary." to fix commit message.

Repository: hadoop
Updated Branches:
  refs/heads/trunk 9e0cde146 -> f413ee33d


Revert "HDFS-12130. Optimizing permission check for getContentSummary." to fix commit message.

This reverts commit a29fe100b3c671954b759add5923a2b44af9e6a4.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a1f12bb5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a1f12bb5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a1f12bb5

Branch: refs/heads/trunk
Commit: a1f12bb543778ddc243205eaa962e99da4d8f135
Parents: 9e0cde1
Author: Tsz-Wo Nicholas Sze <sz...@hortonworks.com>
Authored: Fri Jul 14 14:34:01 2017 -0700
Committer: Tsz-Wo Nicholas Sze <sz...@hortonworks.com>
Committed: Fri Jul 14 14:34:01 2017 -0700

----------------------------------------------------------------------
 .../server/blockmanagement/BlockCollection.java |   4 +-
 .../ContentSummaryComputationContext.java       |  20 --
 .../namenode/DirectoryWithQuotaFeature.java     |   4 +-
 .../server/namenode/FSDirStatAndListingOp.java  |   9 +-
 .../server/namenode/FSPermissionChecker.java    |  32 ---
 .../hadoop/hdfs/server/namenode/INode.java      |   9 +-
 .../hdfs/server/namenode/INodeDirectory.java    |   9 +-
 .../hdfs/server/namenode/INodeReference.java    |   3 +-
 .../snapshot/DirectorySnapshottableFeature.java |   3 +-
 .../snapshot/DirectoryWithSnapshotFeature.java  |   3 +-
 .../hdfs/server/namenode/snapshot/Snapshot.java |   4 +-
 .../TestGetContentSummaryWithPermission.java    | 201 -------------------
 12 files changed, 16 insertions(+), 285 deletions(-)
----------------------------------------------------------------------
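
The removal-only diff below is the mechanical inverse of the HDFS-12130 patch; commit f413ee33 in message [2/2] re-applies the identical change with the corrected commit message, so the change itself is easiest to read there.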


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1f12bb5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
index b880590..2f214be 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
@@ -21,7 +21,6 @@ import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.ContentSummary;
-import org.apache.hadoop.security.AccessControlException;
 
 /** 
  * This interface is used by the block manager to expose a
@@ -37,8 +36,7 @@ public interface BlockCollection {
   /** 
    * Get content summary.
    */
-  public ContentSummary computeContentSummary(BlockStoragePolicySuite bsps)
-      throws AccessControlException;
+  public ContentSummary computeContentSummary(BlockStoragePolicySuite bsps);
 
   /**
    * @return the number of blocks or block groups

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1f12bb5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
index 43e6f0d..8d5aa0d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
@@ -20,14 +20,11 @@ package org.apache.hadoop.hdfs.server.namenode;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.io.WritableUtils;
-import org.apache.hadoop.security.AccessControlException;
-
 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
 import java.io.IOException;
@@ -49,8 +46,6 @@ public class ContentSummaryComputationContext {
 
   public static final String REPLICATED = "Replicated";
   public static final Log LOG = LogFactory.getLog(INode.class);
-
-  private FSPermissionChecker pc;
   /**
    * Constructor
    *
@@ -62,12 +57,6 @@ public class ContentSummaryComputationContext {
    */
   public ContentSummaryComputationContext(FSDirectory dir,
       FSNamesystem fsn, long limitPerRun, long sleepMicroSec) {
-    this(dir, fsn, limitPerRun, sleepMicroSec, null);
-  }
-
-  public ContentSummaryComputationContext(FSDirectory dir,
-      FSNamesystem fsn, long limitPerRun, long sleepMicroSec,
-      FSPermissionChecker pc) {
     this.dir = dir;
     this.fsn = fsn;
     this.limitPerRun = limitPerRun;
@@ -76,7 +65,6 @@ public class ContentSummaryComputationContext {
     this.snapshotCounts = new ContentCounts.Builder().build();
     this.sleepMilliSec = sleepMicroSec/1000;
     this.sleepNanoSec = (int)((sleepMicroSec%1000)*1000);
-    this.pc = pc;
   }
 
   /** Constructor for blocking computation. */
@@ -198,12 +186,4 @@ public class ContentSummaryComputationContext {
     }
     return "";
   }
-
-  void checkPermission(INodeDirectory inode, int snapshotId, FsAction access)
-      throws AccessControlException {
-    if (dir != null && dir.isPermissionEnabled()
-        && pc != null && !pc.isSuperUser()) {
-      pc.checkPermission(inode, snapshotId, access);
-    }
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1f12bb5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java
index 0968c65..31b45ad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java
@@ -25,7 +25,6 @@ import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.QuotaByStorageTypeExceededException;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.EnumCounters;
-import org.apache.hadoop.security.AccessControlException;
 
 /**
  * Quota feature for {@link INodeDirectory}. 
@@ -126,8 +125,7 @@ public final class DirectoryWithQuotaFeature implements INode.Feature {
   }
 
   ContentSummaryComputationContext computeContentSummary(final INodeDirectory dir,
-      final ContentSummaryComputationContext summary)
-      throws AccessControlException {
+      final ContentSummaryComputationContext summary) {
     final long original = summary.getCounts().getStoragespace();
     long oldYieldCount = summary.getYieldCount();
     dir.computeDirectoryContentSummary(summary, Snapshot.CURRENT_STATE_ID);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1f12bb5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index 4c92249..04efa65 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -127,8 +127,10 @@ class FSDirStatAndListingOp {
       FSDirectory fsd, String src) throws IOException {
     FSPermissionChecker pc = fsd.getPermissionChecker();
     final INodesInPath iip = fsd.resolvePath(pc, src, DirOp.READ_LINK);
-    // getContentSummaryInt() call will check access (if enabled) when
-    // traversing all sub directories.
+    if (fsd.isPermissionEnabled()) {
+      fsd.checkPermission(pc, iip, false, null, null, null,
+          FsAction.READ_EXECUTE);
+    }
     return getContentSummaryInt(fsd, iip);
   }
 
@@ -511,8 +513,7 @@ class FSDirStatAndListingOp {
         // processed. 0 means disabled. I.e. blocking for the entire duration.
         ContentSummaryComputationContext cscc =
             new ContentSummaryComputationContext(fsd, fsd.getFSNamesystem(),
-                fsd.getContentCountLimit(), fsd.getContentSleepMicroSec(),
-                fsd.getPermissionChecker());
+                fsd.getContentCountLimit(), fsd.getContentSleepMicroSec());
         ContentSummary cs = targetNode.computeAndConvertContentSummary(
             iip.getPathSnapshotId(), cscc);
         fsd.addYieldCount(cscc.getYieldCount());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1f12bb5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
index f745a6c..f1250dd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
@@ -195,38 +195,6 @@ class FSPermissionChecker implements AccessControlEnforcer {
         ancestorAccess, parentAccess, access, subAccess, ignoreEmptyDir);
   }
 
-  /**
-   * Check permission only for the given inode (not checking the children's
-   * access).
-   *
-   * @param inode the inode to check.
-   * @param snapshotId the snapshot id.
-   * @param access the target access.
-   * @throws AccessControlException
-   */
-  void checkPermission(INode inode, int snapshotId, FsAction access)
-      throws AccessControlException {
-    try {
-      byte[][] localComponents = {inode.getLocalNameBytes()};
-      INodeAttributes[] iNodeAttr = {inode.getSnapshotINode(snapshotId)};
-      AccessControlEnforcer enforcer = getAccessControlEnforcer();
-      enforcer.checkPermission(
-          fsOwner, supergroup, callerUgi,
-          iNodeAttr, // single inode attr in the array
-          new INode[]{inode}, // single inode in the array
-          localComponents, snapshotId,
-          null, -1, // this will skip checkTraverse() because
-          // not checking ancestor here
-          false, null, null,
-          access, // the target access to be checked against the inode
-          null, // passing null sub access avoids checking children
-          false);
-    } catch (AccessControlException ace) {
-      throw new AccessControlException(
-          toAccessControlString(inode, inode.getFullPathName(), access));
-    }
-  }
-
   @Override
   public void checkPermission(String fsOwner, String supergroup,
       UserGroupInformation callerUgi, INodeAttributes[] inodeAttrs,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1f12bb5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index d768e08..1f982ca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -42,7 +42,6 @@ import org.apache.hadoop.hdfs.server.namenode.INodeReference.DstReference;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithName;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.Diff;
-import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.ChunkedArrayList;
 import org.apache.hadoop.util.StringUtils;
 
@@ -419,8 +418,7 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
   public abstract void destroyAndCollectBlocks(ReclaimContext reclaimContext);
 
   /** Compute {@link ContentSummary}. Blocking call */
-  public final ContentSummary computeContentSummary(
-      BlockStoragePolicySuite bsps) throws AccessControlException {
+  public final ContentSummary computeContentSummary(BlockStoragePolicySuite bsps) {
     return computeAndConvertContentSummary(Snapshot.CURRENT_STATE_ID,
         new ContentSummaryComputationContext(bsps));
   }
@@ -429,7 +427,7 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
    * Compute {@link ContentSummary}. 
    */
   public final ContentSummary computeAndConvertContentSummary(int snapshotId,
-      ContentSummaryComputationContext summary) throws AccessControlException {
+      ContentSummaryComputationContext summary) {
     computeContentSummary(snapshotId, summary);
     final ContentCounts counts = summary.getCounts();
     final ContentCounts snapshotCounts = summary.getSnapshotCounts();
@@ -463,8 +461,7 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
    * @return The same objects as summary.
    */
   public abstract ContentSummaryComputationContext computeContentSummary(
-      int snapshotId, ContentSummaryComputationContext summary)
-      throws AccessControlException;
+      int snapshotId, ContentSummaryComputationContext summary);
 
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1f12bb5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
index 3b7fa4e..4012783 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -26,7 +26,6 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.hadoop.fs.PathIsNotDirectoryException;
-import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.XAttr;
@@ -44,7 +43,6 @@ import org.apache.hadoop.hdfs.util.ReadOnlyList;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
-import org.apache.hadoop.security.AccessControlException;
 
 import static org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
 
@@ -634,7 +632,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
 
   @Override
   public ContentSummaryComputationContext computeContentSummary(int snapshotId,
-      ContentSummaryComputationContext summary) throws AccessControlException {
+      ContentSummaryComputationContext summary) {
     final DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
     if (sf != null && snapshotId == Snapshot.CURRENT_STATE_ID) {
       final ContentCounts counts = new ContentCounts.Builder().build();
@@ -656,10 +654,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
   }
 
   protected ContentSummaryComputationContext computeDirectoryContentSummary(
-      ContentSummaryComputationContext summary, int snapshotId)
-      throws AccessControlException{
-    // throws exception if failing the permission check
-    summary.checkPermission(this, snapshotId, FsAction.READ_EXECUTE);
+      ContentSummaryComputationContext summary, int snapshotId) {
     ReadOnlyList<INode> childrenList = getChildrenList(snapshotId);
     // Explicit traversing is done to enable repositioning after relinquishing
     // and reacquiring locks.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1f12bb5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
index db2026d..1b85237 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeat
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 
 import com.google.common.base.Preconditions;
-import org.apache.hadoop.security.AccessControlException;
 
 /**
  * An anonymous reference to an inode.
@@ -315,7 +314,7 @@ public abstract class INodeReference extends INode {
 
   @Override
   public ContentSummaryComputationContext computeContentSummary(int snapshotId,
-      ContentSummaryComputationContext summary) throws AccessControlException {
+      ContentSummaryComputationContext summary) {
     return referred.computeContentSummary(snapshotId, summary);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1f12bb5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
index 0ab928d..fbfc278 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
@@ -44,7 +44,6 @@ import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.hdfs.util.Diff.ListType;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
-import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.Time;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -235,7 +234,7 @@ public class DirectorySnapshottableFeature extends DirectoryWithSnapshotFeature
 
   @Override
   public void computeContentSummary4Snapshot(final BlockStoragePolicySuite bsps,
-      final ContentCounts counts) throws AccessControlException {
+      final ContentCounts counts) {
     counts.addContent(Content.SNAPSHOT, snapshotsByNames.size());
     counts.addContent(Content.SNAPSHOTTABLE_DIRECTORY, 1);
     super.computeContentSummary4Snapshot(bsps, counts);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1f12bb5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
index 7535879..0111b3b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
@@ -47,7 +47,6 @@ import org.apache.hadoop.hdfs.util.Diff.UndoInfo;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
 
 import com.google.common.base.Preconditions;
-import org.apache.hadoop.security.AccessControlException;
 
 import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.NO_SNAPSHOT_ID;
 
@@ -631,7 +630,7 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
   }
 
   public void computeContentSummary4Snapshot(final BlockStoragePolicySuite bsps,
-      final ContentCounts counts) throws AccessControlException {
+      final ContentCounts counts) {
     // Create a new blank summary context for blocking processing of subtree.
     ContentSummaryComputationContext summary = 
         new ContentSummaryComputationContext(bsps);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1f12bb5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
index 515f164..e98e766 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
@@ -41,7 +41,6 @@ import org.apache.hadoop.hdfs.util.ReadOnlyList;
 import com.google.common.base.Predicate;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
-import org.apache.hadoop.security.AccessControlException;
 
 /** Snapshot of a sub-tree in the namesystem. */
 @InterfaceAudience.Private
@@ -177,8 +176,7 @@ public class Snapshot implements Comparable<byte[]> {
 
     @Override
     public ContentSummaryComputationContext computeContentSummary(
-        int snapshotId, ContentSummaryComputationContext summary)
-        throws AccessControlException {
+        int snapshotId, ContentSummaryComputationContext summary) {
       return computeDirectoryContentSummary(summary, snapshotId);
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1f12bb5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetContentSummaryWithPermission.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetContentSummaryWithPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetContentSummaryWithPermission.java
deleted file mode 100644
index 03aa440..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetContentSummaryWithPermission.java
+++ /dev/null
@@ -1,201 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.namenode;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.ContentSummary;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.security.PrivilegedExceptionAction;
-
-import static org.apache.hadoop.fs.permission.FsAction.READ_EXECUTE;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-/**
- * This class tests get content summary with permission settings.
- */
-public class TestGetContentSummaryWithPermission {
-  protected static final short REPLICATION = 3;
-  protected static final long BLOCKSIZE = 1024;
-
-  private Configuration conf;
-  private MiniDFSCluster cluster;
-  private DistributedFileSystem dfs;
-
-  @Before
-  public void setUp() throws Exception {
-    conf = new Configuration();
-    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
-    cluster =
-        new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION).build();
-    cluster.waitActive();
-
-    dfs = cluster.getFileSystem();
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    if (cluster != null) {
-      cluster.shutdown();
-      cluster = null;
-    }
-  }
-
-  /**
-   * Test getContentSummary for super user. For super user, whatever
-   * permission the directories are with, always allowed to access
-   *
-   * @throws Exception
-   */
-  @Test
-  public void testGetContentSummarySuperUser() throws Exception {
-    final Path foo = new Path("/fooSuper");
-    final Path bar = new Path(foo, "barSuper");
-    final Path baz = new Path(bar, "bazSuper");
-    dfs.mkdirs(bar);
-    DFSTestUtil.createFile(dfs, baz, 10, REPLICATION, 0L);
-
-    ContentSummary summary;
-
-    summary = cluster.getNameNodeRpc().getContentSummary(
-        foo.toString());
-    verifySummary(summary, 2, 1, 10);
-
-    dfs.setPermission(foo, new FsPermission((short)0));
-
-    summary = cluster.getNameNodeRpc().getContentSummary(
-        foo.toString());
-    verifySummary(summary, 2, 1, 10);
-
-    dfs.setPermission(bar, new FsPermission((short)0));
-
-    summary = cluster.getNameNodeRpc().getContentSummary(
-        foo.toString());
-    verifySummary(summary, 2, 1, 10);
-
-    dfs.setPermission(baz, new FsPermission((short)0));
-
-    summary = cluster.getNameNodeRpc().getContentSummary(
-        foo.toString());
-    verifySummary(summary, 2, 1, 10);
-  }
-
-  /**
-   * Test getContentSummary for non-super, non-owner. Such users are restricted
-   * by permission of subdirectories. Namely if there is any subdirectory that
-   * does not have READ_EXECUTE access, AccessControlException will be thrown.
-   *
-   * @throws Exception
-   */
-  @Test
-  public void testGetContentSummaryNonSuperUser() throws Exception {
-    final Path foo = new Path("/fooNoneSuper");
-    final Path bar = new Path(foo, "barNoneSuper");
-    final Path baz = new Path(bar, "bazNoneSuper");
-    // run as some random non-superuser, non-owner user.
-    final UserGroupInformation userUgi  =
-        UserGroupInformation.createUserForTesting(
-            "randomUser", new String[]{"randomGroup"});
-    dfs.mkdirs(bar);
-    DFSTestUtil.createFile(dfs, baz, 10, REPLICATION, 0L);
-
-    // by default, permission is rwxr-xr-x, as long as READ and EXECUTE are set,
-    // content summary should accessible
-    FileStatus fileStatus;
-    fileStatus = dfs.getFileStatus(foo);
-    assertEquals((short)755, fileStatus.getPermission().toOctal());
-    fileStatus = dfs.getFileStatus(bar);
-    assertEquals((short)755, fileStatus.getPermission().toOctal());
-    // file has no EXECUTE, it is rw-r--r-- default
-    fileStatus = dfs.getFileStatus(baz);
-    assertEquals((short)644, fileStatus.getPermission().toOctal());
-
-    // by default, can get content summary
-    ContentSummary summary =
-        userUgi.doAs((PrivilegedExceptionAction<ContentSummary>)
-            () -> cluster.getNameNodeRpc().getContentSummary(
-            foo.toString()));
-    verifySummary(summary, 2, 1, 10);
-
-    // set empty access on root dir, should disallow content summary
-    dfs.setPermission(foo, new FsPermission((short)0));
-    try {
-      userUgi.doAs((PrivilegedExceptionAction<ContentSummary>)
-          () -> cluster.getNameNodeRpc().getContentSummary(
-              foo.toString()));
-      fail("Should've fail due to access control exception.");
-    } catch (AccessControlException e) {
-      assertTrue(e.getMessage().contains("Permission denied"));
-    }
-
-    // restore foo's permission to allow READ_EXECUTE
-    dfs.setPermission(foo,
-        new FsPermission(READ_EXECUTE, READ_EXECUTE, READ_EXECUTE));
-
-    // set empty access on subdir, should disallow content summary from root dir
-    dfs.setPermission(bar, new FsPermission((short)0));
-
-    try {
-      userUgi.doAs((PrivilegedExceptionAction<ContentSummary>)
-          () -> cluster.getNameNodeRpc().getContentSummary(
-              foo.toString()));
-      fail("Should've fail due to access control exception.");
-    } catch (AccessControlException e) {
-      assertTrue(e.getMessage().contains("Permission denied"));
-    }
-
-    // restore the permission of subdir to READ_EXECUTE. enable
-    // getContentSummary again for root
-    dfs.setPermission(bar,
-        new FsPermission(READ_EXECUTE, READ_EXECUTE, READ_EXECUTE));
-
-    summary = userUgi.doAs((PrivilegedExceptionAction<ContentSummary>)
-        () -> cluster.getNameNodeRpc().getContentSummary(
-                foo.toString()));
-    verifySummary(summary, 2, 1, 10);
-
-    // permission of files under the directory does not affect
-    // getContentSummary
-    dfs.setPermission(baz, new FsPermission((short)0));
-    summary = userUgi.doAs((PrivilegedExceptionAction<ContentSummary>)
-        () -> cluster.getNameNodeRpc().getContentSummary(
-            foo.toString()));
-    verifySummary(summary, 2, 1, 10);
-  }
-
-  private void verifySummary(ContentSummary summary, int dirCount,
-      int fileCount, int length) {
-    assertEquals(dirCount, summary.getDirectoryCount());
-    assertEquals(fileCount, summary.getFileCount());
-    assertEquals(length, summary.getLength());
-  }
-
-}




[2/2] hadoop git commit: HDFS-12130. Optimizing permission check for getContentSummary. Contributed by Chen Liang

Posted by sz...@apache.org.
HDFS-12130. Optimizing permission check for getContentSummary. Contributed by Chen Liang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f413ee33
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f413ee33
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f413ee33

Branch: refs/heads/trunk
Commit: f413ee33df301659c4ca9024380c2354983dcc84
Parents: a1f12bb
Author: Tsz-Wo Nicholas Sze <sz...@hortonworks.com>
Authored: Fri Jul 14 14:35:51 2017 -0700
Committer: Tsz-Wo Nicholas Sze <sz...@hortonworks.com>
Committed: Fri Jul 14 14:35:51 2017 -0700

----------------------------------------------------------------------
 .../server/blockmanagement/BlockCollection.java |   4 +-
 .../ContentSummaryComputationContext.java       |  20 ++
 .../namenode/DirectoryWithQuotaFeature.java     |   4 +-
 .../server/namenode/FSDirStatAndListingOp.java  |   9 +-
 .../server/namenode/FSPermissionChecker.java    |  32 +++
 .../hadoop/hdfs/server/namenode/INode.java      |   9 +-
 .../hdfs/server/namenode/INodeDirectory.java    |   9 +-
 .../hdfs/server/namenode/INodeReference.java    |   3 +-
 .../snapshot/DirectorySnapshottableFeature.java |   3 +-
 .../snapshot/DirectoryWithSnapshotFeature.java  |   3 +-
 .../hdfs/server/namenode/snapshot/Snapshot.java |   4 +-
 .../TestGetContentSummaryWithPermission.java    | 201 +++++++++++++++++++
 12 files changed, 285 insertions(+), 16 deletions(-)
----------------------------------------------------------------------
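
Taken together, the hunks below thread an FSPermissionChecker through ContentSummaryComputationContext so that READ_EXECUTE is verified on each directory as the summary computation reaches it, instead of once on the target directory before traversal starts. Superusers skip the checks entirely, and permissions on plain files are never consulted. The sketch below is a hypothetical client-side illustration of the resulting behavior; the path and configuration are assumptions, not part of this commit:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.ContentSummary;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class SummaryExample {
      public static void main(String[] args) throws Exception {
        // Assumes fs.defaultFS points at an HDFS cluster and /data exists.
        FileSystem fs = FileSystem.get(new Configuration());
        // For a non-superuser this now requires READ_EXECUTE on /data and on
        // every subdirectory beneath it; file permissions are ignored.
        ContentSummary cs = fs.getContentSummary(new Path("/data"));
        System.out.println("dirs=" + cs.getDirectoryCount()
            + ", files=" + cs.getFileCount()
            + ", length=" + cs.getLength());
      }
    }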


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f413ee33/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
index 2f214be..b880590 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
@@ -21,6 +21,7 @@ import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.security.AccessControlException;
 
 /** 
  * This interface is used by the block manager to expose a
@@ -36,7 +37,8 @@ public interface BlockCollection {
   /** 
    * Get content summary.
    */
-  public ContentSummary computeContentSummary(BlockStoragePolicySuite bsps);
+  public ContentSummary computeContentSummary(BlockStoragePolicySuite bsps)
+      throws AccessControlException;
 
   /**
    * @return the number of blocks or block groups

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f413ee33/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
index 8d5aa0d..43e6f0d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
@@ -20,11 +20,14 @@ package org.apache.hadoop.hdfs.server.namenode;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.security.AccessControlException;
+
 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
 import java.io.IOException;
@@ -46,6 +49,8 @@ public class ContentSummaryComputationContext {
 
   public static final String REPLICATED = "Replicated";
   public static final Log LOG = LogFactory.getLog(INode.class);
+
+  private FSPermissionChecker pc;
   /**
    * Constructor
    *
@@ -57,6 +62,12 @@ public class ContentSummaryComputationContext {
    */
   public ContentSummaryComputationContext(FSDirectory dir,
       FSNamesystem fsn, long limitPerRun, long sleepMicroSec) {
+    this(dir, fsn, limitPerRun, sleepMicroSec, null);
+  }
+
+  public ContentSummaryComputationContext(FSDirectory dir,
+      FSNamesystem fsn, long limitPerRun, long sleepMicroSec,
+      FSPermissionChecker pc) {
     this.dir = dir;
     this.fsn = fsn;
     this.limitPerRun = limitPerRun;
@@ -65,6 +76,7 @@ public class ContentSummaryComputationContext {
     this.snapshotCounts = new ContentCounts.Builder().build();
     this.sleepMilliSec = sleepMicroSec/1000;
     this.sleepNanoSec = (int)((sleepMicroSec%1000)*1000);
+    this.pc = pc;
   }
 
   /** Constructor for blocking computation. */
@@ -186,4 +198,12 @@ public class ContentSummaryComputationContext {
     }
     return "";
   }
+
+  void checkPermission(INodeDirectory inode, int snapshotId, FsAction access)
+      throws AccessControlException {
+    if (dir != null && dir.isPermissionEnabled()
+        && pc != null && !pc.isSuperUser()) {
+      pc.checkPermission(inode, snapshotId, access);
+    }
+  }
 }
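
Two details of the context change are worth noting. Callers that do not supply a checker, including the blocking-computation constructor, leave pc null and therefore keep the old, check-free traversal; only the new five-argument constructor opts in. And checkPermission() is a no-op when permission enforcement is disabled or the caller is a superuser, so the common administrative case pays no per-directory cost.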

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f413ee33/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java
index 31b45ad..0968c65 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.QuotaByStorageTypeExceededException;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.EnumCounters;
+import org.apache.hadoop.security.AccessControlException;
 
 /**
  * Quota feature for {@link INodeDirectory}. 
@@ -125,7 +126,8 @@ public final class DirectoryWithQuotaFeature implements INode.Feature {
   }
 
   ContentSummaryComputationContext computeContentSummary(final INodeDirectory dir,
-      final ContentSummaryComputationContext summary) {
+      final ContentSummaryComputationContext summary)
+      throws AccessControlException {
     final long original = summary.getCounts().getStoragespace();
     long oldYieldCount = summary.getYieldCount();
     dir.computeDirectoryContentSummary(summary, Snapshot.CURRENT_STATE_ID);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f413ee33/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index 04efa65..4c92249 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -127,10 +127,8 @@ class FSDirStatAndListingOp {
       FSDirectory fsd, String src) throws IOException {
     FSPermissionChecker pc = fsd.getPermissionChecker();
     final INodesInPath iip = fsd.resolvePath(pc, src, DirOp.READ_LINK);
-    if (fsd.isPermissionEnabled()) {
-      fsd.checkPermission(pc, iip, false, null, null, null,
-          FsAction.READ_EXECUTE);
-    }
+    // getContentSummaryInt() call will check access (if enabled) when
+    // traversing all sub directories.
     return getContentSummaryInt(fsd, iip);
   }
 
@@ -513,7 +511,8 @@ class FSDirStatAndListingOp {
         // processed. 0 means disabled. I.e. blocking for the entire duration.
         ContentSummaryComputationContext cscc =
             new ContentSummaryComputationContext(fsd, fsd.getFSNamesystem(),
-                fsd.getContentCountLimit(), fsd.getContentSleepMicroSec());
+                fsd.getContentCountLimit(), fsd.getContentSleepMicroSec(),
+                fsd.getPermissionChecker());
         ContentSummary cs = targetNode.computeAndConvertContentSummary(
             iip.getPathSnapshotId(), cscc);
         fsd.addYieldCount(cscc.getYieldCount());
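
The caller-side effect of this hunk is that the explicit up-front READ_EXECUTE check on the target directory is gone; as the new comment in getContentSummary() says, access is now enforced directory by directory inside getContentSummaryInt(). Passing fsd.getPermissionChecker() into the computation context captures the calling user's identity before the traversal begins.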

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f413ee33/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
index f1250dd..f745a6c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
@@ -195,6 +195,38 @@ class FSPermissionChecker implements AccessControlEnforcer {
         ancestorAccess, parentAccess, access, subAccess, ignoreEmptyDir);
   }
 
+  /**
+   * Check permission only for the given inode (not checking the children's
+   * access).
+   *
+   * @param inode the inode to check.
+   * @param snapshotId the snapshot id.
+   * @param access the target access.
+   * @throws AccessControlException
+   */
+  void checkPermission(INode inode, int snapshotId, FsAction access)
+      throws AccessControlException {
+    try {
+      byte[][] localComponents = {inode.getLocalNameBytes()};
+      INodeAttributes[] iNodeAttr = {inode.getSnapshotINode(snapshotId)};
+      AccessControlEnforcer enforcer = getAccessControlEnforcer();
+      enforcer.checkPermission(
+          fsOwner, supergroup, callerUgi,
+          iNodeAttr, // single inode attr in the array
+          new INode[]{inode}, // single inode in the array
+          localComponents, snapshotId,
+          null, -1, // this will skip checkTraverse() because
+          // not checking ancestor here
+          false, null, null,
+          access, // the target access to be checked against the inode
+          null, // passing null sub access avoids checking children
+          false);
+    } catch (AccessControlException ace) {
+      throw new AccessControlException(
+          toAccessControlString(inode, inode.getFullPathName(), access));
+    }
+  }
+
   @Override
   public void checkPermission(String fsOwner, String supergroup,
       UserGroupInformation callerUgi, INodeAttributes[] inodeAttrs,
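
The new single-inode check works by narrowing the general enforcer call: the one-element attribute and inode arrays scope the check to just this inode, the null ancestor path with index -1 skips checkTraverse(), and the null subAccess prevents the enforcer from recursing into children. Recursion over the tree is instead driven by computeDirectoryContentSummary(), which invokes this check once per directory visited. The catch clause rethrows with toAccessControlString() so that the denied path appears in the client-visible "Permission denied" message.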

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f413ee33/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index 1f982ca..d768e08 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hdfs.server.namenode.INodeReference.DstReference;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithName;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.Diff;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.ChunkedArrayList;
 import org.apache.hadoop.util.StringUtils;
 
@@ -418,7 +419,8 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
   public abstract void destroyAndCollectBlocks(ReclaimContext reclaimContext);
 
   /** Compute {@link ContentSummary}. Blocking call */
-  public final ContentSummary computeContentSummary(BlockStoragePolicySuite bsps) {
+  public final ContentSummary computeContentSummary(
+      BlockStoragePolicySuite bsps) throws AccessControlException {
     return computeAndConvertContentSummary(Snapshot.CURRENT_STATE_ID,
         new ContentSummaryComputationContext(bsps));
   }
@@ -427,7 +429,7 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
    * Compute {@link ContentSummary}. 
    */
   public final ContentSummary computeAndConvertContentSummary(int snapshotId,
-      ContentSummaryComputationContext summary) {
+      ContentSummaryComputationContext summary) throws AccessControlException {
     computeContentSummary(snapshotId, summary);
     final ContentCounts counts = summary.getCounts();
     final ContentCounts snapshotCounts = summary.getSnapshotCounts();
@@ -461,7 +463,8 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
    * @return The same objects as summary.
    */
   public abstract ContentSummaryComputationContext computeContentSummary(
-      int snapshotId, ContentSummaryComputationContext summary);
+      int snapshotId, ContentSummaryComputationContext summary)
+      throws AccessControlException;
 
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f413ee33/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
index 4012783..3b7fa4e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -26,6 +26,7 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.hadoop.fs.PathIsNotDirectoryException;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.XAttr;
@@ -43,6 +44,7 @@ import org.apache.hadoop.hdfs.util.ReadOnlyList;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.security.AccessControlException;
 
 import static org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
 
@@ -632,7 +634,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
 
   @Override
   public ContentSummaryComputationContext computeContentSummary(int snapshotId,
-      ContentSummaryComputationContext summary) {
+      ContentSummaryComputationContext summary) throws AccessControlException {
     final DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
     if (sf != null && snapshotId == Snapshot.CURRENT_STATE_ID) {
       final ContentCounts counts = new ContentCounts.Builder().build();
@@ -654,7 +656,10 @@ public class INodeDirectory extends INodeWithAdditionalFields
   }
 
   protected ContentSummaryComputationContext computeDirectoryContentSummary(
-      ContentSummaryComputationContext summary, int snapshotId) {
+      ContentSummaryComputationContext summary, int snapshotId)
+      throws AccessControlException{
+    // throws exception if failing the permission check
+    summary.checkPermission(this, snapshotId, FsAction.READ_EXECUTE);
     ReadOnlyList<INode> childrenList = getChildrenList(snapshotId);
     // Explicit traversing is done to enable repositioning after relinquishing
     // and reacquiring locks.
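
Because the check runs at the top of computeDirectoryContentSummary(), each directory the traversal enters is checked exactly once, and the first denial aborts the entire summary. As a concrete illustration, the hypothetical sketch below revokes all access on one subdirectory and then summarizes the parent; the paths are assumptions, and the summary call would need to run under a non-superuser, non-owner identity (see the doAs pattern in the test further down):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;
    import org.apache.hadoop.security.AccessControlException;

    public class DeniedSummaryExample {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path sub = new Path("/data/sub");
        fs.mkdirs(sub);
        // Revoke all access on one subdirectory.
        fs.setPermission(sub, new FsPermission((short) 0));
        try {
          // Run as a non-superuser, non-owner: fails as soon as the
          // traversal reaches /data/sub.
          fs.getContentSummary(new Path("/data"));
        } catch (AccessControlException ace) {
          System.err.println("Denied mid-traversal: " + ace.getMessage());
        }
      }
    }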

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f413ee33/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
index 1b85237..db2026d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeat
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.security.AccessControlException;
 
 /**
  * An anonymous reference to an inode.
@@ -314,7 +315,7 @@ public abstract class INodeReference extends INode {
 
   @Override
   public ContentSummaryComputationContext computeContentSummary(int snapshotId,
-      ContentSummaryComputationContext summary) {
+      ContentSummaryComputationContext summary) throws AccessControlException {
     return referred.computeContentSummary(snapshotId, summary);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f413ee33/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
index fbfc278..0ab928d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.hdfs.util.Diff.ListType;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.Time;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -234,7 +235,7 @@ public class DirectorySnapshottableFeature extends DirectoryWithSnapshotFeature
 
   @Override
   public void computeContentSummary4Snapshot(final BlockStoragePolicySuite bsps,
-      final ContentCounts counts) {
+      final ContentCounts counts) throws AccessControlException {
     counts.addContent(Content.SNAPSHOT, snapshotsByNames.size());
     counts.addContent(Content.SNAPSHOTTABLE_DIRECTORY, 1);
     super.computeContentSummary4Snapshot(bsps, counts);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f413ee33/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
index 0111b3b..7535879 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.util.Diff.UndoInfo;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
 
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.security.AccessControlException;
 
 import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.NO_SNAPSHOT_ID;
 
@@ -630,7 +631,7 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
   }
 
   public void computeContentSummary4Snapshot(final BlockStoragePolicySuite bsps,
-      final ContentCounts counts) {
+      final ContentCounts counts) throws AccessControlException {
     // Create a new blank summary context for blocking processing of subtree.
     ContentSummaryComputationContext summary = 
         new ContentSummaryComputationContext(bsps);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f413ee33/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
index e98e766..515f164 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hdfs.util.ReadOnlyList;
 import com.google.common.base.Predicate;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
+import org.apache.hadoop.security.AccessControlException;
 
 /** Snapshot of a sub-tree in the namesystem. */
 @InterfaceAudience.Private
@@ -176,7 +177,8 @@ public class Snapshot implements Comparable<byte[]> {
 
     @Override
     public ContentSummaryComputationContext computeContentSummary(
-        int snapshotId, ContentSummaryComputationContext summary) {
+        int snapshotId, ContentSummaryComputationContext summary)
+        throws AccessControlException {
       return computeDirectoryContentSummary(summary, snapshotId);
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f413ee33/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetContentSummaryWithPermission.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetContentSummaryWithPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetContentSummaryWithPermission.java
new file mode 100644
index 0000000..03aa440
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetContentSummaryWithPermission.java
@@ -0,0 +1,201 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.security.PrivilegedExceptionAction;
+
+import static org.apache.hadoop.fs.permission.FsAction.READ_EXECUTE;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+/**
+ * This class tests get content summary with permission settings.
+ */
+public class TestGetContentSummaryWithPermission {
+  protected static final short REPLICATION = 3;
+  protected static final long BLOCKSIZE = 1024;
+
+  private Configuration conf;
+  private MiniDFSCluster cluster;
+  private DistributedFileSystem dfs;
+
+  @Before
+  public void setUp() throws Exception {
+    conf = new Configuration();
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
+    cluster =
+        new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION).build();
+    cluster.waitActive();
+
+    dfs = cluster.getFileSystem();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    if (cluster != null) {
+      cluster.shutdown();
+      cluster = null;
+    }
+  }
+
+  /**
+   * Test getContentSummary for super user. For super user, whatever
+   * permission the directories are with, always allowed to access
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testGetContentSummarySuperUser() throws Exception {
+    final Path foo = new Path("/fooSuper");
+    final Path bar = new Path(foo, "barSuper");
+    final Path baz = new Path(bar, "bazSuper");
+    dfs.mkdirs(bar);
+    DFSTestUtil.createFile(dfs, baz, 10, REPLICATION, 0L);
+
+    ContentSummary summary;
+
+    summary = cluster.getNameNodeRpc().getContentSummary(
+        foo.toString());
+    verifySummary(summary, 2, 1, 10);
+
+    dfs.setPermission(foo, new FsPermission((short)0));
+
+    summary = cluster.getNameNodeRpc().getContentSummary(
+        foo.toString());
+    verifySummary(summary, 2, 1, 10);
+
+    dfs.setPermission(bar, new FsPermission((short)0));
+
+    summary = cluster.getNameNodeRpc().getContentSummary(
+        foo.toString());
+    verifySummary(summary, 2, 1, 10);
+
+    dfs.setPermission(baz, new FsPermission((short)0));
+
+    summary = cluster.getNameNodeRpc().getContentSummary(
+        foo.toString());
+    verifySummary(summary, 2, 1, 10);
+  }
+
+  /**
+   * Test getContentSummary for non-super, non-owner. Such users are restricted
+   * by permission of subdirectories. Namely if there is any subdirectory that
+   * does not have READ_EXECUTE access, AccessControlException will be thrown.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testGetContentSummaryNonSuperUser() throws Exception {
+    final Path foo = new Path("/fooNoneSuper");
+    final Path bar = new Path(foo, "barNoneSuper");
+    final Path baz = new Path(bar, "bazNoneSuper");
+    // run as some random non-superuser, non-owner user.
+    final UserGroupInformation userUgi  =
+        UserGroupInformation.createUserForTesting(
+            "randomUser", new String[]{"randomGroup"});
+    dfs.mkdirs(bar);
+    DFSTestUtil.createFile(dfs, baz, 10, REPLICATION, 0L);
+
+    // by default, permission is rwxr-xr-x, as long as READ and EXECUTE are set,
+    // content summary should accessible
+    FileStatus fileStatus;
+    fileStatus = dfs.getFileStatus(foo);
+    assertEquals((short)755, fileStatus.getPermission().toOctal());
+    fileStatus = dfs.getFileStatus(bar);
+    assertEquals((short)755, fileStatus.getPermission().toOctal());
+    // file has no EXECUTE, it is rw-r--r-- default
+    fileStatus = dfs.getFileStatus(baz);
+    assertEquals((short)644, fileStatus.getPermission().toOctal());
+
+    // by default, can get content summary
+    ContentSummary summary =
+        userUgi.doAs((PrivilegedExceptionAction<ContentSummary>)
+            () -> cluster.getNameNodeRpc().getContentSummary(
+            foo.toString()));
+    verifySummary(summary, 2, 1, 10);
+
+    // set empty access on root dir, should disallow content summary
+    dfs.setPermission(foo, new FsPermission((short)0));
+    try {
+      userUgi.doAs((PrivilegedExceptionAction<ContentSummary>)
+          () -> cluster.getNameNodeRpc().getContentSummary(
+              foo.toString()));
+      fail("Should've fail due to access control exception.");
+    } catch (AccessControlException e) {
+      assertTrue(e.getMessage().contains("Permission denied"));
+    }
+
+    // restore foo's permission to allow READ_EXECUTE
+    dfs.setPermission(foo,
+        new FsPermission(READ_EXECUTE, READ_EXECUTE, READ_EXECUTE));
+
+    // set empty access on subdir, should disallow content summary from root dir
+    dfs.setPermission(bar, new FsPermission((short)0));
+
+    try {
+      userUgi.doAs((PrivilegedExceptionAction<ContentSummary>)
+          () -> cluster.getNameNodeRpc().getContentSummary(
+              foo.toString()));
+      fail("Should've fail due to access control exception.");
+    } catch (AccessControlException e) {
+      assertTrue(e.getMessage().contains("Permission denied"));
+    }
+
+    // restore the permission of subdir to READ_EXECUTE. enable
+    // getContentSummary again for root
+    dfs.setPermission(bar,
+        new FsPermission(READ_EXECUTE, READ_EXECUTE, READ_EXECUTE));
+
+    summary = userUgi.doAs((PrivilegedExceptionAction<ContentSummary>)
+        () -> cluster.getNameNodeRpc().getContentSummary(
+                foo.toString()));
+    verifySummary(summary, 2, 1, 10);
+
+    // permission of files under the directory does not affect
+    // getContentSummary
+    dfs.setPermission(baz, new FsPermission((short)0));
+    summary = userUgi.doAs((PrivilegedExceptionAction<ContentSummary>)
+        () -> cluster.getNameNodeRpc().getContentSummary(
+            foo.toString()));
+    verifySummary(summary, 2, 1, 10);
+  }
+
+  private void verifySummary(ContentSummary summary, int dirCount,
+      int fileCount, int length) {
+    assertEquals(dirCount, summary.getDirectoryCount());
+    assertEquals(fileCount, summary.getFileCount());
+    assertEquals(length, summary.getLength());
+  }
+
+}
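
To run just this test locally, the usual Surefire selector should work from the hadoop-hdfs module (the module location is taken from the diff paths above):

    cd hadoop-hdfs-project/hadoop-hdfs
    mvn test -Dtest=TestGetContentSummaryWithPermission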


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org