Posted to common-commits@hadoop.apache.org by ey...@apache.org on 2016/08/23 01:31:08 UTC

hadoop git commit: HDFS-8312. Added permission check for moving file to Trash. (Weiwei Yang via Eric Yang)

Repository: hadoop
Updated Branches:
  refs/heads/trunk 4070caad7 -> c49333bec


HDFS-8312. Added permission check for moving file to Trash. (Weiwei Yang via Eric Yang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c49333be
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c49333be
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c49333be

Branch: refs/heads/trunk
Commit: c49333becfa7652460976a61eb86522010bcfeed
Parents: 4070caa
Author: Eric Yang <ey...@apache.org>
Authored: Mon Aug 22 18:29:56 2016 -0700
Committer: Eric Yang <ey...@apache.org>
Committed: Mon Aug 22 18:29:56 2016 -0700

----------------------------------------------------------------------
 .../main/java/org/apache/hadoop/fs/Options.java |  3 +-
 .../apache/hadoop/fs/TrashPolicyDefault.java    | 10 ++-
 .../ClientNamenodeProtocolTranslatorPB.java     |  7 +-
 .../src/main/proto/ClientNamenodeProtocol.proto |  1 +
 ...tNamenodeProtocolServerSideTranslatorPB.java | 14 +++-
 .../hdfs/server/namenode/FSDirRenameOp.java     | 28 +++++--
 .../apache/hadoop/hdfs/TestDFSPermission.java   | 81 ++++++++++++++++++++
 7 files changed, 132 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c49333be/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java
index da75d1c..dc50286 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java
@@ -213,7 +213,8 @@ public final class Options {
    */
   public static enum Rename {
     NONE((byte) 0), // No options
-    OVERWRITE((byte) 1); // Overwrite the rename destination
+    OVERWRITE((byte) 1), // Overwrite the rename destination
+    TO_TRASH ((byte) 2); // Rename to trash
 
     private final byte code;
     

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c49333be/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
index 14f4c0c..66ef890 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
@@ -106,6 +106,7 @@ public class TrashPolicyDefault extends TrashPolicy {
     return deletionInterval != 0;
   }
 
+  @SuppressWarnings("deprecation")
   @Override
   public boolean moveToTrash(Path path) throws IOException {
     if (!isEnabled())
@@ -156,10 +157,11 @@ public class TrashPolicyDefault extends TrashPolicy {
           trashPath = new Path(orig + Time.now());
         }
         
-        if (fs.rename(path, trashPath)) {           // move to current trash
-          LOG.info("Moved: '" + path + "' to trash at: " + trashPath);
-          return true;
-        }
+        // move to current trash
+        fs.rename(path, trashPath,
+            Rename.TO_TRASH);
+        LOG.info("Moved: '" + path + "' to trash at: " + trashPath);
+        return true;
       } catch (IOException e) {
         cause = e;
       }
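
For context, applications normally reach this code through the Trash helper
(the same path the FsShell "rm" command takes when trash is enabled) rather
than by calling TrashPolicyDefault directly. A minimal sketch, assuming a
hypothetical path and a client-side trash interval; with this patch the move
fails with an IOException caused by an AccessControlException if the caller
lacks the permissions needed to delete the path outright.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.Trash;

    public class MoveToTrashSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.trash.interval", "10");      // trash must be enabled
        FileSystem fs = FileSystem.get(conf);

        // Hypothetical path; the move now carries Rename.TO_TRASH, so the
        // NameNode applies the delete-style permission check before moving it.
        Path victim = new Path("/tmp/some-dir");
        boolean moved = Trash.moveToAppropriateTrash(fs, victim, conf);
        System.out.println("moved to trash: " + moved);
      }
    }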

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c49333be/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index bcf5269..57f8fd6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -523,16 +523,21 @@ public class ClientNamenodeProtocolTranslatorPB implements
   public void rename2(String src, String dst, Rename... options)
       throws IOException {
     boolean overwrite = false;
+    boolean toTrash = false;
     if (options != null) {
       for (Rename option : options) {
         if (option == Rename.OVERWRITE) {
           overwrite = true;
+        } else if (option == Rename.TO_TRASH) {
+          toTrash = true;
         }
       }
     }
     Rename2RequestProto req = Rename2RequestProto.newBuilder().
         setSrc(src).
-        setDst(dst).setOverwriteDest(overwrite).
+        setDst(dst).
+        setOverwriteDest(overwrite).
+        setMoveToTrash(toTrash).
         build();
     try {
       if (Client.isAsynchronousMode()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c49333be/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
index aac48a4..83f5828 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
@@ -245,6 +245,7 @@ message Rename2RequestProto {
   required string src = 1;
   required string dst = 2;
   required bool overwriteDest = 3;
+  optional bool moveToTrash = 4;
 }
 
 message Rename2ResponseProto { // void response
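
Field 4 is declared optional so the wire format stays compatible: requests
built by clients that predate this change simply omit it. A minimal sketch of
how a receiver can read the field defensively (wantsTrashSemantics is a
hypothetical helper, not part of the patch):

    import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto;

    final class RenameRequestCompat {
      // Older clients never set field 4, so hasMoveToTrash() is false and the
      // rename keeps its pre-patch semantics; newer clients set it explicitly,
      // so the value itself should be consulted as well.
      static boolean wantsTrashSemantics(Rename2RequestProto req) {
        return req.hasMoveToTrash() && req.getMoveToTrash();
      }
    }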

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c49333be/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
index 7a4ff82..641cd20 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.protocolPB;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.EnumSet;
 import java.util.List;
 
@@ -602,10 +603,21 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   @Override
   public Rename2ResponseProto rename2(RpcController controller,
       Rename2RequestProto req) throws ServiceException {
+    // resolve rename options
+    ArrayList<Rename> optionList = new ArrayList<Rename>();
+    if(req.getOverwriteDest()) {
+      optionList.add(Rename.OVERWRITE);
+    } else if(req.hasMoveToTrash()) {
+      optionList.add(Rename.TO_TRASH);
+    }
+
+    if(optionList.isEmpty()) {
+      optionList.add(Rename.NONE);
+    }
 
     try {
       server.rename2(req.getSrc(), req.getDst(), 
-          req.getOverwriteDest() ? Rename.OVERWRITE : Rename.NONE);
+          optionList.toArray(new Rename[optionList.size()]));
     } catch (IOException e) {
       throw new ServiceException(e);
     }   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c49333be/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
index d90139b..f98f8b1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -261,11 +261,29 @@ class FSDirRenameOp {
     src = srcIIP.getPath();
     dst = dstIIP.getPath();
     if (fsd.isPermissionEnabled()) {
-      // Rename does not operate on link targets
-      // Do not resolveLink when checking permissions of src and dst
-      // Check write access to parent of src
-      fsd.checkPermission(pc, srcIIP, false, null, FsAction.WRITE, null, null,
-          false);
+      boolean renameToTrash = false;
+      if (null != options &&
+          Arrays.asList(options).
+          contains(Options.Rename.TO_TRASH)) {
+        renameToTrash = true;
+      }
+
+      if(renameToTrash) {
+        // if destination is the trash directory,
+        // besides the permission check on "rename"
+        // we need to enforce the check for "delete"
+        // otherwise, it would expose a
+        // security hole that stuff moved to trash
+        // will be deleted by superuser
+        fsd.checkPermission(pc, srcIIP, false, null, FsAction.WRITE, null,
+            FsAction.ALL, true);
+      } else {
+        // Rename does not operate on link targets
+        // Do not resolveLink when checking permissions of src and dst
+        // Check write access to parent of src
+        fsd.checkPermission(pc, srcIIP, false, null, FsAction.WRITE, null,
+            null, false);
+      }
       // Check write access to ancestor of dst
       fsd.checkPermission(pc, dstIIP, false, FsAction.WRITE, null, null, null,
           false);
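
For reference, a restatement of the TO_TRASH branch above with the
FSDirectory#checkPermission parameter names spelled out as comments. This is
the "check for delete" the in-line comment refers to, applied on top of the
usual write-access check on the source's parent.

    fsd.checkPermission(pc, srcIIP,
        false,           // doCheckOwner: ownership of src is not required
        null,            // ancestorAccess: nothing extra for missing ancestors
        FsAction.WRITE,  // parentAccess: may remove the entry from src's parent
        null,            // access: no separate check on the src inode itself
        FsAction.ALL,    // subAccess: required throughout the src subtree,
                         // i.e. the caller could also delete the contents
        true);           // ignoreEmptyDir: empty directories are exempt from
                         // the subtree check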

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c49333be/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
index e6524f3..d0d00e5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.Trash;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.security.AccessControlException;
@@ -288,6 +289,86 @@ public class TestDFSPermission {
         FsPermission.createImmutable((short)0777));
   }
   
+  @Test(timeout=30000)
+  public void testTrashPermission() throws Exception {
+    //  /BSS                  user1:group2 777
+    //   /BSS/user1            user1:group2 755
+    //   /BSS/user1/test       user1:group1 600
+    Path rootDir = new Path("/BSS");
+    Path user1Dir = new Path("/BSS/user1");
+    Path user1File = new Path("/BSS/user1/test");
+
+    try {
+      conf.set(CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY, "10");
+      fs = FileSystem.get(conf);
+
+      fs.mkdirs(rootDir);
+      fs.setPermission(rootDir, new FsPermission((short) 0777));
+
+      login(USER1);
+      fs.mkdirs(user1Dir);
+      fs.setPermission(user1Dir, new FsPermission((short) 0755));
+      fs.setOwner(user1Dir, USER1.getShortUserName(), GROUP2_NAME);
+
+      create(OpType.CREATE, user1File);
+      fs.setOwner(user1File, USER1.getShortUserName(), GROUP1_NAME);
+      fs.setPermission(user1File, new FsPermission((short) 0600));
+
+      try {
+        // login as user2, attempt to delete /BSS/user1
+        // this should fail because user2 has no permission to
+        // its sub directory.
+        login(USER2);
+        fs.delete(user1Dir, true);
+        fail("User2 should not be allowed to delete user1's dir.");
+      } catch (AccessControlException e) {
+        e.printStackTrace();
+        assertTrue("Permission denied messages must carry the username",
+            e.getMessage().contains(USER2_NAME));
+      }
+
+      // ensure the /BSS/user1 still exists
+      assertTrue(fs.exists(user1Dir));
+
+      try {
+        login(SUPERUSER);
+        Trash trash = new Trash(fs, conf);
+        Path trashRoot = trash.getCurrentTrashDir(user1Dir);
+        while(true) {
+          trashRoot = trashRoot.getParent();
+          if(trashRoot.getParent().isRoot()) {
+            break;
+          }
+        }
+        fs.mkdirs(trashRoot);
+        fs.setPermission(trashRoot, new FsPermission((short) 0777));
+
+        // login as user2, attempt to move /BSS/user1 to trash
+        // this should also fail otherwise the directory will be
+        // removed by trash emptier (emptier is running by superuser)
+        login(USER2);
+        Trash userTrash = new Trash(fs, conf);
+        assertTrue(userTrash.isEnabled());
+        userTrash.moveToTrash(user1Dir);
+        fail("User2 should not be allowed to move"
+            + "user1's dir to trash");
+      } catch (IOException e) {
+        // expect the exception is caused by permission denied
+        assertTrue(e.getCause() instanceof AccessControlException);
+        e.printStackTrace();
+        assertTrue("Permission denied messages must carry the username",
+            e.getCause().getMessage().contains(USER2_NAME));
+      }
+
+      // ensure /BSS/user1 still exists
+      assertEquals(fs.exists(user1Dir), true);
+    } finally {
+      login(SUPERUSER);
+      fs.delete(rootDir, true);
+      conf.set(CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY, "0");
+    }
+  }
+
   /* check if the ownership of a file/directory is set correctly */
   @Test
   public void testOwnership() throws Exception {

