You are viewing a plain text version of this content; the canonical (formatted) version is available at the original archive link.
Posted to commits@nifi.apache.org by ex...@apache.org on 2021/11/03 02:57:30 UTC

[nifi] branch main updated: NIFI-9360 Update PutHDFS to handle filesystems which do not support getAclStatus()

This is an automated email from the ASF dual-hosted git repository.

exceptionfactory pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/nifi.git


The following commit(s) were added to refs/heads/main by this push:
     new 362a243  NIFI-9360 Update PutHDFS to handle filesystems which do not support getAclStatus()
362a243 is described below

commit 362a243e0fe6a2712c38cf3aabab1b79610d9328
Author: Paul Grey <gr...@yahoo.com>
AuthorDate: Tue Nov 2 17:18:08 2021 -0400

    NIFI-9360 Update PutHDFS to handle filesystems which do not support getAclStatus()
    
    This closes #5505
    
    Signed-off-by: David Handermann <ex...@apache.org>
---
 .../java/org/apache/nifi/processors/hadoop/PutHDFS.java    | 14 +++++++++++---
 .../org/apache/nifi/processors/hadoop/PutHDFSTest.java     |  2 +-
 2 files changed, 12 insertions(+), 4 deletions(-)

diff --git a/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/main/java/org/apache/nifi/processors/hadoop/PutHDFS.java b/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/main/java/org/apache/nifi/processors/hadoop/PutHDFS.java
index 462033d..62b7996 100644
--- a/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/main/java/org/apache/nifi/processors/hadoop/PutHDFS.java
+++ b/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/main/java/org/apache/nifi/processors/hadoop/PutHDFS.java
@@ -21,6 +21,7 @@ import com.github.benmanes.caffeine.cache.Caffeine;
 import com.google.common.base.Throwables;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.AclEntryScope;
@@ -280,7 +281,6 @@ public class PutHDFS extends AbstractHadoopProcessor {
                 FlowFile putFlowFile = flowFile;
                 try {
                     final Path dirPath = getNormalizedPath(context, DIRECTORY, putFlowFile);
-                    checkAclStatus(getAclStatus(dirPath));
                     final String conflictResponse = context.getProperty(CONFLICT_RESOLUTION).getValue();
                     final long blockSize = getBlockSize(context, session, putFlowFile, dirPath);
                     final int bufferSize = getBufferSize(context, session, putFlowFile);
@@ -298,14 +298,22 @@ public class PutHDFS extends AbstractHadoopProcessor {
                     // Create destination directory if it does not exist
                     boolean targetDirCreated = false;
                     try {
-                        if (!hdfs.getFileStatus(dirPath).isDirectory()) {
+                        final FileStatus fileStatus = hdfs.getFileStatus(dirPath);
+                        if (!fileStatus.isDirectory()) {
                             throw new IOException(dirPath.toString() + " already exists and is not a directory");
                         }
+                        if (fileStatus.hasAcl()) {
+                            checkAclStatus(getAclStatus(dirPath));
+                        }
                     } catch (FileNotFoundException fe) {
                         targetDirCreated = hdfs.mkdirs(dirPath);
                         if (!targetDirCreated) {
                             throw new IOException(dirPath.toString() + " could not be created");
                         }
+                        final FileStatus fileStatus = hdfs.getFileStatus(dirPath);
+                        if (fileStatus.hasAcl()) {
+                            checkAclStatus(getAclStatus(dirPath));
+                        }
                         changeOwner(context, hdfs, dirPath, flowFile);
                     }
 
@@ -463,7 +471,7 @@ public class PutHDFS extends AbstractHadoopProcessor {
                 return aclCache.get(dirPath, fn -> {
                     try {
                         return hdfs.getAclStatus(dirPath);
-                    } catch (IOException e) {
+                    } catch (final IOException e) {
                         throw new UncheckedIOException(String.format("Unable to query ACL for directory [%s]", dirPath), e);
                     }
                 });
diff --git a/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/test/java/org/apache/nifi/processors/hadoop/PutHDFSTest.java b/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/test/java/org/apache/nifi/processors/hadoop/PutHDFSTest.java
index abe288d..c41114b 100644
--- a/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/test/java/org/apache/nifi/processors/hadoop/PutHDFSTest.java
+++ b/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/test/java/org/apache/nifi/processors/hadoop/PutHDFSTest.java
@@ -730,7 +730,7 @@ public class PutHDFSTest {
         }
 
         private FileStatus newDir(Path p) {
-            return new FileStatus(1L, true, 3, 128 * 1024 * 1024, 1523456000000L, 1523457000000L, perms((short) 0755), "owner", "group", p);
+            return new FileStatus(1L, true, 3, 128 * 1024 * 1024, 1523456000000L, 1523457000000L, perms((short) 0755), "owner", "group", (Path)null, p, true, false, false);
         }
 
         @Override