You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ozone.apache.org by bh...@apache.org on 2020/09/14 18:42:44 UTC

[hadoop-ozone] branch master updated: HDDS-4155. Directory and filename can end up with same name in a path. (#1361)

This is an automated email from the ASF dual-hosted git repository.

bharat pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 68869d1  HDDS-4155. Directory and filename can end up with same name in a path. (#1361)
68869d1 is described below

commit 68869d1743484826136a85ce07263a70979b26ef
Author: Bharat Viswanadham <bh...@apache.org>
AuthorDate: Mon Sep 14 11:42:27 2020 -0700

    HDDS-4155. Directory and filename can end up with same name in a path. (#1361)
---
 .../fs/ozone/TestOzoneFSWithObjectStoreCreate.java | 120 +++++++++++++++++++++
 .../ozone/om/request/key/OMKeyCommitRequest.java   |  11 ++
 .../hadoop/ozone/om/request/key/OMKeyRequest.java  |  19 ++++
 .../S3MultipartUploadCompleteRequest.java          |  10 ++
 .../hadoop/ozone/s3/endpoint/ObjectEndpoint.java   |  21 ++++
 5 files changed, 181 insertions(+)

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java
index c4e5435..f288973 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java
@@ -19,6 +19,8 @@
 package org.apache.hadoop.fs.ozone;
 
 import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -29,6 +31,8 @@ import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Rule;
@@ -40,9 +44,13 @@ import java.io.IOException;
 import java.net.URI;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_SCHEME;
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE;
 import static org.junit.Assert.fail;
 
 /**
@@ -209,6 +217,118 @@ public class TestOzoneFSWithObjectStoreCreate {
 
   }
 
+
+  @Test
+  public void testKeyCreationFailDuetoDirectoryCreationBeforeCommit()
+      throws Exception {
+    OzoneVolume ozoneVolume =
+        cluster.getRpcClient().getObjectStore().getVolume(volumeName);
+
+    OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName);
+
+    OzoneOutputStream ozoneOutputStream =
+        ozoneBucket.createKey("/a/b/c", 10);
+    byte[] b = new byte[10];
+    Arrays.fill(b, (byte)96);
+    ozoneOutputStream.write(b);
+
+    // Before close, create a directory with the same name.
+    o3fs.mkdirs(new Path("/a/b/c"));
+
+    try {
+      ozoneOutputStream.close();
+      fail("testKeyCreationFailDuetoDirectoryCreationBeforeCommit");
+    } catch (IOException ex) {
+      Assert.assertTrue(ex instanceof OMException);
+      Assert.assertEquals(NOT_A_FILE,
+          ((OMException) ex).getResult());
+    }
+
+  }
+
+
+  @Test
+  public void testMPUFailDuetoDirectoryCreationBeforeComplete()
+      throws Exception {
+    OzoneVolume ozoneVolume =
+        cluster.getRpcClient().getObjectStore().getVolume(volumeName);
+
+    OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName);
+
+    String keyName = "/dir1/dir2/mpukey";
+    OmMultipartInfo omMultipartInfo =
+        ozoneBucket.initiateMultipartUpload(keyName);
+    Assert.assertNotNull(omMultipartInfo);
+
+    OzoneOutputStream ozoneOutputStream =
+        ozoneBucket.createMultipartKey(keyName, 10, 1,
+            omMultipartInfo.getUploadID());
+    byte[] b = new byte[10];
+    Arrays.fill(b, (byte)96);
+    ozoneOutputStream.write(b);
+
+    // Before close, create directory with same name.
+    o3fs.mkdirs(new Path(keyName));
+
+    // This should succeed: the directory check is performed during
+    // complete MPU, not when closing an individual part upload.
+    ozoneOutputStream.close();
+
+    Map<Integer, String> partsMap = new HashMap<>();
+    partsMap.put(1, ozoneOutputStream.getCommitUploadPartInfo().getPartName());
+
+    // Should fail, as we have directory with same name.
+    try {
+      ozoneBucket.completeMultipartUpload(keyName,
+          omMultipartInfo.getUploadID(), partsMap);
+      fail("testMPUFailDuetoDirectoryCreationBeforeComplete failed");
+    } catch (OMException ex) {
+      Assert.assertTrue(ex instanceof OMException);
+      Assert.assertEquals(NOT_A_FILE, ex.getResult());
+    }
+
+    // Delete directory
+    o3fs.delete(new Path(keyName), true);
+
+    // And try again for complete MPU. This should succeed.
+    ozoneBucket.completeMultipartUpload(keyName,
+        omMultipartInfo.getUploadID(), partsMap);
+
+    try (FSDataInputStream ozoneInputStream = o3fs.open(new Path(keyName))) {
+      byte[] buffer = new byte[10];
+      // This read will not change the offset inside the file
+      int readBytes = ozoneInputStream.read(0, buffer, 0, 10);
+      String readData = new String(buffer, 0, readBytes, UTF_8);
+      Assert.assertEquals(new String(b, 0, b.length, UTF_8), readData);
+    }
+
+  }
+
+  @Test
+  public void testCreateDirectoryFirstThenKeyAndFileWithSameName()
+      throws Exception {
+    o3fs.mkdirs(new Path("/t1/t2"));
+
+    try {
+      o3fs.create(new Path("/t1/t2"));
+      fail("testCreateDirectoryFirstThenFileWithSameName failed");
+    } catch (FileAlreadyExistsException ex) {
+      Assert.assertTrue(ex.getMessage().contains(NOT_A_FILE.name()));
+    }
+
+    OzoneVolume ozoneVolume =
+        cluster.getRpcClient().getObjectStore().getVolume(volumeName);
+    OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName);
+    ozoneBucket.createDirectory("t1/t2");
+    try {
+      ozoneBucket.createKey("t1/t2", 0);
+      fail("testCreateDirectoryFirstThenFileWithSameName failed");
+    } catch (OMException ex) {
+      Assert.assertTrue(ex instanceof OMException);
+      Assert.assertEquals(NOT_A_FILE, ex.getResult());
+    }
+  }
+
   private void checkPath(Path path) {
     try {
       o3fs.getFileStatus(path);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
index dccb93b..8ee3f17 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
@@ -60,6 +60,7 @@ import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE;
 import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
 
 /**
@@ -158,6 +159,16 @@ public class OMKeyCommitRequest extends OMKeyRequest {
 
       validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
 
+      // Check whether a directory exists with the same name; if it exists, throw an error.
+      if (ozoneManager.getEnableFileSystemPaths()) {
+        if (checkDirectoryAlreadyExists(volumeName, bucketName, keyName,
+            omMetadataManager)) {
+          throw new OMException("Can not create file: " + keyName +
+              " as there is already directory in the given path", NOT_A_FILE);
+        }
+      }
+
+
       omKeyInfo = omMetadataManager.getOpenKeyTable().get(dbOpenKey);
       if (omKeyInfo == null) {
         throw new OMException("Failed to commit key, as " + dbOpenKey +
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
index d863073..f55cc5d 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
@@ -530,4 +530,23 @@ public abstract class OMKeyRequest extends OMClientRequest {
     }
     return encryptionInfo;
   }
+
+  /**
+   * Check whether a directory exists with the given key name; returns true if so, else false.
+   * @param volumeName
+   * @param bucketName
+   * @param keyName
+   * @param omMetadataManager
+   * @throws IOException
+   */
+  protected boolean checkDirectoryAlreadyExists(String volumeName,
+      String bucketName, String keyName, OMMetadataManager omMetadataManager)
+      throws IOException {
+    if (omMetadataManager.getKeyTable().isExist(
+        omMetadataManager.getOzoneDirKey(volumeName, bucketName,
+            keyName))) {
+      return true;
+    }
+    return false;
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
index 83cc28b..dff022b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
@@ -55,6 +55,7 @@ import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import com.google.common.base.Optional;
 import org.apache.commons.codec.digest.DigestUtils;
 import static org.apache.hadoop.ozone.OzoneConsts.OM_MULTIPART_MIN_SIZE;
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE;
 import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -140,6 +141,15 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest {
       OmMultipartKeyInfo multipartKeyInfo = omMetadataManager
           .getMultipartInfoTable().get(multipartKey);
 
+      // Check whether a directory exists with the same name; if it exists, throw an error.
+      if (ozoneManager.getEnableFileSystemPaths()) {
+        if (checkDirectoryAlreadyExists(volumeName, bucketName, keyName,
+            omMetadataManager)) {
+          throw new OMException("Can not Complete MPU for file: " + keyName +
+              " as there is already directory in the given path", NOT_A_FILE);
+        }
+      }
+
       if (multipartKeyInfo == null) {
         throw new OMException(
             failureMessage(requestedVolume, requestedBucket, keyName),
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
index 1eac008..a31986e 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
@@ -83,6 +83,7 @@ import org.apache.commons.io.IOUtils;
 
 import org.apache.commons.lang3.tuple.Pair;
 
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS;
 import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_CLIENT_BUFFER_SIZE_DEFAULT;
 import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_CLIENT_BUFFER_SIZE_KEY;
 import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.ENTITY_TOO_SMALL;
@@ -198,6 +199,18 @@ public class ObjectEndpoint extends EndpointBase {
           .build();
     } catch (IOException ex) {
       LOG.error("Exception occurred in PutObject", ex);
+      if (ex instanceof  OMException) {
+        if (((OMException) ex).getResult() == ResultCodes.NOT_A_FILE) {
+          OS3Exception os3Exception = S3ErrorTable.newError(INVALID_REQUEST,
+              keyPath);
+          os3Exception.setErrorMessage("An error occurred (InvalidRequest) " +
+              "when calling the PutObject/MPU PartUpload operation: " +
+              OZONE_OM_ENABLE_FILESYSTEM_PATHS + " is enabled Keys are" +
+              " considered as Unix Paths. Path has Violated FS Semantics " +
+              "which caused put operation to fail.");
+          throw os3Exception;
+        }
+      }
       throw ex;
     } finally {
       if (output != null) {
@@ -522,6 +535,14 @@ public class ObjectEndpoint extends EndpointBase {
             "when calling the CompleteMultipartUpload operation: You must " +
             "specify at least one part");
         throw os3Exception;
+      } else if(ex.getResult() == ResultCodes.NOT_A_FILE) {
+        OS3Exception os3Exception = S3ErrorTable.newError(INVALID_REQUEST, key);
+        os3Exception.setErrorMessage("An error occurred (InvalidRequest) " +
+            "when calling the CompleteMultipartUpload operation: " +
+            OZONE_OM_ENABLE_FILESYSTEM_PATHS + " is enabled Keys are " +
+            "considered as Unix Paths. A directory already exists with a " +
+            "given KeyName caused failure for MPU");
+        throw os3Exception;
       }
       throw ex;
     }


---------------------------------------------------------------------
To unsubscribe, e-mail: ozone-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: ozone-commits-help@hadoop.apache.org