Posted to commits@ozone.apache.org by ra...@apache.org on 2021/11/10 07:48:12 UTC

[ozone] branch master updated: HDDS-5839. Make sure buckets created from OFS are in FILE_SYSTEM_OPTIMIZED layout (#2730)

This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new c63479c  HDDS-5839. Make sure buckets created from OFS are in FILE_SYSTEM_OPTIMIZED layout (#2730)
c63479c is described below

commit c63479c2e1b3c5b2743c5675c4544c16f3189986
Author: Jyotinder Singh <jy...@gmail.com>
AuthorDate: Wed Nov 10 13:17:54 2021 +0530

    HDDS-5839. Make sure buckets created from OFS are in FILE_SYSTEM_OPTIMIZED layout (#2730)
---
 .../org/apache/hadoop/ozone/OzoneConfigKeys.java   |  6 ++++
 .../common/src/main/resources/ozone-default.xml    | 15 ++++++++++
 .../org/apache/hadoop/ozone/om/OMConfigKeys.java   |  2 +-
 .../dist/src/main/compose/ozone-mr/common-config   |  1 +
 .../src/main/compose/ozonesecure/docker-config     |  1 +
 .../main/smoketest/security/ozone-secure-fs.robot  |  2 +-
 .../hadoop/fs/ozone/TestRootedOzoneFileSystem.java |  3 ++
 .../org/apache/hadoop/ozone/om/OzoneManager.java   |  9 +++---
 .../ozone/om/request/file/OMFileRequest.java       | 32 ++++++++++++++++++----
 .../om/request/key/OMKeyCommitRequestWithFSO.java  |  3 +-
 .../TestS3MultipartUploadCommitPartRequest.java    | 10 +++++--
 .../ozone/BasicRootedOzoneClientAdapterImpl.java   | 17 ++++++++----
 12 files changed, 82 insertions(+), 19 deletions(-)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index 3c97d05..2bcba59 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -455,6 +455,12 @@ public final class OzoneConfigKeys {
   public static final boolean OZONE_CLIENT_KEY_LATEST_VERSION_LOCATION_DEFAULT =
       true;
 
+  public static final String OZONE_CLIENT_TEST_OFS_DEFAULT_BUCKET_LAYOUT =
+      "ozone.client.test.ofs.default.bucket.layout";
+
+  public static final String OZONE_CLIENT_TEST_OFS_BUCKET_LAYOUT_DEFAULT =
+      "FILE_SYSTEM_OPTIMIZED";
+
   /**
    * There is no need to instantiate this class.
    */
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 281b8dc..4cff33d 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -2993,6 +2993,21 @@
       FILE_SYSTEM_OPTIMIZED: This layout allows the bucket to support atomic rename/delete operations and
       also allows interoperability between S3 and FS APIs. Keys written via S3 API with a "/" delimiter
       will create intermediate directories.
+      For testing purposes, the LEGACY layout is also allowed. The LEGACY layout is not recommended and
+      will be removed in the future.
+    </description>
+  </property>
+
+  <property>
+    <name>ozone.client.test.ofs.default.bucket.layout</name>
+    <value>FILE_SYSTEM_OPTIMIZED</value>
+    <tag>OZONE, CLIENT</tag>
+    <description>
+      This configuration is intended for testing purposes. It sets the default bucket layout applied
+      when buckets are created through OFS.
+      Supported values are LEGACY and FILE_SYSTEM_OPTIMIZED.
+      FILE_SYSTEM_OPTIMIZED: This layout allows the bucket to support atomic rename/delete operations and
+      also allows interoperability between S3 and FS APIs. Keys written via S3 API with a "/" delimiter
+      will create intermediate directories.
     </description>
   </property>
 </configuration>
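
For reference, a minimal sketch (not part of this commit; the class name and usage are illustrative)
of overriding the new test-only property programmatically, using the constants added above:

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.OzoneConfigKeys;
    import org.apache.hadoop.ozone.om.helpers.BucketLayout;

    /** Illustrative sketch: make OFS create buckets in LEGACY layout for a test. */
    public final class OfsLayoutOverrideSketch {
      public static OzoneConfiguration legacyOfsConf() {
        OzoneConfiguration conf = new OzoneConfiguration();
        // Overrides the FILE_SYSTEM_OPTIMIZED default picked up by the OFS client adapter.
        conf.set(OzoneConfigKeys.OZONE_CLIENT_TEST_OFS_DEFAULT_BUCKET_LAYOUT,
            BucketLayout.LEGACY.name());
        return conf;
      }
    }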
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
index 164e30b..4502db4 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
@@ -275,7 +275,7 @@ public final class OMConfigKeys {
       "ozone.default.bucket.layout";
   public static final String OZONE_DEFAULT_BUCKET_LAYOUT_DEFAULT =
       BucketLayout.OBJECT_STORE.name();
-  public static final String OZONE_DEFAULT_BUCKET_LAYOUT_FILE_SYSTEM_OPTIMIZED =
+  public static final String OZONE_BUCKET_LAYOUT_FILE_SYSTEM_OPTIMIZED =
       BucketLayout.FILE_SYSTEM_OPTIMIZED.name();
 
   /**
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/common-config b/hadoop-ozone/dist/src/main/compose/ozone-mr/common-config
index d5a92ee..fd4150b 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-mr/common-config
+++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/common-config
@@ -27,6 +27,7 @@ OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
 OZONE-SITE.XML_ozone.scm.client.address=scm
 OZONE-SITE.XML_ozone.replication=3
 OZONE-SITE.XML_hdds.scm.safemode.min.datanode=3
+OZONE-SITE.XML_ozone.client.test.ofs.default.bucket.layout=LEGACY
 
 OZONE-SITE.XML_hdds.datanode.dir=/data/hdds
 HDFS-SITE.XML_dfs.datanode.address=0.0.0.0:1019
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config
index f34dd0a..2a109fd 100644
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config
@@ -34,6 +34,7 @@ OZONE-SITE.XML_hdds.grpc.tls.enabled=true
 OZONE-SITE.XML_ozone.replication=3
 OZONE-SITE.XML_ozone.datanode.pipeline.limit=1
 OZONE-SITE.XML_hdds.scmclient.max.retry.timeout=30s
+OZONE-SITE.XML_ozone.client.test.ofs.default.bucket.layout=LEGACY
 
 OZONE-SITE.XML_ozone.recon.om.snapshot.task.interval.delay=1m
 OZONE-SITE.XML_ozone.recon.db.dir=/data/metadata/recon
diff --git a/hadoop-ozone/dist/src/main/smoketest/security/ozone-secure-fs.robot b/hadoop-ozone/dist/src/main/smoketest/security/ozone-secure-fs.robot
index 9736968..74f8ccf 100644
--- a/hadoop-ozone/dist/src/main/smoketest/security/ozone-secure-fs.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/security/ozone-secure-fs.robot
@@ -64,7 +64,7 @@ Create volume bucket with credentials
                         # Authenticate testuser
     Run Keyword         Kinit test user     testuser     testuser.keytab
     Run Keyword         Setup volume names
-    Execute             ozone sh volume create o3://om/${volume1} 
+    Execute             ozone sh volume create o3://om/${volume1}
     Execute             ozone sh volume create o3://om/${volume2}
     Execute             ozone sh bucket create o3://om/${volume1}/bucket1
     Execute             ozone sh bucket create o3://om/${volume1}/bucket2
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java
index a5c1c81..cd6790f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java
@@ -174,6 +174,9 @@ public class TestRootedOzoneFileSystem {
           true, OMConfigKeys.OZONE_OM_METADATA_LAYOUT_PREFIX);
     } else {
       bucketLayout = BucketLayout.LEGACY;
+      // We need the OFS buckets to be in LEGACY layout for this test.
+      conf.set(OzoneConfigKeys.OZONE_CLIENT_TEST_OFS_DEFAULT_BUCKET_LAYOUT,
+          BucketLayout.LEGACY.name());
       conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
           enabledFileSystemPaths);
     }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index b52b3b1..4c67878 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -457,16 +457,17 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
     this.defaultBucketLayout =
         configuration.getTrimmed(OZONE_DEFAULT_BUCKET_LAYOUT,
             OZONE_DEFAULT_BUCKET_LAYOUT_DEFAULT);
-    // Make sure defaultBucketLayout is set to a valid value
+
     if (!defaultBucketLayout.equals(
         BucketLayout.FILE_SYSTEM_OPTIMIZED.name()) &&
-        !defaultBucketLayout.equals(BucketLayout.OBJECT_STORE.name())
+        !defaultBucketLayout.equals(BucketLayout.OBJECT_STORE.name()) &&
+        !defaultBucketLayout.equals(BucketLayout.LEGACY.name())
     ) {
       throw new ConfigurationException(
           defaultBucketLayout +
               " is not a valid default bucket layout. Supported values are " +
-              BucketLayout.FILE_SYSTEM_OPTIMIZED.name() + ", " +
-              BucketLayout.OBJECT_STORE.name() + ".");
+              BucketLayout.FILE_SYSTEM_OPTIMIZED + ", " +
+              BucketLayout.OBJECT_STORE + ", " + BucketLayout.LEGACY + ".");
     }
 
     InetSocketAddress omNodeRpcAddr = omNodeDetails.getRpcAddress();
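
With the relaxed validation above, ozone.default.bucket.layout may now be set to LEGACY in addition to
FILE_SYSTEM_OPTIMIZED and OBJECT_STORE. A hedged sketch of the accepted values (the class name is
illustrative; the constants are the ones referenced in the hunks above):

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.om.OMConfigKeys;
    import org.apache.hadoop.ozone.om.helpers.BucketLayout;

    /** Illustrative sketch of the layouts the OM now accepts as a default. */
    public final class DefaultLayoutSketch {
      public static OzoneConfiguration withLegacyDefault() {
        OzoneConfiguration conf = new OzoneConfiguration();
        // FILE_SYSTEM_OPTIMIZED, OBJECT_STORE and (with this change) LEGACY are valid;
        // any other value makes OzoneManager startup fail with a ConfigurationException.
        conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT, BucketLayout.LEGACY.name());
        return conf;
      }
    }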
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
index 536146a..38313b8 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
@@ -29,6 +29,7 @@ import java.util.Map;
 
 import com.google.common.base.Optional;
 import com.google.common.base.Strings;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.utils.db.BatchOperation;
@@ -56,7 +57,6 @@ import org.slf4j.LoggerFactory;
 import javax.annotation.Nonnull;
 
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND;
 
@@ -950,7 +950,27 @@ public final class OMFileRequest {
    * @throws IOException DB failure or parent not exists in DirectoryTable
    */
   public static long getParentID(long bucketId, Iterator<Path> pathComponents,
-      String keyName, OMMetadataManager omMetadataManager) throws IOException {
+                                 String keyName,
+                                 OMMetadataManager omMetadataManager)
+      throws IOException {
+
+    return getParentID(bucketId, pathComponents, keyName, omMetadataManager,
+        null);
+  }
+
+  /**
+   * Get parent id for the user given path.
+   *
+   * @param bucketId       bucket id
+   * @param pathComponents file path elements
+   * @param keyName        user given key name
+   * @param omMetadataManager   om metadata manager
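+   * @param errMsg         custom error message used when a parent directory is missing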
+   * @return lastKnownParentID
+   * @throws IOException DB failure or parent does not exist in DirectoryTable
+   */
+  public static long getParentID(long bucketId, Iterator<Path> pathComponents,
+      String keyName, OMMetadataManager omMetadataManager, String errMsg)
+      throws IOException {
 
     long lastKnownParentId = bucketId;
 
@@ -958,7 +978,9 @@ public final class OMFileRequest {
     if(!pathComponents.hasNext()){
       return bucketId;
     }
-
+    if (StringUtils.isBlank(errMsg)) {
+      errMsg = "Failed to find parent directory of " + keyName;
+    }
     OmDirectoryInfo omDirectoryInfo;
     while (pathComponents.hasNext()) {
       String nodeName = pathComponents.next().toString();
@@ -979,8 +1001,8 @@ public final class OMFileRequest {
         // One of the sub-dir doesn't exists in DB. Immediate parent should
         // exists for committing the key, otherwise will fail the operation.
         if (!reachedLastPathComponent) {
-          throw new OMException("Failed to find parent directory of "
-                  + keyName + " in DirectoryTable", KEY_NOT_FOUND);
+          throw new OMException(errMsg,
+              OMException.ResultCodes.DIRECTORY_NOT_FOUND);
         }
         break;
       }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java
index 178fb62..c86b630 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java
@@ -124,7 +124,8 @@ public class OMKeyCommitRequestWithFSO extends OMKeyCommitRequest {
       omBucketInfo = omMetadataManager.getBucketTable().get(bucketKey);
       long bucketId = omBucketInfo.getObjectID();
       long parentID = OMFileRequest.getParentID(bucketId, pathComponents,
-              keyName, omMetadataManager);
+          keyName, omMetadataManager, "Cannot create file: " + keyName
+              + " as its parent directory doesn't exist");
       String dbFileKey = omMetadataManager.getOzonePathKey(parentID, fileName);
       dbOpenFileKey = omMetadataManager.getOpenFileName(parentID, fileName,
               commitKeyRequest.getClientID());
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java
index 1bff324..71c5a4d 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java
@@ -20,6 +20,7 @@
 package org.apache.hadoop.ozone.om.request.s3.multipart;
 
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
@@ -173,8 +174,13 @@ public class TestS3MultipartUploadCommitPartRequest
         s3MultipartUploadCommitPartRequest.validateAndUpdateCache(ozoneManager,
             2L, ozoneManagerDoubleBufferHelper);
 
-    Assert.assertTrue(omClientResponse.getOMResponse().getStatus()
-        == OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND);
+    if (getBucketLayout() == BucketLayout.FILE_SYSTEM_OPTIMIZED) {
+      Assert.assertTrue(omClientResponse.getOMResponse().getStatus()
+          == OzoneManagerProtocolProtos.Status.DIRECTORY_NOT_FOUND);
+    } else {
+      Assert.assertTrue(omClientResponse.getOMResponse().getStatus()
+          == OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND);
+    }
 
   }
 
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
index 1f8f25e..43406b7 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ozone.OFSPath;
 import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.client.BucketArgs;
 import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneClient;
@@ -60,6 +61,7 @@ import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
@@ -78,10 +80,6 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes
     .BUCKET_ALREADY_EXISTS;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes
     .VOLUME_ALREADY_EXISTS;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes
-    .VOLUME_NOT_FOUND;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes
-    .BUCKET_NOT_FOUND;
 
 /**
  * Basic Implementation of the RootedOzoneFileSystem calls.
@@ -102,6 +100,7 @@ public class BasicRootedOzoneClientAdapterImpl
   private ReplicationConfig replicationConfig;
   private boolean securityEnabled;
   private int configuredDnPort;
+  private BucketLayout defaultOFSBucketLayout;
 
   /**
    * Create new OzoneClientAdapter implementation.
@@ -187,6 +186,11 @@ public class BasicRootedOzoneClientAdapterImpl
       this.configuredDnPort = conf.getInt(
           OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
           OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);
+
+      // Fetch the default bucket layout to use when OFS creates buckets implicitly.
+      this.defaultOFSBucketLayout = BucketLayout.fromString(
+          conf.get(OzoneConfigKeys.OZONE_CLIENT_TEST_OFS_DEFAULT_BUCKET_LAYOUT,
+              OzoneConfigKeys.OZONE_CLIENT_TEST_OFS_BUCKET_LAYOUT_DEFAULT));
     } finally {
       Thread.currentThread().setContextClassLoader(contextClassLoader);
     }
@@ -242,7 +246,10 @@ public class BasicRootedOzoneClientAdapterImpl
           OzoneVolume volume = proxy.getVolumeDetails(volumeStr);
           // Create the bucket
           try {
-            volume.createBucket(bucketStr);
+            // Buckets implicitly created by OFS use the configured default
+            // layout (FILE_SYSTEM_OPTIMIZED unless overridden for tests).
+            volume.createBucket(bucketStr,
+                BucketArgs.newBuilder().setBucketLayout(
+                    this.defaultOFSBucketLayout).build());
           } catch (OMException newBucEx) {
             // Ignore the case where another client created the bucket
             if (!newBucEx.getResult().equals(BUCKET_ALREADY_EXISTS)) {

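Finally, a hedged end-to-end sketch of the new behaviour (the service id, volume/bucket names and the
class are illustrative): creating a path through OFS on a bucket that does not exist yet makes
BasicRootedOzoneClientAdapterImpl create that bucket with the default layout, i.e.
FILE_SYSTEM_OPTIMIZED unless the test property overrides it.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;

    /** Illustrative sketch, not part of the commit. */
    public final class OfsImplicitBucketSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new OzoneConfiguration();
        try (FileSystem fs = FileSystem.get(URI.create("ofs://om/"), conf)) {
          // vol1 and bucket1 are created on demand; with this change bucket1
          // gets the FILE_SYSTEM_OPTIMIZED layout (or the test override, if set).
          fs.mkdirs(new Path("/vol1/bucket1/dir1"));
        }
      }
    }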