You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ozone.apache.org by we...@apache.org on 2023/04/26 17:30:44 UTC
[ozone] branch master updated: HDDS-7586. Allow user to create bucket with non-s3-naming-convention (#4524)
This is an automated email from the ASF dual-hosted git repository.
weichiu pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new 298e7eee13 HDDS-7586. Allow user to create bucket with non-s3-naming-convention (#4524)
298e7eee13 is described below
commit 298e7eee13509f89502c516bc11919ead7b34333
Author: DaveTeng0 <10...@users.noreply.github.com>
AuthorDate: Wed Apr 26 10:30:37 2023 -0700
HDDS-7586. Allow user to create bucket with non-s3-naming-convention (#4524)
Reviewed-by: Doroszlai, Attila <ad...@apache.org>
---
.../hadoop/hdds/scm/client/HddsClientUtils.java | 27 ++++++++--
.../common/src/main/resources/ozone-default.xml | 9 ++++
.../apache/hadoop/ozone/client/rpc/RpcClient.java | 32 ------------
.../main/java/org/apache/hadoop/ozone/OmUtils.java | 4 +-
.../org/apache/hadoop/ozone/om/OMConfigKeys.java | 6 +++
.../hadoop/fs/ozone/TestRootedOzoneFileSystem.java | 2 +-
.../client/rpc/TestOzoneRpcClientAbstract.java | 3 +-
.../org/apache/hadoop/ozone/om/OzoneManager.java | 12 ++++-
.../om/request/bucket/OMBucketCreateRequest.java | 3 +-
.../request/bucket/TestOMBucketCreateRequest.java | 57 +++++++++++++++++++++-
.../bucket/TestOMBucketCreateRequestWithFSO.java | 2 +-
11 files changed, 110 insertions(+), 47 deletions(-)
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
index 7a61f8eed5..f744737aef 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
@@ -127,17 +127,23 @@ public final class HddsClientUtils {
}
}
- private static boolean isSupportedCharacter(char c) {
+ private static boolean isSupportedCharacter(char c, boolean isStrictS3) {
+ // When isStrictS3 is set to false,
+ // Ozone allows the namespace to follow other volume/bucket naming conventions;
+ // for example, '_' is supported here,
+ // which is a valid character in POSIX-compliant systems, such as HDFS.
return (c == '.' || c == '-' ||
- Character.isLowerCase(c) || Character.isDigit(c));
+ Character.isLowerCase(c) || Character.isDigit(c)) ||
+ (c == '_' && !isStrictS3);
}
- private static void doCharacterChecks(char currChar, char prev) {
+ private static void doCharacterChecks(char currChar, char prev,
+ boolean isStrictS3) {
if (Character.isUpperCase(currChar)) {
throw new IllegalArgumentException(
"Bucket or Volume name does not support uppercase characters");
}
- if (!isSupportedCharacter(currChar)) {
+ if (!isSupportedCharacter(currChar, isStrictS3)) {
throw new IllegalArgumentException("Bucket or Volume name has an " +
"unsupported character : " + currChar);
}
@@ -163,6 +169,17 @@ public final class HddsClientUtils {
* @throws IllegalArgumentException
*/
public static void verifyResourceName(String resName) {
+ verifyResourceName(resName, true);
+ }
+
+ /**
+ * Verifies that the bucket name / volume name is a valid DNS name.
+ *
+ * @param resName Bucket or volume name to be validated
+ * @param isStrictS3 when false, additionally allow non-S3 characters such as '_'
+ * @throws IllegalArgumentException
+ */
+ public static void verifyResourceName(String resName, boolean isStrictS3) {
doNameChecks(resName);
@@ -174,7 +191,7 @@ public final class HddsClientUtils {
if (currChar != '.') {
isIPv4 = ((currChar >= '0') && (currChar <= '9')) && isIPv4;
}
- doCharacterChecks(currChar, prev);
+ doCharacterChecks(currChar, prev, isStrictS3);
prev = currChar;
}
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 87c54aa8d5..906ac12126 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -3531,6 +3531,15 @@
also allows interoperability between S3 and FS APIs. Keys written via S3 API with a "/" delimiter
will create intermediate directories.
</description>
+ </property>
+ <property>
+ <name>ozone.om.namespace.s3.strict</name>
+ <value>true</value>
+ <tag>OZONE, OM</tag>
+ <description>
+ The Ozone namespace follows the S3 naming rules by default.
+ However, this property allows the namespace to support non-S3-compatible characters.
+ </description>
</property>
<property>
<name>ozone.om.snapshot.cache.max.size</name>
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index 280afc05e8..d965fd7b90 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -620,7 +620,6 @@ public class RpcClient implements ClientProtocol {
String volumeName, String bucketName, BucketArgs bucketArgs)
throws IOException {
verifyVolumeName(volumeName);
- verifyBucketName(bucketName);
Preconditions.checkNotNull(bucketArgs);
verifyCountsQuota(bucketArgs.getQuotaInNamespace());
verifySpaceQuota(bucketArgs.getQuotaInBytes());
@@ -715,15 +714,6 @@ public class RpcClient implements ClientProtocol {
}
}
- private static void verifyBucketName(String bucketName) throws OMException {
- try {
- HddsClientUtils.verifyResourceName(bucketName);
- } catch (IllegalArgumentException e) {
- throw new OMException(e.getMessage(),
- OMException.ResultCodes.INVALID_BUCKET_NAME);
- }
- }
-
private static void verifyCountsQuota(long quota) throws OMException {
if (quota < OzoneConsts.QUOTA_RESET || quota == 0) {
throw new IllegalArgumentException("Invalid values for quota : " +
@@ -1081,7 +1071,6 @@ public class RpcClient implements ClientProtocol {
String volumeName, String bucketName, Boolean versioning)
throws IOException {
verifyVolumeName(volumeName);
- verifyBucketName(bucketName);
Preconditions.checkNotNull(versioning);
OmBucketArgs.Builder builder = OmBucketArgs.newBuilder();
builder.setVolumeName(volumeName)
@@ -1095,7 +1084,6 @@ public class RpcClient implements ClientProtocol {
String volumeName, String bucketName, StorageType storageType)
throws IOException {
verifyVolumeName(volumeName);
- verifyBucketName(bucketName);
Preconditions.checkNotNull(storageType);
OmBucketArgs.Builder builder = OmBucketArgs.newBuilder();
builder.setVolumeName(volumeName)
@@ -1107,7 +1095,6 @@ public class RpcClient implements ClientProtocol {
@Override
public void setBucketQuota(String volumeName, String bucketName,
long quotaInNamespace, long quotaInBytes) throws IOException {
- HddsClientUtils.verifyResourceName(bucketName);
HddsClientUtils.verifyResourceName(volumeName);
verifyCountsQuota(quotaInNamespace);
verifySpaceQuota(quotaInBytes);
@@ -1135,7 +1122,6 @@ public class RpcClient implements ClientProtocol {
String volumeName, String bucketName, ReplicationConfig replicationConfig)
throws IOException {
verifyVolumeName(volumeName);
- verifyBucketName(bucketName);
Preconditions.checkNotNull(replicationConfig);
if (omVersion
.compareTo(OzoneManagerVersion.ERASURE_CODED_STORAGE_SUPPORT) < 0) {
@@ -1158,7 +1144,6 @@ public class RpcClient implements ClientProtocol {
public void deleteBucket(
String volumeName, String bucketName) throws IOException {
verifyVolumeName(volumeName);
- verifyBucketName(bucketName);
ozoneManagerClient.deleteBucket(volumeName, bucketName);
}
@@ -1172,7 +1157,6 @@ public class RpcClient implements ClientProtocol {
public OzoneBucket getBucketDetails(
String volumeName, String bucketName) throws IOException {
verifyVolumeName(volumeName);
- verifyBucketName(bucketName);
OmBucketInfo bucketInfo =
ozoneManagerClient.getBucketInfo(volumeName, bucketName);
return OzoneBucket.newBuilder(conf, this)
@@ -1246,7 +1230,6 @@ public class RpcClient implements ClientProtocol {
Map<String, String> metadata)
throws IOException {
verifyVolumeName(volumeName);
- verifyBucketName(bucketName);
if (checkKeyNameEnabled) {
HddsClientUtils.verifyKeyName(keyName);
}
@@ -1304,7 +1287,6 @@ public class RpcClient implements ClientProtocol {
Map<String, String> metadata)
throws IOException {
verifyVolumeName(volumeName);
- verifyBucketName(bucketName);
if (checkKeyNameEnabled) {
HddsClientUtils.verifyKeyName(keyName);
}
@@ -1379,7 +1361,6 @@ public class RpcClient implements ClientProtocol {
String volumeName, String bucketName, String keyName)
throws IOException {
verifyVolumeName(volumeName);
- verifyBucketName(bucketName);
Preconditions.checkNotNull(keyName);
OmKeyInfo keyInfo = getKeyInfo(volumeName, bucketName, keyName, false);
return getInputStreamWithRetryFunction(keyInfo);
@@ -1395,7 +1376,6 @@ public class RpcClient implements ClientProtocol {
= new LinkedHashMap<>();
verifyVolumeName(volumeName);
- verifyBucketName(bucketName);
OmKeyInfo keyInfo = getKeyInfo(volumeName, bucketName, keyName, true);
List<OmKeyLocationInfo> keyLocationInfos
= keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly();
@@ -1464,7 +1444,6 @@ public class RpcClient implements ClientProtocol {
String volumeName, String bucketName, String keyName, boolean recursive)
throws IOException {
verifyVolumeName(volumeName);
- verifyBucketName(bucketName);
Preconditions.checkNotNull(keyName);
OmKeyArgs keyArgs = new OmKeyArgs.Builder()
.setVolumeName(volumeName)
@@ -1490,7 +1469,6 @@ public class RpcClient implements ClientProtocol {
public void renameKey(String volumeName, String bucketName,
String fromKeyName, String toKeyName) throws IOException {
verifyVolumeName(volumeName);
- verifyBucketName(bucketName);
if (checkKeyNameEnabled) {
HddsClientUtils.verifyKeyName(toKeyName);
}
@@ -1508,7 +1486,6 @@ public class RpcClient implements ClientProtocol {
public void renameKeys(String volumeName, String bucketName,
Map<String, String> keyMap) throws IOException {
verifyVolumeName(volumeName);
- verifyBucketName(bucketName);
HddsClientUtils.checkNotNull(keyMap);
OmRenameKeys omRenameKeys =
new OmRenameKeys(volumeName, bucketName, keyMap, null);
@@ -1597,7 +1574,6 @@ public class RpcClient implements ClientProtocol {
@NotNull
private OmKeyInfo getS3KeyInfo(
String bucketName, String keyName, boolean isHeadOp) throws IOException {
- verifyBucketName(bucketName);
Preconditions.checkNotNull(keyName);
OmKeyArgs keyArgs = new OmKeyArgs.Builder()
@@ -1673,7 +1649,6 @@ public class RpcClient implements ClientProtocol {
ReplicationConfig replicationConfig)
throws IOException {
verifyVolumeName(volumeName);
- verifyBucketName(bucketName);
HddsClientUtils.checkNotNull(keyName);
if (omVersion
.compareTo(OzoneManagerVersion.ERASURE_CODED_STORAGE_SUPPORT) < 0) {
@@ -1705,7 +1680,6 @@ public class RpcClient implements ClientProtocol {
String uploadID)
throws IOException {
verifyVolumeName(volumeName);
- verifyBucketName(bucketName);
if (checkKeyNameEnabled) {
HddsClientUtils.verifyKeyName(keyName);
}
@@ -1758,7 +1732,6 @@ public class RpcClient implements ClientProtocol {
String uploadID)
throws IOException {
verifyVolumeName(volumeName);
- verifyBucketName(bucketName);
if (checkKeyNameEnabled) {
HddsClientUtils.verifyKeyName(keyName);
}
@@ -1817,7 +1790,6 @@ public class RpcClient implements ClientProtocol {
String volumeName, String bucketName, String keyName, String uploadID,
Map<Integer, String> partsMap) throws IOException {
verifyVolumeName(volumeName);
- verifyBucketName(bucketName);
HddsClientUtils.checkNotNull(keyName, uploadID);
OmKeyArgs keyArgs = new OmKeyArgs.Builder()
@@ -1844,7 +1816,6 @@ public class RpcClient implements ClientProtocol {
public void abortMultipartUpload(String volumeName,
String bucketName, String keyName, String uploadID) throws IOException {
verifyVolumeName(volumeName);
- verifyBucketName(bucketName);
HddsClientUtils.checkNotNull(keyName, uploadID);
OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
.setVolumeName(volumeName)
@@ -1860,7 +1831,6 @@ public class RpcClient implements ClientProtocol {
String bucketName, String keyName, String uploadID, int partNumberMarker,
int maxParts) throws IOException {
verifyVolumeName(volumeName);
- verifyBucketName(bucketName);
HddsClientUtils.checkNotNull(uploadID);
Preconditions.checkArgument(maxParts > 0, "Max Parts Should be greater " +
"than zero");
@@ -2305,7 +2275,6 @@ public class RpcClient implements ClientProtocol {
public OzoneKey headObject(String volumeName, String bucketName,
String keyName) throws IOException {
verifyVolumeName(volumeName);
- verifyBucketName(bucketName);
Preconditions.checkNotNull(keyName);
OmKeyArgs keyArgs = new OmKeyArgs.Builder()
.setVolumeName(volumeName)
@@ -2347,7 +2316,6 @@ public class RpcClient implements ClientProtocol {
public boolean setBucketOwner(String volumeName, String bucketName,
String owner) throws IOException {
verifyVolumeName(volumeName);
- verifyBucketName(bucketName);
Preconditions.checkNotNull(owner);
OmBucketArgs.Builder builder = OmBucketArgs.newBuilder();
builder.setVolumeName(volumeName)
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
index 23abf76a7e..358f8ff3f9 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
@@ -499,10 +499,10 @@ public final class OmUtils {
/**
* Verify bucket name is a valid DNS name.
*/
- public static void validateBucketName(String bucketName)
+ public static void validateBucketName(String bucketName, boolean isStrictS3)
throws OMException {
try {
- HddsClientUtils.verifyResourceName(bucketName);
+ HddsClientUtils.verifyResourceName(bucketName, isStrictS3);
} catch (IllegalArgumentException e) {
throw new OMException("Invalid bucket name: " + bucketName,
OMException.ResultCodes.INVALID_BUCKET_NAME);
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
index 83d31fd227..caf932cedd 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
@@ -368,6 +368,12 @@ public final class OMConfigKeys {
"ozone.om.s3.grpc.server_enabled";
public static final boolean OZONE_OM_S3_GRPC_SERVER_ENABLED_DEFAULT =
true;
+
+ public static final String OZONE_OM_NAMESPACE_STRICT_S3 =
+ "ozone.om.namespace.s3.strict";
+ public static final boolean OZONE_OM_NAMESPACE_STRICT_S3_DEFAULT =
+ true;
+
/**
* Configuration properties for OMAdminProtcol service.
*/
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java
index 56f6db0e6e..1f8c86ec8c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java
@@ -2094,7 +2094,7 @@ public class TestRootedOzoneFileSystem {
String bucketNameLocal = RandomStringUtils.randomNumeric(5);
Path volume = new Path("/" + volumeNameLocal);
ofs.mkdirs(volume);
- LambdaTestUtils.intercept(OMException.class,
+ LambdaTestUtils.intercept(FileNotFoundException.class,
() -> ofs.getFileStatus(new Path(volume, bucketNameLocal)));
// Cleanup
ofs.delete(volume, true);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
index 464eac7f64..388c59c628 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
@@ -702,8 +702,7 @@ public abstract class TestOzoneRpcClientAbstract {
store.createVolume(volumeName);
OzoneVolume volume = store.getVolume(volumeName);
LambdaTestUtils.intercept(OMException.class,
- "Bucket or Volume name has an unsupported" +
- " character : #",
+ "Invalid bucket name: invalid#bucket",
() -> volume.createBucket(bucketName));
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index 14a7a0c815..1bdfb3140a 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -267,6 +267,8 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_METRICS_SAVE_INTE
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_METRICS_SAVE_INTERVAL_DEFAULT;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_S3_GPRC_SERVER_ENABLED;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_S3_GRPC_SERVER_ENABLED_DEFAULT;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_NAMESPACE_STRICT_S3;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_NAMESPACE_STRICT_S3_DEFAULT;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_USER_MAX_VOLUME;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_USER_MAX_VOLUME_DEFAULT;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_VOLUME_LISTALL_ALLOWED;
@@ -416,7 +418,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
private ReplicationConfig defaultReplicationConfig;
private boolean isS3MultiTenancyEnabled;
-
+ private boolean isStrictS3;
private ExitManager exitManager;
private OzoneManagerPrepareState prepareState;
@@ -531,6 +533,10 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
HDDS_BLOCK_TOKEN_ENABLED_DEFAULT);
this.useRatisForReplication = conf.getBoolean(
DFS_CONTAINER_RATIS_ENABLED_KEY, DFS_CONTAINER_RATIS_ENABLED_DEFAULT);
+ this.isStrictS3 = conf.getBoolean(
+ OZONE_OM_NAMESPACE_STRICT_S3,
+ OZONE_OM_NAMESPACE_STRICT_S3_DEFAULT);
+
// TODO: This is a temporary check. Once fully implemented, all OM state
// change should go through Ratis - be it standalone (for non-HA) or
// replicated (for HA).
@@ -861,6 +867,10 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
return isS3MultiTenancyEnabled;
}
+ public boolean isStrictS3() {
+ return isStrictS3;
+ }
+
/**
* Throws OMException FEATURE_NOT_ENABLED if S3 multi-tenancy is not enabled.
*/
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java
index 3502616a92..49c2258927 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java
@@ -93,7 +93,8 @@ public class OMBucketCreateRequest extends OMClientRequest {
getOmRequest().getCreateBucketRequest();
BucketInfo bucketInfo = createBucketRequest.getBucketInfo();
// Verify resource name
- OmUtils.validateBucketName(bucketInfo.getBucketName());
+ OmUtils.validateBucketName(bucketInfo.getBucketName(),
+ ozoneManager.isStrictS3());
// Get KMS provider.
KeyProviderCryptoExtension kmsProvider =
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java
index f860b559a0..47b729d478 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.util.Time;
import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.newBucketInfoBuilder;
import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.newCreateBucketRequest;
import static org.junit.Assert.assertThrows;
+import static org.mockito.Mockito.when;
/**
* Tests OMBucketCreateRequest class, which handles CreateBucket request.
@@ -240,7 +241,59 @@ public class TestOMBucketCreateRequest extends TestBucketRequest {
OMException.ResultCodes.QUOTA_ERROR.toString());
}
- private OMBucketCreateRequest doPreExecute(String volumeName,
+ @Test
+ public void
+ testAcceptS3CompliantBucketNameCreationRegardlessOfStrictS3Setting()
+ throws Exception {
+ String volumeName = UUID.randomUUID().toString();
+ boolean[] omStrictS3Configs = {true, false};
+ for (boolean isStrictS3 : omStrictS3Configs) {
+ when(ozoneManager.isStrictS3()).thenReturn(isStrictS3);
+ String bucketName = UUID.randomUUID().toString();
+ acceptBucketCreationHelper(volumeName, bucketName);
+ }
+ }
+
+ @Test
+ public void testRejectNonS3CompliantBucketNameCreationWithStrictS3True()
+ throws Exception {
+ String volumeName = UUID.randomUUID().toString();
+ String[] nonS3CompliantBucketName =
+ {"bucket_underscore", "_bucket___multi_underscore_", "bucket_"};
+ when(ozoneManager.isStrictS3()).thenReturn(true);
+ for (String bucketName : nonS3CompliantBucketName) {
+ rejectBucketCreationHelper(volumeName, bucketName);
+ }
+ }
+
+ @Test
+ public void testAcceptNonS3CompliantBucketNameCreationWithStrictS3False()
+ throws Exception {
+ String volumeName = UUID.randomUUID().toString();
+ String[] nonS3CompliantBucketName =
+ {"bucket_underscore", "_bucket___multi_underscore_", "bucket_"};
+ when(ozoneManager.isStrictS3()).thenReturn(false);
+ for (String bucketName : nonS3CompliantBucketName) {
+ acceptBucketCreationHelper(volumeName, bucketName);
+ }
+ }
+
+ private void acceptBucketCreationHelper(String volumeName, String bucketName)
+ throws Exception {
+ OMBucketCreateRequest omBucketCreateRequest =
+ doPreExecute(volumeName, bucketName);
+ doValidateAndUpdateCache(volumeName, bucketName,
+ omBucketCreateRequest.getOmRequest());
+ }
+
+ private void rejectBucketCreationHelper(String volumeName,
+ String bucketName) {
+ Throwable e = assertThrows(OMException.class, () ->
+ doPreExecute(volumeName, bucketName));
+ Assert.assertEquals(e.getMessage(), "Invalid bucket name: " + bucketName);
+ }
+
+ protected OMBucketCreateRequest doPreExecute(String volumeName,
String bucketName) throws Exception {
return doPreExecute(newBucketInfoBuilder(bucketName, volumeName));
}
@@ -261,7 +314,7 @@ public class TestOMBucketCreateRequest extends TestBucketRequest {
return new OMBucketCreateRequest(modifiedRequest);
}
- private void doValidateAndUpdateCache(String volumeName, String bucketName,
+ protected void doValidateAndUpdateCache(String volumeName, String bucketName,
OMRequest modifiedRequest) throws Exception {
String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequestWithFSO.java
index 230cb73cbc..aa4fcdf58a 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequestWithFSO.java
@@ -130,7 +130,7 @@ public class TestOMBucketCreateRequestWithFSO
return new OMBucketCreateRequest(modifiedRequest);
}
- private void doValidateAndUpdateCache(String volumeName, String bucketName,
+ protected void doValidateAndUpdateCache(String volumeName, String bucketName,
OMRequest modifiedRequest) throws Exception {
String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org