You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ozone.apache.org by sa...@apache.org on 2020/07/22 13:00:40 UTC
[hadoop-ozone] 30/39: HDDS-3955. Unable to list intermediate paths
on keys created using S3G. (#1196)
This is an automated email from the ASF dual-hosted git repository.
sammichen pushed a commit to branch ozone-0.6.0
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git
commit ba8966d0b109d67b8ac2c9b0604da02774482808
Author: Bharat Viswanadham <bh...@apache.org>
AuthorDate: Sat Jul 18 10:15:45 2020 -0700
HDDS-3955. Unable to list intermediate paths on keys created using S3G. (#1196)
(cherry picked from commit 715aed2d158d2c6708af8b6a9b8270766103ee52)
---
.../common/src/main/resources/ozone-default.xml | 15 ++
.../org/apache/hadoop/ozone/om/OMConfigKeys.java | 7 +
.../fs/ozone/TestOzoneFSWithObjectStoreCreate.java | 132 +++++++++++++++
.../org/apache/hadoop/ozone/om/OzoneManager.java | 8 +
.../hadoop/ozone/om/request/OMClientRequest.java | 76 +++++++++
.../ozone/om/request/file/OMFileRequest.java | 2 +-
.../om/request/key/OMAllocateBlockRequest.java | 6 +-
.../ozone/om/request/key/OMKeyCommitRequest.java | 4 +-
.../ozone/om/request/key/OMKeyCreateRequest.java | 68 +++++++-
.../ozone/om/request/key/OMKeyDeleteRequest.java | 4 +-
.../ozone/om/request/key/OMKeyRenameRequest.java | 17 +-
.../S3InitiateMultipartUploadRequest.java | 14 +-
.../multipart/S3MultipartUploadAbortRequest.java | 5 +-
.../S3MultipartUploadCommitPartRequest.java | 9 +-
.../S3MultipartUploadCompleteRequest.java | 5 +-
.../ozone/om/request/TestNormalizePaths.java | 109 ++++++++++++
.../om/request/key/TestOMKeyCreateRequest.java | 182 ++++++++++++++++++++-
.../TestS3InitiateMultipartUploadRequest.java | 2 +-
.../s3/multipart/TestS3MultipartRequest.java | 5 +-
.../TestS3MultipartUploadCommitPartRequest.java | 2 +-
20 files changed, 643 insertions(+), 29 deletions(-)
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index d8fc591..b474ac3 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -2478,4 +2478,19 @@
rules in Amazon S3's object key naming guide.
</description>
</property>
+
+ <property>
+ <name>ozone.om.enable.filesystem.paths</name>
+ <tag>OZONE, OM</tag>
+ <value>false</value>
+ <description>If true, key names will be interpreted as file system paths.
+ "/" will be treated as a special character and paths will be normalized
+ and must follow Unix filesystem path naming conventions. This flag will
+ be helpful when objects created by S3G need to be accessed using OFS/O3Fs.
+ If false, it falls back to the default behavior of Key/MPU create
+ requests: key paths are not normalized, intermediate directories
+ are not created, and no file checks are performed to enforce
+ filesystem semantics.
+ </description>
+ </property>
</configuration>
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
index 4f512a5..f16679a 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
@@ -239,4 +239,11 @@ public final class OMConfigKeys {
"ozone.om.keyname.character.check.enabled";
public static final boolean OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_DEFAULT =
false;
+
+ // This config needs to be enabled when S3G-created objects are
+ // accessed via the FileSystem API.
+ public static final String OZONE_OM_ENABLE_FILESYSTEM_PATHS =
+ "ozone.om.enable.filesystem.paths";
+ public static final boolean OZONE_OM_ENABLE_FILESYSTEM_PATHS_DEFAULT =
+ false;
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java
new file mode 100644
index 0000000..b872a3d
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java
@@ -0,0 +1,132 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.ozone;
+
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.TestDataUtil;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+
+import java.net.URI;
+import java.util.Arrays;
+
+import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_SCHEME;
+
+/**
+ * Class tests create with object store and getFileStatus.
+ */
+public class TestOzoneFSWithObjectStoreCreate {
+
+ @Rule
+ public Timeout timeout = new Timeout(300000);
+
+ private String rootPath;
+
+ private MiniOzoneCluster cluster = null;
+
+ private OzoneFileSystem o3fs;
+
+ private String volumeName;
+
+ private String bucketName;
+
+
+ @Before
+ public void init() throws Exception {
+ volumeName = RandomStringUtils.randomAlphabetic(10).toLowerCase();
+ bucketName = RandomStringUtils.randomAlphabetic(10).toLowerCase();
+
+ OzoneConfiguration conf = new OzoneConfiguration();
+
+ conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true);
+ cluster = MiniOzoneCluster.newBuilder(conf)
+ .setNumDatanodes(3)
+ .build();
+ cluster.waitForClusterToBeReady();
+
+ // create a volume and a bucket to be used by OzoneFileSystem
+ OzoneBucket bucket =
+ TestDataUtil.createVolumeAndBucket(cluster, volumeName, bucketName);
+
+ rootPath = String.format("%s://%s.%s/", OZONE_URI_SCHEME, bucketName,
+ volumeName);
+ o3fs = (OzoneFileSystem) FileSystem.get(new URI(rootPath), conf);
+ }
+
+
+ @Test
+ public void test() throws Exception {
+
+ OzoneVolume ozoneVolume =
+ cluster.getRpcClient().getObjectStore().getVolume(volumeName);
+
+ OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName);
+
+ String key1 = "///dir1/dir2/file1";
+ String key2 = "///dir1/dir2/file2";
+ int length = 10;
+ OzoneOutputStream ozoneOutputStream = ozoneBucket.createKey(key1, length);
+ byte[] b = new byte[10];
+ Arrays.fill(b, (byte)96);
+ ozoneOutputStream.write(b);
+ ozoneOutputStream.close();
+
+ ozoneOutputStream = ozoneBucket.createKey(key2, length);
+ ozoneOutputStream.write(b);
+ ozoneOutputStream.close();
+
+ // Keep the leading "/" here; otherwise the Path is treated as a
+ // relative path and the workingDir gets prepended.
+ key1 = "///dir1/dir2/file1";
+ Path p = new Path(key1);
+ Assert.assertTrue(o3fs.getFileStatus(p).isFile());
+
+ p = p.getParent();
+ checkAncestors(p);
+
+
+ key2 = "///dir1/dir2/file2";
+ p = new Path(key2);
+ Assert.assertTrue(o3fs.getFileStatus(p).isFile());
+ checkAncestors(p);
+
+ }
+
+ private void checkAncestors(Path p) throws Exception {
+ p = p.getParent();
+ while(p.getParent() != null) {
+ FileStatus fileStatus = o3fs.getFileStatus(p);
+ Assert.assertTrue(fileStatus.isDirectory());
+ p = p.getParent();
+ }
+ }
+
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index 8a49fa7..43ae998 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -200,6 +200,8 @@ import static org.apache.hadoop.ozone.OzoneConsts.OM_METRICS_FILE;
import static org.apache.hadoop.ozone.OzoneConsts.OM_METRICS_TEMP_FILE;
import static org.apache.hadoop.ozone.OzoneConsts.RPC_PORT;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS_DEFAULT;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HANDLER_COUNT_DEFAULT;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HANDLER_COUNT_KEY;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_KEYTAB_FILE_KEY;
@@ -3494,4 +3496,10 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
void setExitManagerForTesting(ExitManager exitManagerForTesting) {
this.exitManager = exitManagerForTesting;
}
+
+
+ public boolean getEnableFileSystemPaths() {
+ return configuration.getBoolean(OZONE_OM_ENABLE_FILESYSTEM_PATHS,
+ OZONE_OM_ENABLE_FILESYSTEM_PATHS_DEFAULT);
+ }
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java
index 4ced9fd..0fa9ca1 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.ozone.om.request;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ozone.OzoneConsts;
@@ -38,13 +39,18 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRespo
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
import org.apache.hadoop.ozone.security.acl.OzoneObj;
import org.apache.hadoop.security.UserGroupInformation;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import javax.annotation.Nonnull;
import java.io.IOException;
import java.net.InetAddress;
+import java.nio.file.Paths;
import java.util.LinkedHashMap;
import java.util.Map;
+import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_KEY_NAME;
/**
* OMClientRequest provides methods which every write OM request should
@@ -52,6 +58,8 @@ import java.util.Map;
*/
public abstract class OMClientRequest implements RequestAuditor {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(OMClientRequest.class);
private OMRequest omRequest;
/**
@@ -265,4 +273,72 @@ public abstract class OMClientRequest implements RequestAuditor {
auditMap.put(OzoneConsts.VOLUME, volume);
return auditMap;
}
+
+
+ public static String validateAndNormalizeKey(boolean enableFileSystemPaths,
+ String keyName) throws OMException {
+ if (enableFileSystemPaths) {
+ return validateAndNormalizeKey(keyName);
+ } else {
+ return keyName;
+ }
+ }
+
+ @SuppressFBWarnings("DMI_HARDCODED_ABSOLUTE_FILENAME")
+ public static String validateAndNormalizeKey(String keyName)
+ throws OMException {
+ String normalizedKeyName;
+ if (keyName.startsWith(OM_KEY_PREFIX)) {
+ normalizedKeyName = Paths.get(keyName).toUri().normalize().getPath();
+ } else {
+ normalizedKeyName = Paths.get(OM_KEY_PREFIX, keyName).toUri()
+ .normalize().getPath();
+ }
+ if (!keyName.equals(normalizedKeyName)) {
+ LOG.debug("Normalized key {} to {} ", keyName,
+ normalizedKeyName.substring(1));
+ }
+ return isValidKeyPath(normalizedKeyName.substring(1));
+ }
+
+ /**
+ * Checks whether the pathname is valid. Rejects key names containing
+ * ":", ".", "..", "//", or an empty component; throws OMException for
+ * any of these, otherwise returns the path unchanged.
+ */
+ private static String isValidKeyPath(String path) throws OMException {
+ boolean isValid = true;
+
+ // If keyName is empty string throw error.
+ if (path.length() == 0) {
+ throw new OMException("Invalid KeyPath, empty keyName" + path,
+ INVALID_KEY_NAME);
+ } else if(path.startsWith("/")) {
+ isValid = false;
+ } else {
+ // Check for ".." "." ":" "/"
+ String[] components = StringUtils.split(path, '/');
+ for (int i = 0; i < components.length; i++) {
+ String element = components[i];
+ if (element.equals(".") ||
+ (element.contains(":")) ||
+ (element.contains("/") || element.equals(".."))) {
+ isValid = false;
+ break;
+ }
+
+ // The string may end with a /, but not have
+ // "//" in the middle.
+ if (element.isEmpty() && i != components.length - 1) {
+ isValid = false;
+ }
+ }
+ }
+
+ if (isValid) {
+ return path;
+ } else {
+ throw new OMException("Invalid KeyPath " + path, INVALID_KEY_NAME);
+ }
+ }
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
index 3367ec7..21ffff8 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
@@ -197,7 +197,7 @@ public final class OMFileRequest {
/**
* Return codes used by verifyFilesInPath method.
*/
- enum OMDirectoryResult {
+ public enum OMDirectoryResult {
// In below examples path is assumed as "a/b/c" in volume volume1 and
// bucket b1.
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java
index 9e82888..94d700f 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java
@@ -110,9 +110,11 @@ public class OMAllocateBlockRequest extends OMKeyRequest {
ozoneManager.getPreallocateBlocksMax(),
ozoneManager.isGrpcBlockTokenEnabled(), ozoneManager.getOMNodeId());
- // Set modification time
+ // Set modification time and normalize key if required.
KeyArgs.Builder newKeyArgs = keyArgs.toBuilder()
- .setModificationTime(Time.now());
+ .setModificationTime(Time.now())
+ .setKeyName(validateAndNormalizeKey(
+ ozoneManager.getEnableFileSystemPaths(), keyArgs.getKeyName()));
AllocateBlockRequest.Builder newAllocatedBlockRequest =
AllocateBlockRequest.newBuilder()
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
index eb3769b..dccb93b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
@@ -91,7 +91,9 @@ public class OMKeyCommitRequest extends OMKeyRequest {
}
KeyArgs.Builder newKeyArgs =
- keyArgs.toBuilder().setModificationTime(Time.now());
+ keyArgs.toBuilder().setModificationTime(Time.now())
+ .setKeyName(validateAndNormalizeKey(
+ ozoneManager.getEnableFileSystemPaths(), keyArgs.getKeyName()));
return getOmRequest().toBuilder()
.setCommitKeyRequest(commitKeyRequest.toBuilder()
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java
index f7f08dc..8927c19 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java
@@ -19,6 +19,7 @@
package org.apache.hadoop.ozone.om.request.key;
import java.io.IOException;
+import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
@@ -27,7 +28,11 @@ import java.util.stream.Collectors;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import org.apache.hadoop.ozone.OmUtils;
+import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequest;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -63,7 +68,10 @@ import org.apache.hadoop.ozone.security.acl.OzoneObj;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.hdds.utils.UniqueId;
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE;
import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS;
+import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH;
/**
* Handles CreateKey request.
@@ -91,6 +99,20 @@ public class OMKeyCreateRequest extends OMKeyRequest {
if(checkKeyNameEnabled){
OmUtils.validateKeyName(keyArgs.getKeyName());
}
+
+ String keyPath = keyArgs.getKeyName();
+ if (ozoneManager.getEnableFileSystemPaths()) {
+ // If enabled, disallow keys with trailing /. As in fs semantics
+ // directories end with trailing /.
+ keyPath = validateAndNormalizeKey(
+ ozoneManager.getEnableFileSystemPaths(), keyPath);
+ if (keyPath.endsWith("/")) {
+ throw new OMException("Invalid KeyPath, key names with trailing / " +
+ "are not allowed." + keyPath,
+ OMException.ResultCodes.INVALID_KEY_NAME);
+ }
+ }
+
// We cannot allocate block for multipart upload part when
// createMultipartKey is called, as we will not know type and factor with
// which initiateMultipartUpload has started for this key. When
@@ -131,7 +153,7 @@ public class OMKeyCreateRequest extends OMKeyRequest {
// As for a client for the first time this can be executed on any OM,
// till leader is identified.
- List< OmKeyLocationInfo > omKeyLocationInfoList =
+ List<OmKeyLocationInfo> omKeyLocationInfoList =
allocateBlock(ozoneManager.getScmClient(),
ozoneManager.getBlockTokenSecretManager(), type, factor,
new ExcludeList(), requestedSize, scmBlockSize,
@@ -149,7 +171,10 @@ public class OMKeyCreateRequest extends OMKeyRequest {
newKeyArgs = keyArgs.toBuilder().setModificationTime(Time.now());
}
+ newKeyArgs.setKeyName(keyPath);
+
generateRequiredEncryptionInfo(keyArgs, newKeyArgs, ozoneManager);
+
newCreateKeyRequest =
createKeyRequest.toBuilder().setKeyArgs(newKeyArgs)
.setClientID(UniqueId.next());
@@ -160,6 +185,7 @@ public class OMKeyCreateRequest extends OMKeyRequest {
}
@Override
+ @SuppressWarnings("methodlength")
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
CreateKeyRequest createKeyRequest = getOmRequest().getCreateKeyRequest();
@@ -184,6 +210,7 @@ public class OMKeyCreateRequest extends OMKeyRequest {
getOmRequest());
IOException exception = null;
Result result = null;
+ List<OmKeyInfo> missingParentInfos = null;
try {
keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
volumeName = keyArgs.getVolumeName();
@@ -209,8 +236,41 @@ public class OMKeyCreateRequest extends OMKeyRequest {
OmBucketInfo bucketInfo = omMetadataManager.getBucketTable().get(
omMetadataManager.getBucketKey(volumeName, bucketName));
+ // If FILE_EXISTS we just override like how we used to do for Key Create.
+ List< OzoneAcl > inheritAcls;
+ if (ozoneManager.getEnableFileSystemPaths()) {
+ OMFileRequest.OMPathInfo pathInfo =
+ OMFileRequest.verifyFilesInPath(omMetadataManager, volumeName,
+ bucketName, keyName, Paths.get(keyName));
+ OMFileRequest.OMDirectoryResult omDirectoryResult =
+ pathInfo.getDirectoryResult();
+ inheritAcls = pathInfo.getAcls();
+
+ // Check if a file or directory exists with same key name.
+ if (omDirectoryResult == DIRECTORY_EXISTS) {
+ throw new OMException("Cannot write to " +
+ "directory. createIntermediateDirs behavior is enabled and " +
+ "hence / has special interpretation: " + keyName, NOT_A_FILE);
+ } else
+ if (omDirectoryResult == FILE_EXISTS_IN_GIVENPATH) {
+ throw new OMException("Can not create file: " + keyName +
+ " as there is already file in the given path", NOT_A_FILE);
+ }
+
+ missingParentInfos = OMDirectoryCreateRequest
+ .getAllParentInfo(ozoneManager, keyArgs,
+ pathInfo.getMissingParents(), inheritAcls, trxnLogIndex);
+
+ // Add cache entries for the prefix directories.
+ // Skip adding for the file key itself, until Key Commit.
+ OMFileRequest.addKeyTableCacheEntries(omMetadataManager, volumeName,
+ bucketName, Optional.absent(), Optional.of(missingParentInfos),
+ trxnLogIndex);
+
+ }
+
omKeyInfo = prepareKeyInfo(omMetadataManager, keyArgs, dbKeyInfo,
- keyArgs.getDataSize(), locations, getFileEncryptionInfo(keyArgs),
+ keyArgs.getDataSize(), locations, getFileEncryptionInfo(keyArgs),
ozoneManager.getPrefixManager(), bucketInfo, trxnLogIndex,
ozoneManager.isRatisEnabled());
@@ -238,7 +298,7 @@ public class OMKeyCreateRequest extends OMKeyRequest {
.setOpenVersion(openVersion).build())
.setCmdType(Type.CreateKey);
omClientResponse = new OMKeyCreateResponse(omResponse.build(),
- omKeyInfo, null, clientID);
+ omKeyInfo, missingParentInfos, clientID);
result = Result.SUCCESS;
} catch (IOException ex) {
@@ -269,7 +329,7 @@ public class OMKeyCreateRequest extends OMKeyRequest {
break;
case FAILURE:
LOG.error("Key creation failed. Volume:{}, Bucket:{}, Key{}. " +
- "Exception:{}", volumeName, bucketName, keyName, exception);
+ "Exception:{}", volumeName, bucketName, keyName, exception);
break;
default:
LOG.error("Unrecognized Result for OMKeyCreateRequest: {}",
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java
index 8b75417..4d8562c 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java
@@ -75,7 +75,9 @@ public class OMKeyDeleteRequest extends OMKeyRequest {
OzoneManagerProtocolProtos.KeyArgs keyArgs = deleteKeyRequest.getKeyArgs();
OzoneManagerProtocolProtos.KeyArgs.Builder newKeyArgs =
- keyArgs.toBuilder().setModificationTime(Time.now());
+ keyArgs.toBuilder().setModificationTime(Time.now())
+ .setKeyName(validateAndNormalizeKey(
+ ozoneManager.getEnableFileSystemPaths(), keyArgs.getKeyName()));
return getOmRequest().toBuilder()
.setDeleteKeyRequest(deleteKeyRequest.toBuilder()
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java
index 91db347..e6e9839 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java
@@ -85,13 +85,22 @@ public class OMKeyRenameRequest extends OMKeyRequest {
OmUtils.validateKeyName(renameKeyRequest.getToKeyName());
}
- // Set modification time.
- KeyArgs.Builder newKeyArgs = renameKeyRequest.getKeyArgs().toBuilder()
- .setModificationTime(Time.now());
+ KeyArgs renameKeyArgs = renameKeyRequest.getKeyArgs();
+
+ // Set modification time and normalize key if needed.
+ KeyArgs.Builder newKeyArgs = renameKeyArgs.toBuilder()
+ .setModificationTime(Time.now())
+ .setKeyName(validateAndNormalizeKey(
+ ozoneManager.getEnableFileSystemPaths(),
+ renameKeyArgs.getKeyName()));
return getOmRequest().toBuilder()
.setRenameKeyRequest(renameKeyRequest.toBuilder()
- .setKeyArgs(newKeyArgs)).setUserInfo(getUserInfo()).build();
+ .setKeyArgs(newKeyArgs)
+ .setToKeyName(validateAndNormalizeKey(
+ ozoneManager.getEnableFileSystemPaths(),
+ renameKeyRequest.getToKeyName())))
+ .setUserInfo(getUserInfo()).build();
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java
index aa96ba9..f7951a2 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java
@@ -33,7 +33,7 @@ import org.apache.hadoop.ozone.om.request.key.OMKeyRequest;
import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.om.response.s3.multipart.S3InitiateMultipartUploadResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartInfoInitiateRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartInfoInitiateResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
@@ -67,15 +67,17 @@ public class S3InitiateMultipartUploadRequest extends OMKeyRequest {
}
@Override
- public OMRequest preExecute(OzoneManager ozoneManager) {
+ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
MultipartInfoInitiateRequest multipartInfoInitiateRequest =
getOmRequest().getInitiateMultiPartUploadRequest();
Preconditions.checkNotNull(multipartInfoInitiateRequest);
- OzoneManagerProtocolProtos.KeyArgs.Builder newKeyArgs =
- multipartInfoInitiateRequest.getKeyArgs().toBuilder()
+ KeyArgs keyArgs = multipartInfoInitiateRequest.getKeyArgs();
+ KeyArgs.Builder newKeyArgs = keyArgs.toBuilder()
.setMultipartUploadID(UUID.randomUUID().toString() + "-" +
- UniqueId.next()).setModificationTime(Time.now());
+ UniqueId.next()).setModificationTime(Time.now())
+ .setKeyName(validateAndNormalizeKey(
+ ozoneManager.getEnableFileSystemPaths(), keyArgs.getKeyName()));
return getOmRequest().toBuilder()
.setUserInfo(getUserInfo())
@@ -92,7 +94,7 @@ public class S3InitiateMultipartUploadRequest extends OMKeyRequest {
MultipartInfoInitiateRequest multipartInfoInitiateRequest =
getOmRequest().getInitiateMultiPartUploadRequest();
- OzoneManagerProtocolProtos.KeyArgs keyArgs =
+ KeyArgs keyArgs =
multipartInfoInitiateRequest.getKeyArgs();
Preconditions.checkNotNull(keyArgs.getMultipartUploadID());
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java
index 0726fe4..c0ef8b3 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java
@@ -73,7 +73,10 @@ public class S3MultipartUploadAbortRequest extends OMKeyRequest {
return getOmRequest().toBuilder().setAbortMultiPartUploadRequest(
getOmRequest().getAbortMultiPartUploadRequest().toBuilder()
- .setKeyArgs(keyArgs.toBuilder().setModificationTime(Time.now())))
+ .setKeyArgs(keyArgs.toBuilder().setModificationTime(Time.now())
+ .setKeyName(validateAndNormalizeKey(
+ ozoneManager.getEnableFileSystemPaths(),
+ keyArgs.getKeyName()))))
.setUserInfo(getUserInfo()).build();
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
index 283a22d..1e29d5f 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
@@ -70,14 +70,17 @@ public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
}
@Override
- public OMRequest preExecute(OzoneManager ozoneManager) {
+ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
MultipartCommitUploadPartRequest multipartCommitUploadPartRequest =
getOmRequest().getCommitMultiPartUploadRequest();
+ KeyArgs keyArgs = multipartCommitUploadPartRequest.getKeyArgs();
return getOmRequest().toBuilder().setCommitMultiPartUploadRequest(
multipartCommitUploadPartRequest.toBuilder()
- .setKeyArgs(multipartCommitUploadPartRequest.getKeyArgs()
- .toBuilder().setModificationTime(Time.now())))
+ .setKeyArgs(keyArgs.toBuilder().setModificationTime(Time.now())
+ .setKeyName(validateAndNormalizeKey(
+ ozoneManager.getEnableFileSystemPaths(),
+ keyArgs.getKeyName()))))
.setUserInfo(getUserInfo()).build();
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
index a9aefa0..83cc28b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
@@ -81,7 +81,10 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest {
return getOmRequest().toBuilder()
.setCompleteMultiPartUploadRequest(multipartUploadCompleteRequest
.toBuilder().setKeyArgs(keyArgs.toBuilder()
- .setModificationTime(Time.now())))
+ .setModificationTime(Time.now())
+ .setKeyName(validateAndNormalizeKey(
+ ozoneManager.getEnableFileSystemPaths(),
+ keyArgs.getKeyName()))))
.setUserInfo(getUserInfo()).build();
}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestNormalizePaths.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestNormalizePaths.java
new file mode 100644
index 0000000..6137444
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestNormalizePaths.java
@@ -0,0 +1,109 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.ozone.om.request;
+
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.junit.Assert;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+import static org.apache.hadoop.ozone.om.request.OMClientRequest.validateAndNormalizeKey;
+import static org.junit.Assert.fail;
+
+/**
+ * Class to test normalize paths.
+ */
+public class TestNormalizePaths {
+
+ @Rule
+ public ExpectedException exceptionRule = ExpectedException.none();
+
+ @Test
+ public void testNormalizePathsEnabled() throws Exception {
+
+ Assert.assertEquals("a/b/c/d",
+ validateAndNormalizeKey(true, "a/b/c/d"));
+ Assert.assertEquals("a/b/c/d",
+ validateAndNormalizeKey(true, "/a/b/c/d"));
+ Assert.assertEquals("a/b/c/d",
+ validateAndNormalizeKey(true, "////a/b/c/d"));
+ Assert.assertEquals("a/b/c/d",
+ validateAndNormalizeKey(true, "////a/b/////c/d"));
+ Assert.assertEquals("a/b/c/...../d",
+ validateAndNormalizeKey(true, "////a/b/////c/...../d"));
+ Assert.assertEquals("a/b/d",
+ validateAndNormalizeKey(true, "/a/b/c/../d"));
+ Assert.assertEquals("a",
+ validateAndNormalizeKey(true, "a"));
+ Assert.assertEquals("a/b",
+ validateAndNormalizeKey(true, "/a/./b"));
+ Assert.assertEquals("a/b",
+ validateAndNormalizeKey(true, ".//a/./b"));
+ Assert.assertEquals("a/",
+ validateAndNormalizeKey(true, "/a/."));
+ Assert.assertEquals("b/c",
+ validateAndNormalizeKey(true, "//./b/c/"));
+ Assert.assertEquals("a/b/c/d",
+ validateAndNormalizeKey(true, "a/b/c/d/"));
+ Assert.assertEquals("a/b/c/...../d",
+ validateAndNormalizeKey(true, "////a/b/////c/...../d/"));
+ }
+
+ @Test
+ public void testNormalizeKeyInvalidPaths() throws OMException {
+ checkInvalidPath("/a/b/c/../../../../../d");
+ checkInvalidPath("../a/b/c/");
+ checkInvalidPath("/../..a/b/c/");
+ checkInvalidPath("//");
+ checkInvalidPath("/////");
+ checkInvalidPath("");
+ checkInvalidPath("/");
+ checkInvalidPath("/:/:");
+ }
+
+ private void checkInvalidPath(String keyName) {
+ try {
+ validateAndNormalizeKey(true, keyName);
+ fail("checkInvalidPath failed for path " + keyName);
+ } catch (OMException ex) {
+ Assert.assertTrue(ex.getMessage().contains("Invalid KeyPath"));
+ }
+ }
+
+
+
+ @Test
+ public void testNormalizePathsDisable() throws OMException {
+
+ Assert.assertEquals("/a/b/c/d",
+ validateAndNormalizeKey(false, "/a/b/c/d"));
+ Assert.assertEquals("////a/b/c/d",
+ validateAndNormalizeKey(false, "////a/b/c/d"));
+ Assert.assertEquals("////a/b/////c/d",
+ validateAndNormalizeKey(false, "////a/b/////c/d"));
+ Assert.assertEquals("////a/b/////c/...../d",
+ validateAndNormalizeKey(false, "////a/b/////c/...../d"));
+ Assert.assertEquals("/a/b/c/../d",
+ validateAndNormalizeKey(false, "/a/b/c/../d"));
+ Assert.assertEquals("/a/b/c/../../d",
+ validateAndNormalizeKey(false, "/a/b/c/../../d"));
+ }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java
index b26505b..2b8ffce 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java
@@ -18,10 +18,15 @@
package org.apache.hadoop.ozone.om.request.key;
+import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
import java.util.List;
import java.util.UUID;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.junit.Assert;
import org.junit.Test;
@@ -37,7 +42,14 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.OMRequest;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.RATIS;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS;
import static org.apache.hadoop.ozone.om.request.TestOMRequestUtils.addVolumeAndBucketToDB;
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.NOT_A_FILE;
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK;
+import static org.junit.Assert.fail;
+import static org.mockito.Mockito.when;
/**
* Tests OMCreateKeyRequest class.
@@ -82,7 +94,7 @@ public class TestOMKeyCreateRequest extends TestOMKeyRequest {
omKeyCreateRequest.validateAndUpdateCache(ozoneManager, 100L,
ozoneManagerDoubleBufferHelper);
- Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
+ Assert.assertEquals(OK,
omKeyCreateResponse.getOMResponse().getStatus());
// Check open table whether key is added or not.
@@ -310,6 +322,11 @@ public class TestOMKeyCreateRequest extends TestOMKeyRequest {
@SuppressWarnings("parameterNumber")
private OMRequest createKeyRequest(boolean isMultipartKey, int partNumber) {
+ return createKeyRequest(isMultipartKey, partNumber, keyName);
+ }
+
+ private OMRequest createKeyRequest(boolean isMultipartKey, int partNumber,
+ String keyName) {
KeyArgs.Builder keyArgs = KeyArgs.newBuilder()
.setVolumeName(volumeName).setBucketName(bucketName)
@@ -327,7 +344,170 @@ public class TestOMKeyCreateRequest extends TestOMKeyRequest {
.setCmdType(OzoneManagerProtocolProtos.Type.CreateKey)
.setClientId(UUID.randomUUID().toString())
.setCreateKeyRequest(createKeyRequest).build();
+ }
+
+ @Test
+ public void testKeyCreateWithFileSystemPathsEnabled() throws Exception {
+
+ OzoneConfiguration configuration = new OzoneConfiguration();
+ configuration.setBoolean(OZONE_OM_ENABLE_FILESYSTEM_PATHS, true);
+ when(ozoneManager.getConfiguration()).thenReturn(configuration);
+ when(ozoneManager.getEnableFileSystemPaths()).thenReturn(true);
+
+ // Add volume and bucket entries to DB.
+ addVolumeAndBucketToDB(volumeName, bucketName,
+ omMetadataManager);
+
+
+ keyName = "dir1/dir2/dir3/file1";
+ createAndCheck(keyName);
+
+ // Key with leading '/'.
+ String keyName = "/a/b/c/file1";
+ createAndCheck(keyName);
+
+ // Commit openKey entry.
+ TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName,
+ keyName.substring(1), 0L, RATIS, THREE, omMetadataManager);
+
+ // Now create another file in same dir path.
+ keyName = "/a/b/c/file2";
+ createAndCheck(keyName);
+
+ // Create key with multiple /'s
+ // converted to a/b/c/file5
+ keyName = "///a/b///c///file5";
+ createAndCheck(keyName);
+
+ // converted to a/b/c/.../file3
+ keyName = "///a/b///c//.../file3";
+ createAndCheck(keyName);
+
+ // converted to r1/r2
+ keyName = "././r1/r2/";
+ createAndCheck(keyName);
+
+ // converted to ..d1/d2/d3
+ keyName = "..d1/d2/d3/";
+ createAndCheck(keyName);
+
+ // Create a file, where a file already exists in the path.
+ // Now try with a file exists in path. Should fail.
+ keyName = "/a/b/c/file1/file3";
+ checkNotAFile(keyName);
+
+ // Empty keyName.
+ keyName = "";
+ checkNotAValidPath(keyName);
+
+ // Key name ends with /
+ keyName = "/a/./";
+ checkNotAValidPath(keyName);
+
+ keyName = "/////";
+ checkNotAValidPath(keyName);
+
+ keyName = "../../b/c";
+ checkNotAValidPath(keyName);
+
+ keyName = "../../b/c/";
+ checkNotAValidPath(keyName);
+
+ keyName = "../../b:/c/";
+ checkNotAValidPath(keyName);
+
+ keyName = ":/c/";
+ checkNotAValidPath(keyName);
+
+ keyName = "";
+ checkNotAValidPath(keyName);
+
+ keyName = "../a/b";
+ checkNotAValidPath(keyName);
+
+ keyName = "/../a/b";
+ checkNotAValidPath(keyName);
+
+ }
+
+ private void checkNotAValidPath(String keyName) {
+ OMRequest omRequest = createKeyRequest(false, 0, keyName);
+ OMKeyCreateRequest omKeyCreateRequest = new OMKeyCreateRequest(omRequest);
+
+ try {
+ omKeyCreateRequest.preExecute(ozoneManager);
+ fail("checkNotAValidPath failed for path " + keyName);
+ } catch (IOException ex) {
+ Assert.assertTrue(ex instanceof OMException);
+ OMException omException = (OMException) ex;
+ Assert.assertEquals(OMException.ResultCodes.INVALID_KEY_NAME,
+ omException.getResult());
+ }
+
+
+ }
+ private void checkNotAFile(String keyName) throws Exception {
+ OMRequest omRequest = createKeyRequest(false, 0, keyName);
+
+ OMKeyCreateRequest omKeyCreateRequest = new OMKeyCreateRequest(omRequest);
+
+ omRequest = omKeyCreateRequest.preExecute(ozoneManager);
+
+ omKeyCreateRequest = new OMKeyCreateRequest(omRequest);
+
+ OMClientResponse omClientResponse =
+ omKeyCreateRequest.validateAndUpdateCache(ozoneManager,
+ 101L, ozoneManagerDoubleBufferHelper);
+
+ Assert.assertEquals(NOT_A_FILE,
+ omClientResponse.getOMResponse().getStatus());
+ }
+
+
+ private void createAndCheck(String keyName) throws Exception {
+ OMRequest omRequest = createKeyRequest(false, 0, keyName);
+
+ OMKeyCreateRequest omKeyCreateRequest = new OMKeyCreateRequest(omRequest);
+
+ omRequest = omKeyCreateRequest.preExecute(ozoneManager);
+
+ omKeyCreateRequest = new OMKeyCreateRequest(omRequest);
+
+ OMClientResponse omClientResponse =
+ omKeyCreateRequest.validateAndUpdateCache(ozoneManager,
+ 101L, ozoneManagerDoubleBufferHelper);
+
+ Assert.assertEquals(OK, omClientResponse.getOMResponse().getStatus());
+
+ checkCreatedPaths(omKeyCreateRequest, omRequest, keyName);
+ }
+
+ private void checkCreatedPaths(OMKeyCreateRequest omKeyCreateRequest,
+ OMRequest omRequest, String keyName) throws Exception {
+ keyName = omKeyCreateRequest.validateAndNormalizeKey(true, keyName);
+ // Check intermediate directories created or not.
+ Path keyPath = Paths.get(keyName);
+ checkIntermediatePaths(keyPath);
+
+ // Check open key entry
+ String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
+ keyName, omRequest.getCreateKeyRequest().getClientID());
+ OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
+ Assert.assertNotNull(omKeyInfo);
+ }
+
+
+
+ private void checkIntermediatePaths(Path keyPath) throws Exception {
+ // Check intermediate paths are created
+ keyPath = keyPath.getParent();
+ while(keyPath != null) {
+ Assert.assertNotNull(omMetadataManager.getKeyTable().get(
+ omMetadataManager.getOzoneDirKey(volumeName, bucketName,
+ keyPath.toString())));
+ keyPath = keyPath.getParent();
+ }
}
}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java
index 1d78560..5633c72 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java
@@ -36,7 +36,7 @@ public class TestS3InitiateMultipartUploadRequest
extends TestS3MultipartRequest {
@Test
- public void testPreExecute() {
+ public void testPreExecute() throws Exception {
doPreExecuteInitiateMPU(UUID.randomUUID().toString(),
UUID.randomUUID().toString(), UUID.randomUUID().toString());
}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java
index 0271a7a..f0f040f 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java
@@ -107,7 +107,7 @@ public class TestS3MultipartRequest {
* @return OMRequest - returned from preExecute.
*/
protected OMRequest doPreExecuteInitiateMPU(
- String volumeName, String bucketName, String keyName) {
+ String volumeName, String bucketName, String keyName) throws Exception {
OMRequest omRequest =
TestOMRequestUtils.createInitiateMPURequest(volumeName, bucketName,
keyName);
@@ -141,7 +141,8 @@ public class TestS3MultipartRequest {
*/
protected OMRequest doPreExecuteCommitMPU(
String volumeName, String bucketName, String keyName,
- long clientID, String multipartUploadID, int partNumber) {
+ long clientID, String multipartUploadID, int partNumber)
+ throws Exception {
// Just set dummy size
long dataSize = 100L;
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java
index 5b220bf..d623b17 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java
@@ -38,7 +38,7 @@ public class TestS3MultipartUploadCommitPartRequest
extends TestS3MultipartRequest {
@Test
- public void testPreExecute() {
+ public void testPreExecute() throws Exception {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
String keyName = UUID.randomUUID().toString();
---------------------------------------------------------------------
To unsubscribe, e-mail: ozone-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: ozone-commits-help@hadoop.apache.org