You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ozone.apache.org by ra...@apache.org on 2021/02/25 06:20:36 UTC
[ozone] 16/19: HDDS-4781. [FSO]S3MultiPart: Implement create and
commit upload part file (#1897)
This is an automated email from the ASF dual-hosted git repository.
rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git
commit ee4fd7d8050f41b7b433b22b87e9849594a84afb
Author: Rakesh Radhakrishnan <ra...@apache.org>
AuthorDate: Wed Feb 10 15:00:32 2021 +0530
HDDS-4781. [FSO]S3MultiPart: Implement create and commit upload part file (#1897)
---
.../apache/hadoop/fs/ozone/TestOzoneFileOps.java | 67 ------
.../rpc/TestOzoneClientMultipartUploadV1.java | 93 +++++++++
.../om/ratis/utils/OzoneManagerRatisUtils.java | 4 +
.../hadoop/ozone/om/request/key/OMKeyRequest.java | 14 +-
.../S3InitiateMultipartUploadRequest.java | 12 +-
.../S3InitiateMultipartUploadRequestV1.java | 25 +--
.../S3MultipartUploadCommitPartRequest.java | 35 +++-
...a => S3MultipartUploadCommitPartRequestV1.java} | 114 ++++-------
.../S3MultipartUploadCommitPartResponseV1.java | 66 ++++++
.../s3/multipart/TestS3MultipartRequest.java | 14 +-
.../TestS3MultipartUploadCommitPartRequest.java | 62 ++++--
.../TestS3MultipartUploadCommitPartRequestV1.java | 104 ++++++++++
.../s3/multipart/TestS3MultipartResponse.java | 76 +++++++
.../TestS3MultipartUploadCommitPartResponseV1.java | 226 +++++++++++++++++++++
14 files changed, 712 insertions(+), 200 deletions(-)
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileOps.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileOps.java
index 176d0c4..147a9ce 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileOps.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileOps.java
@@ -24,8 +24,6 @@ import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.TestDataUtil;
@@ -37,7 +35,6 @@ import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.util.StringUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Assert;
@@ -125,34 +122,6 @@ public class TestOzoneFileOps {
omMgr);
openFileKey = d2ObjectID + OzoneConsts.OM_KEY_PREFIX + file.getName();
- // verify entries in directory table
- TableIterator<String, ? extends
- Table.KeyValue<String, OmDirectoryInfo>> iterator =
- omMgr.getDirectoryTable().iterator();
- iterator.seekToFirst();
- int count = dirKeys.size();
- Assert.assertEquals("Unexpected directory table entries!", 2, count);
- while (iterator.hasNext()) {
- count--;
- Table.KeyValue<String, OmDirectoryInfo> value = iterator.next();
- verifyKeyFormat(value.getKey(), dirKeys);
- }
- Assert.assertEquals("Unexpected directory table entries!", 0, count);
-
- // verify entries in open key table
- TableIterator<String, ? extends
- Table.KeyValue<String, OmKeyInfo>> keysItr =
- omMgr.getOpenKeyTable().iterator();
- keysItr.seekToFirst();
-
- while (keysItr.hasNext()) {
- count++;
- Table.KeyValue<String, OmKeyInfo> value = keysItr.next();
- verifyOpenKeyFormat(value.getKey(), openFileKey);
- verifyOMFileInfoFormat(value.getValue(), file.getName(), d2ObjectID);
- }
- Assert.assertEquals("Unexpected file table entries!", 1, count);
-
// trigger CommitKeyRequest
outputStream.close();
@@ -183,42 +152,6 @@ public class TestOzoneFileOps {
omKeyInfo.getPath());
}
- /**
- * Verify key name format and the DB key existence in the expected dirKeys
- * list.
- *
- * @param key table keyName
- * @param dirKeys expected keyName
- */
- private void verifyKeyFormat(String key, ArrayList<String> dirKeys) {
- String[] keyParts = StringUtils.split(key,
- OzoneConsts.OM_KEY_PREFIX.charAt(0));
- Assert.assertEquals("Invalid KeyName", 2, keyParts.length);
- boolean removed = dirKeys.remove(key);
- Assert.assertTrue("Key:" + key + " doesn't exists in directory table!",
- removed);
- }
-
- /**
- * Verify key name format and the DB key existence in the expected
- * openFileKeys list.
- *
- * @param key table keyName
- * @param openFileKey expected keyName
- */
- private void verifyOpenKeyFormat(String key, String openFileKey) {
- String[] keyParts = StringUtils.split(key,
- OzoneConsts.OM_KEY_PREFIX.charAt(0));
- Assert.assertEquals("Invalid KeyName:" + key, 3, keyParts.length);
- String[] expectedOpenFileParts = StringUtils.split(openFileKey,
- OzoneConsts.OM_KEY_PREFIX.charAt(0));
- Assert.assertEquals("ParentId/Key:" + expectedOpenFileParts[0]
- + " doesn't exists in openFileTable!",
- expectedOpenFileParts[0] + OzoneConsts.OM_KEY_PREFIX
- + expectedOpenFileParts[1],
- keyParts[0] + OzoneConsts.OM_KEY_PREFIX + keyParts[1]);
- }
-
long verifyDirKey(long parentId, String dirKey, String absolutePath,
ArrayList<String> dirKeys, OMMetadataManager omMgr)
throws Exception {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadV1.java
index 93e5826..af241c5 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadV1.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadV1.java
@@ -17,6 +17,7 @@
package org.apache.hadoop.ozone.client.rpc;
+import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.client.ObjectStore;
@@ -24,8 +25,13 @@ import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;
import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
+
+import static org.apache.hadoop.hdds.StringUtils.string2Bytes;
+import static org.apache.hadoop.hdds.client.ReplicationFactor.THREE;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNotNull;
@@ -179,4 +185,91 @@ public class TestOzoneClientMultipartUploadV1 {
assertNotNull(multipartInfo.getUploadID());
}
+ @Test
+ public void testUploadPartWithNoOverride() throws IOException {
+ String volumeName = UUID.randomUUID().toString();
+ String bucketName = UUID.randomUUID().toString();
+ String keyName = UUID.randomUUID().toString();
+ String sampleData = "sample Value";
+
+ store.createVolume(volumeName);
+ OzoneVolume volume = store.getVolume(volumeName);
+ volume.createBucket(bucketName);
+ OzoneBucket bucket = volume.getBucket(bucketName);
+ OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
+ STAND_ALONE, ONE);
+
+ assertNotNull(multipartInfo);
+ String uploadID = multipartInfo.getUploadID();
+ Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
+ Assert.assertEquals(bucketName, multipartInfo.getBucketName());
+ Assert.assertEquals(keyName, multipartInfo.getKeyName());
+ assertNotNull(multipartInfo.getUploadID());
+
+ OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName,
+ sampleData.length(), 1, uploadID);
+ ozoneOutputStream.write(string2Bytes(sampleData), 0, sampleData.length());
+ ozoneOutputStream.close();
+
+ OmMultipartCommitUploadPartInfo commitUploadPartInfo = ozoneOutputStream
+ .getCommitUploadPartInfo();
+
+ assertNotNull(commitUploadPartInfo);
+ assertNotNull(commitUploadPartInfo.getPartName());
+ }
+
+ @Test
+ public void testUploadPartOverrideWithRatis() throws IOException {
+
+ String volumeName = UUID.randomUUID().toString();
+ String bucketName = UUID.randomUUID().toString();
+ String keyName = UUID.randomUUID().toString();
+ String sampleData = "sample Value";
+
+ store.createVolume(volumeName);
+ OzoneVolume volume = store.getVolume(volumeName);
+ volume.createBucket(bucketName);
+ OzoneBucket bucket = volume.getBucket(bucketName);
+ OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
+ ReplicationType.RATIS, THREE);
+
+ assertNotNull(multipartInfo);
+ String uploadID = multipartInfo.getUploadID();
+ Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
+ Assert.assertEquals(bucketName, multipartInfo.getBucketName());
+ Assert.assertEquals(keyName, multipartInfo.getKeyName());
+ assertNotNull(multipartInfo.getUploadID());
+
+ int partNumber = 1;
+
+ OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName,
+ sampleData.length(), partNumber, uploadID);
+ ozoneOutputStream.write(string2Bytes(sampleData), 0, sampleData.length());
+ ozoneOutputStream.close();
+
+ OmMultipartCommitUploadPartInfo commitUploadPartInfo = ozoneOutputStream
+ .getCommitUploadPartInfo();
+
+ assertNotNull(commitUploadPartInfo);
+ String partName = commitUploadPartInfo.getPartName();
+ assertNotNull(commitUploadPartInfo.getPartName());
+
+ //Overwrite the part by creating part key with same part number.
+ sampleData = "sample Data Changed";
+ ozoneOutputStream = bucket.createMultipartKey(keyName,
+ sampleData.length(), partNumber, uploadID);
+ ozoneOutputStream.write(string2Bytes(sampleData), 0, "name".length());
+ ozoneOutputStream.close();
+
+ commitUploadPartInfo = ozoneOutputStream
+ .getCommitUploadPartInfo();
+
+ assertNotNull(commitUploadPartInfo);
+ assertNotNull(commitUploadPartInfo.getPartName());
+
+ // PartName should be different from old part Name.
+ assertNotEquals("Part names should be different", partName,
+ commitUploadPartInfo.getPartName());
+ }
+
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
index 8daa12b..40a5396 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
@@ -60,6 +60,7 @@ import org.apache.hadoop.ozone.om.request.s3.multipart.S3InitiateMultipartUpload
import org.apache.hadoop.ozone.om.request.s3.multipart.S3InitiateMultipartUploadRequestV1;
import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadAbortRequest;
import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadCommitPartRequest;
+import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadCommitPartRequestV1;
import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadCompleteRequest;
import org.apache.hadoop.ozone.om.request.s3.security.S3GetSecretRequest;
import org.apache.hadoop.ozone.om.request.security.OMCancelDelegationTokenRequest;
@@ -188,6 +189,9 @@ public final class OzoneManagerRatisUtils {
}
return new S3InitiateMultipartUploadRequest(omRequest);
case CommitMultiPartUpload:
+ if (isBucketFSOptimized()) {
+ return new S3MultipartUploadCommitPartRequestV1(omRequest);
+ }
return new S3MultipartUploadCommitPartRequest(omRequest);
case AbortMultiPartUpload:
return new S3MultipartUploadAbortRequest(omRequest);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
index 7319690..85b8bc3 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
@@ -692,9 +692,17 @@ public abstract class OMKeyRequest extends OMClientRequest {
// error no such multipart upload.
String uploadID = args.getMultipartUploadID();
Preconditions.checkNotNull(uploadID);
- String multipartKey = omMetadataManager
- .getMultipartKey(args.getVolumeName(), args.getBucketName(),
- args.getKeyName(), uploadID);
+ String multipartKey = "";
+ if (omPathInfo != null) {
+ // FileTable metadata format
+ multipartKey = omMetadataManager.getMultipartKey(
+ omPathInfo.getLastKnownParentId(),
+ omPathInfo.getLeafNodeName(), uploadID);
+ } else {
+ multipartKey = omMetadataManager
+ .getMultipartKey(args.getVolumeName(), args.getBucketName(),
+ args.getKeyName(), uploadID);
+ }
OmKeyInfo partKeyInfo = omMetadataManager.getOpenKeyTable().get(
multipartKey);
if (partKeyInfo == null) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java
index b5c9fdf..14dba3d 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java
@@ -216,7 +216,17 @@ public class S3InitiateMultipartUploadRequest extends OMKeyRequest {
volumeName, bucketName);
}
}
+ logResult(ozoneManager, multipartInfoInitiateRequest, auditMap, volumeName,
+ bucketName, keyName, exception, result);
+ return omClientResponse;
+ }
+
+ @SuppressWarnings("parameternumber")
+ protected void logResult(OzoneManager ozoneManager,
+ MultipartInfoInitiateRequest multipartInfoInitiateRequest,
+ Map<String, String> auditMap, String volumeName, String bucketName,
+ String keyName, IOException exception, Result result) {
// audit log
auditLog(ozoneManager.getAuditLogger(), buildAuditMessage(
OMAction.INITIATE_MULTIPART_UPLOAD, auditMap,
@@ -238,7 +248,5 @@ public class S3InitiateMultipartUploadRequest extends OMKeyRequest {
LOG.error("Unrecognized Result for S3InitiateMultipartUploadRequest: {}",
multipartInfoInitiateRequest);
}
-
- return omClientResponse;
}
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestV1.java
index 3507090..d472bc1 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestV1.java
@@ -22,7 +22,6 @@ import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
-import org.apache.hadoop.ozone.audit.OMAction;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.exceptions.OMException;
@@ -228,28 +227,8 @@ public class S3InitiateMultipartUploadRequestV1
volumeName, bucketName);
}
}
-
- // audit log
- auditLog(ozoneManager.getAuditLogger(), buildAuditMessage(
- OMAction.INITIATE_MULTIPART_UPLOAD, auditMap,
- exception, getOmRequest().getUserInfo()));
-
- switch (result) {
- case SUCCESS:
- LOG.debug("S3 InitiateMultipart Upload request for Key {} in " +
- "Volume/Bucket {}/{} is successfully completed", keyName,
- volumeName, bucketName);
- break;
- case FAILURE:
- ozoneManager.getMetrics().incNumInitiateMultipartUploadFails();
- LOG.error("S3 InitiateMultipart Upload request for Key {} in " +
- "Volume/Bucket {}/{} is failed", keyName, volumeName, bucketName,
- exception);
- break;
- default:
- LOG.error("Unrecognized Result for S3InitiateMultipartUploadRequest: {}",
- multipartInfoInitiateRequest);
- }
+ logResult(ozoneManager, multipartInfoInitiateRequest, auditMap, volumeName,
+ bucketName, keyName, exception, result);
return omClientResponse;
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
index d529f92..d235998 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
@@ -129,16 +129,16 @@ public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
String uploadID = keyArgs.getMultipartUploadID();
- multipartKey = omMetadataManager.getMultipartKey(volumeName, bucketName,
- keyName, uploadID);
+ multipartKey = getMultipartKey(volumeName, bucketName, keyName,
+ omMetadataManager, uploadID);
multipartKeyInfo = omMetadataManager.getMultipartInfoTable()
.get(multipartKey);
long clientID = multipartCommitUploadPartRequest.getClientID();
- openKey = omMetadataManager.getOpenKey(
- volumeName, bucketName, keyName, clientID);
+ openKey = getOpenKey(volumeName, bucketName, keyName, omMetadataManager,
+ clientID);
String ozoneKey = omMetadataManager.getOzoneKey(
volumeName, bucketName, keyName);
@@ -248,6 +248,31 @@ public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
}
}
+ logResult(ozoneManager, multipartCommitUploadPartRequest, keyArgs,
+ auditMap, volumeName, bucketName, keyName, exception, partName,
+ result);
+
+ return omClientResponse;
+ }
+
+ private String getOpenKey(String volumeName, String bucketName,
+ String keyName, OMMetadataManager omMetadataManager, long clientID) {
+ return omMetadataManager.getOpenKey(volumeName, bucketName,
+ keyName, clientID);
+ }
+
+ private String getMultipartKey(String volumeName, String bucketName,
+ String keyName, OMMetadataManager omMetadataManager, String uploadID) {
+ return omMetadataManager.getMultipartKey(volumeName, bucketName,
+ keyName, uploadID);
+ }
+
+ @SuppressWarnings("parameternumber")
+ protected void logResult(OzoneManager ozoneManager,
+ MultipartCommitUploadPartRequest multipartCommitUploadPartRequest,
+ KeyArgs keyArgs, Map<String, String> auditMap, String volumeName,
+ String bucketName, String keyName, IOException exception,
+ String partName, Result result) {
// audit log
// Add MPU related information.
auditMap.put(OzoneConsts.MULTIPART_UPLOAD_PART_NUMBER,
@@ -273,8 +298,6 @@ public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
LOG.error("Unrecognized Result for S3MultipartUploadCommitPartRequest: " +
"{}", multipartCommitUploadPartRequest);
}
-
- return omClientResponse;
}
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequestV1.java
similarity index 75%
copy from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
copy to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequestV1.java
index d529f92..5546010 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequestV1.java
@@ -19,8 +19,8 @@
package org.apache.hadoop.ozone.om.request.s3.multipart;
import com.google.common.base.Optional;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.audit.OMAction;
+import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
+import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.exceptions.OMException;
@@ -28,30 +28,25 @@ import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-import org.apache.hadoop.ozone.om.request.key.OMKeyRequest;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.om.response.s3.multipart
- .S3MultipartUploadCommitPartResponse;
+import org.apache.hadoop.ozone.om.response.s3.multipart.S3MultipartUploadCommitPartResponseV1;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
- .KeyArgs;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
- .MultipartCommitUploadPartRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
- .MultipartCommitUploadPartResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
- .OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
- .OMResponse;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartCommitUploadPartRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartCommitUploadPartResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Iterator;
import java.util.Map;
import java.util.stream.Collectors;
@@ -61,31 +56,17 @@ import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_L
/**
* Handle Multipart upload commit upload part file.
*/
-public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
+public class S3MultipartUploadCommitPartRequestV1
+ extends S3MultipartUploadCommitPartRequest {
private static final Logger LOG =
- LoggerFactory.getLogger(S3MultipartUploadCommitPartRequest.class);
+ LoggerFactory.getLogger(S3MultipartUploadCommitPartRequestV1.class);
- public S3MultipartUploadCommitPartRequest(OMRequest omRequest) {
+ public S3MultipartUploadCommitPartRequestV1(OMRequest omRequest) {
super(omRequest);
}
@Override
- public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
- MultipartCommitUploadPartRequest multipartCommitUploadPartRequest =
- getOmRequest().getCommitMultiPartUploadRequest();
-
- KeyArgs keyArgs = multipartCommitUploadPartRequest.getKeyArgs();
- return getOmRequest().toBuilder().setCommitMultiPartUploadRequest(
- multipartCommitUploadPartRequest.toBuilder()
- .setKeyArgs(keyArgs.toBuilder().setModificationTime(Time.now())
- .setKeyName(validateAndNormalizeKey(
- ozoneManager.getEnableFileSystemPaths(),
- keyArgs.getKeyName()))))
- .setUserInfo(getUserInfo()).build();
- }
-
- @Override
@SuppressWarnings("methodlength")
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
@@ -128,26 +109,31 @@ public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
+ String fileName = OzoneFSUtils.getFileName(keyName);
+ Iterator<Path> pathComponents = Paths.get(keyName).iterator();
+ String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+ omBucketInfo = omMetadataManager.getBucketTable().get(bucketKey);
+ long bucketId = omBucketInfo.getObjectID();
+ long parentID = OMFileRequest.getParentID(bucketId, pathComponents,
+ keyName, omMetadataManager);
+
String uploadID = keyArgs.getMultipartUploadID();
- multipartKey = omMetadataManager.getMultipartKey(volumeName, bucketName,
- keyName, uploadID);
+ multipartKey = omMetadataManager.getMultipartKey(parentID,
+ fileName, uploadID);
multipartKeyInfo = omMetadataManager.getMultipartInfoTable()
.get(multipartKey);
long clientID = multipartCommitUploadPartRequest.getClientID();
- openKey = omMetadataManager.getOpenKey(
- volumeName, bucketName, keyName, clientID);
+ openKey = omMetadataManager.getOpenFileName(parentID, fileName, clientID);
- String ozoneKey = omMetadataManager.getOzoneKey(
- volumeName, bucketName, keyName);
-
- omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
+ omKeyInfo = OMFileRequest.getOmKeyInfoFromFileTable(true,
+ omMetadataManager, openKey, keyName);
if (omKeyInfo == null) {
throw new OMException("Failed to commit Multipart Upload key, as " +
- openKey + "entry is not found in the openKey table",
+ openKey + " entry is not found in the openKey table",
KEY_NOT_FOUND);
}
@@ -155,12 +141,13 @@ public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
omKeyInfo.setDataSize(keyArgs.getDataSize());
omKeyInfo.updateLocationInfoList(keyArgs.getKeyLocationsList().stream()
.map(OmKeyLocationInfo::getFromProtobuf)
- .collect(Collectors.toList()), true);
+ .collect(Collectors.toList()));
// Set Modification time
omKeyInfo.setModificationTime(keyArgs.getModificationTime());
// Set the UpdateID to current transactionLogIndex
omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
+ String ozoneKey = omMetadataManager.getOzonePathKey(parentID, fileName);
partName = ozoneKey + clientID;
if (multipartKeyInfo == null) {
@@ -197,8 +184,6 @@ public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
// S3MultipartUplodaCommitPartResponse before being added to
// DeletedKeyTable.
- // Add to cache.
-
// Delete from open key table and add it to multipart info table.
// No need to add cache entries to delete table, as no
// read/write requests that info for validation.
@@ -225,7 +210,7 @@ public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
omResponse.setCommitMultiPartUploadResponse(
MultipartCommitUploadPartResponse.newBuilder()
.setPartName(partName));
- omClientResponse = new S3MultipartUploadCommitPartResponse(
+ omClientResponse = new S3MultipartUploadCommitPartResponseV1(
omResponse.build(), multipartKey, openKey,
multipartKeyInfo, oldPartKeyInfo, omKeyInfo,
ozoneManager.isRatisEnabled(),
@@ -235,7 +220,7 @@ public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
} catch (IOException ex) {
result = Result.FAILURE;
exception = ex;
- omClientResponse = new S3MultipartUploadCommitPartResponse(
+ omClientResponse = new S3MultipartUploadCommitPartResponseV1(
createErrorOMResponse(omResponse, exception), multipartKey, openKey,
multipartKeyInfo, oldPartKeyInfo, omKeyInfo,
ozoneManager.isRatisEnabled(), copyBucketInfo);
@@ -248,34 +233,11 @@ public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
}
}
- // audit log
- // Add MPU related information.
- auditMap.put(OzoneConsts.MULTIPART_UPLOAD_PART_NUMBER,
- String.valueOf(keyArgs.getMultipartNumber()));
- auditMap.put(OzoneConsts.MULTIPART_UPLOAD_PART_NAME, partName);
- auditLog(ozoneManager.getAuditLogger(), buildAuditMessage(
- OMAction.COMMIT_MULTIPART_UPLOAD_PARTKEY,
- auditMap, exception,
- getOmRequest().getUserInfo()));
-
- switch (result) {
- case SUCCESS:
- LOG.debug("MultipartUpload Commit is successfully for Key:{} in " +
- "Volume/Bucket {}/{}", keyName, volumeName, bucketName);
- break;
- case FAILURE:
- ozoneManager.getMetrics().incNumCommitMultipartUploadPartFails();
- LOG.error("MultipartUpload Commit is failed for Key:{} in " +
- "Volume/Bucket {}/{}", keyName, volumeName, bucketName,
- exception);
- break;
- default:
- LOG.error("Unrecognized Result for S3MultipartUploadCommitPartRequest: " +
- "{}", multipartCommitUploadPartRequest);
- }
+ logResult(ozoneManager, multipartCommitUploadPartRequest, keyArgs,
+ auditMap, volumeName, bucketName, keyName, exception, partName,
+ result);
return omClientResponse;
}
}
-
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponseV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponseV1.java
new file mode 100644
index 0000000..d8e5cc5
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponseV1.java
@@ -0,0 +1,66 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.s3.multipart;
+
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
+import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.MULTIPARTFILEINFO_TABLE;
+
+/**
+ * Response for S3MultipartUploadCommitPart request.
+ */
+@CleanupTableInfo(cleanupTables = {OPEN_FILE_TABLE, DELETED_TABLE,
+ MULTIPARTFILEINFO_TABLE})
+public class S3MultipartUploadCommitPartResponseV1
+ extends S3MultipartUploadCommitPartResponse {
+
+ /**
+ * Regular response.
+ * 1. Update MultipartKey in MultipartInfoTable with new PartKeyInfo
+ * 2. Delete openKey from OpenKeyTable
+ * 3. If old PartKeyInfo exists, put it in DeletedKeyTable
+ * @param omResponse
+ * @param multipartKey
+ * @param openKey
+ * @param omMultipartKeyInfo
+ * @param oldPartKeyInfo
+ */
+ @SuppressWarnings("checkstyle:ParameterNumber")
+ public S3MultipartUploadCommitPartResponseV1(@Nonnull OMResponse omResponse,
+ String multipartKey, String openKey,
+ @Nullable OmMultipartKeyInfo omMultipartKeyInfo,
+ @Nullable OzoneManagerProtocolProtos.PartKeyInfo oldPartKeyInfo,
+ @Nullable OmKeyInfo openPartKeyInfoToBeDeleted,
+ boolean isRatisEnabled, @Nonnull OmBucketInfo omBucketInfo) {
+
+ super(omResponse, multipartKey, openKey, omMultipartKeyInfo,
+ oldPartKeyInfo, openPartKeyInfoToBeDeleted, isRatisEnabled,
+ omBucketInfo);
+ }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java
index 641ee8d..9f6cff8 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java
@@ -152,8 +152,7 @@ public class TestS3MultipartRequest {
TestOMRequestUtils.createCommitPartMPURequest(volumeName, bucketName,
keyName, clientID, dataSize, multipartUploadID, partNumber);
S3MultipartUploadCommitPartRequest s3MultipartUploadCommitPartRequest =
- new S3MultipartUploadCommitPartRequest(omRequest);
-
+ getS3MultipartUploadCommitReq(omRequest);
OMRequest modifiedRequest =
s3MultipartUploadCommitPartRequest.preExecute(ozoneManager);
@@ -247,4 +246,15 @@ public class TestS3MultipartRequest {
return modifiedRequest;
}
+
+ /**
+ * Creates the commit-part request under test; V1 (FS-optimized) test
+ * subclasses override this to supply the V1 request implementation.
+ */
+ protected S3MultipartUploadCommitPartRequest getS3MultipartUploadCommitReq(
+ OMRequest omRequest) {
+ return new S3MultipartUploadCommitPartRequest(omRequest);
+ }
+
+ /**
+ * Creates the initiate-MPU request under test; V1 (FS-optimized) test
+ * subclasses override this to supply the V1 request implementation.
+ */
+ protected S3InitiateMultipartUploadRequest getS3InitiateMultipartUploadReq(
+ OMRequest initiateMPURequest) {
+ return new S3InitiateMultipartUploadRequest(initiateMPURequest);
+ }
+
}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java
index d623b17..6c8beb0 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java
@@ -41,27 +41,28 @@ public class TestS3MultipartUploadCommitPartRequest
public void testPreExecute() throws Exception {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
- String keyName = UUID.randomUUID().toString();
+ String keyName = getKeyName();
doPreExecuteCommitMPU(volumeName, bucketName, keyName, Time.now(),
UUID.randomUUID().toString(), 1);
}
-
@Test
public void testValidateAndUpdateCacheSuccess() throws Exception {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
- String keyName = UUID.randomUUID().toString();
+ String keyName = getKeyName();
TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
omMetadataManager);
+ createParentPath(volumeName, bucketName);
+
OMRequest initiateMPURequest = doPreExecuteInitiateMPU(volumeName,
bucketName, keyName);
S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest =
- new S3InitiateMultipartUploadRequest(initiateMPURequest);
+ getS3InitiateMultipartUploadReq(initiateMPURequest);
OMClientResponse omClientResponse =
s3InitiateMultipartUploadRequest.validateAndUpdateCache(ozoneManager,
@@ -75,12 +76,10 @@ public class TestS3MultipartUploadCommitPartRequest
bucketName, keyName, clientID, multipartUploadID, 1);
S3MultipartUploadCommitPartRequest s3MultipartUploadCommitPartRequest =
- new S3MultipartUploadCommitPartRequest(commitMultipartRequest);
+ getS3MultipartUploadCommitReq(commitMultipartRequest);
// Add key to open key table.
- TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName,
- keyName, clientID, HddsProtos.ReplicationType.RATIS,
- HddsProtos.ReplicationFactor.ONE, omMetadataManager);
+ addKeyToOpenKeyTable(volumeName, bucketName, keyName, clientID);
omClientResponse =
s3MultipartUploadCommitPartRequest.validateAndUpdateCache(ozoneManager,
@@ -90,8 +89,8 @@ public class TestS3MultipartUploadCommitPartRequest
Assert.assertTrue(omClientResponse.getOMResponse().getStatus()
== OzoneManagerProtocolProtos.Status.OK);
- String multipartKey = omMetadataManager.getMultipartKey(volumeName,
- bucketName, keyName, multipartUploadID);
+ String multipartKey = getMultipartKey(volumeName, bucketName, keyName,
+ multipartUploadID);
Assert.assertNotNull(
omMetadataManager.getMultipartInfoTable().get(multipartKey));
@@ -107,11 +106,12 @@ public class TestS3MultipartUploadCommitPartRequest
public void testValidateAndUpdateCacheMultipartNotFound() throws Exception {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
- String keyName = UUID.randomUUID().toString();
+ String keyName = getKeyName();
TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
omMetadataManager);
+ createParentPath(volumeName, bucketName);
long clientID = Time.now();
String multipartUploadID = UUID.randomUUID().toString();
@@ -120,12 +120,10 @@ public class TestS3MultipartUploadCommitPartRequest
bucketName, keyName, clientID, multipartUploadID, 1);
S3MultipartUploadCommitPartRequest s3MultipartUploadCommitPartRequest =
- new S3MultipartUploadCommitPartRequest(commitMultipartRequest);
+ getS3MultipartUploadCommitReq(commitMultipartRequest);
// Add key to open key table.
- TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName,
- keyName, clientID, HddsProtos.ReplicationType.RATIS,
- HddsProtos.ReplicationFactor.ONE, omMetadataManager);
+ addKeyToOpenKeyTable(volumeName, bucketName, keyName, clientID);
OMClientResponse omClientResponse =
s3MultipartUploadCommitPartRequest.validateAndUpdateCache(ozoneManager,
@@ -135,8 +133,8 @@ public class TestS3MultipartUploadCommitPartRequest
Assert.assertTrue(omClientResponse.getOMResponse().getStatus()
== OzoneManagerProtocolProtos.Status.NO_SUCH_MULTIPART_UPLOAD_ERROR);
- String multipartKey = omMetadataManager.getMultipartKey(volumeName,
- bucketName, keyName, multipartUploadID);
+ String multipartKey = getMultipartKey(volumeName, bucketName, keyName,
+ multipartUploadID);
Assert.assertNull(
omMetadataManager.getMultipartInfoTable().get(multipartKey));
@@ -147,7 +145,7 @@ public class TestS3MultipartUploadCommitPartRequest
public void testValidateAndUpdateCacheKeyNotFound() throws Exception {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
- String keyName = UUID.randomUUID().toString();
+ String keyName = getKeyName();
TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
omMetadataManager);
@@ -163,7 +161,7 @@ public class TestS3MultipartUploadCommitPartRequest
// part. It will fail with KEY_NOT_FOUND
S3MultipartUploadCommitPartRequest s3MultipartUploadCommitPartRequest =
- new S3MultipartUploadCommitPartRequest(commitMultipartRequest);
+ getS3MultipartUploadCommitReq(commitMultipartRequest);
OMClientResponse omClientResponse =
@@ -180,7 +178,7 @@ public class TestS3MultipartUploadCommitPartRequest
public void testValidateAndUpdateCacheBucketFound() throws Exception {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
- String keyName = UUID.randomUUID().toString();
+ String keyName = getKeyName();
TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager);
@@ -195,7 +193,7 @@ public class TestS3MultipartUploadCommitPartRequest
// part. It will fail with KEY_NOT_FOUND
S3MultipartUploadCommitPartRequest s3MultipartUploadCommitPartRequest =
- new S3MultipartUploadCommitPartRequest(commitMultipartRequest);
+ getS3MultipartUploadCommitReq(commitMultipartRequest);
OMClientResponse omClientResponse =
@@ -206,4 +204,26 @@ public class TestS3MultipartUploadCommitPartRequest
== OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND);
}
+
+ /**
+ * Seeds the open key table with the entry the commit-part request expects;
+ * the V1 subclass overrides this to populate the open FILE table instead.
+ */
+ protected void addKeyToOpenKeyTable(String volumeName, String bucketName,
+ String keyName, long clientID) throws Exception {
+ TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName,
+ keyName, clientID, HddsProtos.ReplicationType.RATIS,
+ HddsProtos.ReplicationFactor.ONE, omMetadataManager);
+ }
+
+ /** Returns a random flat key name; the V1 subclass prepends a dir path. */
+ protected String getKeyName() {
+ return UUID.randomUUID().toString();
+ }
+
+ /** Builds the multipart key for the default (full key name) layout. */
+ protected String getMultipartKey(String volumeName, String bucketName,
+ String keyName, String multipartUploadID) {
+ return omMetadataManager.getMultipartKey(volumeName,
+ bucketName, keyName, multipartUploadID);
+ }
+
+ /**
+ * Hook for layout-specific setup. Flat keys have no parent hierarchy, so
+ * this is a no-op here; the V1 subclass creates directory entries instead.
+ */
+ protected void createParentPath(String volumeName, String bucketName)
+ throws Exception {
+ // no parent hierarchy
+ }
}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestV1.java
new file mode 100644
index 0000000..d0d01e1
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestV1.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.ozone.om.request.s3.multipart;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.util.Time;
+import org.junit.Assert;
+
+import java.util.UUID;
+
+/**
+ * Tests S3 Multipart upload commit part request. Re-runs the parent test
+ * scenarios against the V1 (file-system optimized) layout by overriding
+ * the request factories and key-layout hooks.
+ */
+public class TestS3MultipartUploadCommitPartRequestV1
+ extends TestS3MultipartUploadCommitPartRequest {
+
+ // Directory prefix used for every test key; created in createParentPath().
+ private String dirName = "a/b/c/";
+
+ // Parent directory object id returned by addParentsToDirTable();
+ // set by createParentPath() before each scenario uses it.
+ private long parentID;
+
+ // Use the V1 commit-part request implementation.
+ protected S3MultipartUploadCommitPartRequest getS3MultipartUploadCommitReq(
+ OMRequest omRequest) {
+ return new S3MultipartUploadCommitPartRequestV1(omRequest);
+ }
+
+ // Use the V1 initiate-MPU request implementation.
+ protected S3InitiateMultipartUploadRequest getS3InitiateMultipartUploadReq(
+ OMRequest initiateMPURequest) {
+ return new S3InitiateMultipartUploadRequestV1(initiateMPURequest);
+ }
+
+ // V1 keys live under a directory hierarchy, unlike the flat default keys.
+ protected String getKeyName() {
+ return dirName + UUID.randomUUID().toString();
+ }
+
+ // Seeds the open FILE table (V1 layout) with the key the commit expects.
+ protected void addKeyToOpenKeyTable(String volumeName, String bucketName,
+ String keyName, long clientID) throws Exception {
+ long txnLogId = 10000;
+ OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
+ bucketName, keyName, HddsProtos.ReplicationType.RATIS,
+ HddsProtos.ReplicationFactor.ONE, parentID + 1, parentID,
+ txnLogId, Time.now());
+ String fileName = OzoneFSUtils.getFileName(keyName);
+ TestOMRequestUtils.addFileToKeyTable(true, false,
+ fileName, omKeyInfo, clientID, txnLogId, omMetadataManager);
+ }
+
+ // V1 multipart keys are parentID + fileName based, not full-path based.
+ protected String getMultipartKey(String volumeName, String bucketName,
+ String keyName, String multipartUploadID) {
+ String fileName = StringUtils.substringAfter(keyName, dirName);
+ return omMetadataManager.getMultipartKey(parentID, fileName,
+ multipartUploadID);
+ }
+
+ // Runs preExecute of the V1 initiate-MPU request and verifies it stamps
+ // an upload ID and a modification time onto the request.
+ protected OMRequest doPreExecuteInitiateMPU(String volumeName,
+ String bucketName, String keyName) throws Exception {
+ OMRequest omRequest =
+ TestOMRequestUtils.createInitiateMPURequest(volumeName, bucketName,
+ keyName);
+
+ S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest =
+ new S3InitiateMultipartUploadRequestV1(omRequest);
+
+ OMRequest modifiedRequest =
+ s3InitiateMultipartUploadRequest.preExecute(ozoneManager);
+
+ Assert.assertNotEquals(omRequest, modifiedRequest);
+ Assert.assertTrue(modifiedRequest.hasInitiateMultiPartUploadRequest());
+ Assert.assertNotNull(modifiedRequest.getInitiateMultiPartUploadRequest()
+ .getKeyArgs().getMultipartUploadID());
+ Assert.assertTrue(modifiedRequest.getInitiateMultiPartUploadRequest()
+ .getKeyArgs().getModificationTime() > 0);
+
+ return modifiedRequest;
+ }
+
+ protected void createParentPath(String volumeName, String bucketName)
+ throws Exception {
+ // Create parent dirs for the path
+ parentID = TestOMRequestUtils.addParentsToDirTable(volumeName, bucketName,
+ dirName, omMetadataManager);
+ }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
index 76ceb0e..106ae61 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
@@ -19,6 +19,7 @@
package org.apache.hadoop.ozone.om.response.s3.multipart;
+import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
@@ -156,6 +157,24 @@ public class TestS3MultipartResponse {
.setFactor(HddsProtos.ReplicationFactor.ONE).build()).build();
}
+ /**
+ * Builds a dummy PartKeyInfo for the V1 layout: the part name is a
+ * parentID/fileName based multipart key (random upload id keeps each
+ * generated name unique) and the embedded KeyInfo carries the parent ID.
+ */
+ public PartKeyInfo createPartKeyInfoV1(
+ String volumeName, String bucketName, long parentID, String fileName,
+ int partNumber) {
+ return PartKeyInfo.newBuilder()
+ .setPartNumber(partNumber)
+ .setPartName(omMetadataManager.getMultipartKey(parentID, fileName,
+ UUID.randomUUID().toString()))
+ .setPartKeyInfo(KeyInfo.newBuilder()
+ .setVolumeName(volumeName)
+ .setBucketName(bucketName)
+ .setKeyName(fileName)
+ .setDataSize(100L) // Just set dummy size for testing
+ .setCreationTime(Time.now())
+ .setModificationTime(Time.now())
+ .setParentID(parentID)
+ .setType(HddsProtos.ReplicationType.RATIS)
+ .setFactor(HddsProtos.ReplicationFactor.ONE).build()).build();
+ }
public S3InitiateMultipartUploadResponse createS3InitiateMPUResponseV1(
String volumeName, String bucketName, long parentID, String keyName,
@@ -198,4 +217,61 @@ public class TestS3MultipartResponse {
omKeyInfo, parentDirInfos);
}
+ /**
+ * Builds a V1 commit-part response for tests. If multipartKeyInfo is null
+ * a fresh OmMultipartKeyInfo is created for the given upload/parent id.
+ * Also constructs a dummy open part key info (no block locations) and
+ * wraps everything in an OMResponse carrying the given status.
+ */
+ @SuppressWarnings("checkstyle:ParameterNumber")
+ public S3MultipartUploadCommitPartResponse createS3CommitMPUResponseV1(
+ String volumeName, String bucketName, long parentID, String keyName,
+ String multipartUploadID,
+ OzoneManagerProtocolProtos.PartKeyInfo oldPartKeyInfo,
+ OmMultipartKeyInfo multipartKeyInfo,
+ OzoneManagerProtocolProtos.Status status, String openKey)
+ throws IOException {
+ // Default multipart metadata when the caller did not supply any.
+ if (multipartKeyInfo == null) {
+ multipartKeyInfo = new OmMultipartKeyInfo.Builder()
+ .setUploadID(multipartUploadID)
+ .setCreationTime(Time.now())
+ .setReplicationType(HddsProtos.ReplicationType.RATIS)
+ .setReplicationFactor(HddsProtos.ReplicationFactor.ONE)
+ .setParentID(parentID)
+ .build();
+ }
+
+ String fileName = OzoneFSUtils.getFileName(keyName);
+
+ String multipartKey = getMultipartKey(parentID, keyName, multipartUploadID);
+ boolean isRatisEnabled = true;
+ String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+ OmBucketInfo omBucketInfo =
+ omMetadataManager.getBucketTable().get(bucketKey);
+
+ // Dummy open part key info: no block locations are needed for these
+ // delete-table oriented tests.
+ OmKeyInfo openPartKeyInfoToBeDeleted = new OmKeyInfo.Builder()
+ .setVolumeName(volumeName)
+ .setBucketName(bucketName)
+ .setKeyName(fileName)
+ .setFileName(fileName)
+ .setCreationTime(Time.now())
+ .setModificationTime(Time.now())
+ .setReplicationType(HddsProtos.ReplicationType.RATIS)
+ .setReplicationFactor(HddsProtos.ReplicationFactor.ONE)
+ .setOmKeyLocationInfos(Collections.singletonList(
+ new OmKeyLocationInfoGroup(0, new ArrayList<>())))
+ .build();
+
+ OMResponse omResponse = OMResponse.newBuilder()
+ .setCmdType(OzoneManagerProtocolProtos.Type.CommitMultiPartUpload)
+ .setStatus(status).setSuccess(true)
+ .setCommitMultiPartUploadResponse(
+ OzoneManagerProtocolProtos.MultipartCommitUploadPartResponse
+ .newBuilder().setPartName(volumeName)).build();
+
+ return new S3MultipartUploadCommitPartResponseV1(omResponse, multipartKey,
+ openKey, multipartKeyInfo, oldPartKeyInfo,
+ openPartKeyInfoToBeDeleted, isRatisEnabled, omBucketInfo);
+ }
+
+ /** V1 multipart key: derived from parentID + fileName, not full path. */
+ private String getMultipartKey(long parentID, String keyName,
+ String multipartUploadID) {
+ String fileName = OzoneFSUtils.getFileName(keyName);
+ return omMetadataManager.getMultipartKey(parentID, fileName,
+ multipartUploadID);
+ }
}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCommitPartResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCommitPartResponseV1.java
new file mode 100644
index 0000000..511ffef
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCommitPartResponseV1.java
@@ -0,0 +1,226 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.s3.multipart;
+
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo;
+import org.apache.hadoop.util.Time;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.UUID;
+
+/**
+ * Test multipart upload commit part response for the V1 (file-system
+ * optimized) layout.
+ */
+public class TestS3MultipartUploadCommitPartResponseV1
+ extends TestS3MultipartResponse {
+
+ // Directory prefix used for every test key; created in createParentPath().
+ private String dirName = "a/b/c/";
+
+ // Parent directory object id returned by addParentsToDirTable().
+ private long parentID;
+
+ @Test
+ public void testAddDBToBatch() throws Exception {
+ String volumeName = UUID.randomUUID().toString();
+ String bucketName = UUID.randomUUID().toString();
+ String keyName = getKeyName();
+ String multipartUploadID = UUID.randomUUID().toString();
+
+ TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+ omMetadataManager);
+
+ createParentPath(volumeName, bucketName);
+ String fileName = OzoneFSUtils.getFileName(keyName);
+ String multipartKey = omMetadataManager.getMultipartKey(parentID, fileName,
+ multipartUploadID);
+ long clientId = Time.now();
+ String openKey = omMetadataManager.getOpenFileName(parentID, fileName,
+ clientId);
+
+ S3MultipartUploadCommitPartResponse s3MultipartUploadCommitPartResponse =
+ createS3CommitMPUResponseV1(volumeName, bucketName, parentID, keyName,
+ multipartUploadID, null, null,
+ OzoneManagerProtocolProtos.Status.OK, openKey);
+
+ s3MultipartUploadCommitPartResponse.addToDBBatch(omMetadataManager,
+ batchOperation);
+
+ omMetadataManager.getStore().commitBatchOperation(batchOperation);
+
+ Assert.assertNull(omMetadataManager.getOpenKeyTable().get(multipartKey));
+ Assert.assertNotNull(
+ omMetadataManager.getMultipartInfoTable().get(multipartKey));
+
+ // No old part was replaced, so the delete table must stay empty.
+ Assert.assertEquals(0, omMetadataManager.countRowsInTable(
+ omMetadataManager.getDeletedTable()));
+ }
+
+ @Test
+ public void testAddDBToBatchWithParts() throws Exception {
+
+ String volumeName = UUID.randomUUID().toString();
+ String bucketName = UUID.randomUUID().toString();
+ String keyName = getKeyName();
+
+ TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+ omMetadataManager);
+ createParentPath(volumeName, bucketName);
+
+ String multipartUploadID = UUID.randomUUID().toString();
+
+ String fileName = OzoneFSUtils.getFileName(keyName);
+ String multipartKey = omMetadataManager.getMultipartKey(parentID, fileName,
+ multipartUploadID);
+
+ S3InitiateMultipartUploadResponse s3InitiateMultipartUploadResponseV1 =
+ createS3InitiateMPUResponseV1(volumeName, bucketName, parentID,
+ keyName, multipartUploadID, new ArrayList<>());
+
+ s3InitiateMultipartUploadResponseV1.addToDBBatch(omMetadataManager,
+ batchOperation);
+
+ // Add a dummy part for testing. No key locations are attached because
+ // this test only verifies whether entries reach the delete table.
+ OmMultipartKeyInfo omMultipartKeyInfo =
+ s3InitiateMultipartUploadResponseV1.getOmMultipartKeyInfo();
+
+ PartKeyInfo part1 = createPartKeyInfoV1(volumeName, bucketName, parentID,
+ fileName, 1);
+
+ addPart(1, part1, omMultipartKeyInfo);
+
+ long clientId = Time.now();
+ String openKey = omMetadataManager.getOpenFileName(parentID, fileName,
+ clientId);
+
+ // Commit part 1 again so the earlier part1 entry becomes the "old"
+ // part key info, which must be moved to the delete table.
+ S3MultipartUploadCommitPartResponse s3MultipartUploadCommitPartResponse =
+ createS3CommitMPUResponseV1(volumeName, bucketName, parentID,
+ keyName, multipartUploadID,
+ omMultipartKeyInfo.getPartKeyInfo(1),
+ omMultipartKeyInfo,
+ OzoneManagerProtocolProtos.Status.OK, openKey);
+
+ s3MultipartUploadCommitPartResponse.checkAndUpdateDB(omMetadataManager,
+ batchOperation);
+
+ Assert.assertNull(omMetadataManager.getOpenKeyTable().get(multipartKey));
+ Assert.assertNull(
+ omMetadataManager.getMultipartInfoTable().get(multipartKey));
+
+ omMetadataManager.getStore().commitBatchOperation(batchOperation);
+
+ // One part was replaced, so exactly one entry lands in the delete table.
+ Assert.assertEquals(1, omMetadataManager.countRowsInTable(
+ omMetadataManager.getDeletedTable()));
+
+ String part1DeletedKeyName =
+ omMultipartKeyInfo.getPartKeyInfo(1).getPartName();
+
+ Assert.assertNotNull(omMetadataManager.getDeletedTable().get(
+ part1DeletedKeyName));
+
+ RepeatedOmKeyInfo ro =
+ omMetadataManager.getDeletedTable().get(part1DeletedKeyName);
+ Assert.assertEquals(OmKeyInfo.getFromProtobuf(part1.getPartKeyInfo()),
+ ro.getOmKeyInfoList().get(0));
+ }
+
+ @Test
+ public void testWithMultipartUploadError() throws Exception {
+
+ String volumeName = UUID.randomUUID().toString();
+ String bucketName = UUID.randomUUID().toString();
+ String keyName = getKeyName();
+
+ TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+ omMetadataManager);
+ createParentPath(volumeName, bucketName);
+
+ String multipartUploadID = UUID.randomUUID().toString();
+
+ String fileName = OzoneFSUtils.getFileName(keyName);
+ String multipartKey = omMetadataManager.getMultipartKey(parentID, fileName,
+ multipartUploadID);
+
+ S3InitiateMultipartUploadResponse s3InitiateMultipartUploadResponseV1 =
+ createS3InitiateMPUResponseV1(volumeName, bucketName, parentID,
+ keyName, multipartUploadID, new ArrayList<>());
+
+ s3InitiateMultipartUploadResponseV1.addToDBBatch(omMetadataManager,
+ batchOperation);
+
+ // Add a dummy part for testing. No key locations are attached because
+ // this test only verifies whether entries reach the delete table.
+ OmMultipartKeyInfo omMultipartKeyInfo =
+ s3InitiateMultipartUploadResponseV1.getOmMultipartKeyInfo();
+
+ PartKeyInfo part1 = createPartKeyInfoV1(volumeName, bucketName, parentID,
+ fileName, 1);
+
+ addPart(1, part1, omMultipartKeyInfo);
+
+ long clientId = Time.now();
+ String openKey = omMetadataManager.getOpenFileName(parentID, fileName,
+ clientId);
+
+ // Commit against a mismatched key ("invalid" suffix) so the response
+ // carries NO_SUCH_MULTIPART_UPLOAD_ERROR.
+ S3MultipartUploadCommitPartResponse s3MultipartUploadCommitPartResponse =
+ createS3CommitMPUResponseV1(volumeName, bucketName, parentID,
+ keyName + "invalid", multipartUploadID,
+ omMultipartKeyInfo.getPartKeyInfo(1),
+ omMultipartKeyInfo, OzoneManagerProtocolProtos.Status
+ .NO_SUCH_MULTIPART_UPLOAD_ERROR, openKey);
+
+ s3MultipartUploadCommitPartResponse.checkAndUpdateDB(omMetadataManager,
+ batchOperation);
+
+ Assert.assertNull(omMetadataManager.getOpenKeyTable().get(multipartKey));
+ Assert.assertNull(
+ omMetadataManager.getMultipartInfoTable().get(multipartKey));
+
+ omMetadataManager.getStore().commitBatchOperation(batchOperation);
+
+ // The open key of the failed commit must land in the delete table.
+ Assert.assertEquals(1, omMetadataManager.countRowsInTable(
+ omMetadataManager.getDeletedTable()));
+
+ Assert.assertNotNull(omMetadataManager.getDeletedTable().get(
+ openKey));
+ }
+
+ // Keys are placed under the test directory hierarchy.
+ private String getKeyName() {
+ return dirName + UUID.randomUUID().toString();
+ }
+
+ private void createParentPath(String volumeName, String bucketName)
+ throws Exception {
+ // Create parent dirs for the path
+ parentID = TestOMRequestUtils.addParentsToDirTable(volumeName, bucketName,
+ dirName, omMetadataManager);
+ }
+}
---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org