Posted to common-commits@hadoop.apache.org by na...@apache.org on 2019/08/29 13:10:40 UTC

[hadoop] branch ozone-0.4.1 updated (d8226cb -> e5c64a8)

This is an automated email from the ASF dual-hosted git repository.

nanda pushed a change to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


    from d8226cb  HDDS-1915. Remove hadoop script from ozone distribution
     new 1845d5a  HDDS-1942. Support copy during S3 multipart upload part creation
     new e5c64a8  HDDS-1950. S3 MPU part-list call fails if there are no parts

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../src/main/smoketest/s3/MultipartUpload.robot    |  52 +++++
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java |  24 ++-
 .../apache/hadoop/ozone/om/TestKeyManagerUnit.java | 111 ++++++++++
 ...CopyObjectResponse.java => CopyPartResult.java} |  22 +-
 .../hadoop/ozone/s3/endpoint/ObjectEndpoint.java   |  79 +++++--
 .../org/apache/hadoop/ozone/s3/util/S3Consts.java  |   2 +
 .../hadoop/ozone/client/OzoneBucketStub.java       |  15 +-
 ...plete.java => TestMultipartUploadWithCopy.java} | 237 +++++++++++----------
 ...BucketResponse.java => TestObjectEndpoint.java} |  29 ++-
 9 files changed, 421 insertions(+), 150 deletions(-)
 create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java
 copy hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/{CopyObjectResponse.java => CopyPartResult.java} (86%)
 copy hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/{TestMultipartUploadComplete.java => TestMultipartUploadWithCopy.java} (55%)
 copy hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/{TestBucketResponse.java => TestObjectEndpoint.java} (54%)




[hadoop] 02/02: HDDS-1950. S3 MPU part-list call fails if there are no parts

Posted by na...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit e5c64a8f523d0ae302e5b428d575ad6fd59ecb2a
Author: Márton Elek <el...@apache.org>
AuthorDate: Sun Aug 11 14:32:00 2019 +0200

    HDDS-1950. S3 MPU part-list call fails if there are no parts
    
    Signed-off-by: Anu Engineer <ae...@apache.org>
    (cherry picked from commit aef6a4fe0d04fe0d42fa36dc04cac2cc53ae8efd)
---
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java |  24 ++++-
 .../apache/hadoop/ozone/om/TestKeyManagerUnit.java | 111 +++++++++++++++++++++
 2 files changed, 133 insertions(+), 2 deletions(-)
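
Before this fix, listing the parts of a fresh multipart upload (one with
zero uploaded parts) failed inside the Ozone Manager. A minimal
reproduction sketch with the AWS SDK for Java v1; the client wiring and
the bucket and key names are placeholder assumptions, not part of the
patch:

    import com.amazonaws.services.s3.AmazonS3;
    import com.amazonaws.services.s3.AmazonS3ClientBuilder;
    import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
    import com.amazonaws.services.s3.model.ListPartsRequest;
    import com.amazonaws.services.s3.model.PartListing;

    public class ListPartsOfFreshUpload {
      public static void main(String[] args) {
        // Assumes a client configured against an S3-compatible endpoint
        // (for Ozone, the s3gateway).
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        String uploadId = s3.initiateMultipartUpload(
            new InitiateMultipartUploadRequest("bucket1", "key1"))
            .getUploadId();
        // No parts uploaded yet; with this fix the listing is simply empty.
        PartListing listing = s3.listParts(
            new ListPartsRequest("bucket1", "key1", uploadId));
        System.out.println(listing.getParts().size()); // 0
      }
    }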

diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index 30b4604..d351320 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -1296,8 +1296,9 @@ public class KeyManagerImpl implements KeyManager {
             multipartKeyInfo.getPartKeyInfoMap();
         Iterator<Map.Entry<Integer, PartKeyInfo>> partKeyInfoMapIterator =
             partKeyInfoMap.entrySet().iterator();
-        HddsProtos.ReplicationType replicationType =
-            partKeyInfoMap.firstEntry().getValue().getPartKeyInfo().getType();
+
+        HddsProtos.ReplicationType replicationType = null;
+
         int count = 0;
         List<OmPartInfo> omPartInfoList = new ArrayList<>();
 
@@ -1314,11 +1315,30 @@ public class KeyManagerImpl implements KeyManager {
                 partKeyInfo.getPartKeyInfo().getModificationTime(),
                 partKeyInfo.getPartKeyInfo().getDataSize());
             omPartInfoList.add(omPartInfo);
+
+            //if there are parts, use replication type from one of the parts
             replicationType = partKeyInfo.getPartKeyInfo().getType();
             count++;
           }
         }
 
+        if (replicationType == null) {
+          //if there are no parts, use the replicationType from the open key.
+
+          OmKeyInfo omKeyInfo =
+              metadataManager.getOpenKeyTable().get(multipartKey);
+
+          if (omKeyInfo == null) {
+            throw new IllegalStateException(
+                "Open key is missing for multipart upload " + multipartKey);
+          }
+
+          replicationType = omKeyInfo.getType();
+
+        }
+        Preconditions.checkNotNull(replicationType,
+            "Replication type can't be identified");
+
         if (partKeyInfoMapIterator.hasNext()) {
           Map.Entry<Integer, PartKeyInfo> partKeyInfoEntry =
               partKeyInfoMapIterator.next();
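
The hunk above changes how the replication type is resolved: it is no
longer read eagerly from the first entry of the part map (which is empty
for a fresh upload) but taken from an uploaded part when one exists,
falling back to the open key written at initiate time. A standalone
sketch of that resolution order, using simplified stand-in types rather
than Ozone's:

    import java.util.List;

    public class ReplicationTypeFallback {

      enum ReplicationType { RATIS, STAND_ALONE }

      static ReplicationType resolve(List<ReplicationType> partTypes,
          ReplicationType openKeyType) {
        // 1. Prefer the type recorded on an uploaded part.
        if (!partTypes.isEmpty()) {
          return partTypes.get(partTypes.size() - 1);
        }
        // 2. Zero parts: fall back to the open key created when the
        //    multipart upload was initiated.
        if (openKeyType == null) {
          throw new IllegalStateException(
              "Open key is missing for multipart upload");
        }
        return openKeyType;
      }
    }
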
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java
new file mode 100644
index 0000000..a5a446c
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package org.apache.hadoop.ozone.om;
+
+import java.io.IOException;
+import java.util.ArrayList;
+
+import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.StorageType;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
+import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs.Builder;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts;
+import org.apache.hadoop.ozone.security.OzoneBlockTokenSecretManager;
+import org.apache.hadoop.test.GenericTestUtils;
+
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+/**
+ * Unit test key manager.
+ */
+public class TestKeyManagerUnit {
+
+  private OmMetadataManagerImpl metadataManager;
+  private KeyManagerImpl keyManager;
+
+  @Before
+  public void setup() throws IOException {
+    OzoneConfiguration configuration = new OzoneConfiguration();
+    configuration.set(HddsConfigKeys.OZONE_METADATA_DIRS,
+        GenericTestUtils.getRandomizedTestDir().toString());
+    metadataManager = new OmMetadataManagerImpl(configuration);
+    keyManager = new KeyManagerImpl(
+        Mockito.mock(ScmBlockLocationProtocol.class),
+        metadataManager,
+        configuration,
+        "omtest",
+        Mockito.mock(OzoneBlockTokenSecretManager.class)
+    );
+  }
+
+  @Test
+  public void listMultipartUploadPartsWithZeroUpload() throws IOException {
+    //GIVEN
+    createBucket(metadataManager, "vol1", "bucket1");
+
+    OmMultipartInfo omMultipartInfo =
+        initMultipartUpload(keyManager, "vol1", "bucket1", "dir/key1");
+
+    //WHEN
+    OmMultipartUploadListParts omMultipartUploadListParts = keyManager
+        .listParts("vol1", "bucket1", "dir/key1", omMultipartInfo.getUploadID(),
+            0, 10);
+
+    Assert.assertEquals(0,
+        omMultipartUploadListParts.getPartInfoList().size());
+
+  }
+
+  private void createBucket(OmMetadataManagerImpl omMetadataManager,
+      String volume, String bucket)
+      throws IOException {
+    omMetadataManager.getBucketTable()
+        .put(omMetadataManager.getBucketKey(volume, bucket),
+            OmBucketInfo.newBuilder()
+                .setVolumeName(volume)
+                .setBucketName(bucket)
+                .setStorageType(StorageType.DISK)
+                .setIsVersionEnabled(false)
+                .setAcls(new ArrayList<>())
+                .build());
+  }
+
+  private OmMultipartInfo initMultipartUpload(KeyManagerImpl omtest,
+      String volume, String bucket, String key)
+      throws IOException {
+    OmKeyArgs key1 = new Builder()
+        .setVolumeName(volume)
+        .setBucketName(bucket)
+        .setKeyName(key)
+        .setType(ReplicationType.RATIS)
+        .setFactor(ReplicationFactor.THREE)
+        .setAcls(new ArrayList<>())
+        .build();
+    return omtest.initiateMultipartUpload(key1);
+  }
+}
\ No newline at end of file




[hadoop] 01/02: HDDS-1942. Support copy during S3 multipart upload part creation

Posted by na...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 1845d5a970819e36d06bfc4d3c693bd2b64ed893
Author: Márton Elek <el...@apache.org>
AuthorDate: Sun Aug 11 14:45:02 2019 +0200

    HDDS-1942. Support copy during S3 multipart upload part creation
    
    Signed-off-by: Anu Engineer <ae...@apache.org>
    (cherry picked from commit 2fcd0da7dcbc15793041efb079210e06272482a4)
---
 .../src/main/smoketest/s3/MultipartUpload.robot    |  52 +++++
 .../hadoop/ozone/s3/endpoint/CopyPartResult.java   |  69 ++++++
 .../hadoop/ozone/s3/endpoint/ObjectEndpoint.java   |  79 +++++--
 .../org/apache/hadoop/ozone/s3/util/S3Consts.java  |   2 +
 .../hadoop/ozone/client/OzoneBucketStub.java       |  15 +-
 .../s3/endpoint/TestMultipartUploadWithCopy.java   | 233 +++++++++++++++++++++
 .../ozone/s3/endpoint/TestObjectEndpoint.java      |  53 +++++
 7 files changed, 483 insertions(+), 20 deletions(-)

diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot b/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot
index 0133d50..df95f4d 100644
--- a/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot
@@ -200,3 +200,55 @@ Test Multipart Upload with the simplified aws s3 cp API
                         Execute AWSS3Cli        cp s3://${BUCKET}/mpyawscli /tmp/part1.result
                         Execute AWSS3Cli        rm s3://${BUCKET}/mpyawscli
                         Compare files           /tmp/part1        /tmp/part1.result
+
+Test Multipart Upload Put With Copy
+    Run Keyword         Create Random file      5
+    ${result} =         Execute AWSS3APICli     put-object --bucket ${BUCKET} --key copytest/source --body /tmp/part1
+
+
+    ${result} =         Execute AWSS3APICli     create-multipart-upload --bucket ${BUCKET} --key copytest/destination
+
+    ${uploadID} =       Execute and checkrc      echo '${result}' | jq -r '.UploadId'    0
+                        Should contain           ${result}    ${BUCKET}
+                        Should contain           ${result}    UploadId
+
+    ${result} =         Execute AWSS3APICli      upload-part-copy --bucket ${BUCKET} --key copytest/destination --upload-id ${uploadID} --part-number 1 --copy-source ${BUCKET}/copytest/source
+                        Should contain           ${result}    ${BUCKET}
+                        Should contain           ${result}    ETag
+                        Should contain           ${result}    LastModified
+    ${eTag1} =          Execute and checkrc      echo '${result}' | jq -r '.CopyPartResult.ETag'   0
+
+
+                        Execute AWSS3APICli     complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key copytest/destination --multipart-upload 'Parts=[{ETag=${eTag1},PartNumber=1}]'
+                        Execute AWSS3APICli     get-object --bucket ${BUCKET} --key copytest/destination /tmp/part-result
+
+                        Compare files           /tmp/part1        /tmp/part-result
+
+Test Multipart Upload Put With Copy and range
+    Run Keyword         Create Random file      10
+    ${result} =         Execute AWSS3APICli     put-object --bucket ${BUCKET} --key copyrange/source --body /tmp/part1
+
+
+    ${result} =         Execute AWSS3APICli     create-multipart-upload --bucket ${BUCKET} --key copyrange/destination
+
+    ${uploadID} =       Execute and checkrc      echo '${result}' | jq -r '.UploadId'    0
+                        Should contain           ${result}    ${BUCKET}
+                        Should contain           ${result}    UploadId
+
+    ${result} =         Execute AWSS3APICli      upload-part-copy --bucket ${BUCKET} --key copyrange/destination --upload-id ${uploadID} --part-number 1 --copy-source ${BUCKET}/copyrange/source --copy-source-range bytes=0-10485758
+                        Should contain           ${result}    ${BUCKET}
+                        Should contain           ${result}    ETag
+                        Should contain           ${result}    LastModified
+    ${eTag1} =          Execute and checkrc      echo '${result}' | jq -r '.CopyPartResult.ETag'   0
+
+    ${result} =         Execute AWSS3APICli      upload-part-copy --bucket ${BUCKET} --key copyrange/destination --upload-id ${uploadID} --part-number 2 --copy-source ${BUCKET}/copyrange/source --copy-source-range bytes=10485758-10485760
+                        Should contain           ${result}    ${BUCKET}
+                        Should contain           ${result}    ETag
+                        Should contain           ${result}    LastModified
+    ${eTag2} =          Execute and checkrc      echo '${result}' | jq -r '.CopyPartResult.ETag'   0
+
+
+                        Execute AWSS3APICli     complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key copyrange/destination --multipart-upload 'Parts=[{ETag=${eTag1},PartNumber=1},{ETag=${eTag2},PartNumber=2}]'
+                        Execute AWSS3APICli     get-object --bucket ${BUCKET} --key copyrange/destination /tmp/part-result
+
+                        Compare files           /tmp/part1        /tmp/part-result
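
The robot tests above drive the feature through "aws s3api
upload-part-copy". The same flow in Java, sketched with the AWS SDK for
Java v1 (bucket and key names are placeholders; withFirstByte and
withLastByte map to the x-amz-copy-source-range header):

    import java.util.Arrays;

    import com.amazonaws.services.s3.AmazonS3;
    import com.amazonaws.services.s3.AmazonS3ClientBuilder;
    import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
    import com.amazonaws.services.s3.model.CopyPartRequest;
    import com.amazonaws.services.s3.model.CopyPartResult;
    import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;

    public class UploadPartCopySketch {
      public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        String uploadId = s3.initiateMultipartUpload(
            new InitiateMultipartUploadRequest(
                "bucket1", "copytest/destination")).getUploadId();
        // Part 1 is copied server-side from an existing key instead of
        // being uploaded from the client.
        CopyPartResult part1 = s3.copyPart(new CopyPartRequest()
            .withSourceBucketName("bucket1")
            .withSourceKey("copytest/source")
            .withDestinationBucketName("bucket1")
            .withDestinationKey("copytest/destination")
            .withUploadId(uploadId)
            .withPartNumber(1)
            .withFirstByte(0L)
            .withLastByte(1024L));
        s3.completeMultipartUpload(new CompleteMultipartUploadRequest(
            "bucket1", "copytest/destination", uploadId,
            Arrays.asList(part1.getPartETag())));
      }
    }
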
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyPartResult.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyPartResult.java
new file mode 100644
index 0000000..c4e65aa
--- /dev/null
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyPartResult.java
@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.s3.endpoint;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter;
+import java.time.Instant;
+
+import org.apache.hadoop.ozone.s3.commontypes.IsoDateAdapter;
+
+/**
+ * Copy part result returned for upload-part-copy requests.
+ */
+@XmlAccessorType(XmlAccessType.FIELD)
+@XmlRootElement(name = "CopyPartResult",
+    namespace = "http://s3.amazonaws.com/doc/2006-03-01/")
+public class CopyPartResult {
+
+  @XmlJavaTypeAdapter(IsoDateAdapter.class)
+  @XmlElement(name = "LastModified")
+  private Instant lastModified;
+
+  @XmlElement(name = "ETag")
+  private String eTag;
+
+  public CopyPartResult() {
+  }
+
+  public CopyPartResult(String eTag) {
+    this.eTag = eTag;
+    this.lastModified = Instant.now();
+  }
+
+  public Instant getLastModified() {
+    return lastModified;
+  }
+
+  public void setLastModified(Instant lastModified) {
+    this.lastModified = lastModified;
+  }
+
+  public String getETag() {
+    return eTag;
+  }
+
+  public void setETag(String tag) {
+    this.eTag = tag;
+  }
+
+}
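
The JAXB annotations on CopyPartResult define the XML body that S3
clients expect from upload-part-copy. A small sketch of how the class
marshals (assuming it is visible from the calling code):

    import javax.xml.bind.JAXBContext;
    import javax.xml.bind.Marshaller;

    public class CopyPartResultXml {
      public static void main(String[] args) throws Exception {
        CopyPartResult result = new CopyPartResult("\"etag-1\"");
        Marshaller marshaller = JAXBContext
            .newInstance(CopyPartResult.class).createMarshaller();
        marshaller.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, true);
        // Prints a <CopyPartResult> element in the
        // http://s3.amazonaws.com/doc/2006-03-01/ namespace with
        // LastModified and ETag children.
        marshaller.marshal(result, System.out);
      }
    }
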
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
index 70bfb7f..490f0fb 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
@@ -76,11 +76,13 @@ import static javax.ws.rs.core.HttpHeaders.CONTENT_LENGTH;
 import static javax.ws.rs.core.HttpHeaders.LAST_MODIFIED;
 import org.apache.commons.io.IOUtils;
 
+import org.apache.commons.lang3.tuple.Pair;
 import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.ENTITY_TOO_SMALL;
 import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NO_SUCH_UPLOAD;
 import static org.apache.hadoop.ozone.s3.util.S3Consts.ACCEPT_RANGE_HEADER;
 import static org.apache.hadoop.ozone.s3.util.S3Consts.CONTENT_RANGE_HEADER;
 import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_HEADER;
+import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_HEADER_RANGE;
 import static org.apache.hadoop.ozone.s3.util.S3Consts.RANGE_HEADER;
 import static org.apache.hadoop.ozone.s3.util.S3Consts.RANGE_HEADER_SUPPORTED_UNIT;
 import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER;
@@ -537,12 +539,45 @@ public class ObjectEndpoint extends EndpointBase {
       OzoneBucket ozoneBucket = getBucket(bucket);
       OzoneOutputStream ozoneOutputStream = ozoneBucket.createMultipartKey(
           key, length, partNumber, uploadID);
-      IOUtils.copy(body, ozoneOutputStream);
+
+      String copyHeader = headers.getHeaderString(COPY_SOURCE_HEADER);
+      if (copyHeader != null) {
+        Pair<String, String> result = parseSourceHeader(copyHeader);
+
+        String sourceBucket = result.getLeft();
+        String sourceKey = result.getRight();
+
+        try (OzoneInputStream sourceObject =
+            getBucket(sourceBucket).readKey(sourceKey)) {
+
+          String range =
+              headers.getHeaderString(COPY_SOURCE_HEADER_RANGE);
+          if (range != null) {
+            RangeHeader rangeHeader =
+                RangeHeaderParserUtil.parseRangeHeader(range, 0);
+            IOUtils.copyLarge(sourceObject, ozoneOutputStream,
+                rangeHeader.getStartOffset(),
+                rangeHeader.getEndOffset() - rangeHeader.getStartOffset());
+
+          } else {
+            IOUtils.copy(sourceObject, ozoneOutputStream);
+          }
+        }
+
+      } else {
+        IOUtils.copy(body, ozoneOutputStream);
+      }
       ozoneOutputStream.close();
       OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo =
           ozoneOutputStream.getCommitUploadPartInfo();
-      return Response.status(Status.OK).header("ETag",
-          omMultipartCommitUploadPartInfo.getPartName()).build();
+      String eTag = omMultipartCommitUploadPartInfo.getPartName();
+
+      if (copyHeader != null) {
+        return Response.ok(new CopyPartResult(eTag)).build();
+      } else {
+        return Response.ok().header("ETag",
+            eTag).build();
+      }
 
     } catch (OMException ex) {
       if (ex.getResult() == ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR) {
@@ -628,20 +663,10 @@ public class ObjectEndpoint extends EndpointBase {
                                         boolean storageTypeDefault)
       throws OS3Exception, IOException {
 
-    if (copyHeader.startsWith("/")) {
-      copyHeader = copyHeader.substring(1);
-    }
-    int pos = copyHeader.indexOf("/");
-    if (pos == -1) {
-      OS3Exception ex = S3ErrorTable.newError(S3ErrorTable
-          .INVALID_ARGUMENT, copyHeader);
-      ex.setErrorMessage("Copy Source must mention the source bucket and " +
-          "key: sourcebucket/sourcekey");
-      throw ex;
-    }
-    String sourceBucket = copyHeader.substring(0, pos);
-    String sourceKey = copyHeader.substring(pos + 1);
+    Pair<String, String> result = parseSourceHeader(copyHeader);
 
+    String sourceBucket = result.getLeft();
+    String sourceKey = result.getRight();
     OzoneInputStream sourceInputStream = null;
     OzoneOutputStream destOutputStream = null;
     boolean closed = false;
@@ -720,4 +745,26 @@ public class ObjectEndpoint extends EndpointBase {
       }
     }
   }
+
+  /**
+   * Parse the key and bucket name from copy header.
+   */
+  @VisibleForTesting
+  public static Pair<String, String> parseSourceHeader(String copyHeader)
+      throws OS3Exception {
+    String header = copyHeader;
+    if (header.startsWith("/")) {
+      header = copyHeader.substring(1);
+    }
+    int pos = header.indexOf("/");
+    if (pos == -1) {
+      OS3Exception ex = S3ErrorTable.newError(S3ErrorTable
+          .INVALID_ARGUMENT, header);
+      ex.setErrorMessage("Copy Source must mention the source bucket and " +
+          "key: sourcebucket/sourcekey");
+      throw ex;
+    }
+
+    return Pair.of(header.substring(0, pos), header.substring(pos + 1));
+  }
 }
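
The range branch above relies on commons-io's
IOUtils.copyLarge(input, output, inputOffset, length), which skips
inputOffset bytes of the input and then copies exactly length bytes. A
self-contained demonstration:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;

    import org.apache.commons.io.IOUtils;

    public class CopyLargeDemo {
      public static void main(String[] args) throws Exception {
        ByteArrayInputStream in =
            new ByteArrayInputStream("0123456789".getBytes("UTF-8"));
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        IOUtils.copyLarge(in, out, 2, 4 - 2); // skip 2 bytes, copy 2
        System.out.println(out.toString("UTF-8")); // prints "23"
      }
    }
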
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java
index 38c4e6a..9516823 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java
@@ -34,6 +34,8 @@ public final class S3Consts {
   }
 
   public static final String COPY_SOURCE_HEADER = "x-amz-copy-source";
+  public static final String COPY_SOURCE_HEADER_RANGE =
+      "x-amz-copy-source-range";
   public static final String STORAGE_CLASS_HEADER = "x-amz-storage-class";
   public static final String ENCODING_TYPE = "url";
 
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java
index 9f96266..bbf94cc 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java
@@ -210,16 +210,23 @@ public class OzoneBucketStub extends OzoneBucket {
       }
 
       int count = 1;
+
+      ByteArrayOutputStream output = new ByteArrayOutputStream();
+
       for (Map.Entry<Integer, String> part: partsMap.entrySet()) {
+        Part recordedPart = partsList.get(part.getKey());
         if (part.getKey() != count) {
           throw new OMException(ResultCodes.MISSING_UPLOAD_PARTS);
-        } else if (!part.getValue().equals(
-            partsList.get(part.getKey()).getPartName())) {
-          throw new OMException(ResultCodes.MISMATCH_MULTIPART_LIST);
         } else {
-          count++;
+          if (!part.getValue().equals(recordedPart.getPartName())) {
+            throw new OMException(ResultCodes.MISMATCH_MULTIPART_LIST);
+          } else {
+            count++;
+            output.write(recordedPart.getContent());
+          }
         }
       }
+      keyContents.put(key, output.toByteArray());
     }
 
     return new OmMultipartUploadCompleteInfo(getVolumeName(), getName(), key,
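
With this change the stub materializes the completed key by
concatenating the recorded parts in order, so tests can read back the
assembled content. The accumulation pattern in isolation, with stand-in
types rather than Ozone's:

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.util.Map;
    import java.util.SortedMap;
    import java.util.TreeMap;

    public class PartConcatenation {

      static byte[] concatenate(SortedMap<Integer, byte[]> parts)
          throws IOException {
        ByteArrayOutputStream output = new ByteArrayOutputStream();
        int expected = 1;
        for (Map.Entry<Integer, byte[]> part : parts.entrySet()) {
          // Part numbers must be consecutive, starting at 1.
          if (part.getKey() != expected) {
            throw new IOException("Missing upload part " + expected);
          }
          output.write(part.getValue());
          expected++;
        }
        return output.toByteArray();
      }

      public static void main(String[] args) throws IOException {
        SortedMap<Integer, byte[]> parts = new TreeMap<>();
        parts.put(1, "hello ".getBytes());
        parts.put(2, "world".getBytes());
        System.out.println(new String(concatenate(parts))); // hello world
      }
    }
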
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java
new file mode 100644
index 0000000..425bfc4
--- /dev/null
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java
@@ -0,0 +1,233 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+package org.apache.hadoop.ozone.s3.endpoint;
+
+import javax.ws.rs.core.HttpHeaders;
+import javax.ws.rs.core.Response;
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Scanner;
+
+import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.ozone.client.ObjectStore;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneClientStub;
+import org.apache.hadoop.ozone.s3.endpoint.CompleteMultipartUploadRequest.Part;
+import org.apache.hadoop.ozone.s3.exception.OS3Exception;
+
+import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_HEADER;
+import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_HEADER_RANGE;
+import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER;
+import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.mockito.Mockito;
+import static org.mockito.Mockito.when;
+
+/**
+ * Class to test Multipart upload where parts are created with copy header.
+ */
+
+public class TestMultipartUploadWithCopy {
+
+  private final static ObjectEndpoint REST = new ObjectEndpoint();
+
+  private final static String BUCKET = "s3bucket";
+  private final static String KEY = "key2";
+  private final static String EXISTING_KEY = "key1";
+  private static final String EXISTING_KEY_CONTENT = "testkey";
+  private final static OzoneClientStub CLIENT = new OzoneClientStub();
+  private static final int RANGE_FROM = 2;
+  private static final int RANGE_TO = 4;
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+
+    ObjectStore objectStore = CLIENT.getObjectStore();
+    objectStore.createS3Bucket("ozone", BUCKET);
+
+    OzoneBucket bucket = getOzoneBucket(objectStore, BUCKET);
+
+    byte[] keyContent = EXISTING_KEY_CONTENT.getBytes();
+    try (OutputStream stream = bucket
+        .createKey(EXISTING_KEY, keyContent.length, ReplicationType.RATIS,
+            ReplicationFactor.THREE, new HashMap<>())) {
+      stream.write(keyContent);
+    }
+
+    HttpHeaders headers = Mockito.mock(HttpHeaders.class);
+    when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn(
+        "STANDARD");
+
+    REST.setHeaders(headers);
+    REST.setClient(CLIENT);
+  }
+
+  @Test
+  public void testMultipart() throws Exception {
+
+    // Initiate multipart upload
+    String uploadID = initiateMultipartUpload(KEY);
+
+    List<Part> partsList = new ArrayList<>();
+
+    // Upload parts
+    String content = "Multipart Upload 1";
+    int partNumber = 1;
+
+    Part part1 = uploadPart(KEY, uploadID, partNumber, content);
+    partsList.add(part1);
+
+    partNumber = 2;
+    Part part2 =
+        uploadPartWithCopy(KEY, uploadID, partNumber,
+            BUCKET + "/" + EXISTING_KEY, null);
+    partsList.add(part2);
+
+    partNumber = 3;
+    Part part3 =
+        uploadPartWithCopy(KEY, uploadID, partNumber,
+            BUCKET + "/" + EXISTING_KEY,
+            "bytes=" + RANGE_FROM + "-" + RANGE_TO);
+    partsList.add(part3);
+
+    // complete multipart upload
+    CompleteMultipartUploadRequest completeMultipartUploadRequest = new
+        CompleteMultipartUploadRequest();
+    completeMultipartUploadRequest.setPartList(partsList);
+
+    completeMultipartUpload(KEY, completeMultipartUploadRequest,
+        uploadID);
+
+    OzoneBucket bucket = getOzoneBucket(CLIENT.getObjectStore(), BUCKET);
+    try (InputStream is = bucket.readKey(KEY)) {
+      String keyContent = new Scanner(is).useDelimiter("\\A").next();
+      Assert.assertEquals(content + EXISTING_KEY_CONTENT + EXISTING_KEY_CONTENT
+          .substring(RANGE_FROM, RANGE_TO), keyContent);
+    }
+  }
+
+  private String initiateMultipartUpload(String key) throws IOException,
+      OS3Exception {
+    setHeaders();
+    Response response = REST.initializeMultipartUpload(BUCKET, key);
+    MultipartUploadInitiateResponse multipartUploadInitiateResponse =
+        (MultipartUploadInitiateResponse) response.getEntity();
+    assertNotNull(multipartUploadInitiateResponse.getUploadID());
+    String uploadID = multipartUploadInitiateResponse.getUploadID();
+
+    assertEquals(response.getStatus(), 200);
+
+    return uploadID;
+
+  }
+
+  private Part uploadPart(String key, String uploadID, int partNumber, String
+      content) throws IOException, OS3Exception {
+    setHeaders();
+    ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes());
+    Response response = REST.put(BUCKET, key, content.length(), partNumber,
+        uploadID, body);
+    assertEquals(response.getStatus(), 200);
+    assertNotNull(response.getHeaderString("ETag"));
+    Part part = new Part();
+    part.seteTag(response.getHeaderString("ETag"));
+    part.setPartNumber(partNumber);
+
+    return part;
+  }
+
+  private Part uploadPartWithCopy(String key, String uploadID, int partNumber,
+      String keyOrigin, String range) throws IOException, OS3Exception {
+    Map<String, String> additionalHeaders = new HashMap<>();
+    additionalHeaders.put(COPY_SOURCE_HEADER, keyOrigin);
+    if (range != null) {
+      additionalHeaders.put(COPY_SOURCE_HEADER_RANGE, range);
+
+    }
+    setHeaders(additionalHeaders);
+
+    ByteArrayInputStream body = new ByteArrayInputStream("".getBytes());
+    Response response = REST.put(BUCKET, key, 0, partNumber,
+        uploadID, body);
+    assertEquals(response.getStatus(), 200);
+
+    CopyPartResult result = (CopyPartResult) response.getEntity();
+    assertNotNull(result.getETag());
+    assertNotNull(result.getLastModified());
+    Part part = new Part();
+    part.seteTag(result.getETag());
+    part.setPartNumber(partNumber);
+
+    return part;
+  }
+
+  private void completeMultipartUpload(String key,
+      CompleteMultipartUploadRequest completeMultipartUploadRequest,
+      String uploadID) throws IOException, OS3Exception {
+    setHeaders();
+    Response response = REST.completeMultipartUpload(BUCKET, key, uploadID,
+        completeMultipartUploadRequest);
+
+    assertEquals(response.getStatus(), 200);
+
+    CompleteMultipartUploadResponse completeMultipartUploadResponse =
+        (CompleteMultipartUploadResponse) response.getEntity();
+
+    assertEquals(completeMultipartUploadResponse.getBucket(), BUCKET);
+    assertEquals(completeMultipartUploadResponse.getKey(), KEY);
+    assertEquals(completeMultipartUploadResponse.getLocation(), BUCKET);
+    assertNotNull(completeMultipartUploadResponse.getETag());
+  }
+
+  private void setHeaders(Map<String, String> additionalHeaders) {
+    HttpHeaders headers = Mockito.mock(HttpHeaders.class);
+    when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn(
+        "STANDARD");
+
+    additionalHeaders
+        .forEach((k, v) -> when(headers.getHeaderString(k)).thenReturn(v));
+    REST.setHeaders(headers);
+  }
+
+  private void setHeaders() {
+    setHeaders(new HashMap<>());
+  }
+
+  private static OzoneBucket getOzoneBucket(ObjectStore objectStore,
+      String bucketName)
+      throws IOException {
+
+    String ozoneBucketName = objectStore.getOzoneBucketName(bucketName);
+    String ozoneVolumeName = objectStore.getOzoneVolumeName(bucketName);
+
+    return objectStore.getVolume(ozoneVolumeName).getBucket(ozoneBucketName);
+  }
+}
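
The read-back assertion in testMultipart uses the Scanner "\A" idiom:
the delimiter \A matches only the beginning of input, so next() returns
the entire stream as a single token. In isolation:

    import java.io.ByteArrayInputStream;
    import java.util.Scanner;

    public class ReadWholeStream {
      public static void main(String[] args) {
        ByteArrayInputStream in =
            new ByteArrayInputStream("whole stream".getBytes());
        String all = new Scanner(in).useDelimiter("\\A").next();
        System.out.println(all); // whole stream
      }
    }
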
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectEndpoint.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectEndpoint.java
new file mode 100644
index 0000000..070c827
--- /dev/null
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectEndpoint.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+package org.apache.hadoop.ozone.s3.endpoint;
+
+import org.apache.hadoop.ozone.s3.exception.OS3Exception;
+
+import org.apache.commons.lang3.tuple.Pair;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Test static utility methods of the ObjectEndpoint.
+ */
+public class TestObjectEndpoint {
+
+  @Test
+  public void parseSourceHeader() throws OS3Exception {
+    Pair<String, String> bucketKey =
+        ObjectEndpoint.parseSourceHeader("bucket1/key1");
+
+    Assert.assertEquals("bucket1", bucketKey.getLeft());
+
+    Assert.assertEquals("key1", bucketKey.getRight());
+  }
+
+  @Test
+  public void parseSourceHeaderWithPrefix() throws OS3Exception {
+    Pair<String, String> bucketKey =
+        ObjectEndpoint.parseSourceHeader("/bucket1/key1");
+
+    Assert.assertEquals("bucket1", bucketKey.getLeft());
+
+    Assert.assertEquals("key1", bucketKey.getRight());
+  }
+
+}
\ No newline at end of file


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org