You are viewing a plain text version of this content. The canonical link for it is here.
Posted to oak-commits@jackrabbit.apache.org by ma...@apache.org on 2019/08/06 21:50:46 UTC
svn commit: r1864570 - in /jackrabbit/oak/trunk:
oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/
oak-blob-cloud/src/main/java/org/apache/jackrabbit/oak/blob/cloud/s3/
oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/directaccess/
Author: mattryan
Date: Tue Aug 6 21:50:46 2019
New Revision: 1864570
URL: http://svn.apache.org/viewvc?rev=1864570&view=rev
Log:
OAK-8520: Return existing DataRecord when completeUpload called for existing binary
Modified:
jackrabbit/oak/trunk/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobStoreBackend.java
jackrabbit/oak/trunk/oak-blob-cloud/src/main/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3Backend.java
jackrabbit/oak/trunk/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/directaccess/AbstractDataRecordAccessProviderTest.java
Modified: jackrabbit/oak/trunk/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobStoreBackend.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobStoreBackend.java?rev=1864570&r1=1864569&r2=1864570&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobStoreBackend.java (original)
+++ jackrabbit/oak/trunk/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobStoreBackend.java Tue Aug 6 21:50:46 2019
@@ -949,30 +949,33 @@ public class AzureBlobStoreBackend exten
DataRecordUploadToken uploadToken = DataRecordUploadToken.fromEncodedToken(uploadTokenStr, getOrCreateReferenceKey());
String key = uploadToken.getBlobId();
DataIdentifier blobId = new DataIdentifier(getIdentifierName(key));
- try {
- if (uploadToken.getUploadId().isPresent()) {
- // An existing upload ID means this is a multi-part upload
- CloudBlockBlob blob = getAzureContainer().getBlockBlobReference(key);
- List<BlockEntry> blocks = blob.downloadBlockList(
- BlockListingFilter.UNCOMMITTED,
- AccessCondition.generateEmptyCondition(),
- null,
- null);
- blob.commitBlockList(blocks);
- }
- // else do nothing - single put is already complete
- if (! exists(blobId)) {
+ if (! exists(blobId)) {
+ try {
+ if (uploadToken.getUploadId().isPresent()) {
+ // An existing upload ID means this is a multi-part upload
+ CloudBlockBlob blob = getAzureContainer().getBlockBlobReference(key);
+ List<BlockEntry> blocks = blob.downloadBlockList(
+ BlockListingFilter.UNCOMMITTED,
+ AccessCondition.generateEmptyCondition(),
+ null,
+ null);
+ blob.commitBlockList(blocks);
+ }
+ // else do nothing - single put is already complete
+
+ if (!exists(blobId)) {
+ throw new DataRecordUploadException(
+ String.format("Unable to finalize direct write of binary %s", blobId));
+ }
+ } catch (URISyntaxException | StorageException e) {
throw new DataRecordUploadException(
- String.format("Unable to finalize direct write of binary %s", blobId));
+ String.format("Unable to finalize direct write of binary %s", blobId),
+ e
+ );
}
}
- catch (URISyntaxException | StorageException e) {
- throw new DataRecordUploadException(
- String.format("Unable to finalize direct write of binary %s", blobId),
- e
- );
- }
+ // else return the already existing record for this blob ID
return getRecord(blobId);
}
Modified: jackrabbit/oak/trunk/oak-blob-cloud/src/main/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3Backend.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-blob-cloud/src/main/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3Backend.java?rev=1864570&r1=1864569&r2=1864570&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-blob-cloud/src/main/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3Backend.java (original)
+++ jackrabbit/oak/trunk/oak-blob-cloud/src/main/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3Backend.java Tue Aug 6 21:50:46 2019
@@ -906,33 +906,37 @@ public class S3Backend extends AbstractS
DataRecordUploadToken uploadToken = DataRecordUploadToken.fromEncodedToken(uploadTokenStr, getOrCreateReferenceKey());
String blobId = uploadToken.getBlobId();
- if (uploadToken.getUploadId().isPresent()) {
- // An existing upload ID means this is a multi-part upload
- String uploadId = uploadToken.getUploadId().get();
- ListPartsRequest listPartsRequest = new ListPartsRequest(bucket, blobId, uploadId);
- PartListing listing = s3service.listParts(listPartsRequest);
- List<PartETag> eTags = Lists.newArrayList();
- for (PartSummary partSummary : listing.getParts()) {
- PartETag eTag = new PartETag(partSummary.getPartNumber(), partSummary.getETag());
- eTags.add(eTag);
- }
+ DataIdentifier dataIdentifier = new DataIdentifier(getIdentifierName(blobId));
- CompleteMultipartUploadRequest completeReq = new CompleteMultipartUploadRequest(
- bucket,
- blobId,
- uploadId,
- eTags
- );
+ if (! exists(dataIdentifier)) {
+ if (uploadToken.getUploadId().isPresent()) {
+ // An existing upload ID means this is a multi-part upload
+ String uploadId = uploadToken.getUploadId().get();
+ ListPartsRequest listPartsRequest = new ListPartsRequest(bucket, blobId, uploadId);
+ PartListing listing = s3service.listParts(listPartsRequest);
+ List<PartETag> eTags = Lists.newArrayList();
+ for (PartSummary partSummary : listing.getParts()) {
+ PartETag eTag = new PartETag(partSummary.getPartNumber(), partSummary.getETag());
+ eTags.add(eTag);
+ }
+
+ CompleteMultipartUploadRequest completeReq = new CompleteMultipartUploadRequest(
+ bucket,
+ blobId,
+ uploadId,
+ eTags
+ );
- s3service.completeMultipartUpload(completeReq);
- }
- // else do nothing - single-put upload is already complete
+ s3service.completeMultipartUpload(completeReq);
+ }
+ // else do nothing - single-put upload is already complete
- if (! s3service.doesObjectExist(bucket, blobId)) {
- throw new DataRecordUploadException(
- String.format("Unable to finalize direct write of binary %s", blobId)
- );
+ if (!s3service.doesObjectExist(bucket, blobId)) {
+ throw new DataRecordUploadException(
+ String.format("Unable to finalize direct write of binary %s", blobId)
+ );
+ }
}
return getRecord(new DataIdentifier(getIdentifierName(blobId)));
Modified: jackrabbit/oak/trunk/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/directaccess/AbstractDataRecordAccessProviderTest.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/directaccess/AbstractDataRecordAccessProviderTest.java?rev=1864570&r1=1864569&r2=1864570&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/directaccess/AbstractDataRecordAccessProviderTest.java (original)
+++ jackrabbit/oak/trunk/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/directaccess/AbstractDataRecordAccessProviderTest.java Tue Aug 6 21:50:46 2019
@@ -29,9 +29,11 @@ import static org.junit.Assert.fail;
import java.io.IOException;
import java.io.InputStream;
+import java.io.StringWriter;
import java.io.UnsupportedEncodingException;
import java.net.URI;
import java.net.URLDecoder;
+import java.nio.charset.Charset;
import java.util.Arrays;
import java.util.Map;
@@ -489,6 +491,35 @@ public abstract class AbstractDataRecord
}
@Test
+ public void testCompleteAlreadyUploadedBinaryReturnsSameBinaryIT() throws DataStoreException, DataRecordUploadException, IOException {
+ DataRecordAccessProvider ds = getDataStore();
+ DataRecord uploadedRecord = null;
+ try {
+ DataRecordUpload uploadContext = ds.initiateDataRecordUpload(ONE_MB, 1);
+ InputStream uploadStream = randomStream(0, ONE_MB);
+ URI uploadURI = uploadContext.getUploadURIs().iterator().next();
+ doHttpsUpload(uploadStream, ONE_MB, uploadURI);
+ uploadedRecord = ds.completeDataRecordUpload(uploadContext.getUploadToken());
+ assertEquals(ONE_MB, uploadedRecord.getLength());
+
+ DataRecord secondRecord = ds.completeDataRecordUpload(uploadContext.getUploadToken());
+
+ assertEquals(uploadedRecord.getIdentifier(), secondRecord.getIdentifier());
+ assertEquals(uploadedRecord.getLength(), secondRecord.getLength());
+ StringWriter original = new StringWriter();
+ IOUtils.copy(uploadedRecord.getStream(), original, Charset.forName("UTF-8"));
+ StringWriter second = new StringWriter();
+ IOUtils.copy(secondRecord.getStream(), second, Charset.forName("UTF-8"));
+ assertEquals(original.toString(), second.toString());
+ }
+ finally {
+ if (null != uploadedRecord) {
+ doDeleteRecord((DataStore) ds, uploadedRecord.getIdentifier());
+ }
+ }
+ }
+
+ @Test
public void testSinglePutDirectUploadIT() throws DataRecordUploadException, DataStoreException, IOException {
DataRecordAccessProvider ds = getDataStore();
for (InitUploadResult res : Lists.newArrayList(