Posted to commits@pulsar.apache.org by mm...@apache.org on 2018/05/23 16:43:17 UTC

[incubator-pulsar] branch master updated: Use our own minimal S3Mock rather than findify S3Mock (#1806)

This is an automated email from the ASF dual-hosted git repository.

mmerli pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-pulsar.git


The following commit(s) were added to refs/heads/master by this push:
     new b0b0891  Use our own minimal S3Mock rather than findify S3Mock (#1806)
b0b0891 is described below

commit b0b0891147d8b079bc9c6bb8a3d92a4513bb24b7
Author: Ivan Kelly <iv...@apache.org>
AuthorDate: Wed May 23 17:43:13 2018 +0100

    Use our own minimal S3Mock rather than findify S3Mock (#1806)
    
    * Use our own minimal S3Mock rather than findify S3Mock
    
    The findify S3Mock project isn't very active and is missing some
    functionality we need, such as setting metadata on multipart objects
    (it's in their code base, but there's no release that includes it).
    Rather than wait for them to release the features we need, we should
    use our own mock, which we can update ourselves as needed. Changes
    to the mock should be validated against real S3 by running the tests
    with -DtestRealAWS=true.
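
    For example, one way to run these tests against real S3 (a sketch;
    the Maven module and test selection flags are illustrative, and
    ~/.aws must be configured with credentials and a default region):

        mvn test -pl pulsar-broker -Dtest=S3ManagedLedgerOffloaderTest -DtestRealAWS=true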
    
    Master Issue: #1511
    
    * fix test failures
---
 pom.xml                                            |   8 -
 pulsar-broker/pom.xml                              |   6 -
 .../broker/s3offload/S3ManagedLedgerOffloader.java |   6 +-
 .../s3offload/S3ManagedLedgerOffloaderTest.java    |  23 +-
 .../org/apache/pulsar/broker/s3offload/S3Mock.java | 334 +++++++++++++++++++++
 .../apache/pulsar/broker/s3offload/S3TestBase.java |  20 +-
 6 files changed, 346 insertions(+), 51 deletions(-)

diff --git a/pom.xml b/pom.xml
index 4c8bebf..22506ca 100644
--- a/pom.xml
+++ b/pom.xml
@@ -154,7 +154,6 @@ flexible messaging model and an intuitive client API.</description>
 
     <!-- test dependencies -->
     <disruptor.version>3.4.0</disruptor.version>
-    <s3mock.version>0.2.5</s3mock.version>
 
     <!-- Plugin dependencies -->
     <protobuf-maven-plugin.version>0.5.0</protobuf-maven-plugin.version>
@@ -729,13 +728,6 @@ flexible messaging model and an intuitive client API.</description>
         <artifactId>disruptor</artifactId>
         <version>${disruptor.version}</version>
       </dependency>
-
-      <dependency>
-        <groupId>io.findify</groupId>
-        <artifactId>s3mock_2.12</artifactId>
-        <version>${s3mock.version}</version>
-        <scope>test</scope>
-      </dependency>
     </dependencies>
   </dependencyManagement>
 
diff --git a/pulsar-broker/pom.xml b/pulsar-broker/pom.xml
index 600b374..7ed7407 100644
--- a/pulsar-broker/pom.xml
+++ b/pulsar-broker/pom.xml
@@ -148,12 +148,6 @@
       <artifactId>aws-java-sdk-s3</artifactId>
     </dependency>
 
-    <dependency>
-      <groupId>io.findify</groupId>
-      <artifactId>s3mock_2.12</artifactId>
-      <scope>test</scope>
-    </dependency>
-
     <!-- functions related dependencies (begin) -->
 
     <dependency>
diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/s3offload/S3ManagedLedgerOffloader.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/s3offload/S3ManagedLedgerOffloader.java
index 9cb1486..43e4dda 100644
--- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/s3offload/S3ManagedLedgerOffloader.java
+++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/s3offload/S3ManagedLedgerOffloader.java
@@ -118,7 +118,7 @@ public class S3ManagedLedgerOffloader implements LedgerOffloader {
                 .withLedgerMetadata(readHandle.getLedgerMetadata());
             String dataBlockKey = dataBlockOffloadKey(readHandle.getId(), uuid);
             String indexBlockKey = indexBlockOffloadKey(readHandle.getId(), uuid);
-            InitiateMultipartUploadRequest dataBlockReq = new InitiateMultipartUploadRequest(bucket, dataBlockKey);
+            InitiateMultipartUploadRequest dataBlockReq = new InitiateMultipartUploadRequest(bucket, dataBlockKey, new ObjectMetadata());
             InitiateMultipartUploadResult dataBlockRes = null;
 
             // init multi part upload for data block.
@@ -172,9 +172,9 @@ public class S3ManagedLedgerOffloader implements LedgerOffloader {
                     .withUploadId(dataBlockRes.getUploadId())
                     .withPartETags(etags));
             } catch (Throwable t) {
+                promise.completeExceptionally(t);
                 s3client.abortMultipartUpload(
                     new AbortMultipartUploadRequest(bucket, dataBlockKey, dataBlockRes.getUploadId()));
-                promise.completeExceptionally(t);
                 return;
             }
 
@@ -191,8 +191,8 @@ public class S3ManagedLedgerOffloader implements LedgerOffloader {
                     metadata));
                 promise.complete(null);
             } catch (Throwable t) {
-                s3client.deleteObject(bucket, dataBlockKey);
                 promise.completeExceptionally(t);
+                s3client.deleteObject(bucket, dataBlockKey);
                 return;
             }
         });
diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/s3offload/S3ManagedLedgerOffloaderTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/s3offload/S3ManagedLedgerOffloaderTest.java
index 5e465b6..9f0d253 100644
--- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/s3offload/S3ManagedLedgerOffloaderTest.java
+++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/s3offload/S3ManagedLedgerOffloaderTest.java
@@ -116,13 +116,8 @@ class S3ManagedLedgerOffloaderTest extends S3TestBase {
 
     @Test
     public void testBucketDoesNotExist() throws Exception {
-        ServiceConfiguration conf = new ServiceConfiguration();
-        conf.setManagedLedgerOffloadDriver(S3ManagedLedgerOffloader.DRIVER_NAME);
-        conf.setS3ManagedLedgerOffloadBucket("no-bucket");
-        conf.setS3ManagedLedgerOffloadServiceEndpoint(s3endpoint);
-        conf.setS3ManagedLedgerOffloadRegion("eu-west-1");
-        LedgerOffloader offloader = S3ManagedLedgerOffloader.create(conf, scheduler);
-
+        LedgerOffloader offloader = new S3ManagedLedgerOffloader(s3client, "no-bucket", scheduler,
+                                                                 DEFAULT_BLOCK_SIZE, DEFAULT_READ_BUFFER_SIZE);
         try {
             offloader.offload(buildReadHandle(), UUID.randomUUID(), new HashMap<>()).get();
             Assert.fail("Shouldn't be able to add to bucket");
@@ -383,11 +378,10 @@ class S3ManagedLedgerOffloaderTest extends S3TestBase {
 
     @Test
     public void testDeleteOffloaded() throws Exception {
-        int maxBlockSize = 1024;
-        int entryCount = 3;
-        ReadHandle readHandle = buildReadHandle(maxBlockSize, entryCount);
+        ReadHandle readHandle = buildReadHandle(DEFAULT_BLOCK_SIZE, 1);
         UUID uuid = UUID.randomUUID();
-        LedgerOffloader offloader = new S3ManagedLedgerOffloader(s3client, BUCKET, scheduler, maxBlockSize, DEFAULT_READ_BUFFER_SIZE);
+        LedgerOffloader offloader = new S3ManagedLedgerOffloader(s3client, BUCKET, scheduler,
+                                                                 DEFAULT_BLOCK_SIZE, DEFAULT_READ_BUFFER_SIZE);
 
         // verify object exist after offload
         offloader.offload(readHandle, uuid, new HashMap<>()).get();
@@ -402,11 +396,10 @@ class S3ManagedLedgerOffloaderTest extends S3TestBase {
 
     @Test
     public void testDeleteOffloadedFail() throws Exception {
-        int maxBlockSize = 1024;
-        int entryCount = 3;
-        ReadHandle readHandle = buildReadHandle(maxBlockSize, entryCount);
+        ReadHandle readHandle = buildReadHandle(DEFAULT_BLOCK_SIZE, 1);
         UUID uuid = UUID.randomUUID();
-        LedgerOffloader offloader = new S3ManagedLedgerOffloader(s3client, BUCKET, scheduler, maxBlockSize, DEFAULT_READ_BUFFER_SIZE);
+        LedgerOffloader offloader = new S3ManagedLedgerOffloader(s3client, BUCKET, scheduler,
+                                                                 DEFAULT_BLOCK_SIZE, DEFAULT_READ_BUFFER_SIZE);
         String failureString = "fail deleteOffloaded";
         AmazonS3 mockS3client = Mockito.spy(s3client);
         Mockito
diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/s3offload/S3Mock.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/s3offload/S3Mock.java
new file mode 100644
index 0000000..74d48e2
--- /dev/null
+++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/s3offload/S3Mock.java
@@ -0,0 +1,334 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pulsar.broker.s3offload;
+
+import com.amazonaws.services.s3.AbstractAmazonS3;
+import com.amazonaws.services.s3.model.AmazonS3Exception;
+import com.amazonaws.services.s3.model.Bucket;
+import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
+import com.amazonaws.services.s3.model.CompleteMultipartUploadResult;
+import com.amazonaws.services.s3.model.CopyObjectRequest;
+import com.amazonaws.services.s3.model.CopyObjectResult;
+import com.amazonaws.services.s3.model.DeleteObjectRequest;
+import com.amazonaws.services.s3.model.DeleteObjectsRequest;
+import com.amazonaws.services.s3.model.DeleteObjectsResult;
+import com.amazonaws.services.s3.model.GetObjectMetadataRequest;
+import com.amazonaws.services.s3.model.GetObjectRequest;
+import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
+import com.amazonaws.services.s3.model.InitiateMultipartUploadResult;
+import com.amazonaws.services.s3.model.ObjectMetadata;
+import com.amazonaws.services.s3.model.PartETag;
+import com.amazonaws.services.s3.model.PutObjectRequest;
+import com.amazonaws.services.s3.model.PutObjectResult;
+import com.amazonaws.services.s3.model.S3Object;
+import com.amazonaws.services.s3.model.UploadPartRequest;
+import com.amazonaws.services.s3.model.UploadPartResult;
+
+import com.google.common.collect.ComparisonChain;
+
+import java.io.ByteArrayInputStream;
+import java.io.InputStream;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentSkipListMap;
+import java.util.stream.Collectors;
+
+/**
+ * Minimal mock of the Amazon S3 client.
+ * If making any changes, validate that the mock still behaves like real S3 by running all the S3 tests with -DtestRealAWS=true.
+ */
+class S3Mock extends AbstractAmazonS3 {
+    @Override
+    public boolean doesBucketExistV2(String bucketName) {
+        return buckets.containsKey(bucketName);
+    }
+
+    @Override
+    public boolean doesObjectExist(String bucketName, String objectName) {
+        return buckets.containsKey(bucketName) && getBucket(bucketName).hasObject(objectName);
+    }
+
+    @Override
+    public Bucket createBucket(String bucketName) {
+        return buckets.computeIfAbsent(bucketName, (k) -> new MockBucket(k));
+    }
+
+    private MockBucket getBucket(String bucketName) throws AmazonS3Exception {
+        MockBucket bucket = buckets.get(bucketName);
+        if (bucket != null) {
+            return bucket;
+        } else {
+            throw new AmazonS3Exception("NoSuchBucket: Bucket doesn't exist");
+        }
+    }
+
+    @Override
+    public PutObjectResult putObject(PutObjectRequest putObjectRequest)
+            throws AmazonS3Exception {
+        return getBucket(putObjectRequest.getBucketName()).putObject(putObjectRequest);
+    }
+
+    @Override
+    public S3Object getObject(GetObjectRequest getObjectRequest) {
+        return getBucket(getObjectRequest.getBucketName()).getObject(getObjectRequest);
+    }
+
+    @Override
+    public ObjectMetadata getObjectMetadata(GetObjectMetadataRequest getObjectMetadataRequest)
+            throws AmazonS3Exception {
+        return getBucket(getObjectMetadataRequest.getBucketName()).getObjectMetadata(getObjectMetadataRequest);
+    }
+
+    @Override
+    public void deleteObject(DeleteObjectRequest deleteObjectRequest)
+            throws AmazonS3Exception {
+        getBucket(deleteObjectRequest.getBucketName()).deleteObject(deleteObjectRequest.getKey());
+    }
+
+    @Override
+    public DeleteObjectsResult deleteObjects(DeleteObjectsRequest deleteObjectsRequest)
+            throws AmazonS3Exception {
+        List<DeleteObjectsResult.DeletedObject> results = deleteObjectsRequest.getKeys().stream().map((k) -> {
+                getBucket(deleteObjectsRequest.getBucketName()).deleteObject(k.getKey());
+                DeleteObjectsResult.DeletedObject res = new DeleteObjectsResult.DeletedObject();
+                res.setKey(k.getKey());
+                return res;
+            }).collect(Collectors.toList());
+        return new DeleteObjectsResult(results);
+    }
+
+    @Override
+    public CopyObjectResult copyObject(CopyObjectRequest copyObjectRequest)
+            throws AmazonS3Exception {
+        S3Object from = getObject(new GetObjectRequest(copyObjectRequest.getSourceBucketName(),
+                                                       copyObjectRequest.getSourceKey()));
+        ObjectMetadata newMetadata = copyObjectRequest.getNewObjectMetadata();
+        if (newMetadata == null) {
+            newMetadata = from.getObjectMetadata();
+        }
+        newMetadata.setContentLength(from.getObjectMetadata().getContentLength());
+        putObject(new PutObjectRequest(copyObjectRequest.getDestinationBucketName(),
+                                       copyObjectRequest.getDestinationKey(),
+                                       from.getObjectContent(),
+                                       newMetadata));
+        return new CopyObjectResult();
+    }
+
+    @Override
+    public InitiateMultipartUploadResult initiateMultipartUpload(InitiateMultipartUploadRequest request)
+            throws AmazonS3Exception {
+        return getBucket(request.getBucketName()).initMultipart(request);
+    }
+
+    @Override
+    public UploadPartResult uploadPart(UploadPartRequest request)
+            throws AmazonS3Exception {
+        return getBucket(request.getBucketName()).uploadPart(request);
+    }
+
+    @Override
+    public CompleteMultipartUploadResult completeMultipartUpload(CompleteMultipartUploadRequest request)
+            throws AmazonS3Exception {
+        return getBucket(request.getBucketName()).completeMultipart(request);
+    }
+
+    ConcurrentHashMap<String, MockBucket> buckets = new ConcurrentHashMap<>();
+
+    static class MockBucket extends Bucket {
+        ConcurrentHashMap<String, MockObject> objects = new ConcurrentHashMap<>();
+        ConcurrentHashMap<String, MockMultipart> inprogressMultipart = new ConcurrentHashMap<>();
+
+        MockBucket(String name) {
+            super(name);
+        }
+
+        boolean hasObject(String key) {
+            return objects.containsKey(key);
+        }
+
+        PutObjectResult putObject(PutObjectRequest putObjectRequest) throws AmazonS3Exception {
+            byte[] bytes = streamToBytes(putObjectRequest.getInputStream(),
+                                         (int)putObjectRequest.getMetadata().getContentLength());
+            objects.put(putObjectRequest.getKey(),
+                        new MockObject(putObjectRequest.getMetadata(), bytes));
+            return new PutObjectResult();
+        }
+
+        S3Object getObject(GetObjectRequest getObjectRequest) throws AmazonS3Exception {
+            MockObject obj = objects.get(getObjectRequest.getKey());
+            if (obj == null) {
+                throw new AmazonS3Exception("Object doesn't exist");
+            }
+
+            S3Object s3obj = new S3Object();
+            s3obj.setBucketName(getObjectRequest.getBucketName());
+            s3obj.setKey(getObjectRequest.getKey());
+
+            if (getObjectRequest.getRange() != null) {
+                long[] range = getObjectRequest.getRange();
+                int size = (int)(range[1] - range[0] + 1);
+                ObjectMetadata metadata = obj.metadata.clone();
+                metadata.setHeader("Content-Range",
+                                   String.format("bytes %d-%d/%d",
+                                                 range[0], range[1], obj.data.length));
+                s3obj.setObjectMetadata(metadata);
+                s3obj.setObjectContent(new ByteArrayInputStream(obj.data, (int)range[0], size));
+                return s3obj;
+            } else {
+                s3obj.setObjectMetadata(obj.metadata);
+                s3obj.setObjectContent(new ByteArrayInputStream(obj.data));
+                return s3obj;
+            }
+        }
+
+        void deleteObject(String key) {
+            objects.remove(key);
+        }
+
+        ObjectMetadata getObjectMetadata(GetObjectMetadataRequest getObjectMetadataRequest)
+                throws AmazonS3Exception {
+            MockObject obj = objects.get(getObjectMetadataRequest.getKey());
+            if (obj == null) {
+                throw new AmazonS3Exception("Object doesn't exist");
+            }
+            return obj.metadata;
+        }
+
+        InitiateMultipartUploadResult initMultipart(InitiateMultipartUploadRequest request)
+                throws AmazonS3Exception {
+            String uploadId = UUID.randomUUID().toString();
+            inprogressMultipart.put(uploadId, new MockMultipart(request.getKey(),
+                                                                request.getObjectMetadata()));
+            InitiateMultipartUploadResult result = new InitiateMultipartUploadResult();
+            result.setBucketName(request.getBucketName());
+            result.setKey(request.getKey());
+            result.setUploadId(uploadId);
+            return result;
+        }
+
+        MockMultipart getMultipart(String uploadId, String key) throws AmazonS3Exception {
+            MockMultipart multi = inprogressMultipart.get(uploadId);
+            if (multi == null) {
+                throw new AmazonS3Exception("No such upload " + uploadId);
+            }
+            if (!multi.key.equals(key)) {
+                throw new AmazonS3Exception("Wrong key for upload " + uploadId
+                                            + ", expected " + key
+                                            + ", got " + multi.key);
+            }
+            return multi;
+        }
+
+        UploadPartResult uploadPart(UploadPartRequest request)
+                throws AmazonS3Exception {
+            MockMultipart multi = getMultipart(request.getUploadId(), request.getKey());
+            byte[] bytes = streamToBytes(request.getInputStream(), (int)request.getPartSize());
+            UploadPartResult result = new UploadPartResult();
+            result.setPartNumber(request.getPartNumber());
+            result.setETag(multi.addPart(request.getPartNumber(), bytes));
+            return result;
+        }
+
+        CompleteMultipartUploadResult completeMultipart(CompleteMultipartUploadRequest request)
+                throws AmazonS3Exception {
+            MockMultipart multi = getMultipart(request.getUploadId(), request.getKey());
+            inprogressMultipart.remove(request.getUploadId());
+            objects.put(request.getKey(), multi.complete(request.getPartETags()));
+            CompleteMultipartUploadResult result = new CompleteMultipartUploadResult();
+            result.setBucketName(request.getBucketName());
+            result.setKey(request.getKey());
+            return result;
+        }
+    }
+
+    private static byte[] streamToBytes(InputStream data, int length) throws AmazonS3Exception {
+        byte[] bytes = new byte[length];
+        try {
+            for (int i = 0; i < length; i++) {
+                bytes[i] = (byte)data.read();
+            }
+        } catch (IOException ioe) {
+            throw new AmazonS3Exception("Error loading data", ioe);
+        }
+        return bytes;
+    }
+
+    static class MockObject {
+        final ObjectMetadata metadata;
+        final byte[] data;
+        final Map<Integer, long[]> partRanges;
+
+
+        MockObject(ObjectMetadata metadata, byte[] data) {
+            this(metadata, data, null);
+        }
+
+        MockObject(ObjectMetadata metadata, byte[] data, Map<Integer, long[]> partRanges) {
+            this.metadata = metadata;
+            this.data = data;
+            this.partRanges = partRanges;
+        }
+
+    }
+
+    static class MockMultipart {
+        final String key;
+        final ObjectMetadata metadata;
+        final ConcurrentSkipListMap<PartETag, byte[]> parts = new ConcurrentSkipListMap<>(
+                (etag1, etag2) -> ComparisonChain.start().compare(etag1.getPartNumber(),
+                                                                  etag2.getPartNumber()).result());
+
+        MockMultipart(String key, ObjectMetadata metadata) {
+            this.key = key;
+            this.metadata = metadata;
+        }
+
+        String addPart(int partNumber, byte[] bytes) {
+            String etag = UUID.randomUUID().toString();
+            parts.put(new PartETag(partNumber, etag), bytes);
+            return etag;
+        }
+
+        MockObject complete(List<PartETag> tags) throws AmazonS3Exception {
+            if (parts.size() != tags.size()
+                || !parts.keySet().containsAll(tags)) {
+                throw new AmazonS3Exception("Tags don't match uploaded parts");
+            }
+
+            int totalSize = parts.values().stream().map(v -> v.length).reduce(0, (acc, v) -> acc + v);
+            byte[] full = new byte[totalSize];
+
+            Map<Integer, long[]> partRanges = new HashMap<>();
+            int start = 0;
+            for (Map.Entry<PartETag, byte[]> e : parts.entrySet()) {
+                int partLength = e.getValue().length;
+                System.arraycopy(e.getValue(), 0, full, start, partLength);
+                partRanges.put(e.getKey().getPartNumber(),
+                               new long[] { start, start + partLength - 1 });
+                start += partLength;
+            }
+            metadata.setContentLength(totalSize);
+            return new MockObject(metadata, full, partRanges);
+        }
+    }
+}
diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/s3offload/S3TestBase.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/s3offload/S3TestBase.java
index 2c65c0c..b56c850 100644
--- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/s3offload/S3TestBase.java
+++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/s3offload/S3TestBase.java
@@ -24,32 +24,20 @@ import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration;
 import com.amazonaws.services.s3.AmazonS3;
 import com.amazonaws.services.s3.AmazonS3ClientBuilder;
 
-import io.findify.s3mock.S3Mock;
-
 import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.AfterMethod;
 
 public class S3TestBase {
     final static String BUCKET = "pulsar-unittest";
 
-    S3Mock s3mock = null;
     protected AmazonS3 s3client = null;
-    protected String s3endpoint = null;
 
     @BeforeMethod
     public void start() throws Exception {
-        s3mock = new S3Mock.Builder().withPort(0).withInMemoryBackend().build();
-        int port = s3mock.start().localAddress().getPort();
-        s3endpoint = "http://localhost:" + port;
-
         if (Boolean.parseBoolean(System.getProperty("testRealAWS", "false"))) {
             // To use this, ~/.aws must be configured with credentials and a default region
             s3client = AmazonS3ClientBuilder.standard().build();
         } else {
-            s3client = AmazonS3ClientBuilder.standard()
-                .withEndpointConfiguration(new EndpointConfiguration(s3endpoint, "foobar"))
-                .withCredentials(new AWSStaticCredentialsProvider(new AnonymousAWSCredentials()))
-                .withPathStyleAccessEnabled(true).build();
+            s3client = new S3Mock();
         }
 
         if (!s3client.doesBucketExistV2(BUCKET)) {
@@ -57,10 +45,4 @@ public class S3TestBase {
         }
     }
 
-    @AfterMethod
-    public void stop() throws Exception {
-        if (s3mock != null) {
-            s3mock.shutdown();
-        }
-    }
 }
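
Net effect on the test setup: instead of starting the findify in-process
HTTP server and pointing an AmazonS3 client at its local endpoint, the
tests now construct the mock directly as the AmazonS3 client. A minimal
sketch of the resulting pattern (the class name S3MockUsageSketch and the
object key are illustrative; S3Mock is the package-private test class
added above, and "pulsar-unittest" is the BUCKET constant from S3TestBase):

    package org.apache.pulsar.broker.s3offload;

    import java.io.ByteArrayInputStream;
    import java.nio.charset.StandardCharsets;

    import com.amazonaws.services.s3.AmazonS3;
    import com.amazonaws.services.s3.model.ObjectMetadata;
    import com.amazonaws.services.s3.model.PutObjectRequest;

    public class S3MockUsageSketch {
        public static void main(String[] args) {
            // In-process AmazonS3 implementation; no endpoint, port,
            // or credentials are involved.
            AmazonS3 s3client = new S3Mock();
            s3client.createBucket("pulsar-unittest");

            // The mock reads exactly getContentLength() bytes from the
            // stream, so content length must be set before putObject.
            byte[] payload = "hello world".getBytes(StandardCharsets.UTF_8);
            ObjectMetadata metadata = new ObjectMetadata();
            metadata.setContentLength(payload.length);
            s3client.putObject(new PutObjectRequest("pulsar-unittest", "my-key",
                                                    new ByteArrayInputStream(payload),
                                                    metadata));

            // prints "true": the object is now stored in memory
            System.out.println(s3client.doesObjectExist("pulsar-unittest", "my-key"));
        }
    }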

-- 
To stop receiving notification emails like this one, please contact
mmerli@apache.org.