Posted to commits@jclouds.apache.org by ga...@apache.org on 2015/02/13 03:04:36 UTC

[2/3] jclouds git commit: JCLOUDS-258: Support MPU for generic S3

http://git-wip-us.apache.org/repos/asf/jclouds/blob/ba2f8ac2/apis/s3/src/test/java/org/jclouds/s3/blobstore/strategy/internal/MpuGraphData.java
----------------------------------------------------------------------
diff --git a/apis/s3/src/test/java/org/jclouds/s3/blobstore/strategy/internal/MpuGraphData.java b/apis/s3/src/test/java/org/jclouds/s3/blobstore/strategy/internal/MpuGraphData.java
new file mode 100644
index 0000000..1bf0fe7
--- /dev/null
+++ b/apis/s3/src/test/java/org/jclouds/s3/blobstore/strategy/internal/MpuGraphData.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.jclouds.s3.blobstore.strategy.internal;
+
+import org.jclouds.s3.blobstore.strategy.MultipartUpload;
+
+/**
+ * Prints some graph data about the partitioning algorithm to the console.
+ */
+public class MpuGraphData {
+
+   private static void calculate(long length, MultipartUploadSlicingAlgorithm algorithm) {
+      System.out.println("" + length + " " + algorithm.getParts() + " "
+            + algorithm.calculateChunkSize(length) + " " + algorithm.getRemaining());
+   }
+
+   private static void foreach(long from, long to1, long to2, long to3, MultipartUploadSlicingAlgorithm algorithm) {
+      long i = 0L;
+      long step = 1L;
+      System.out.println("=== {" + from + "," + to1 + "} ===");
+      for (; i < to1 - from; step += i, i += step) {
+         calculate(i + from, algorithm);
+      }
+      calculate(to1, algorithm);
+      System.out.println("=== {" + (to1 + 1) + "," + to2 + "} ===");
+      for (; i < to2 - to1; step += i / 20, i += step) {
+         calculate(i + from, algorithm);
+      }
+      calculate(to2, algorithm);
+      System.out.println("=== {" + (to2 + 1) + "," + to3 + "} ===");
+      for (; i < to3 - to2; step += i / 40, i += step) {
+         calculate(i + from, algorithm);
+      }
+      calculate(to3, algorithm);
+   }
+
+   public static void main(String[] args) {
+      MultipartUploadSlicingAlgorithm algorithm = new MultipartUploadSlicingAlgorithm();
+      foreach(1L,
+            algorithm.defaultPartSize * algorithm.magnitudeBase,
+            MultipartUpload.MAX_PART_SIZE * algorithm.magnitudeBase,
+            MultipartUpload.MAX_PART_SIZE * MultipartUpload.MAX_NUMBER_OF_PARTS,
+            algorithm);
+   }
+
+}

http://git-wip-us.apache.org/repos/asf/jclouds/blob/ba2f8ac2/apis/s3/src/test/java/org/jclouds/s3/blobstore/strategy/internal/MpuPartitioningAlgorithmTest.java
----------------------------------------------------------------------
diff --git a/apis/s3/src/test/java/org/jclouds/s3/blobstore/strategy/internal/MpuPartitioningAlgorithmTest.java b/apis/s3/src/test/java/org/jclouds/s3/blobstore/strategy/internal/MpuPartitioningAlgorithmTest.java
new file mode 100644
index 0000000..ac6dc7c
--- /dev/null
+++ b/apis/s3/src/test/java/org/jclouds/s3/blobstore/strategy/internal/MpuPartitioningAlgorithmTest.java
@@ -0,0 +1,139 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.jclouds.s3.blobstore.strategy.internal;
+
+import static org.testng.Assert.assertEquals;
+
+import org.jclouds.s3.blobstore.strategy.MultipartUpload;
+import org.testng.annotations.Test;
+
+/**
+ * Tests behavior of {@code MultipartUploadSlicingAlgorithm} from the perspective of
+ * the partitioning algorithm.
+ */
+@Test(groups = "unit")
+public class MpuPartitioningAlgorithmTest {
+
+   /**
+    * Below one full part the MPU is not used.
+    * Once the data exceeds {@code MultipartUploadSlicingAlgorithm.DEFAULT_PART_SIZE} bytes,
+    * the MPU becomes active.
+    */
+   @Test
+   public void testLowerLimitFromWhereMultipartBecomeActive() {
+      MultipartUploadSlicingAlgorithm strategy = new MultipartUploadSlicingAlgorithm();
+      
+      // exactly the MIN_PART_SIZE
+      long length = MultipartUpload.MIN_PART_SIZE;
+      long chunkSize = strategy.calculateChunkSize(length);
+      assertEquals(chunkSize, MultipartUploadSlicingAlgorithm.DEFAULT_PART_SIZE);
+      assertEquals(strategy.getParts(), 0);
+      assertEquals(strategy.getRemaining(), length);
+      assertEquals(chunkSize * strategy.getParts() + strategy.getRemaining(), length);
+
+      // exactly the DEFAULT_PART_SIZE
+      length = MultipartUploadSlicingAlgorithm.DEFAULT_PART_SIZE;
+      chunkSize = strategy.calculateChunkSize(length);
+      assertEquals(chunkSize, MultipartUploadSlicingAlgorithm.DEFAULT_PART_SIZE);
+      assertEquals(strategy.getParts(), 0);
+      assertEquals(strategy.getRemaining(), length);
+      assertEquals(chunkSize * strategy.getParts() + strategy.getRemaining(), length);
+
+      // just above the DEFAULT_PART_SIZE
+      length = MultipartUploadSlicingAlgorithm.DEFAULT_PART_SIZE + 1;
+      chunkSize = strategy.calculateChunkSize(length);
+      assertEquals(chunkSize, MultipartUploadSlicingAlgorithm.DEFAULT_PART_SIZE);
+      assertEquals(strategy.getParts(), 1);
+      assertEquals(strategy.getRemaining(), 1);
+      assertEquals(chunkSize * strategy.getParts() + strategy.getRemaining(), length); 
+   }
+
+   /**
+    * Phase 1 of the algorithm.
+    * The chunk size stays at {@code MultipartUploadSlicingAlgorithm.DEFAULT_PART_SIZE}
+    * until the number of parts reaches {@code MultipartUploadSlicingAlgorithm.DEFAULT_MAGNITUDE_BASE}.
+    */
+   @Test
+   public void testWhenChunkSizeHasToStartGrowing() {
+      MultipartUploadSlicingAlgorithm strategy = new MultipartUploadSlicingAlgorithm();
+      // upper limit at which the chunk size is still exactly defaultPartSize
+      long length = MultipartUploadSlicingAlgorithm.DEFAULT_PART_SIZE * MultipartUploadSlicingAlgorithm.DEFAULT_MAGNITUDE_BASE;
+      long chunkSize = strategy.calculateChunkSize(length);
+      assertEquals(chunkSize, MultipartUploadSlicingAlgorithm.DEFAULT_PART_SIZE);
+      assertEquals(strategy.getParts(), MultipartUploadSlicingAlgorithm.DEFAULT_MAGNITUDE_BASE - 1);
+      assertEquals(strategy.getRemaining(), MultipartUploadSlicingAlgorithm.DEFAULT_PART_SIZE);
+      assertEquals(chunkSize * strategy.getParts() + strategy.getRemaining(), length);
+
+      // then chunkSize is increasing
+      length += 1;
+      chunkSize = strategy.calculateChunkSize(length);
+      assertEquals(chunkSize, MultipartUploadSlicingAlgorithm.DEFAULT_PART_SIZE * 2);
+      assertEquals(strategy.getParts(), MultipartUploadSlicingAlgorithm.DEFAULT_MAGNITUDE_BASE / 2);
+      assertEquals(strategy.getRemaining(), 1);
+      assertEquals(chunkSize * strategy.getParts() + strategy.getRemaining(), length);
+   }
+   
+   /**
+    * Phase 2 of the algorithm.
+    * The number of parts stays at {@code MultipartUploadSlicingAlgorithm.DEFAULT_MAGNITUDE_BASE}
+    * until the chunk size reaches {@code MultipartUpload.MAX_PART_SIZE}.
+    */
+   @Test
+   public void testWhenPartsHasToStartGrowingFromMagnitudeBase() {
+      MultipartUploadSlicingAlgorithm strategy = new MultipartUploadSlicingAlgorithm();
+      // upper limit at which there are still exactly MAGNITUDE_BASE parts (counting the remainder)
+      long length = MultipartUpload.MAX_PART_SIZE * MultipartUploadSlicingAlgorithm.DEFAULT_MAGNITUDE_BASE;
+      long chunkSize = strategy.calculateChunkSize(length);
+      assertEquals(chunkSize, MultipartUpload.MAX_PART_SIZE);
+      assertEquals(strategy.getParts(), MultipartUploadSlicingAlgorithm.DEFAULT_MAGNITUDE_BASE - 1);
+      assertEquals(strategy.getRemaining(), MultipartUpload.MAX_PART_SIZE);
+      assertEquals(chunkSize * strategy.getParts() + strategy.getRemaining(), length);
+
+      // then the number of parts is increasing
+      length += 1;
+      chunkSize = strategy.calculateChunkSize(length);
+      assertEquals(chunkSize, MultipartUpload.MAX_PART_SIZE);
+      assertEquals(strategy.getParts(), MultipartUploadSlicingAlgorithm.DEFAULT_MAGNITUDE_BASE);
+      assertEquals(strategy.getRemaining(), 1);
+      assertEquals(chunkSize * strategy.getParts() + strategy.getRemaining(), length);
+   }
+   
+   /**
+    * Phase 3 of the algorithm.
+    * The number of parts increases up to {@code MultipartUpload.MAX_NUMBER_OF_PARTS}
+    * while the part size does not exceed {@code MultipartUpload.MAX_PART_SIZE}.
+    */
+   @Test
+   public void testWhenPartsExceedsMaxNumberOfParts() {
+      MultipartUploadSlicingAlgorithm strategy = new MultipartUploadSlicingAlgorithm();
+      // upper limit at which there are still exactly MAX_NUMBER_OF_PARTS parts (counting the remainder)
+      long length = MultipartUpload.MAX_PART_SIZE * MultipartUpload.MAX_NUMBER_OF_PARTS;
+      long chunkSize = strategy.calculateChunkSize(length);
+      assertEquals(chunkSize, MultipartUpload.MAX_PART_SIZE);
+      assertEquals(strategy.getParts(), MultipartUpload.MAX_NUMBER_OF_PARTS - 1);
+      assertEquals(strategy.getRemaining(), MultipartUpload.MAX_PART_SIZE);
+      assertEquals(chunkSize * strategy.getParts() + strategy.getRemaining(), length);
+
+      // then the number of parts is increasing
+      length += 1;
+      chunkSize = strategy.calculateChunkSize(length);
+      assertEquals(chunkSize, MultipartUpload.MAX_PART_SIZE);
+      assertEquals(strategy.getParts(), MultipartUpload.MAX_NUMBER_OF_PARTS);
+      assertEquals(strategy.getRemaining(), 1);
+      assertEquals(chunkSize * strategy.getParts() + strategy.getRemaining(), length);
+   } 
+}
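
For reference, here is a minimal sketch of the three phases the tests above exercise, worked through numerically. It is illustrative only: the class and method names (SlicingPhasesDemo, print) are invented for this example, and it assumes the generic-S3 MultipartUploadSlicingAlgorithm keeps the defaults of the AWS version removed later in this commit (DEFAULT_PART_SIZE = 32 MB, DEFAULT_MAGNITUDE_BASE = 100, MultipartUpload.MAX_PART_SIZE = 5 GB, MAX_NUMBER_OF_PARTS = 10000) and that the sketch sits in the same package so it can reach the @VisibleForTesting methods.

package org.jclouds.s3.blobstore.strategy.internal;

import org.jclouds.s3.blobstore.strategy.MultipartUpload;

public class SlicingPhasesDemo {

   public static void main(String[] args) {
      MultipartUploadSlicingAlgorithm algorithm = new MultipartUploadSlicingAlgorithm();

      // Phase 1: up to DEFAULT_PART_SIZE * DEFAULT_MAGNITUDE_BASE (32 MB * 100) the chunk
      // size stays at 32 MB and only the part count grows.
      print(algorithm, 33554432L * 100);      // -> 99 parts of 33554432 bytes, remaining 33554432

      // Phase 2: beyond that, the chunk size grows in multiples of the default part size
      // while the part count stays at or below DEFAULT_MAGNITUDE_BASE, until the chunk
      // size reaches MultipartUpload.MAX_PART_SIZE (5 GB).
      print(algorithm, 33554432L * 100 + 1);  // -> 50 parts of 67108864 bytes, remaining 1

      // Phase 3: once the chunk size is capped at 5 GB, only the part count can grow,
      // up to MultipartUpload.MAX_NUMBER_OF_PARTS (10000).
      print(algorithm, MultipartUpload.MAX_PART_SIZE * 100 + 1);  // -> 100 parts of 5368709120 bytes, remaining 1
   }

   private static void print(MultipartUploadSlicingAlgorithm algorithm, long length) {
      long chunkSize = algorithm.calculateChunkSize(length);
      System.out.println(length + " bytes -> " + algorithm.getParts() + " parts of "
            + chunkSize + " bytes, remaining " + algorithm.getRemaining());
   }
}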

http://git-wip-us.apache.org/repos/asf/jclouds/blob/ba2f8ac2/apis/s3/src/test/java/org/jclouds/s3/blobstore/strategy/internal/SequentialMultipartUploadStrategyMockTest.java
----------------------------------------------------------------------
diff --git a/apis/s3/src/test/java/org/jclouds/s3/blobstore/strategy/internal/SequentialMultipartUploadStrategyMockTest.java b/apis/s3/src/test/java/org/jclouds/s3/blobstore/strategy/internal/SequentialMultipartUploadStrategyMockTest.java
new file mode 100644
index 0000000..cdff2cc
--- /dev/null
+++ b/apis/s3/src/test/java/org/jclouds/s3/blobstore/strategy/internal/SequentialMultipartUploadStrategyMockTest.java
@@ -0,0 +1,147 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.jclouds.s3.blobstore.strategy.internal;
+
+import static com.google.common.util.concurrent.MoreExecutors.sameThreadExecutor;
+import static org.jclouds.Constants.PROPERTY_MAX_RETRIES;
+import static org.jclouds.Constants.PROPERTY_SO_TIMEOUT;
+import static org.jclouds.s3.reference.S3Constants.PROPERTY_S3_VIRTUAL_HOST_BUCKETS;
+import static org.testng.Assert.assertEquals;
+
+import java.io.IOException;
+import java.util.Properties;
+import java.util.Set;
+
+import org.jclouds.ContextBuilder;
+import org.jclouds.blobstore.domain.internal.BlobBuilderImpl;
+import org.jclouds.concurrent.config.ExecutorServiceModule;
+import org.jclouds.http.HttpResponseException;
+import org.testng.annotations.Test;
+
+import com.google.common.base.Charsets;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.net.HttpHeaders;
+import com.google.common.net.MediaType;
+import com.google.inject.Module;
+import com.squareup.okhttp.mockwebserver.MockResponse;
+import com.squareup.okhttp.mockwebserver.MockWebServer;
+import com.squareup.okhttp.mockwebserver.RecordedRequest;
+
+@Test(singleThreaded = true)
+public class SequentialMultipartUploadStrategyMockTest {
+
+   @Test
+   public void testMPUDoesMultipart() throws IOException, InterruptedException {
+      MockWebServer server = new MockWebServer();
+      server.enqueue(new MockResponse().setResponseCode(200).setBody("<UploadId>upload-id</UploadId>"));
+      server.enqueue(new MockResponse().setResponseCode(200).addHeader("ETag", "a00"));
+      server.enqueue(new MockResponse().setResponseCode(200).addHeader("ETag", "b00"));
+      server.enqueue(new MockResponse().setResponseCode(200).setBody("<ETag>fff</ETag>"));
+      server.play();
+
+      byte[] bytes = "0123456789abcdef".getBytes(Charsets.US_ASCII);
+      int partSize = bytes.length / 2;
+
+      SequentialMultipartUploadStrategy api = mockSequentialMultipartUploadStrategy(server.getUrl("/").toString(),
+            partSize);
+
+      try {
+         assertEquals(api.execute("container", new BlobBuilderImpl().name("foo").payload(bytes)
+            .contentDisposition("inline; filename=foo.mp4")
+            .contentType(MediaType.MP4_VIDEO.toString())
+            .build()), "fff");
+      } finally {
+
+         RecordedRequest initiate = server.takeRequest();
+         assertEquals(initiate.getRequestLine(), "POST /container/foo?uploads HTTP/1.1");
+         assertEquals(initiate.getHeader("Content-Length"), "0");
+         assertEquals(initiate.getHeader(HttpHeaders.CONTENT_TYPE), MediaType.MP4_VIDEO.toString());
+         assertEquals(initiate.getHeader(HttpHeaders.CONTENT_DISPOSITION), "inline; filename=foo.mp4");
+
+         RecordedRequest part1 = server.takeRequest();
+         assertEquals(part1.getRequestLine(), "PUT /container/foo?partNumber=1&uploadId=upload-id HTTP/1.1");
+         assertEquals(part1.getHeader("Content-Length"), String.valueOf(partSize));
+         assertEquals(new String(part1.getBody()), "01234567");
+
+         RecordedRequest part2 = server.takeRequest();
+         assertEquals(part2.getRequestLine(), "PUT /container/foo?partNumber=2&uploadId=upload-id HTTP/1.1");
+         assertEquals(part2.getHeader("Content-Length"), String.valueOf(partSize));
+         assertEquals(new String(part2.getBody()), "89abcdef");
+
+         RecordedRequest manifest = server.takeRequest();
+         assertEquals(manifest.getRequestLine(), "POST /container/foo?uploadId=upload-id HTTP/1.1");
+         assertEquals(manifest.getHeader("Content-Length"), "161");
+         assertEquals(
+               new String(manifest.getBody()),
+               "<CompleteMultipartUpload><Part><PartNumber>1</PartNumber><ETag>a00</ETag></Part><Part><PartNumber>2</PartNumber><ETag>b00</ETag></Part></CompleteMultipartUpload>");
+
+         server.shutdown();
+      }
+   }
+
+   @Test(expectedExceptions = HttpResponseException.class)
+   public void testMPUAbortsOnProblem() throws IOException, InterruptedException {
+      MockWebServer server = new MockWebServer();
+      server.enqueue(new MockResponse().setResponseCode(200).setBody("<UploadId>upload-id</UploadId>"));
+      server.enqueue(new MockResponse().setResponseCode(400));
+      server.enqueue(new MockResponse().setResponseCode(200));
+      server.play();
+
+      byte[] bytes = "0123456789abcdef".getBytes(Charsets.US_ASCII);
+      int partSize = bytes.length / 2;
+
+      SequentialMultipartUploadStrategy api = mockSequentialMultipartUploadStrategy(server.getUrl("/").toString(),
+            partSize);
+
+      try {
+         assertEquals(api.execute("container", new BlobBuilderImpl().name("foo").payload(bytes).build()), "fff");
+      } finally {
+
+         RecordedRequest initiate = server.takeRequest();
+         assertEquals(initiate.getRequestLine(), "POST /container/foo?uploads HTTP/1.1");
+         assertEquals(initiate.getHeader("Content-Length"), "0");
+
+         RecordedRequest part1 = server.takeRequest();
+         assertEquals(part1.getRequestLine(), "PUT /container/foo?partNumber=1&uploadId=upload-id HTTP/1.1");
+         assertEquals(part1.getHeader("Content-Length"), String.valueOf(partSize));
+         assertEquals(new String(part1.getBody()), "01234567");
+
+         RecordedRequest abort = server.takeRequest();
+         assertEquals(abort.getRequestLine(), "DELETE /container/foo?uploadId=upload-id HTTP/1.1");
+
+         server.shutdown();
+      }
+   }
+
+   private static final Set<Module> modules = ImmutableSet.<Module>of(
+         new ExecutorServiceModule(sameThreadExecutor()));
+
+   static SequentialMultipartUploadStrategy mockSequentialMultipartUploadStrategy(String uri, int partSize) {
+      Properties overrides = new Properties();
+      overrides.setProperty(PROPERTY_S3_VIRTUAL_HOST_BUCKETS, "false");
+      // prevent expect-100 bug http://code.google.com/p/mockwebserver/issues/detail?id=6
+      overrides.setProperty(PROPERTY_SO_TIMEOUT, "0");
+      overrides.setProperty(PROPERTY_MAX_RETRIES, "1");
+      overrides.setProperty("jclouds.mpu.parts.size", String.valueOf(partSize));
+      return ContextBuilder.newBuilder("s3")
+                           .credentials("accessKey", "secretKey")
+                           .endpoint(uri)
+                           .overrides(overrides)
+                           .modules(modules)
+                           .buildInjector().getInstance(SequentialMultipartUploadStrategy.class);
+   }
+}

http://git-wip-us.apache.org/repos/asf/jclouds/blob/ba2f8ac2/apis/s3/src/test/java/org/jclouds/s3/functions/ETagFromHttpResponseViaRegexTest.java
----------------------------------------------------------------------
diff --git a/apis/s3/src/test/java/org/jclouds/s3/functions/ETagFromHttpResponseViaRegexTest.java b/apis/s3/src/test/java/org/jclouds/s3/functions/ETagFromHttpResponseViaRegexTest.java
new file mode 100644
index 0000000..8b85f1a
--- /dev/null
+++ b/apis/s3/src/test/java/org/jclouds/s3/functions/ETagFromHttpResponseViaRegexTest.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.jclouds.s3.functions;
+
+import static org.testng.Assert.assertEquals;
+
+import org.jclouds.http.HttpResponse;
+import org.jclouds.http.functions.ReturnStringIf2xx;
+import org.jclouds.io.Payloads;
+import org.testng.annotations.Test;
+
+/**
+ * Tests behavior of {@code ETagFromHttpResponseViaRegex}
+ */
+// NOTE: without testName, @Before* methods are not called and the test fails with an NPE during surefire
+@Test(groups = "unit", testName = "ETagFromHttpResponseViaRegexTest")
+public class ETagFromHttpResponseViaRegexTest {
+
+   @Test
+   public void test() {
+
+      HttpResponse response = HttpResponse.builder().statusCode(200).payload(
+               Payloads.newInputStreamPayload(getClass().getResourceAsStream("/complete-multipart-upload.xml")))
+               .build();
+      ETagFromHttpResponseViaRegex parser = new ETagFromHttpResponseViaRegex(new ReturnStringIf2xx());
+
+      assertEquals(parser.apply(response), "\"3858f62230ac3c915f300c664312c11f-9\"");
+   }
+
+}

http://git-wip-us.apache.org/repos/asf/jclouds/blob/ba2f8ac2/apis/s3/src/test/java/org/jclouds/s3/functions/UploadIdFromHttpResponseViaRegexTest.java
----------------------------------------------------------------------
diff --git a/apis/s3/src/test/java/org/jclouds/s3/functions/UploadIdFromHttpResponseViaRegexTest.java b/apis/s3/src/test/java/org/jclouds/s3/functions/UploadIdFromHttpResponseViaRegexTest.java
new file mode 100644
index 0000000..9aee985
--- /dev/null
+++ b/apis/s3/src/test/java/org/jclouds/s3/functions/UploadIdFromHttpResponseViaRegexTest.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.jclouds.s3.functions;
+
+import static org.testng.Assert.assertEquals;
+
+import org.jclouds.http.HttpResponse;
+import org.jclouds.http.functions.ReturnStringIf2xx;
+import org.jclouds.io.Payloads;
+import org.testng.annotations.Test;
+
+/**
+ * Tests behavior of {@code UploadIdFromHttpResponseViaRegex}
+ */
+// NOTE: without testName, @Before* methods are not called and the test fails with an NPE during surefire
+@Test(groups = "unit", testName = "UploadIdFromHttpResponseViaRegexTest")
+public class UploadIdFromHttpResponseViaRegexTest {
+
+   @Test
+   public void test() {
+
+      HttpResponse response = HttpResponse.builder().statusCode(200).payload(
+               Payloads.newInputStreamPayload(getClass().getResourceAsStream("/initiate-multipart-upload.xml")))
+               .build();
+      UploadIdFromHttpResponseViaRegex parser = new UploadIdFromHttpResponseViaRegex(new ReturnStringIf2xx());
+
+      assertEquals(parser.apply(response), "VXBsb2FkIElEIGZvciA2aWWpbmcncyBteS1tb3ZpZS5tMnRzIHVwbG9hZA");
+   }
+
+}

http://git-wip-us.apache.org/repos/asf/jclouds/blob/ba2f8ac2/apis/s3/src/test/resources/complete-multipart-upload.xml
----------------------------------------------------------------------
diff --git a/apis/s3/src/test/resources/complete-multipart-upload.xml b/apis/s3/src/test/resources/complete-multipart-upload.xml
new file mode 100644
index 0000000..9d52646
--- /dev/null
+++ b/apis/s3/src/test/resources/complete-multipart-upload.xml
@@ -0,0 +1,7 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<CompleteMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
+  <Location>http://Example-Bucket.s3.amazonaws.com/Example-Object</Location>
+  <Bucket>Example-Bucket</Bucket>
+  <Key>Example-Object</Key>
+  <ETag>&quot;3858f62230ac3c915f300c664312c11f-9&quot;</ETag>
+</CompleteMultipartUploadResult>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/jclouds/blob/ba2f8ac2/apis/s3/src/test/resources/initiate-multipart-upload.xml
----------------------------------------------------------------------
diff --git a/apis/s3/src/test/resources/initiate-multipart-upload.xml b/apis/s3/src/test/resources/initiate-multipart-upload.xml
new file mode 100644
index 0000000..7b9a0aa
--- /dev/null
+++ b/apis/s3/src/test/resources/initiate-multipart-upload.xml
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<InitiateMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
+  <Bucket>example-bucket</Bucket>
+  <Key>example-object</Key>
+  <UploadId>VXBsb2FkIElEIGZvciA2aWWpbmcncyBteS1tb3ZpZS5tMnRzIHVwbG9hZA</UploadId>
+</InitiateMultipartUploadResult>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/jclouds/blob/ba2f8ac2/providers/aws-s3/src/main/java/org/jclouds/aws/s3/AWSS3Client.java
----------------------------------------------------------------------
diff --git a/providers/aws-s3/src/main/java/org/jclouds/aws/s3/AWSS3Client.java b/providers/aws-s3/src/main/java/org/jclouds/aws/s3/AWSS3Client.java
index 7dccd70..0f9176c 100644
--- a/providers/aws-s3/src/main/java/org/jclouds/aws/s3/AWSS3Client.java
+++ b/providers/aws-s3/src/main/java/org/jclouds/aws/s3/AWSS3Client.java
@@ -16,46 +16,27 @@
  */
 package org.jclouds.aws.s3;
 
-import static org.jclouds.Fallbacks.VoidOnNotFoundOr404;
 import static org.jclouds.blobstore.attr.BlobScopes.CONTAINER;
 
-import java.util.Map;
-
 import javax.inject.Named;
-import javax.ws.rs.DELETE;
 import javax.ws.rs.POST;
-import javax.ws.rs.PUT;
 import javax.ws.rs.Path;
-import javax.ws.rs.PathParam;
-import javax.ws.rs.QueryParam;
 
 import org.jclouds.aws.s3.binders.BindIterableAsPayloadToDeleteRequest;
-import org.jclouds.aws.s3.binders.BindObjectMetadataToRequest;
-import org.jclouds.aws.s3.binders.BindPartIdsAndETagsToRequest;
 import org.jclouds.aws.s3.domain.DeleteResult;
-import org.jclouds.aws.s3.functions.ETagFromHttpResponseViaRegex;
-import org.jclouds.aws.s3.functions.ObjectMetadataKey;
-import org.jclouds.aws.s3.functions.UploadIdFromHttpResponseViaRegex;
 import org.jclouds.aws.s3.xml.DeleteResultHandler;
 import org.jclouds.blobstore.attr.BlobScope;
-import org.jclouds.http.functions.ParseETagHeader;
-import org.jclouds.io.Payload;
 import org.jclouds.rest.annotations.BinderParam;
 import org.jclouds.rest.annotations.EndpointParam;
-import org.jclouds.rest.annotations.Fallback;
-import org.jclouds.rest.annotations.ParamParser;
 import org.jclouds.rest.annotations.ParamValidators;
 import org.jclouds.rest.annotations.QueryParams;
 import org.jclouds.rest.annotations.RequestFilters;
-import org.jclouds.rest.annotations.ResponseParser;
 import org.jclouds.rest.annotations.XMLResponseParser;
 import org.jclouds.s3.Bucket;
 import org.jclouds.s3.S3Client;
 import org.jclouds.s3.binders.BindAsHostPrefixIfConfigured;
-import org.jclouds.s3.domain.ObjectMetadata;
 import org.jclouds.s3.filters.RequestAuthorizeSignature;
 import org.jclouds.s3.functions.AssignCorrectHostnameForBucket;
-import org.jclouds.s3.options.PutObjectOptions;
 import org.jclouds.s3.predicates.validators.BucketNameValidator;
 
 /**
@@ -66,138 +47,6 @@ import org.jclouds.s3.predicates.validators.BucketNameValidator;
 public interface AWSS3Client extends S3Client {
 
    /**
-    * This operation initiates a multipart upload and returns an upload ID. This upload ID is used
-    * to associate all the parts in the specific multipart upload. You specify this upload ID in
-    * each of your subsequent upload part requests (see Upload Part). You also include this upload
-    * ID in the final request to either complete or abort the multipart upload request.
-    *
-    * <h4>Note</h4> If you create an object using the multipart upload APIs, currently you cannot
-    * copy the object between regions.
-    *
-    *
-    * @param bucketName
-    *           namespace of the object you are to upload
-    * @param objectMetadata
-    *           metadata around the object you wish to upload
-    * @param options
-    *           controls optional parameters such as canned ACL
-    * @return ID for the initiated multipart upload.
-    */
-   @Named("PutObject")
-   @POST
-   @QueryParams(keys = "uploads")
-   @Path("/{key}")
-   @ResponseParser(UploadIdFromHttpResponseViaRegex.class)
-   String initiateMultipartUpload(@Bucket @EndpointParam(parser = AssignCorrectHostnameForBucket.class) @BinderParam(
-         BindAsHostPrefixIfConfigured.class) @ParamValidators(BucketNameValidator.class) String bucketName,
-         @PathParam("key") @ParamParser(ObjectMetadataKey.class) @BinderParam(BindObjectMetadataToRequest.class)
-         ObjectMetadata objectMetadata, PutObjectOptions... options);
-
-   /**
-    * This operation aborts a multipart upload. After a multipart upload is aborted, no additional
-    * parts can be uploaded using that upload ID. The storage consumed by any previously uploaded
-    * parts will be freed. However, if any part uploads are currently in progress, those part
-    * uploads might or might not succeed. As a result, it might be necessary to abort a given
-    * multipart upload multiple times in order to completely free all storage consumed by all parts.
-    *
-    *
-    * @param bucketName
-    *           namespace of the object you are deleting
-    * @param key
-    *           unique key in the s3Bucket identifying the object
-    * @param uploadId
-    *           id of the multipart upload in progress.
-    */
-   @Named("AbortMultipartUpload")
-   @DELETE
-   @Path("/{key}")
-   @Fallback(VoidOnNotFoundOr404.class)
-   void abortMultipartUpload(@Bucket @EndpointParam(parser = AssignCorrectHostnameForBucket.class) @BinderParam(
-         BindAsHostPrefixIfConfigured.class) @ParamValidators(BucketNameValidator.class) String bucketName,
-         @PathParam("key") String key, @QueryParam("uploadId") String uploadId);
-
-   /**
-    * This operation uploads a part in a multipart upload. You must initiate a multipart upload (see
-    * Initiate Multipart Upload) before you can upload any part. In response to your initiate
-    * request. Amazon S3 returns an upload ID, a unique identifier, that you must include in your
-    * upload part request.
-    *
-    * <p/>
-    * Part numbers can be any number from 1 to 10,000, inclusive. A part number uniquely identifies
-    * a part and also defines its position within the object being created. If you upload a new part
-    * using the same part number that was used with a previous part, the previously uploaded part is
-    * overwritten. Each part must be at least 5 MB in size, except the last part. There is no size
-    * limit on the last part of your multipart upload.
-    *
-    * <p/>
-    * To ensure that data is not corrupted when traversing the network, specify the Content-MD5
-    * header in the upload part request. Amazon S3 checks the part data against the provided MD5
-    * value. If they do not match, Amazon S3 returns an error.
-    *
-    *
-    * @param bucketName
-    *           namespace of the object you are storing
-    * @param key
-    *           unique key in the s3Bucket identifying the object
-    * @param partNumber
-    *           which part is this.
-    * @param uploadId
-    *           id of the multipart upload in progress.
-    * @param part
-    *           contains the data to create or overwrite
-    * @return ETag of the content uploaded
-    */
-   @Named("PutObject")
-   @PUT
-   @Path("/{key}")
-   @ResponseParser(ParseETagHeader.class)
-   String uploadPart(@Bucket @EndpointParam(parser = AssignCorrectHostnameForBucket.class) @BinderParam(
-         BindAsHostPrefixIfConfigured.class) @ParamValidators(BucketNameValidator.class) String bucketName,
-         @PathParam("key") String key, @QueryParam("partNumber") int partNumber,
-         @QueryParam("uploadId") String uploadId, Payload part);
-
-   /**
-    *
-    This operation completes a multipart upload by assembling previously uploaded parts.
-    * <p/>
-    * You first initiate the multipart upload and then upload all parts using the Upload Parts
-    * operation (see Upload Part). After successfully uploading all relevant parts of an upload, you
-    * call this operation to complete the upload. Upon receiving this request, Amazon S3
-    * concatenates all the parts in ascending order by part number to create a new object. In the
-    * Complete Multipart Upload request, you must provide the parts list. For each part in the list,
-    * you must provide the part number and the ETag header value, returned after that part was
-    * uploaded.
-    * <p/>
-    * Processing of a Complete Multipart Upload request could take several minutes to complete.
-    * After Amazon S3 begins processing the request, it sends an HTTP response header that specifies
-    * a 200 OK response. While processing is in progress, Amazon S3 periodically sends whitespace
-    * characters to keep the connection from timing out. Because a request could fail after the
-    * initial 200 OK response has been sent, it is important that you check the response body to
-    * determine whether the request succeeded.
-    * <p/>
-    * Note that if Complete Multipart Upload fails, applications should be prepared to retry the
-    * failed requests.
-    *
-    * @param bucketName
-    *           namespace of the object you are deleting
-    * @param key
-    *           unique key in the s3Bucket identifying the object
-    * @param uploadId
-    *           id of the multipart upload in progress.
-    * @param parts
-    *           a map of part id to eTag from the {@link #uploadPart} command.
-    * @return ETag of the content uploaded
-    */
-   @Named("PutObject")
-   @POST
-   @Path("/{key}")
-   @ResponseParser(ETagFromHttpResponseViaRegex.class)
-   String completeMultipartUpload(@Bucket @EndpointParam(parser = AssignCorrectHostnameForBucket.class) @BinderParam(
-         BindAsHostPrefixIfConfigured.class) @ParamValidators(BucketNameValidator.class) String bucketName,
-         @PathParam("key") String key, @QueryParam("uploadId") String uploadId,
-         @BinderParam(BindPartIdsAndETagsToRequest.class) Map<Integer, String> parts);
-
-   /**
     * The Multi-Object Delete operation enables you to delete multiple objects from a bucket using a 
     * single HTTP request. If you know the object keys that you want to delete, then this operation 
     * provides a suitable alternative to sending individual delete requests (see DELETE Object), 
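
The four operations removed from AWSS3Client above (initiateMultipartUpload, abortMultipartUpload, uploadPart and completeMultipartUpload) are the low-level calls the sequential strategy drives. The sketch below shows that call sequence; it assumes, per the commit subject, that the same signatures are now exposed by the generic org.jclouds.s3.S3Client, and the class, method and variable names are invented for illustration.

// Illustrative only: assumes the MPU methods removed above now live on the
// generic org.jclouds.s3.S3Client with unchanged signatures.
import java.util.Map;

import org.jclouds.io.Payloads;
import org.jclouds.s3.S3Client;
import org.jclouds.s3.domain.ObjectMetadata;

import com.google.common.collect.ImmutableMap;

public class MpuSketch {

   // Initiate, upload two parts, then complete; abort on any failure so the
   // storage consumed by already-uploaded parts is freed.
   static String upload(S3Client client, String bucket, ObjectMetadata metadata,
         byte[] part1, byte[] part2) {
      String uploadId = client.initiateMultipartUpload(bucket, metadata);
      try {
         String etag1 = client.uploadPart(bucket, metadata.getKey(), 1, uploadId,
               Payloads.newByteArrayPayload(part1));
         String etag2 = client.uploadPart(bucket, metadata.getKey(), 2, uploadId,
               Payloads.newByteArrayPayload(part2));
         Map<Integer, String> parts = ImmutableMap.of(1, etag1, 2, etag2);
         return client.completeMultipartUpload(bucket, metadata.getKey(), uploadId, parts);
      } catch (RuntimeException e) {
         client.abortMultipartUpload(bucket, metadata.getKey(), uploadId);
         throw e;
      }
   }
}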

http://git-wip-us.apache.org/repos/asf/jclouds/blob/ba2f8ac2/providers/aws-s3/src/main/java/org/jclouds/aws/s3/binders/BindObjectMetadataToRequest.java
----------------------------------------------------------------------
diff --git a/providers/aws-s3/src/main/java/org/jclouds/aws/s3/binders/BindObjectMetadataToRequest.java b/providers/aws-s3/src/main/java/org/jclouds/aws/s3/binders/BindObjectMetadataToRequest.java
deleted file mode 100644
index f880353..0000000
--- a/providers/aws-s3/src/main/java/org/jclouds/aws/s3/binders/BindObjectMetadataToRequest.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.jclouds.aws.s3.binders;
-
-import static com.google.common.base.Preconditions.checkArgument;
-import static com.google.common.base.Preconditions.checkNotNull;
-import static com.google.common.io.BaseEncoding.base64;
-
-import javax.inject.Inject;
-import javax.inject.Singleton;
-
-import org.jclouds.blobstore.binders.BindMapToHeadersWithPrefix;
-import org.jclouds.http.HttpRequest;
-import org.jclouds.rest.Binder;
-import org.jclouds.s3.domain.ObjectMetadata;
-
-import com.google.common.collect.ImmutableMultimap;
-import com.google.common.collect.ImmutableMultimap.Builder;
-import com.google.common.net.HttpHeaders;
-
-@Singleton
-public class BindObjectMetadataToRequest implements Binder {
-   protected final BindMapToHeadersWithPrefix metadataPrefixer;
-
-   @Inject
-   public BindObjectMetadataToRequest(BindMapToHeadersWithPrefix metadataPrefixer) {
-      this.metadataPrefixer = checkNotNull(metadataPrefixer, "metadataPrefixer");
-   }
-
-   @SuppressWarnings("unchecked")
-   @Override
-   public <R extends HttpRequest> R bindToRequest(R request, Object input) {
-      checkArgument(checkNotNull(input, "input") instanceof ObjectMetadata,
-               "this binder is only valid for ObjectMetadata!");
-      checkNotNull(request, "request");
-
-      ObjectMetadata md = ObjectMetadata.class.cast(input);
-      checkArgument(md.getKey() != null, "objectMetadata.getKey() must be set!");
-
-      request = metadataPrefixer.bindToRequest(request, md.getUserMetadata());
-
-      Builder<String, String> headers = ImmutableMultimap.builder();
-      if (md.getCacheControl() != null) {
-         headers.put(HttpHeaders.CACHE_CONTROL, md.getCacheControl());
-      }
-
-      if (md.getContentMetadata().getContentDisposition() != null) {
-         headers.put("Content-Disposition", md.getContentMetadata().getContentDisposition());
-      }
-
-      if (md.getContentMetadata().getContentEncoding() != null) {
-         headers.put("Content-Encoding", md.getContentMetadata().getContentEncoding());
-      }
-
-      String contentLanguage = md.getContentMetadata().getContentLanguage();
-      if (contentLanguage != null) {
-         headers.put(HttpHeaders.CONTENT_LANGUAGE, contentLanguage);
-      }
-
-      if (md.getContentMetadata().getContentType() != null) {
-         headers.put(HttpHeaders.CONTENT_TYPE, md.getContentMetadata().getContentType());
-      } else {
-         headers.put(HttpHeaders.CONTENT_TYPE, "binary/octet-stream");
-      }
-
-      if (md.getContentMetadata().getContentMD5() != null) {
-         headers.put("Content-MD5", base64().encode(md.getContentMetadata().getContentMD5()));
-      }
-
-      return (R) request.toBuilder().replaceHeaders(headers.build()).build();
-   }
-}

http://git-wip-us.apache.org/repos/asf/jclouds/blob/ba2f8ac2/providers/aws-s3/src/main/java/org/jclouds/aws/s3/binders/BindPartIdsAndETagsToRequest.java
----------------------------------------------------------------------
diff --git a/providers/aws-s3/src/main/java/org/jclouds/aws/s3/binders/BindPartIdsAndETagsToRequest.java b/providers/aws-s3/src/main/java/org/jclouds/aws/s3/binders/BindPartIdsAndETagsToRequest.java
deleted file mode 100644
index 464c206..0000000
--- a/providers/aws-s3/src/main/java/org/jclouds/aws/s3/binders/BindPartIdsAndETagsToRequest.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.jclouds.aws.s3.binders;
-
-import static com.google.common.base.Preconditions.checkArgument;
-import static com.google.common.base.Preconditions.checkNotNull;
-
-import java.util.Map;
-import java.util.Map.Entry;
-
-import javax.inject.Singleton;
-import javax.ws.rs.core.MediaType;
-
-import org.jclouds.http.HttpRequest;
-import org.jclouds.io.Payload;
-import org.jclouds.io.Payloads;
-import org.jclouds.rest.Binder;
-
-@Singleton
-public class BindPartIdsAndETagsToRequest implements Binder {
-
-   @SuppressWarnings("unchecked")
-   @Override
-   public <R extends HttpRequest> R bindToRequest(R request, Object input) {
-      checkArgument(checkNotNull(input, "input") instanceof Map, "this binder is only valid for Map!");
-      checkNotNull(request, "request");
-
-      Map<Integer, String> map = (Map<Integer, String>) input;
-      checkArgument(!map.isEmpty(), "Please send parts");
-      StringBuilder content = new StringBuilder();
-      content.append("<CompleteMultipartUpload>");
-      for (Entry<Integer, String> entry : map.entrySet()) {
-         content.append("<Part>");
-         content.append("<PartNumber>").append(entry.getKey()).append("</PartNumber>");
-         content.append("<ETag>").append(entry.getValue()).append("</ETag>");
-         content.append("</Part>");
-      }
-      content.append("</CompleteMultipartUpload>");
-      Payload payload = Payloads.newStringPayload(content.toString());
-      payload.getContentMetadata().setContentType(MediaType.TEXT_XML);
-      request.setPayload(payload);
-      return request;
-   }
-}

http://git-wip-us.apache.org/repos/asf/jclouds/blob/ba2f8ac2/providers/aws-s3/src/main/java/org/jclouds/aws/s3/blobstore/AWSS3BlobStore.java
----------------------------------------------------------------------
diff --git a/providers/aws-s3/src/main/java/org/jclouds/aws/s3/blobstore/AWSS3BlobStore.java b/providers/aws-s3/src/main/java/org/jclouds/aws/s3/blobstore/AWSS3BlobStore.java
index a9b8b1e..4450737 100644
--- a/providers/aws-s3/src/main/java/org/jclouds/aws/s3/blobstore/AWSS3BlobStore.java
+++ b/providers/aws-s3/src/main/java/org/jclouds/aws/s3/blobstore/AWSS3BlobStore.java
@@ -27,7 +27,6 @@ import org.jclouds.aws.domain.Region;
 import org.jclouds.aws.s3.AWSS3Client;
 import org.jclouds.aws.s3.blobstore.options.AWSS3PutObjectOptions;
 import org.jclouds.aws.s3.blobstore.options.AWSS3PutOptions;
-import org.jclouds.aws.s3.blobstore.strategy.MultipartUploadStrategy;
 import org.jclouds.blobstore.BlobStoreContext;
 import org.jclouds.blobstore.domain.Blob;
 import org.jclouds.blobstore.domain.PageSet;
@@ -45,6 +44,7 @@ import org.jclouds.s3.blobstore.functions.BucketToResourceList;
 import org.jclouds.s3.blobstore.functions.ContainerToBucketListOptions;
 import org.jclouds.s3.blobstore.functions.ObjectToBlob;
 import org.jclouds.s3.blobstore.functions.ObjectToBlobMetadata;
+import org.jclouds.s3.blobstore.strategy.MultipartUploadStrategy;
 import org.jclouds.s3.domain.AccessControlList;
 import org.jclouds.s3.domain.BucketMetadata;
 import org.jclouds.s3.domain.CannedAccessPolicy;
@@ -60,7 +60,6 @@ import com.google.common.cache.LoadingCache;
  */
 public class AWSS3BlobStore extends S3BlobStore {
 
-   private final Provider<MultipartUploadStrategy> multipartUploadStrategy;
    private final LoadingCache<String, AccessControlList> bucketAcls;
    private final BlobToObject blob2Object;
 
@@ -75,8 +74,8 @@ public class AWSS3BlobStore extends S3BlobStore {
             Provider<MultipartUploadStrategy> multipartUploadStrategy) {
       super(context, blobUtils, defaultLocation, locations, sync, convertBucketsToStorageMetadata,
                container2BucketListOptions, bucket2ResourceList, object2Blob, blob2ObjectGetOptions, blob2Object,
-               object2BlobMd, fetchBlobMetadataProvider, bucketAcls);
-      this.multipartUploadStrategy = multipartUploadStrategy;
+               object2BlobMd, fetchBlobMetadataProvider, bucketAcls,
+               multipartUploadStrategy);
       this.bucketAcls = bucketAcls;
       this.blob2Object = blob2Object;
    }

http://git-wip-us.apache.org/repos/asf/jclouds/blob/ba2f8ac2/providers/aws-s3/src/main/java/org/jclouds/aws/s3/blobstore/config/AWSS3BlobStoreContextModule.java
----------------------------------------------------------------------
diff --git a/providers/aws-s3/src/main/java/org/jclouds/aws/s3/blobstore/config/AWSS3BlobStoreContextModule.java b/providers/aws-s3/src/main/java/org/jclouds/aws/s3/blobstore/config/AWSS3BlobStoreContextModule.java
index bfd6716..6c551d5 100644
--- a/providers/aws-s3/src/main/java/org/jclouds/aws/s3/blobstore/config/AWSS3BlobStoreContextModule.java
+++ b/providers/aws-s3/src/main/java/org/jclouds/aws/s3/blobstore/config/AWSS3BlobStoreContextModule.java
@@ -18,10 +18,6 @@ package org.jclouds.aws.s3.blobstore.config;
 
 import org.jclouds.aws.s3.blobstore.AWSS3BlobRequestSigner;
 import org.jclouds.aws.s3.blobstore.AWSS3BlobStore;
-import org.jclouds.aws.s3.blobstore.strategy.AsyncMultipartUploadStrategy;
-import org.jclouds.aws.s3.blobstore.strategy.MultipartUploadStrategy;
-import org.jclouds.aws.s3.blobstore.strategy.internal.ParallelMultipartUploadStrategy;
-import org.jclouds.aws.s3.blobstore.strategy.internal.SequentialMultipartUploadStrategy;
 import org.jclouds.blobstore.BlobRequestSigner;
 import org.jclouds.s3.blobstore.S3BlobStore;
 import org.jclouds.s3.blobstore.config.S3BlobStoreContextModule;
@@ -34,8 +30,6 @@ public class AWSS3BlobStoreContextModule extends S3BlobStoreContextModule {
    protected void configure() {
       super.configure();
       bind(S3BlobStore.class).to(AWSS3BlobStore.class).in(Scopes.SINGLETON);
-      bind(MultipartUploadStrategy.class).to(SequentialMultipartUploadStrategy.class);
-      bind(AsyncMultipartUploadStrategy.class).to(ParallelMultipartUploadStrategy.class);
    }
 
    @Override

http://git-wip-us.apache.org/repos/asf/jclouds/blob/ba2f8ac2/providers/aws-s3/src/main/java/org/jclouds/aws/s3/blobstore/strategy/AsyncMultipartUploadStrategy.java
----------------------------------------------------------------------
diff --git a/providers/aws-s3/src/main/java/org/jclouds/aws/s3/blobstore/strategy/AsyncMultipartUploadStrategy.java b/providers/aws-s3/src/main/java/org/jclouds/aws/s3/blobstore/strategy/AsyncMultipartUploadStrategy.java
deleted file mode 100644
index 3f638ae..0000000
--- a/providers/aws-s3/src/main/java/org/jclouds/aws/s3/blobstore/strategy/AsyncMultipartUploadStrategy.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.jclouds.aws.s3.blobstore.strategy;
-
-import org.jclouds.aws.s3.blobstore.strategy.internal.ParallelMultipartUploadStrategy;
-import org.jclouds.blobstore.domain.Blob;
-import org.jclouds.blobstore.options.PutOptions;
-
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.inject.ImplementedBy;
-
-@ImplementedBy(ParallelMultipartUploadStrategy.class)
-public interface AsyncMultipartUploadStrategy {
-   
-   ListenableFuture<String> execute(String container, Blob blob, PutOptions options);
-
-}

http://git-wip-us.apache.org/repos/asf/jclouds/blob/ba2f8ac2/providers/aws-s3/src/main/java/org/jclouds/aws/s3/blobstore/strategy/MultipartUpload.java
----------------------------------------------------------------------
diff --git a/providers/aws-s3/src/main/java/org/jclouds/aws/s3/blobstore/strategy/MultipartUpload.java b/providers/aws-s3/src/main/java/org/jclouds/aws/s3/blobstore/strategy/MultipartUpload.java
deleted file mode 100644
index 5342e67..0000000
--- a/providers/aws-s3/src/main/java/org/jclouds/aws/s3/blobstore/strategy/MultipartUpload.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.jclouds.aws.s3.blobstore.strategy;
-
-public final class MultipartUpload {
-
-   /* Maximum number of parts per upload */
-   public static final int MAX_NUMBER_OF_PARTS = 10000;
-   /* Maximum number of parts returned for a list parts request */
-   public static final int MAX_LIST_PARTS_RETURNED = 1000;
-   /* Maximum number of multipart uploads returned in a list multipart uploads request */
-   public static final int MAX_LIST_MPU_RETURNED = 1000;
-
-   /*
-    * part size 5 MB to 5 GB, last part can be < 5 MB
-    */
-   public static final long MIN_PART_SIZE = 5242880L;
-   public static final long MAX_PART_SIZE = 5368709120L;
-
-   private MultipartUpload() {
-      throw new AssertionError("intentionally unimplemented");
-   }
-}

http://git-wip-us.apache.org/repos/asf/jclouds/blob/ba2f8ac2/providers/aws-s3/src/main/java/org/jclouds/aws/s3/blobstore/strategy/MultipartUploadStrategy.java
----------------------------------------------------------------------
diff --git a/providers/aws-s3/src/main/java/org/jclouds/aws/s3/blobstore/strategy/MultipartUploadStrategy.java b/providers/aws-s3/src/main/java/org/jclouds/aws/s3/blobstore/strategy/MultipartUploadStrategy.java
deleted file mode 100644
index 12b7ef3..0000000
--- a/providers/aws-s3/src/main/java/org/jclouds/aws/s3/blobstore/strategy/MultipartUploadStrategy.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.jclouds.aws.s3.blobstore.strategy;
-
-import org.jclouds.aws.s3.blobstore.strategy.internal.SequentialMultipartUploadStrategy;
-import org.jclouds.blobstore.domain.Blob;
-
-import com.google.inject.ImplementedBy;
-
-@ImplementedBy(SequentialMultipartUploadStrategy.class)
-public interface MultipartUploadStrategy {
-   
-   String execute(String container, Blob blob);
-}

http://git-wip-us.apache.org/repos/asf/jclouds/blob/ba2f8ac2/providers/aws-s3/src/main/java/org/jclouds/aws/s3/blobstore/strategy/internal/MultipartUploadSlicingAlgorithm.java
----------------------------------------------------------------------
diff --git a/providers/aws-s3/src/main/java/org/jclouds/aws/s3/blobstore/strategy/internal/MultipartUploadSlicingAlgorithm.java b/providers/aws-s3/src/main/java/org/jclouds/aws/s3/blobstore/strategy/internal/MultipartUploadSlicingAlgorithm.java
deleted file mode 100644
index 8620541..0000000
--- a/providers/aws-s3/src/main/java/org/jclouds/aws/s3/blobstore/strategy/internal/MultipartUploadSlicingAlgorithm.java
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/*
- *   MultipartUploadSlicingAlgorithm.java
- *
- * 
- *   Created by: tibor
- *
- *   History
- */
-
-package org.jclouds.aws.s3.blobstore.strategy.internal;
-
-import javax.annotation.Resource;
-import javax.inject.Named;
-
-import org.jclouds.aws.s3.blobstore.strategy.MultipartUpload;
-import org.jclouds.blobstore.reference.BlobStoreConstants;
-import org.jclouds.logging.Logger;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.inject.Inject;
-
-public class MultipartUploadSlicingAlgorithm {
-
-   @Resource
-   @Named(BlobStoreConstants.BLOBSTORE_LOGGER)
-   protected Logger logger = Logger.NULL;
-
-   @VisibleForTesting
-   static final long DEFAULT_PART_SIZE = 33554432; // 32MB
-   
-   @VisibleForTesting
-   static final int DEFAULT_MAGNITUDE_BASE = 100;
-   
-   @Inject(optional = true)
-   @Named("jclouds.mpu.parts.size")
-   @VisibleForTesting
-   long defaultPartSize = DEFAULT_PART_SIZE;
-   
-   @Inject(optional = true)
-   @Named("jclouds.mpu.parts.magnitude")
-   @VisibleForTesting
-   int magnitudeBase = DEFAULT_MAGNITUDE_BASE;
-
-   // calculated only once, but not from the constructor
-   private volatile int parts; // required number of parts with chunkSize
-   private volatile long chunkSize;
-   private volatile long remaining; // number of bytes remained for the last part
-
-   // sequentially updated values
-   private volatile int part;
-   private volatile long chunkOffset;
-   private volatile long copied;
-
-   @VisibleForTesting
-   protected long calculateChunkSize(long length) {
-      long unitPartSize = defaultPartSize; // first try with default part size
-      int parts = (int)(length / unitPartSize);
-      long partSize = unitPartSize;
-      int magnitude = parts / magnitudeBase;
-      if (magnitude > 0) {
-         partSize = magnitude * unitPartSize;
-         if (partSize > MultipartUpload.MAX_PART_SIZE) {
-            partSize = MultipartUpload.MAX_PART_SIZE;
-            unitPartSize = MultipartUpload.MAX_PART_SIZE;
-         }
-         parts = (int)(length / partSize);
-         if (parts * partSize < length) {
-            partSize = (magnitude + 1) * unitPartSize;
-            if (partSize > MultipartUpload.MAX_PART_SIZE) {
-               partSize = MultipartUpload.MAX_PART_SIZE;
-               unitPartSize = MultipartUpload.MAX_PART_SIZE;
-            }
-            parts = (int)(length / partSize);
-         }
-      }
-      if (parts > MultipartUpload.MAX_NUMBER_OF_PARTS) { // if splits in too many parts or
-                                         // cannot be split
-         unitPartSize = MultipartUpload.MIN_PART_SIZE; // take the minimum part size
-         parts = (int)(length / unitPartSize);
-      }
-      if (parts > MultipartUpload.MAX_NUMBER_OF_PARTS) { // if it still splits into too many parts
-         parts = MultipartUpload.MAX_NUMBER_OF_PARTS - 1; // limit the count; do not worry about not
-                                          // covering
-      }
-      long remainder = length % unitPartSize;
-      if (remainder == 0 && parts > 0) {
-         parts -= 1;
-      }
-      this.chunkSize = partSize;
-      this.parts = parts;
-      this.remaining = length - partSize * parts;
-      logger.debug(" %d bytes partitioned in %d parts of part size: %d, remaining: %d%s", length, parts, chunkSize,
-            remaining, remaining > MultipartUpload.MAX_PART_SIZE ? " overflow!" : "");
-      return this.chunkSize;
-   }
-
-   public long getCopied() {
-      return copied;
-   }
-
-   public void setCopied(long copied) {
-      this.copied = copied;
-   }
-
-   @VisibleForTesting
-   protected int getParts() {
-      return parts;
-   }
-
-   protected int getNextPart() {
-      return ++part;
-   }
-
-   protected void addCopied(long copied) {
-      this.copied += copied;
-   }
-
-   protected long getNextChunkOffset() {
-      long next = chunkOffset;
-      chunkOffset += getChunkSize();
-      return next;
-   }
-
-   @VisibleForTesting
-   protected long getChunkSize() {
-      return chunkSize;
-   }
-
-   @VisibleForTesting
-   protected long getRemaining() {
-      return remaining;
-   }
-
-}
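
For reference, the partitioning behaviour implemented by the class removed above can be
reproduced with a small standalone sketch. This is not a jclouds class: it simply mirrors
the calculateChunkSize() logic, with the assumed defaults of a 32 MB base part size, a
magnitude base of 100, and the usual S3 limits of 5 GB per part and 10 000 parts per upload.

public class SlicingSketch {

   // assumed limits and defaults, mirroring the constants referenced above
   static final long MIN_PART_SIZE = 5L * 1024 * 1024;          // 5 MB
   static final long MAX_PART_SIZE = 5L * 1024 * 1024 * 1024;   // 5 GB
   static final int MAX_NUMBER_OF_PARTS = 10000;
   static final long DEFAULT_PART_SIZE = 32L * 1024 * 1024;     // 32 MB
   static final int MAGNITUDE_BASE = 100;

   public static void main(String[] args) {
      for (long gib : new long[] {1, 4, 40, 400, 4000}) {
         long length = gib * 1024 * 1024 * 1024;
         long unit = DEFAULT_PART_SIZE;
         int parts = (int) (length / unit);
         long partSize = unit;
         int magnitude = parts / MAGNITUDE_BASE;
         if (magnitude > 0) {
            // past the magnitude base, grow the part size instead of the part count
            partSize = magnitude * unit;
            if (partSize > MAX_PART_SIZE) {
               partSize = MAX_PART_SIZE;
               unit = MAX_PART_SIZE;
            }
            parts = (int) (length / partSize);
            if (parts * partSize < length) {
               partSize = (magnitude + 1) * unit;
               if (partSize > MAX_PART_SIZE) {
                  partSize = MAX_PART_SIZE;
                  unit = MAX_PART_SIZE;
               }
               parts = (int) (length / partSize);
            }
         }
         if (parts > MAX_NUMBER_OF_PARTS) {
            // still too many parts: fall back to the minimum part size
            unit = MIN_PART_SIZE;
            parts = (int) (length / unit);
         }
         if (parts > MAX_NUMBER_OF_PARTS) {
            parts = MAX_NUMBER_OF_PARTS - 1;
         }
         if (length % unit == 0 && parts > 0) {
            parts -= 1; // the final full-size chunk is treated as the remainder
         }
         long remaining = length - partSize * parts;
         System.out.printf("%d GiB -> %d full parts of %d bytes, remainder %d bytes%n",
               gib, parts, partSize, remaining);
      }
   }
}

Running it shows the intended pattern: the part count climbs until it passes the magnitude
base, after which the part size grows in 32 MB steps, and only once the 5 GB per-part ceiling
is reached does the part count start climbing again.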

http://git-wip-us.apache.org/repos/asf/jclouds/blob/ba2f8ac2/providers/aws-s3/src/main/java/org/jclouds/aws/s3/blobstore/strategy/internal/ParallelMultipartUploadStrategy.java
----------------------------------------------------------------------
diff --git a/providers/aws-s3/src/main/java/org/jclouds/aws/s3/blobstore/strategy/internal/ParallelMultipartUploadStrategy.java b/providers/aws-s3/src/main/java/org/jclouds/aws/s3/blobstore/strategy/internal/ParallelMultipartUploadStrategy.java
deleted file mode 100644
index 9694274..0000000
--- a/providers/aws-s3/src/main/java/org/jclouds/aws/s3/blobstore/strategy/internal/ParallelMultipartUploadStrategy.java
+++ /dev/null
@@ -1,292 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.jclouds.aws.s3.blobstore.strategy.internal;
-
-import static com.google.common.base.Preconditions.checkNotNull;
-
-import java.util.Map;
-import java.util.Queue;
-import java.util.SortedMap;
-import java.util.concurrent.ArrayBlockingQueue;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.Callable;
-import java.util.concurrent.CancellationException;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentLinkedQueue;
-import java.util.concurrent.ConcurrentSkipListMap;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import javax.annotation.Resource;
-import javax.inject.Named;
-
-import org.jclouds.Constants;
-import org.jclouds.aws.s3.AWSS3Client;
-import org.jclouds.aws.s3.blobstore.AWSS3BlobStore;
-import org.jclouds.aws.s3.blobstore.strategy.AsyncMultipartUploadStrategy;
-import org.jclouds.blobstore.domain.Blob;
-import org.jclouds.blobstore.internal.BlobRuntimeException;
-import org.jclouds.blobstore.options.PutOptions;
-import org.jclouds.blobstore.reference.BlobStoreConstants;
-import org.jclouds.io.Payload;
-import org.jclouds.io.PayloadSlicer;
-import org.jclouds.logging.Logger;
-import org.jclouds.s3.domain.ObjectMetadataBuilder;
-import org.jclouds.util.Throwables2;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.Maps;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.ListeningExecutorService;
-import com.google.inject.Inject;
-
-public class ParallelMultipartUploadStrategy implements AsyncMultipartUploadStrategy {
-   @Resource
-   @Named(BlobStoreConstants.BLOBSTORE_LOGGER)
-   protected Logger logger = Logger.NULL;
-
-   @VisibleForTesting
-   static final int DEFAULT_PARALLEL_DEGREE = 4;
-   @VisibleForTesting
-   static final int DEFAULT_MIN_RETRIES = 5;
-   @VisibleForTesting
-   static final int DEFAULT_MAX_PERCENT_RETRIES = 10;
-
-   private final ListeningExecutorService executor;
-
-   @Inject(optional = true)
-   @Named("jclouds.mpu.parallel.degree")
-   @VisibleForTesting
-   int parallelDegree = DEFAULT_PARALLEL_DEGREE;
-
-   @Inject(optional = true)
-   @Named("jclouds.mpu.parallel.retries.min")
-   @VisibleForTesting
-   int minRetries = DEFAULT_MIN_RETRIES;
-
-   @Inject(optional = true)
-   @Named("jclouds.mpu.parallel.retries.maxpercent")
-   @VisibleForTesting
-   int maxPercentRetries = DEFAULT_MAX_PERCENT_RETRIES;
-
-   /**
-    * maximum duration of a blob request
-    */
-   @Inject(optional = true)
-   @Named(Constants.PROPERTY_REQUEST_TIMEOUT)
-   protected Long maxTime;
-
-   protected final AWSS3BlobStore blobstore;
-   protected final PayloadSlicer slicer;
-
-   @Inject
-   public ParallelMultipartUploadStrategy(AWSS3BlobStore blobstore, PayloadSlicer slicer,
-         @Named(Constants.PROPERTY_USER_THREADS) ListeningExecutorService executor) {
-      this.blobstore = checkNotNull(blobstore, "blobstore");
-      this.slicer = checkNotNull(slicer, "slicer");
-      this.executor = checkNotNull(executor, "executor");
-   }
-
-   protected void prepareUploadPart(final String container, final String key,
-         final String uploadId, final Integer part, final Payload payload,
-         final long offset, final long size, final SortedMap<Integer, String> etags,
-         final BlockingQueue<Integer> activeParts,
-         final Map<Integer, ListenableFuture<String>> futureParts,
-         final AtomicInteger errors, final int maxRetries, final Map<Integer, Exception> errorMap,
-         final Queue<Part> toRetry, final CountDownLatch latch) {
-      if (errors.get() > maxRetries) {
-         activeParts.remove(part); // remove part from the bounded-queue without blocking
-         latch.countDown();
-         return;
-      }
-      final AWSS3Client client = blobstore.getContext().unwrapApi(AWSS3Client.class);
-      final Payload chunkedPart = slicer.slice(payload, offset, size);
-      logger.debug(String.format("async uploading part %s of %s to container %s with uploadId %s", part, key, container, uploadId));
-      final long start = System.currentTimeMillis();
-      final ListenableFuture<String> futureETag = executor.submit(new Callable<String>() {
-         @Override public String call() throws Exception {
-            return client.uploadPart(container, key, part, uploadId, chunkedPart);
-         }
-      });
-      futureETag.addListener(new Runnable() {
-         @Override
-         public void run() {
-            try {
-               etags.put(part, futureETag.get());
-               logger.debug(String.format("async uploaded part %s of %s to container %s in %sms with uploadId %s",
-                     part, key, container, System.currentTimeMillis() - start, uploadId));
-            } catch (CancellationException e) {
-               errorMap.put(part, e);
-               String message = String.format("%s while uploading part %s - [%s,%s] to container %s with uploadId: %s running since %dms",
-                     e.getMessage(), part, offset, size, container, uploadId, System.currentTimeMillis() - start);
-               logger.debug(message);
-            } catch (Exception e) {
-               errorMap.put(part, e);
-               String message = String.format("%s while uploading part %s - [%s,%s] to container %s with uploadId: %s running since %dms",
-                     e.getMessage(), part, offset, size, container, uploadId, System.currentTimeMillis() - start);
-               logger.error(message, e);
-               if (errors.incrementAndGet() <= maxRetries)
-                  toRetry.add(new Part(part, offset, size));
-            } finally {
-               activeParts.remove(part); // remove part from the bounded-queue without blocking
-               futureParts.remove(part);
-               latch.countDown();
-            }
-         }
-      }, executor);
-      futureParts.put(part, futureETag);
-   }
-
-   @Override
-   public ListenableFuture<String> execute(final String container, final Blob blob, final PutOptions options) {
-      return executor.submit(new Callable<String>() {
-               @Override
-               public String call() throws Exception {
-                  String key = blob.getMetadata().getName();
-                  Payload payload = blob.getPayload();
-                  MultipartUploadSlicingAlgorithm algorithm = new MultipartUploadSlicingAlgorithm();
-                  algorithm.calculateChunkSize(payload.getContentMetadata()
-                        .getContentLength());
-                  int parts = algorithm.getParts();
-                  long chunkSize = algorithm.getChunkSize();
-                  long remaining = algorithm.getRemaining();
-                  if (parts > 0) {
-                     final AWSS3Client client = blobstore.getContext().unwrapApi(AWSS3Client.class);
-                     String uploadId = null;
-                     final Map<Integer, ListenableFuture<String>> futureParts =
-                        new ConcurrentHashMap<Integer, ListenableFuture<String>>();
-                     final Map<Integer, Exception> errorMap = Maps.newHashMap();
-                     AtomicInteger errors = new AtomicInteger(0);
-                     int maxRetries = Math.max(minRetries, parts * maxPercentRetries / 100);
-                     int effectiveParts = remaining > 0 ? parts + 1 : parts;
-                     try {
-                        uploadId = client.initiateMultipartUpload(container,
-                                 ObjectMetadataBuilder.create().key(key).build()); // TODO md5
-                        logger.debug(String.format("initiated multipart upload of %s to container %s" +
-                              " with uploadId %s consisting of %s parts (possible max. retries: %d)",
-                              key, container, uploadId, effectiveParts, maxRetries));
-                        // we need a bounded blocking queue to control the number of parallel jobs
-                        ArrayBlockingQueue<Integer> activeParts = new ArrayBlockingQueue<Integer>(parallelDegree);
-                        Queue<Part> toRetry = new ConcurrentLinkedQueue<Part>();
-                        SortedMap<Integer, String> etags = new ConcurrentSkipListMap<Integer, String>();
-                        CountDownLatch latch = new CountDownLatch(effectiveParts);
-                        int part;
-                        while ((part = algorithm.getNextPart()) <= parts) {
-                           Integer partKey = Integer.valueOf(part);
-                           activeParts.put(partKey);
-                           prepareUploadPart(container, key, uploadId, partKey, payload,
-                                 algorithm.getNextChunkOffset(), chunkSize, etags,
-                                 activeParts, futureParts, errors, maxRetries, errorMap, toRetry, latch);
-                        }
-                        if (remaining > 0) {
-                           Integer partKey = Integer.valueOf(part);
-                           activeParts.put(partKey);
-                           prepareUploadPart(container, key, uploadId, partKey, payload,
-                                 algorithm.getNextChunkOffset(), remaining, etags,
-                                 activeParts, futureParts, errors, maxRetries, errorMap, toRetry, latch);
-                        }
-                        latch.await();
-                        // handling retries
-                        while (errors.get() <= maxRetries && !toRetry.isEmpty()) {
-                           int atOnce = Math.min(Math.min(toRetry.size(), errors.get()), parallelDegree);
-                           CountDownLatch retryLatch = new CountDownLatch(atOnce);
-                           for (int i = 0; i < atOnce; i++) {
-                              Part failedPart = toRetry.poll();
-                              Integer partKey = Integer.valueOf(failedPart.getPart());
-                              activeParts.put(partKey);
-                              prepareUploadPart(container, key, uploadId, partKey, payload,
-                                    failedPart.getOffset(), failedPart.getSize(), etags,
-                                    activeParts, futureParts, errors, maxRetries, errorMap, toRetry, retryLatch);
-                           }
-                           retryLatch.await();
-                        }
-                        if (errors.get() > maxRetries) {
-                           throw new BlobRuntimeException(String.format(
-                                 "Too many failed parts: %s while multipart upload of %s to container %s with uploadId %s",
-                                 errors.get(), key, container, uploadId));
-                        }
-                        String eTag = client.completeMultipartUpload(container, key, uploadId, etags);
-                        logger.debug(String.format("multipart upload of %s to container %s with uploadId %s" +
-                            " successfully finished with %s retries", key, container, uploadId, errors.get()));
-                        return eTag;
-                     } catch (Exception ex) {
-                        RuntimeException rtex = Throwables2.getFirstThrowableOfType(ex, RuntimeException.class);
-                        if (rtex == null) {
-                           rtex = new RuntimeException(ex);
-                        }
-                        for (Map.Entry<Integer, ListenableFuture<String>> entry : futureParts.entrySet()) {
-                           entry.getValue().cancel(false);
-                        }
-                        if (uploadId != null) {
-                           client.abortMultipartUpload(container, key, uploadId);
-                        }
-                        throw rtex;
-                     }
-                  } else {
-                     // Issue 936: don't just call putBlob, as that will see options=multiPart and
-                     // recursively call this execute method again; instead mark as not multipart
-                     // because it can all fit in one go.
-                     final PutOptions nonMultipartOptions = PutOptions.Builder.multipart(false);
-                     ListenableFuture<String> futureETag = executor.submit(new Callable<String>() {
-                        @Override public String call() throws Exception {
-                           return blobstore.putBlob(container, blob, nonMultipartOptions);
-                        }
-                     });
-                     return maxTime != null ?
-                           futureETag.get(maxTime, TimeUnit.SECONDS) : futureETag.get();
-                  }
-               }
-            });
-   }
-
-   static class Part {
-      private int part;
-      private long offset;
-      private long size;
-
-      Part(int part, long offset, long size) {
-         this.part = part;
-         this.offset = offset;
-         this.size = size;
-      }
-
-      public int getPart() {
-         return part;
-      }
-
-      public void setPart(int part) {
-         this.part = part;
-      }
-
-      public long getOffset() {
-         return offset;
-      }
-
-      public void setOffset(long offset) {
-         this.offset = offset;
-      }
-
-      public long getSize() {
-         return size;
-      }
-
-      public void setSize(long size) {
-         this.size = size;
-      }
-   }
-}
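
The tuning knobs read by this strategy (jclouds.mpu.parallel.degree,
jclouds.mpu.parallel.retries.min, jclouds.mpu.parallel.retries.maxpercent) and by the slicing
algorithm (jclouds.mpu.parts.size, jclouds.mpu.parts.magnitude) are ordinary property
overrides. Below is a minimal sketch of supplying them through ContextBuilder; the
credentials, bucket, blob name and file path are placeholders, and it assumes the property
names stay the same after the move to the generic S3 code.

import java.io.File;
import java.util.Properties;

import org.jclouds.ContextBuilder;
import org.jclouds.blobstore.BlobStore;
import org.jclouds.blobstore.BlobStoreContext;
import org.jclouds.blobstore.domain.Blob;
import org.jclouds.blobstore.options.PutOptions;

public class MpuOverridesSketch {
   public static void main(String[] args) {
      Properties overrides = new Properties();
      overrides.setProperty("jclouds.mpu.parallel.degree", "8");                  // example value
      overrides.setProperty("jclouds.mpu.parts.size", String.valueOf(64L << 20)); // 64 MB parts

      BlobStoreContext context = ContextBuilder.newBuilder("aws-s3")
            .credentials("accessKey", "secretKey")              // placeholders
            .overrides(overrides)
            .buildView(BlobStoreContext.class);
      try {
         BlobStore blobStore = context.getBlobStore();
         Blob blob = blobStore.blobBuilder("large-object")      // placeholder blob name
               .payload(new File("/tmp/large.bin"))             // placeholder file
               .build();
         // multipart() asks the blobstore to route the put through a multipart strategy
         blobStore.putBlob("my-bucket", blob, PutOptions.Builder.multipart());
      } finally {
         context.close();
      }
   }
}

Whether a given put actually goes multipart still depends on the payload length and the
strategy bound for the provider; the overrides only tune that path once it is taken.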

http://git-wip-us.apache.org/repos/asf/jclouds/blob/ba2f8ac2/providers/aws-s3/src/main/java/org/jclouds/aws/s3/blobstore/strategy/internal/SequentialMultipartUploadStrategy.java
----------------------------------------------------------------------
diff --git a/providers/aws-s3/src/main/java/org/jclouds/aws/s3/blobstore/strategy/internal/SequentialMultipartUploadStrategy.java b/providers/aws-s3/src/main/java/org/jclouds/aws/s3/blobstore/strategy/internal/SequentialMultipartUploadStrategy.java
deleted file mode 100644
index 9c15b4e..0000000
--- a/providers/aws-s3/src/main/java/org/jclouds/aws/s3/blobstore/strategy/internal/SequentialMultipartUploadStrategy.java
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.jclouds.aws.s3.blobstore.strategy.internal;
-
-import static com.google.common.base.Preconditions.checkNotNull;
-
-import java.util.SortedMap;
-
-import javax.annotation.Resource;
-import javax.inject.Named;
-
-import org.jclouds.aws.s3.AWSS3Client;
-import org.jclouds.aws.s3.blobstore.strategy.MultipartUploadStrategy;
-import org.jclouds.blobstore.KeyNotFoundException;
-import org.jclouds.blobstore.domain.Blob;
-import org.jclouds.blobstore.reference.BlobStoreConstants;
-import org.jclouds.io.ContentMetadata;
-import org.jclouds.io.Payload;
-import org.jclouds.io.PayloadSlicer;
-import org.jclouds.logging.Logger;
-import org.jclouds.s3.blobstore.functions.BlobToObject;
-import org.jclouds.s3.domain.ObjectMetadataBuilder;
-
-import com.google.common.collect.Maps;
-import com.google.inject.Inject;
-
-/**
- * Provides a sequential multipart upload strategy.
- * 
- * The file partitioning algorithm:
- * 
- * The default part size is 32 MB, and the actual part size is always a
- * multiple of this default. The number of parts first grows up to a chosen
- * magnitude (for example 100 parts); beyond that point the part size grows
- * instead of the part count. Once the maximum part size is reached, the
- * number of parts starts to grow again.
- */
-public class SequentialMultipartUploadStrategy implements MultipartUploadStrategy {
-   @Resource
-   @Named(BlobStoreConstants.BLOBSTORE_LOGGER)
-   private Logger logger = Logger.NULL;
-
-   private final AWSS3Client client;
-   private final BlobToObject blobToObject;
-   private final MultipartUploadSlicingAlgorithm algorithm;
-   private final PayloadSlicer slicer;
-
-   @Inject
-   public SequentialMultipartUploadStrategy(AWSS3Client client, BlobToObject blobToObject,
-         MultipartUploadSlicingAlgorithm algorithm, PayloadSlicer slicer) {
-      this.client = checkNotNull(client, "client");
-      this.blobToObject = checkNotNull(blobToObject, "blobToObject");
-      this.algorithm = checkNotNull(algorithm, "algorithm");
-      this.slicer = checkNotNull(slicer, "slicer");
-   }
-
-   @Override
-   public String execute(String container, Blob blob) {
-      String key = blob.getMetadata().getName();
-      ContentMetadata metadata = blob.getMetadata().getContentMetadata();
-      Payload payload = blob.getPayload();
-      Long length = payload.getContentMetadata().getContentLength();
-      checkNotNull(length,
-            "please invoke payload.getContentMetadata().setContentLength(length) prior to multipart upload");
-      long chunkSize = algorithm.calculateChunkSize(length);
-      int partCount = algorithm.getParts();
-      if (partCount > 0) {
-         ObjectMetadataBuilder builder = ObjectMetadataBuilder.create().key(key)
-            .contentType(metadata.getContentType())
-            .contentDisposition(metadata.getContentDisposition())
-            .contentEncoding(metadata.getContentEncoding())
-            .contentLanguage(metadata.getContentLanguage())
-            .userMetadata(blob.getMetadata().getUserMetadata());
-         String uploadId = client.initiateMultipartUpload(container, builder.build());
-         try {
-            SortedMap<Integer, String> etags = Maps.newTreeMap();
-            for (Payload part : slicer.slice(payload, chunkSize)) {
-               int partNum = algorithm.getNextPart();
-               prepareUploadPart(container, key, uploadId, partNum, part, algorithm.getNextChunkOffset(), etags);
-            }
-            return client.completeMultipartUpload(container, key, uploadId, etags);
-         } catch (RuntimeException ex) {
-            client.abortMultipartUpload(container, key, uploadId);
-            throw ex;
-         }
-      } else {
-         // TODO: find a way to disable multipart. if we pass the original
-         // options, it goes into a stack overflow
-         return client.putObject(container, blobToObject.apply(blob));
-      }
-   }
-
-   private void prepareUploadPart(String container, String key, String uploadId, int part, Payload chunkedPart,
-         long offset, SortedMap<Integer, String> etags) {
-      String eTag = null;
-      try {
-         eTag = client.uploadPart(container, key, part, uploadId, chunkedPart);
-         etags.put(Integer.valueOf(part), eTag);
-      } catch (KeyNotFoundException e) {
-         // note that because of eventual consistency, the upload id may not be
-         // present yet; we may wish to add this condition to the retry handler
-
-         // we may also choose to implement ListParts and wait for the uploadId
-         // to become available there.
-         eTag = client.uploadPart(container, key, part, uploadId, chunkedPart);
-         etags.put(Integer.valueOf(part), eTag);
-      }
-   }
-}

http://git-wip-us.apache.org/repos/asf/jclouds/blob/ba2f8ac2/providers/aws-s3/src/main/java/org/jclouds/aws/s3/functions/ETagFromHttpResponseViaRegex.java
----------------------------------------------------------------------
diff --git a/providers/aws-s3/src/main/java/org/jclouds/aws/s3/functions/ETagFromHttpResponseViaRegex.java b/providers/aws-s3/src/main/java/org/jclouds/aws/s3/functions/ETagFromHttpResponseViaRegex.java
deleted file mode 100644
index 46f5837..0000000
--- a/providers/aws-s3/src/main/java/org/jclouds/aws/s3/functions/ETagFromHttpResponseViaRegex.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.jclouds.aws.s3.functions;
-
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import javax.inject.Inject;
-import javax.inject.Singleton;
-
-import org.jclouds.http.HttpResponse;
-import org.jclouds.http.functions.ReturnStringIf2xx;
-
-import com.google.common.base.Function;
-
-@Singleton
-public class ETagFromHttpResponseViaRegex implements Function<HttpResponse, String> {
-   private static Pattern pattern = Pattern.compile("<ETag>([\\S&&[^<]]+)</ETag>");
-   private static String ESCAPED_QUOTE = "&quot;";
-   private final ReturnStringIf2xx returnStringIf200;
-
-   @Inject
-   ETagFromHttpResponseViaRegex(ReturnStringIf2xx returnStringIf200) {
-      this.returnStringIf200 = returnStringIf200;
-   }
-
-   @Override
-   public String apply(HttpResponse response) {
-      String value = null;
-      String content = returnStringIf200.apply(response);
-      if (content != null) {
-         Matcher matcher = pattern.matcher(content);
-         if (matcher.find()) {
-            value = matcher.group(1);
-            if (value.indexOf(ESCAPED_QUOTE) != -1) {
-               value = value.replace(ESCAPED_QUOTE, "\"");
-            }
-         }
-      }
-      return value;
-   }
-
-}

http://git-wip-us.apache.org/repos/asf/jclouds/blob/ba2f8ac2/providers/aws-s3/src/main/java/org/jclouds/aws/s3/functions/ObjectMetadataKey.java
----------------------------------------------------------------------
diff --git a/providers/aws-s3/src/main/java/org/jclouds/aws/s3/functions/ObjectMetadataKey.java b/providers/aws-s3/src/main/java/org/jclouds/aws/s3/functions/ObjectMetadataKey.java
deleted file mode 100644
index 2fdc9ec..0000000
--- a/providers/aws-s3/src/main/java/org/jclouds/aws/s3/functions/ObjectMetadataKey.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.jclouds.aws.s3.functions;
-
-import javax.inject.Singleton;
-
-import org.jclouds.s3.domain.ObjectMetadata;
-
-import com.google.common.base.Function;
-
-@Singleton
-public class ObjectMetadataKey implements Function<Object, String> {
-
-   public String apply(Object from) {
-      return ((ObjectMetadata) from).getKey();
-   }
-
-}

http://git-wip-us.apache.org/repos/asf/jclouds/blob/ba2f8ac2/providers/aws-s3/src/main/java/org/jclouds/aws/s3/functions/UploadIdFromHttpResponseViaRegex.java
----------------------------------------------------------------------
diff --git a/providers/aws-s3/src/main/java/org/jclouds/aws/s3/functions/UploadIdFromHttpResponseViaRegex.java b/providers/aws-s3/src/main/java/org/jclouds/aws/s3/functions/UploadIdFromHttpResponseViaRegex.java
deleted file mode 100644
index 9b42714..0000000
--- a/providers/aws-s3/src/main/java/org/jclouds/aws/s3/functions/UploadIdFromHttpResponseViaRegex.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.jclouds.aws.s3.functions;
-
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import javax.inject.Inject;
-import javax.inject.Singleton;
-
-import org.jclouds.http.HttpResponse;
-import org.jclouds.http.functions.ReturnStringIf2xx;
-
-import com.google.common.base.Function;
-
-@Singleton
-public class UploadIdFromHttpResponseViaRegex implements Function<HttpResponse, String> {
-   Pattern pattern = Pattern.compile("<UploadId>([\\S&&[^<]]+)</UploadId>");
-   private final ReturnStringIf2xx returnStringIf200;
-
-   @Inject
-   UploadIdFromHttpResponseViaRegex(ReturnStringIf2xx returnStringIf200) {
-      this.returnStringIf200 = returnStringIf200;
-   }
-
-   @Override
-   public String apply(HttpResponse response) {
-      String value = null;
-      String content = returnStringIf200.apply(response);
-      if (content != null) {
-         Matcher matcher = pattern.matcher(content);
-         if (matcher.find()) {
-            value = matcher.group(1);
-         }
-      }
-      return value;
-   }
-
-}
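
Both regex parsers removed above (one for the ETag, one for the UploadId) reduce to a single
capture group applied to the XML response body. A self-contained sketch using the same
patterns follows; the response bodies are abbreviated, made-up examples rather than captures
from a live endpoint.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class ResponseRegexSketch {
   // same patterns as in the removed parsers
   private static final Pattern UPLOAD_ID = Pattern.compile("<UploadId>([\\S&&[^<]]+)</UploadId>");
   private static final Pattern ETAG = Pattern.compile("<ETag>([\\S&&[^<]]+)</ETag>");

   public static void main(String[] args) {
      String initiateBody = "<InitiateMultipartUploadResult><Bucket>b</Bucket><Key>k</Key>"
            + "<UploadId>EXAMPLE_UPLOAD_ID</UploadId></InitiateMultipartUploadResult>";
      String completeBody = "<CompleteMultipartUploadResult>"
            + "<ETag>&quot;3858f62230ac3c915f300c664312c11f-2&quot;</ETag>"
            + "</CompleteMultipartUploadResult>";

      Matcher m = UPLOAD_ID.matcher(initiateBody);
      if (m.find()) {
         System.out.println("uploadId = " + m.group(1));
      }

      m = ETAG.matcher(completeBody);
      if (m.find()) {
         // the ETag arrives XML-escaped, so unescape the quotes as the parser above does
         System.out.println("eTag = " + m.group(1).replace("&quot;", "\""));
      }
   }
}

In the real parsers the body comes from ReturnStringIf2xx applied to the HttpResponse, so a
non-2xx response yields a null body and the functions return null.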