Posted to common-commits@hadoop.apache.org by at...@apache.org on 2014/11/06 02:27:10 UTC

[3/4] HADOOP-10714. AmazonS3Client.deleteObjects() needs to be limited to 1000 entries per call. Contributed by Juan Yu. (cherry picked from commit 6ba52d88ec11444cbac946ffadbc645acd0657de)
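
The committed change chunks each multi-object delete before calling AmazonS3Client.deleteObjects(), since S3's multi-object delete API rejects requests carrying more than 1000 keys. A minimal sketch of that batching pattern against the AWS SDK v1 API (the class, method name, and BATCH_LIMIT constant here are illustrative, not the committed code):

    import java.util.List;

    import com.amazonaws.services.s3.AmazonS3Client;
    import com.amazonaws.services.s3.model.DeleteObjectsRequest;
    import com.amazonaws.services.s3.model.DeleteObjectsRequest.KeyVersion;

    class BatchedDelete {
      // S3 fails a DeleteObjectsRequest with more than 1000 keys,
      // so split the key list and issue one request per chunk.
      private static final int BATCH_LIMIT = 1000;

      static void deleteInBatches(AmazonS3Client s3, String bucket,
          List<KeyVersion> keys) {
        for (int start = 0; start < keys.size(); start += BATCH_LIMIT) {
          int end = Math.min(start + BATCH_LIMIT, keys.size());
          DeleteObjectsRequest request = new DeleteObjectsRequest(bucket)
              .withKeys(keys.subList(start, end));
          s3.deleteObjects(request);
        }
      }
    }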

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9082fe4e/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/TestS3ADeleteManyFiles.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/TestS3ADeleteManyFiles.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/TestS3ADeleteManyFiles.java
new file mode 100644
index 0000000..c913a67
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/TestS3ADeleteManyFiles.java
@@ -0,0 +1,131 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.scale;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorCompletionService;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+
+import static org.junit.Assert.assertEquals;
+
+public class TestS3ADeleteManyFiles extends S3AScaleTestBase {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestS3ADeleteManyFiles.class);
+
+
+  @Rule
+  public Timeout testTimeout = new Timeout(30 * 60 * 1000);
+
+  @Test
+  public void testBulkRenameAndDelete() throws Throwable {
+    final Path scaleTestDir = getTestPath();
+    final Path srcDir = new Path(scaleTestDir, "src");
+    final Path finalDir = new Path(scaleTestDir, "final");
+    final long count = getOperationCount();
+    ContractTestUtils.rm(fs, scaleTestDir, true, false);
+
+    fs.mkdirs(srcDir);
+    fs.mkdirs(finalDir);
+
+    int testBufferSize = fs.getConf()
+        .getInt(ContractTestUtils.IO_CHUNK_BUFFER_SIZE,
+            ContractTestUtils.DEFAULT_IO_CHUNK_BUFFER_SIZE);
+    // use Executor to speed up file creation
+    ExecutorService exec = Executors.newFixedThreadPool(16);
+    final ExecutorCompletionService<Boolean> completionService =
+        new ExecutorCompletionService<Boolean>(exec);
+    try {
+      final byte[] data = ContractTestUtils.dataset(testBufferSize, 'a', 'z');
+
+      for (int i = 0; i < count; ++i) {
+        final String fileName = "foo-" + i;
+        completionService.submit(new Callable<Boolean>() {
+          @Override
+          public Boolean call() throws IOException {
+            ContractTestUtils.createFile(fs, new Path(srcDir, fileName),
+                false, data);
+            return fs.exists(new Path(srcDir, fileName));
+          }
+        });
+      }
+      for (int i = 0; i < count; ++i) {
+        final Future<Boolean> future = completionService.take();
+        try {
+          if (!future.get()) {
+            LOG.warn("Cannot create file");
+          }
+        } catch (ExecutionException e) {
+          LOG.warn("Error while uploading file", e.getCause());
+          throw e;
+        }
+      }
+    } finally {
+      exec.shutdown();
+    }
+
+    int nSrcFiles = fs.listStatus(srcDir).length;
+    fs.rename(srcDir, finalDir);
+    assertEquals(nSrcFiles, fs.listStatus(finalDir).length);
+    ContractTestUtils.assertPathDoesNotExist(fs, "not deleted after rename",
+        new Path(srcDir, "foo-" + 0));
+    ContractTestUtils.assertPathDoesNotExist(fs, "not deleted after rename",
+        new Path(srcDir, "foo-" + count / 2));
+    ContractTestUtils.assertPathDoesNotExist(fs, "not deleted after rename",
+        new Path(srcDir, "foo-" + (count - 1)));
+    ContractTestUtils.assertPathExists(fs, "not renamed to dest dir",
+        new Path(finalDir, "foo-" + 0));
+    ContractTestUtils.assertPathExists(fs, "not renamed to dest dir",
+        new Path(finalDir, "foo-" + count / 2));
+    ContractTestUtils.assertPathExists(fs, "not renamed to dest dir",
+        new Path(finalDir, "foo-" + (count - 1)));
+
+    ContractTestUtils.assertDeleted(fs, finalDir, true, false);
+  }
+
+  @Test
+  public void testOpenCreate() throws IOException {
+    Path dir = new Path("/tests3a");
+    ContractTestUtils.createAndVerifyFile(fs, dir, 1024);
+    ContractTestUtils.createAndVerifyFile(fs, dir, 5 * 1024 * 1024);
+    ContractTestUtils.createAndVerifyFile(fs, dir, 20 * 1024 * 1024);
+
+
+    /*
+    Enable this block to test multipart upload
+    try {
+      ContractTestUtils.createAndVerifyFile(fs, dir,
+          (long)6 * 1024 * 1024 * 1024);
+    } catch (IOException e) {
+      fail(e.getMessage());
+    }
+    */
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9082fe4e/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java
index ac6b9ec..f215219 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java
@@ -22,15 +22,17 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.net.URI;
 
+import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystemContractBaseTest;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.s3native.NativeS3FileSystem.NativeS3FsInputStream;
+import org.junit.internal.AssumptionViolatedException;
 
 public abstract class NativeS3FileSystemContractBaseTest
   extends FileSystemContractBaseTest {
-  
+  public static final String KEY_TEST_FS = "test.fs.s3n.name";
   private NativeFileSystemStore store;
   
   abstract NativeFileSystemStore getNativeFileSystemStore() throws IOException;
@@ -40,7 +42,12 @@ public abstract class NativeS3FileSystemContractBaseTest
     Configuration conf = new Configuration();
     store = getNativeFileSystemStore();
     fs = new NativeS3FileSystem(store);
-    fs.initialize(URI.create(conf.get("test.fs.s3n.name")), conf);
+    String fsname = conf.get(KEY_TEST_FS);
+    if (StringUtils.isEmpty(fsname)) {
+      throw new AssumptionViolatedException(
+          "No test FS defined in :" + KEY_TEST_FS);
+    }
+    fs.initialize(URI.create(fsname), conf);
   }
   
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9082fe4e/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestJets3tNativeFileSystemStore.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestJets3tNativeFileSystemStore.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestJets3tNativeFileSystemStore.java
index b1078a4..dbd476e 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestJets3tNativeFileSystemStore.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestJets3tNativeFileSystemStore.java
@@ -117,10 +117,13 @@ public class TestJets3tNativeFileSystemStore {
 writeRenameReadCompare(new Path("/test/medium"), 33554432);    // 32 MB
   }
 
+  /*
+  Enable multipart upload to run this test
   @Test
   public void testExtraLargeUpload()
       throws IOException, NoSuchAlgorithmException {
     // Multipart upload, multipart copy
     writeRenameReadCompare(new Path("/test/xlarge"), 5368709121L); // 5GB+1byte
   }
+  */
 }
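
The commented-out 5 GB case above needs multipart upload support switched on in s3n. A hedged pointer: the enabling property below comes from the s3n multipart support work (HADOOP-9454) and should be verified against the Hadoop version under test.

    <property>
      <name>fs.s3n.multipart.uploads.enabled</name>
      <value>true</value>
    </property>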

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9082fe4e/hadoop-tools/hadoop-aws/src/test/resources/contract/s3a.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/resources/contract/s3a.xml b/hadoop-tools/hadoop-aws/src/test/resources/contract/s3a.xml
index 4142471..4f9c081 100644
--- a/hadoop-tools/hadoop-aws/src/test/resources/contract/s3a.xml
+++ b/hadoop-tools/hadoop-aws/src/test/resources/contract/s3a.xml
@@ -48,6 +48,11 @@
   </property>
 
   <property>
+    <name>fs.contract.rename-remove-dest-if-empty-dir</name>
+    <value>true</value>
+  </property>
+
+  <property>
     <name>fs.contract.supports-append</name>
     <value>false</value>
   </property>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9082fe4e/hadoop-tools/hadoop-aws/src/test/resources/core-site.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/resources/core-site.xml b/hadoop-tools/hadoop-aws/src/test/resources/core-site.xml
new file mode 100644
index 0000000..3397769
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/test/resources/core-site.xml
@@ -0,0 +1,51 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~  or more contributor license agreements.  See the NOTICE file
+  ~  distributed with this work for additional information
+  ~  regarding copyright ownership.  The ASF licenses this file
+  ~  to you under the Apache License, Version 2.0 (the
+  ~  "License"); you may not use this file except in compliance
+  ~  with the License.  You may obtain a copy of the License at
+  ~
+  ~       http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~  Unless required by applicable law or agreed to in writing, software
+  ~  distributed under the License is distributed on an "AS IS" BASIS,
+  ~  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~  See the License for the specific language governing permissions and
+  ~  limitations under the License.
+  -->
+
+<!-- Values used when running unit tests.  Specify any values in here that
+     should override the default values. -->
+
+<configuration>
+
+    <property>
+        <name>hadoop.tmp.dir</name>
+        <value>target/build/test</value>
+        <description>A base for other temporary directories.</description>
+        <final>true</final>
+    </property>
+
+    <!-- Turn security off for tests by default -->
+    <property>
+        <name>hadoop.security.authentication</name>
+        <value>simple</value>
+    </property>
+
+    <!--
+    To run these tests:
+
+    # Create a file auth-keys.xml - DO NOT ADD TO REVISION CONTROL
+    # Add the property test.fs.s3n.name to point to an S3 filesystem URL
+    # Add the credentials for the service you are testing against
+    -->
+    <include xmlns="http://www.w3.org/2001/XInclude"
+             href="auth-keys.xml"/>
+
+
+
+</configuration>
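
For reference, a hypothetical auth-keys.xml matching the comment above (the bucket URL is a placeholder; fs.s3n.awsAccessKeyId and fs.s3n.awsSecretAccessKey are the standard s3n credential properties):

    <configuration>
      <property>
        <name>test.fs.s3n.name</name>
        <value>s3n://your-test-bucket/</value>
      </property>
      <property>
        <name>fs.s3n.awsAccessKeyId</name>
        <value>YOUR_ACCESS_KEY</value>
      </property>
      <property>
        <name>fs.s3n.awsSecretAccessKey</name>
        <value>YOUR_SECRET_KEY</value>
      </property>
    </configuration>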