Posted to common-commits@hadoop.apache.org by in...@apache.org on 2017/09/02 21:20:53 UTC
[09/48] hadoop git commit: HADOOP-13345 S3Guard: Improved
Consistency for S3A. Contributed by: Chris Nauroth, Aaron Fabbri,
Mingliang Liu, Lei (Eddy) Xu, Sean Mackrory, Steve Loughran and others.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/621b43e2/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractITestS3AMetadataStoreScale.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractITestS3AMetadataStoreScale.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractITestS3AMetadataStoreScale.java
new file mode 100644
index 0000000..876cc80
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractITestS3AMetadataStoreScale.java
@@ -0,0 +1,250 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.scale;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.s3a.S3AFileStatus;
+import org.apache.hadoop.fs.s3a.s3guard.MetadataStore;
+import org.apache.hadoop.fs.s3a.s3guard.PathMetadata;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+
+import static org.apache.hadoop.fs.contract.ContractTestUtils.NanoTimer;
+
+/**
+ * Test the performance of a MetadataStore. Useful for load testing.
+ * Could be separated from S3A code, but we're using the S3A scale test
+ * framework for convenience.
+ */
+public abstract class AbstractITestS3AMetadataStoreScale extends
+ S3AScaleTestBase {
+ private static final Logger LOG = LoggerFactory.getLogger(
+ AbstractITestS3AMetadataStoreScale.class);
+
+ /** Some dummy values for FileStatus contents. */
+ static final long BLOCK_SIZE = 32 * 1024 * 1024;
+ static final long SIZE = BLOCK_SIZE * 2;
+ static final String OWNER = "bob";
+ static final long ACCESS_TIME = System.currentTimeMillis();
+
+ static final Path BUCKET_ROOT = new Path("s3a://fake-bucket/");
+
+ /**
+ * Subclasses should override this to provide the MetadataStore they wish
+ * to test.
+ * @return MetadataStore to test against
+ * @throws IOException on failure to create or initialize the store
+ */
+ public abstract MetadataStore createMetadataStore() throws IOException;
+
+ @Test
+ public void testPut() throws Throwable {
+ describe("Test workload of put() operations");
+
+ // As described in the hadoop-aws site docs, the count parameter sets
+ // both the width and the depth of the directory tree
+ int width = getConf().getInt(KEY_DIRECTORY_COUNT, DEFAULT_DIRECTORY_COUNT);
+ int depth = width;
+
+ List<PathMetadata> paths = new ArrayList<>();
+ createDirTree(BUCKET_ROOT, depth, width, paths);
+
+ long count = 1; // Some value in case we throw an exception below
+ try (MetadataStore ms = createMetadataStore()) {
+
+ try {
+ count = populateMetadataStore(paths, ms);
+ } finally {
+ clearMetadataStore(ms, count);
+ }
+ }
+ }
+
+ @Test
+ public void testMoves() throws Throwable {
+ describe("Test workload of batched move() operations");
+
+ // As described in the hadoop-aws site docs, the count parameter sets
+ // both the width and the depth of the directory tree
+ int width = getConf().getInt(KEY_DIRECTORY_COUNT, DEFAULT_DIRECTORY_COUNT);
+ int depth = width;
+
+ long operations = getConf().getLong(KEY_OPERATION_COUNT,
+ DEFAULT_OPERATION_COUNT);
+
+ List<PathMetadata> origMetas = new ArrayList<>();
+ createDirTree(BUCKET_ROOT, depth, width, origMetas);
+
+ // Pre-compute source and destination paths for move() loop below
+ List<Path> origPaths = metasToPaths(origMetas);
+ List<PathMetadata> movedMetas = moveMetas(origMetas, BUCKET_ROOT,
+ new Path(BUCKET_ROOT, "moved-here"));
+ List<Path> movedPaths = metasToPaths(movedMetas);
+
+ long count = 1; // Some value in case we throw an exception below
+ try (MetadataStore ms = createMetadataStore()) {
+
+ try {
+ // Setup
+ count = populateMetadataStore(origMetas, ms);
+
+ // Main loop: move things back and forth
+ describe("Running move workload");
+ NanoTimer moveTimer = new NanoTimer();
+ LOG.info("Running {} moves of {} paths each", operations,
+ origMetas.size());
+ for (int i = 0; i < operations; i++) {
+ Collection<Path> toDelete;
+ Collection<PathMetadata> toCreate;
+ if (i % 2 == 0) {
+ toDelete = origPaths;
+ toCreate = movedMetas;
+ } else {
+ toDelete = movedPaths;
+ toCreate = origMetas;
+ }
+ ms.move(toDelete, toCreate);
+ }
+ moveTimer.end();
+ printTiming(LOG, "move", moveTimer, operations);
+ } finally {
+ // Cleanup
+ clearMetadataStore(ms, count);
+ }
+ }
+ }
+
+ /**
+ * Create a copy of the given list of PathMetadatas with the paths moved
+ * from src to dest.
+ */
+ private List<PathMetadata> moveMetas(List<PathMetadata> metas, Path src,
+ Path dest) throws IOException {
+ List<PathMetadata> moved = new ArrayList<>(metas.size());
+ for (PathMetadata srcMeta : metas) {
+ S3AFileStatus status = copyStatus((S3AFileStatus)srcMeta.getFileStatus());
+ status.setPath(movePath(status.getPath(), src, dest));
+ moved.add(new PathMetadata(status));
+ }
+ return moved;
+ }
+
+ private Path movePath(Path p, Path src, Path dest) {
+ String srcStr = src.toUri().getPath();
+ String pathStr = p.toUri().getPath();
+ // Strip off src dir
+ pathStr = pathStr.substring(srcStr.length());
+ // Prepend new dest
+ return new Path(dest, pathStr);
+ }
+
+ private S3AFileStatus copyStatus(S3AFileStatus status) {
+ if (status.isDirectory()) {
+ return new S3AFileStatus(status.isEmptyDirectory(), status.getPath(),
+ status.getOwner());
+ } else {
+ return new S3AFileStatus(status.getLen(), status.getModificationTime(),
+ status.getPath(), status.getBlockSize(), status.getOwner());
+ }
+ }
+
+ /** @return number of PathMetadatas put() into MetadataStore */
+ private long populateMetadataStore(Collection<PathMetadata> paths,
+ MetadataStore ms) throws IOException {
+ long count = 0;
+ NanoTimer putTimer = new NanoTimer();
+ describe("Inserting into MetadataStore");
+ for (PathMetadata p : paths) {
+ ms.put(p);
+ count++;
+ }
+ putTimer.end();
+ printTiming(LOG, "put", putTimer, count);
+ return count;
+ }
+
+ private void clearMetadataStore(MetadataStore ms, long count)
+ throws IOException {
+ describe("Recursive deletion");
+ NanoTimer deleteTimer = new NanoTimer();
+ ms.deleteSubtree(BUCKET_ROOT);
+ deleteTimer.end();
+ printTiming(LOG, "delete", deleteTimer, count);
+ }
+
+ private static void printTiming(Logger log, String op, NanoTimer timer,
+ long count) {
+ double msec = (double) timer.duration() / 1.0e6; // duration() is in nanoseconds
+ double msecPerOp = msec / count;
+ log.info(String.format("Elapsed %.2f msec. %.3f msec / %s (%d ops)", msec,
+ msecPerOp, op, count));
+ }
+
+ private static S3AFileStatus makeFileStatus(Path path) throws IOException {
+ return new S3AFileStatus(SIZE, ACCESS_TIME, path, BLOCK_SIZE, OWNER);
+ }
+
+ private static S3AFileStatus makeDirStatus(Path p) throws IOException {
+ return new S3AFileStatus(false, p, OWNER);
+ }
+
+ private List<Path> metasToPaths(List<PathMetadata> metas) {
+ List<Path> paths = new ArrayList<>(metas.size());
+ for (PathMetadata meta : metas) {
+ paths.add(meta.getFileStatus().getPath());
+ }
+ return paths;
+ }
+
+ /**
+ * Recursively create a directory tree.
+ * @param parent Parent dir of the paths to create.
+ * @param depth How many more levels deep past parent to create.
+ * @param width Number of files (and directories, if depth > 0) per directory.
+ * @param paths List to add generated paths to.
+ */
+ private static void createDirTree(Path parent, int depth, int width,
+ Collection<PathMetadata> paths) throws IOException {
+
+ // Create files
+ for (int i = 0; i < width; i++) {
+ Path p = new Path(parent, String.format("file-%d", i));
+ PathMetadata meta = new PathMetadata(makeFileStatus(p));
+ paths.add(meta);
+ }
+
+ if (depth == 0) {
+ return;
+ }
+
+ // Create directories if there is depth remaining
+ for (int i = 0; i < width; i++) {
+ Path dir = new Path(parent, String.format("dir-%d", i));
+ PathMetadata meta = new PathMetadata(makeDirStatus(dir));
+ paths.add(meta);
+ createDirTree(dir, depth-1, width, paths);
+ }
+ }
+}
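The put/move workloads above size their input with createDirTree(): each directory gets width files, plus width subdirectories while depth remains, so the path count grows as O(width^(depth+1)). With the default width = depth = 2 that works out to 20 entries. A minimal concrete subclass (hypothetical, not part of this patch) could use the no-op NullMetadataStore as a baseline for pure harness overhead:

    import java.io.IOException;

    import org.apache.hadoop.fs.s3a.s3guard.MetadataStore;
    import org.apache.hadoop.fs.s3a.s3guard.NullMetadataStore;

    /** Hypothetical baseline: a no-op store measures harness overhead. */
    public class ITestNullMetadataStoreScale
        extends AbstractITestS3AMetadataStoreScale {
      @Override
      public MetadataStore createMetadataStore() throws IOException {
        MetadataStore ms = new NullMetadataStore();
        ms.initialize(getFileSystem());
        return ms;
      }
    }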
http://git-wip-us.apache.org/repos/asf/hadoop/blob/621b43e2/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractSTestS3AHugeFiles.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractSTestS3AHugeFiles.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractSTestS3AHugeFiles.java
index 89fae82..8b163cb 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractSTestS3AHugeFiles.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractSTestS3AHugeFiles.java
@@ -25,6 +25,7 @@ import java.util.concurrent.atomic.AtomicLong;
import com.amazonaws.event.ProgressEvent;
import com.amazonaws.event.ProgressEventType;
import com.amazonaws.event.ProgressListener;
+import org.apache.hadoop.fs.FileStatus;
import org.junit.FixMethodOrder;
import org.junit.Test;
import org.junit.runners.MethodSorters;
@@ -34,11 +35,9 @@ import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageStatistics;
import org.apache.hadoop.fs.contract.ContractTestUtils;
-import org.apache.hadoop.fs.s3a.S3AFileStatus;
import org.apache.hadoop.fs.s3a.S3AFileSystem;
import org.apache.hadoop.fs.s3a.S3AInstrumentation;
import org.apache.hadoop.fs.s3a.Statistic;
@@ -222,7 +221,7 @@ public abstract class AbstractSTestS3AHugeFiles extends S3AScaleTestBase {
assertEquals("active put requests in \n" + fs,
0, gaugeValue(putRequestsActive));
ContractTestUtils.assertPathExists(fs, "Huge file", hugefile);
- S3AFileStatus status = fs.getFileStatus(hugefile);
+ FileStatus status = fs.getFileStatus(hugefile);
ContractTestUtils.assertIsFile(hugefile, status);
assertEquals("File size in " + status, filesize, status.getLen());
if (progress != null) {
@@ -324,7 +323,7 @@ public abstract class AbstractSTestS3AHugeFiles extends S3AScaleTestBase {
String filetype = encrypted ? "encrypted file" : "file";
describe("Positioned reads of %s %s", filetype, hugefile);
S3AFileSystem fs = getFileSystem();
- S3AFileStatus status = fs.getFileStatus(hugefile);
+ FileStatus status = fs.getFileStatus(hugefile);
long filesize = status.getLen();
int ops = 0;
final int bufferSize = 8192;
@@ -364,7 +363,7 @@ public abstract class AbstractSTestS3AHugeFiles extends S3AScaleTestBase {
assumeHugeFileExists();
describe("Reading %s", hugefile);
S3AFileSystem fs = getFileSystem();
- S3AFileStatus status = fs.getFileStatus(hugefile);
+ FileStatus status = fs.getFileStatus(hugefile);
long filesize = status.getLen();
long blocks = filesize / uploadBlockSize;
byte[] data = new byte[uploadBlockSize];
@@ -390,7 +389,7 @@ public abstract class AbstractSTestS3AHugeFiles extends S3AScaleTestBase {
assumeHugeFileExists();
describe("renaming %s to %s", hugefile, hugefileRenamed);
S3AFileSystem fs = getFileSystem();
- S3AFileStatus status = fs.getFileStatus(hugefile);
+ FileStatus status = fs.getFileStatus(hugefile);
long filesize = status.getLen();
fs.delete(hugefileRenamed, false);
ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer();
@@ -401,7 +400,7 @@ public abstract class AbstractSTestS3AHugeFiles extends S3AScaleTestBase {
toHuman(timer.nanosPerOperation(mb)));
bandwidth(timer, filesize);
logFSState();
- S3AFileStatus destFileStatus = fs.getFileStatus(hugefileRenamed);
+ FileStatus destFileStatus = fs.getFileStatus(hugefileRenamed);
assertEquals(filesize, destFileStatus.getLen());
// rename back
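The S3AFileStatus to FileStatus substitutions in this file (and in the files below) all follow from S3Guard: once a MetadataStore can serve getFileStatus() results, callers should code against the base FileStatus contract only. A sketch of the defensive pattern, assuming the surrounding test context (fs, hugefile, filesize and JUnit asserts in scope):

    FileStatus status = fs.getFileStatus(hugefile);
    assertEquals("File size in " + status, filesize, status.getLen());
    // Downcast only behind an explicit check if S3A-specific fields
    // (e.g. isEmptyDirectory()) are really needed:
    if (status instanceof S3AFileStatus) {
      assertFalse(((S3AFileStatus) status).isEmptyDirectory());
    }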
http://git-wip-us.apache.org/repos/asf/hadoop/blob/621b43e2/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestDynamoDBMetadataStoreScale.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestDynamoDBMetadataStoreScale.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestDynamoDBMetadataStoreScale.java
new file mode 100644
index 0000000..3de1935
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestDynamoDBMetadataStoreScale.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.scale;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.s3a.s3guard.DynamoDBMetadataStore;
+import org.apache.hadoop.fs.s3a.s3guard.MetadataStore;
+
+import java.io.IOException;
+
+import static org.junit.Assume.*;
+import static org.apache.hadoop.fs.s3a.Constants.*;
+
+/**
+ * Scale test for DynamoDBMetadataStore.
+ */
+public class ITestDynamoDBMetadataStoreScale
+ extends AbstractITestS3AMetadataStoreScale {
+
+ @Override
+ public MetadataStore createMetadataStore() throws IOException {
+ Configuration conf = getFileSystem().getConf();
+ String ddbTable = conf.get(S3GUARD_DDB_TABLE_NAME_KEY);
+ assumeTrue("DynamoDB table is configured", ddbTable != null);
+ String ddbRegion = conf.get(S3GUARD_DDB_REGION_KEY);
+ assumeTrue("DynamoDB region is configured", ddbRegion != null);
+
+ DynamoDBMetadataStore ms = new DynamoDBMetadataStore();
+ ms.initialize(conf);
+ return ms;
+ }
+}
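Because of the assumeTrue() calls above, this test is skipped unless a DynamoDB table and region are configured. A sketch of the equivalent standalone setup (the table name and region here are placeholders, normally supplied via auth-keys.xml):

    Configuration conf = new Configuration();
    conf.set(S3GUARD_DDB_TABLE_NAME_KEY, "my-s3guard-table");  // placeholder
    conf.set(S3GUARD_DDB_REGION_KEY, "us-west-2");             // placeholder
    DynamoDBMetadataStore ms = new DynamoDBMetadataStore();
    ms.initialize(conf);  // binds to the configured table and region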
http://git-wip-us.apache.org/repos/asf/hadoop/blob/621b43e2/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestLocalMetadataStoreScale.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestLocalMetadataStoreScale.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestLocalMetadataStoreScale.java
new file mode 100644
index 0000000..591fb0e
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestLocalMetadataStoreScale.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.scale;
+
+import org.apache.hadoop.fs.s3a.s3guard.LocalMetadataStore;
+import org.apache.hadoop.fs.s3a.s3guard.MetadataStore;
+
+import java.io.IOException;
+
+/**
+ * Scale test for LocalMetadataStore.
+ */
+public class ITestLocalMetadataStoreScale
+ extends AbstractITestS3AMetadataStoreScale {
+ @Override
+ public MetadataStore createMetadataStore() throws IOException {
+ MetadataStore ms = new LocalMetadataStore();
+ ms.initialize(getFileSystem());
+ return ms;
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/621b43e2/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AConcurrentOps.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AConcurrentOps.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AConcurrentOps.java
index b4d3862..e320bb2 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AConcurrentOps.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AConcurrentOps.java
@@ -107,7 +107,7 @@ public class ITestS3AConcurrentOps extends S3AScaleTestBase {
private S3AFileSystem getNormalFileSystem() throws Exception {
S3AFileSystem s3a = new S3AFileSystem();
- Configuration conf = new Configuration();
+ Configuration conf = createScaleConfiguration();
URI rootURI = new URI(conf.get(TEST_FS_S3A_NAME));
s3a.initialize(rootURI, conf);
return s3a;
@@ -115,6 +115,7 @@ public class ITestS3AConcurrentOps extends S3AScaleTestBase {
@After
public void teardown() throws Exception {
+ super.teardown();
if (auxFs != null) {
auxFs.delete(testRoot, true);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/621b43e2/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ACreatePerformance.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ACreatePerformance.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ACreatePerformance.java
new file mode 100644
index 0000000..fd32ba5
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ACreatePerformance.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.scale;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.s3a.S3AFileSystem;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.OutputStream;
+
+import static org.apache.hadoop.fs.contract.ContractTestUtils.*;
+
+/**
+ * Tests for create(): performance and/or load testing.
+ */
+public class ITestS3ACreatePerformance extends S3AScaleTestBase {
+ private static final Logger LOG = LoggerFactory.getLogger(
+ ITestS3ACreatePerformance.class);
+
+ private Path basePath;
+ private int basePathDepth;
+ private static final int PATH_DEPTH = 10;
+
+ @Override
+ public void setup() throws Exception {
+ super.setup();
+ basePath = getTestPath();
+ basePathDepth = basePath.depth();
+ }
+
+ /**
+ * Test the rate at which we can create deeply-nested files from a single thread.
+ * @throws Exception on any failure
+ */
+ @Test
+ public void testDeepSequentialCreate() throws Exception {
+ long numOperations = getOperationCount();
+ S3AFileSystem fs = getFileSystem();
+
+ NanoTimer timer = new NanoTimer();
+ for (int i = 0; i < numOperations; i++) {
+ Path p = getPathIteration(i, PATH_DEPTH);
+ OutputStream out = fs.create(p);
+ out.write(40); // one-byte file with an arbitrary byte value
+ out.close();
+ }
+ timer.end("Time to create %d files of depth %d", getOperationCount(),
+ PATH_DEPTH);
+ LOG.info("Time per create: {} msec",
+ timer.nanosPerOperation(numOperations) / 1000);
+ }
+
+ /** Get a unique path of depth totalDepth for the given test iteration. */
+ private Path getPathIteration(long iter, int totalDepth) throws Exception {
+ assertTrue("Test path too long, increase PATH_DEPTH in test.",
+ totalDepth > basePathDepth);
+
+ int neededDirs = totalDepth - basePathDepth - 1;
+ StringBuilder sb = new StringBuilder();
+ for (int i = 0; i < neededDirs; i++) {
+ sb.append("iter-").append(iter);
+ sb.append("-dir-").append(i);
+ sb.append("/");
+ }
+ sb.append("file").append(iter);
+ return new Path(basePath, sb.toString());
+ }
+}
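To make the generated layout concrete: with a base path of depth 3, PATH_DEPTH = 10 leaves six intermediate directories per iteration, and every iteration writes into its own disjoint subtree. A standalone sketch of the same construction (the base path and iteration number are made up):

    import org.apache.hadoop.fs.Path;

    public class PathShapeDemo {
      public static void main(String[] args) {
        Path basePath = new Path("/test/s3a/create");  // depth() == 3
        long iter = 7;
        int neededDirs = 10 - basePath.depth() - 1;    // six directories
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < neededDirs; i++) {
          sb.append("iter-").append(iter).append("-dir-").append(i).append('/');
        }
        sb.append("file").append(iter);
        // prints /test/s3a/create/iter-7-dir-0/.../iter-7-dir-5/file7
        System.out.println(new Path(basePath, sb.toString()));
      }
    }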
http://git-wip-us.apache.org/repos/asf/hadoop/blob/621b43e2/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ADirectoryPerformance.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ADirectoryPerformance.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ADirectoryPerformance.java
index d71364f..03f1e22 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ADirectoryPerformance.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ADirectoryPerformance.java
@@ -113,14 +113,15 @@ public class ITestS3ADirectoryPerformance extends S3AScaleTestBase {
listContinueRequests,
listStatusCalls,
getFileStatusCalls);
- assertEquals(listRequests.toString(), 2, listRequests.diff());
+ if (!fs.hasMetadataStore()) {
+ assertEquals(listRequests.toString(), 2, listRequests.diff());
+ }
reset(metadataRequests,
listRequests,
listContinueRequests,
listStatusCalls,
getFileStatusCalls);
-
} finally {
describe("deletion");
// deletion at the end of the run
http://git-wip-us.apache.org/repos/asf/hadoop/blob/621b43e2/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AInputStreamPerformance.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AInputStreamPerformance.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AInputStreamPerformance.java
index 236ffcd..83ab210 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AInputStreamPerformance.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AInputStreamPerformance.java
@@ -20,10 +20,10 @@ package org.apache.hadoop.fs.s3a.scale;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.contract.ContractTestUtils;
-import org.apache.hadoop.fs.s3a.S3AFileStatus;
import org.apache.hadoop.fs.s3a.S3AFileSystem;
import org.apache.hadoop.fs.s3a.S3AInputPolicy;
import org.apache.hadoop.fs.s3a.S3AInputStream;
@@ -56,7 +56,7 @@ public class ITestS3AInputStreamPerformance extends S3AScaleTestBase {
private S3AFileSystem s3aFS;
private Path testData;
- private S3AFileStatus testDataStatus;
+ private FileStatus testDataStatus;
private FSDataInputStream in;
private S3AInstrumentation.InputStreamStatistics streamStatistics;
public static final int BLOCK_SIZE = 32 * 1024;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/621b43e2/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/S3AScaleTestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/S3AScaleTestBase.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/S3AScaleTestBase.java
index 0f844b1..b2a1aa0 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/S3AScaleTestBase.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/S3AScaleTestBase.java
@@ -126,7 +126,7 @@ public class S3AScaleTestBase extends AbstractS3ATestBase {
* @return a configuration with which to create FS instances
*/
protected Configuration createScaleConfiguration() {
- return new Configuration();
+ return super.createConfiguration();
}
protected Path getTestPath() {
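Returning super.createConfiguration() means scale tests now inherit the shared test configuration (including any S3Guard bindings) instead of starting from an empty Configuration. A hypothetical subclass can still layer scale-specific tuning on top:

    @Override
    protected Configuration createScaleConfiguration() {
      Configuration conf = super.createScaleConfiguration();
      // Hypothetical tuning: KEY_DIRECTORY_COUNT is the same constant the
      // metadata store scale tests above read for tree width and depth.
      conf.setInt(KEY_DIRECTORY_COUNT, 3);
      return conf;
    }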
http://git-wip-us.apache.org/repos/asf/hadoop/blob/621b43e2/hadoop-tools/hadoop-aws/src/test/resources/core-site.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/resources/core-site.xml b/hadoop-tools/hadoop-aws/src/test/resources/core-site.xml
index d424aa4..e8200da 100644
--- a/hadoop-tools/hadoop-aws/src/test/resources/core-site.xml
+++ b/hadoop-tools/hadoop-aws/src/test/resources/core-site.xml
@@ -36,6 +36,25 @@
<description>The endpoint for s3a://landsat-pds URLs</description>
</property>
+ <!-- Make sure S3Guard is disabled for read-only bucket tests. -->
+ <property>
+ <name>fs.s3a.bucket.landsat-pds.metadatastore.impl</name>
+ <value>${s3guard.null}</value>
+ <description>The read-only landsat-pds repository isn't
+ managed by s3guard</description>
+ </property>
+
+ <!-- Convenience definitions. -->
+ <property>
+ <name>s3guard.null</name>
+ <value>org.apache.hadoop.fs.s3a.s3guard.NullMetadataStore</value>
+ </property>
+
+ <property>
+ <name>s3guard.dynamo</name>
+ <value>org.apache.hadoop.fs.s3a.s3guard.DynamoDBMetadataStore</value>
+ </property>
+
<!--
This is the default endpoint, which can be used to interact
with any v2 region.
@@ -110,6 +129,13 @@
<value>${central.endpoint}</value>
</property>
+ <!-- Scale integration tests may time out on slower connections
+ you can reduce the operation count like so to mitigate this.
+ <property>
+ <name>scale.test.operation.count</name>
+ <value>500</value>
+ </property>
+ -->
<!-- Turn security off for tests by default -->
<property>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/621b43e2/hadoop-tools/hadoop-aws/src/test/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/resources/log4j.properties b/hadoop-tools/hadoop-aws/src/test/resources/log4j.properties
index 1330ed1..9376ebd 100644
--- a/hadoop-tools/hadoop-aws/src/test/resources/log4j.properties
+++ b/hadoop-tools/hadoop-aws/src/test/resources/log4j.properties
@@ -19,5 +19,16 @@ log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%
log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
-# for debugging low level S3a operations, uncomment this line
-# log4j.logger.org.apache.hadoop.fs.s3a=DEBUG
+# For debugging low-level S3A operations, uncomment these lines
+# Log all S3A classes
+#log4j.logger.org.apache.hadoop.fs.s3a=DEBUG
+
+# Log S3Guard classes
+#log4j.logger.org.apache.hadoop.fs.s3a.s3guard=DEBUG
+
+# Enable debug logging of AWS DynamoDB client
+#log4j.logger.com.amazonaws.services.dynamodbv2.AmazonDynamoDB=DEBUG
+
+# Log all HTTP requests made; includes S3 interaction. This may
+# include sensitive information such as account IDs in HTTP headers.
+#log4j.logger.com.amazonaws.request=DEBUG