Posted to common-commits@hadoop.apache.org by st...@apache.org on 2017/09/15 16:39:26 UTC
[01/20] hadoop git commit: HADOOP-14553. Add (parallelized) integration tests to hadoop-azure. Contributed by Steve Loughran.
Repository: hadoop
Updated Branches:
refs/heads/branch-3.0 b5e998235 -> 9f6b08f84
refs/heads/trunk 11390c2d1 -> 2d2d97fa7
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestAzureFileSystemInstrumentation.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestAzureFileSystemInstrumentation.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestAzureFileSystemInstrumentation.java
deleted file mode 100644
index 818a844..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestAzureFileSystemInstrumentation.java
+++ /dev/null
@@ -1,579 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure.metrics;
-
-import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_CLIENT_ERRORS;
-import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_DIRECTORIES_CREATED;
-import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_DOWNLOAD_LATENCY;
-import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_DOWNLOAD_RATE;
-import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_FILES_CREATED;
-import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_FILES_DELETED;
-import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_SERVER_ERRORS;
-import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_UPLOAD_LATENCY;
-import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_UPLOAD_RATE;
-import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_WEB_RESPONSES;
-import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
-import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.junit.Assume.assumeNotNull;
-import static org.mockito.Matchers.argThat;
-import static org.mockito.Mockito.verify;
-
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.util.Arrays;
-import java.util.Date;
-
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.azure.AzureBlobStorageTestAccount;
-import org.apache.hadoop.fs.azure.AzureException;
-import org.apache.hadoop.fs.azure.AzureNativeFileSystemStore;
-import org.apache.hadoop.fs.azure.NativeAzureFileSystem;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.metrics2.MetricsRecordBuilder;
-import org.apache.hadoop.metrics2.MetricsTag;
-import org.hamcrest.BaseMatcher;
-import org.hamcrest.Description;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-public class TestAzureFileSystemInstrumentation {
- private FileSystem fs;
- private AzureBlobStorageTestAccount testAccount;
-
- @Before
- public void setUp() throws Exception {
- testAccount = AzureBlobStorageTestAccount.create();
- if (testAccount != null) {
- fs = testAccount.getFileSystem();
- }
- assumeNotNull(testAccount);
- }
-
- @After
- public void tearDown() throws Exception {
- if (testAccount != null) {
- testAccount.cleanup();
- testAccount = null;
- fs = null;
- }
- }
-
- @Test
- public void testMetricTags() throws Exception {
- String accountName =
- testAccount.getRealAccount().getBlobEndpoint()
- .getAuthority();
- String containerName =
- testAccount.getRealContainer().getName();
- MetricsRecordBuilder myMetrics = getMyMetrics();
- verify(myMetrics).add(argThat(
- new TagMatcher("accountName", accountName)
- ));
- verify(myMetrics).add(argThat(
- new TagMatcher("containerName", containerName)
- ));
- verify(myMetrics).add(argThat(
- new TagMatcher("Context", "azureFileSystem")
- ));
- verify(myMetrics).add(argThat(
- new TagExistsMatcher("wasbFileSystemId")
- ));
- }
-
-
- @Test
- public void testMetricsOnMkdirList() throws Exception {
- long base = getBaseWebResponses();
-
- // Create a directory
- assertTrue(fs.mkdirs(new Path("a")));
- // At the time of writing
- // getAncestor uses 2 calls for each folder level /user/<name>/a
- // plus 1 call made by checkContainer
- // mkdir checks the hierarchy with 2 calls per level
- // mkdirs calls storeEmptyDir to create the empty folder, which makes 5 calls
- // For a total of 7 + 6 + 5 = 18 web responses
- base = assertWebResponsesInRange(base, 1, 18);
- assertEquals(1,
- AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_DIRECTORIES_CREATED));
-
- // List the root contents
- assertEquals(1, fs.listStatus(new Path("/")).length);
- base = assertWebResponsesEquals(base, 1);
-
- assertNoErrors();
- }
-
- private BandwidthGaugeUpdater getBandwidthGaugeUpdater() {
- NativeAzureFileSystem azureFs = (NativeAzureFileSystem)fs;
- AzureNativeFileSystemStore azureStore = azureFs.getStore();
- return azureStore.getBandwidthGaugeUpdater();
- }
-
- private static byte[] nonZeroByteArray(int size) {
- byte[] data = new byte[size];
- Arrays.fill(data, (byte)5);
- return data;
- }
-
- @Test
- public void testMetricsOnFileCreateRead() throws Exception {
- long base = getBaseWebResponses();
-
- assertEquals(0, AzureMetricsTestUtil.getCurrentBytesWritten(getInstrumentation()));
-
- Path filePath = new Path("/metricsTest_webResponses");
- final int FILE_SIZE = 1000;
-
- // Suppress auto-update of bandwidth metrics so we get
- // to update them exactly when we want to.
- getBandwidthGaugeUpdater().suppressAutoUpdate();
-
- // Create a file
- Date start = new Date();
- OutputStream outputStream = fs.create(filePath);
- outputStream.write(nonZeroByteArray(FILE_SIZE));
- outputStream.close();
- long uploadDurationMs = new Date().getTime() - start.getTime();
-
- // The exact number of requests/responses that happen to create a file
- // can vary - at the time of writing this code it takes 10
- // requests/responses for the 1000 byte file (33 for 100 MB),
- // plus the initial container-check request but that
- // can very easily change in the future. Just assert that we do roughly
- // more than 2 but less than 15.
- logOpResponseCount("Creating a 1K file", base);
- base = assertWebResponsesInRange(base, 2, 15);
- getBandwidthGaugeUpdater().triggerUpdate(true);
- long bytesWritten = AzureMetricsTestUtil.getCurrentBytesWritten(getInstrumentation());
- assertTrue("The bytes written in the last second " + bytesWritten +
- " is pretty far from the expected range of around " + FILE_SIZE +
- " bytes plus a little overhead.",
- bytesWritten > (FILE_SIZE / 2) && bytesWritten < (FILE_SIZE * 2));
- long totalBytesWritten = AzureMetricsTestUtil.getCurrentTotalBytesWritten(getInstrumentation());
- assertTrue("The total bytes written " + totalBytesWritten +
- " is pretty far from the expected range of around " + FILE_SIZE +
- " bytes plus a little overhead.",
- totalBytesWritten >= FILE_SIZE && totalBytesWritten < (FILE_SIZE * 2));
- long uploadRate = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(), WASB_UPLOAD_RATE);
- System.out.println("Upload rate: " + uploadRate + " bytes/second.");
- long expectedRate = (FILE_SIZE * 1000L) / uploadDurationMs;
- assertTrue("The upload rate " + uploadRate +
- " is below the expected range of around " + expectedRate +
- " bytes/second that the unit test observed. This should never be" +
- " the case since the test underestimates the rate by looking at " +
- " end-to-end time instead of just block upload time.",
- uploadRate >= expectedRate);
- long uploadLatency = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),
- WASB_UPLOAD_LATENCY);
- System.out.println("Upload latency: " + uploadLatency);
- long expectedLatency = uploadDurationMs; // We're uploading less than a block.
- assertTrue("The upload latency " + uploadLatency +
- " should be greater than zero now that I've just uploaded a file.",
- uploadLatency > 0);
- assertTrue("The upload latency " + uploadLatency +
- " is more than the expected range of around " + expectedLatency +
- " milliseconds that the unit test observed. This should never be" +
- " the case since the test overestimates the latency by looking at " +
- " end-to-end time instead of just block upload time.",
- uploadLatency <= expectedLatency);
-
- // Read the file
- start = new Date();
- InputStream inputStream = fs.open(filePath);
- int count = 0;
- while (inputStream.read() >= 0) {
- count++;
- }
- inputStream.close();
- long downloadDurationMs = new Date().getTime() - start.getTime();
- assertEquals(FILE_SIZE, count);
-
- // Again, exact number varies. At the time of writing this code
- // it takes 4 request/responses, so just assert a rough range between
- // 1 and 10.
- logOpResponseCount("Reading a 1K file", base);
- base = assertWebResponsesInRange(base, 1, 10);
- getBandwidthGaugeUpdater().triggerUpdate(false);
- long totalBytesRead = AzureMetricsTestUtil.getCurrentTotalBytesRead(getInstrumentation());
- assertEquals(FILE_SIZE, totalBytesRead);
- long bytesRead = AzureMetricsTestUtil.getCurrentBytesRead(getInstrumentation());
- assertTrue("The bytes read in the last second " + bytesRead +
- " is pretty far from the expected range of around " + FILE_SIZE +
- " bytes plus a little overhead.",
- bytesRead > (FILE_SIZE / 2) && bytesRead < (FILE_SIZE * 2));
- long downloadRate = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(), WASB_DOWNLOAD_RATE);
- System.out.println("Download rate: " + downloadRate + " bytes/second.");
- expectedRate = (FILE_SIZE * 1000L) / downloadDurationMs;
- assertTrue("The download rate " + downloadRate +
- " is below the expected range of around " + expectedRate +
- " bytes/second that the unit test observed. This should never be" +
- " the case since the test underestimates the rate by looking at " +
- " end-to-end time instead of just block download time.",
- downloadRate >= expectedRate);
- long downloadLatency = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),
- WASB_DOWNLOAD_LATENCY);
- System.out.println("Download latency: " + downloadLatency);
- expectedLatency = downloadDurationMs; // We're downloading less than a block.
- assertTrue("The download latency " + downloadLatency +
- " should be greater than zero now that I've just downloaded a file.",
- downloadLatency > 0);
- assertTrue("The download latency " + downloadLatency +
- " is more than the expected range of around " + expectedLatency +
- " milliseconds that the unit test observed. This should never be" +
- " the case since the test overestimates the latency by looking at " +
- " end-to-end time instead of just block download time.",
- downloadLatency <= expectedLatency);
-
- assertNoErrors();
- }
-
- @Test
- public void testMetricsOnBigFileCreateRead() throws Exception {
- long base = getBaseWebResponses();
-
- assertEquals(0, AzureMetricsTestUtil.getCurrentBytesWritten(getInstrumentation()));
-
- Path filePath = new Path("/metricsTest_webResponses");
- final int FILE_SIZE = 100 * 1024 * 1024;
-
- // Suppress auto-update of bandwidth metrics so we get
- // to update them exactly when we want to.
- getBandwidthGaugeUpdater().suppressAutoUpdate();
-
- // Create a file
- OutputStream outputStream = fs.create(filePath);
- outputStream.write(new byte[FILE_SIZE]);
- outputStream.close();
-
- // The exact number of requests/responses that happen to create a file
- // can vary - at the time of writing this code it takes 34
- // requests/responses for the 100 MB file,
- // plus the initial container check request, but that
- // can very easily change in the future. Just assert that we do roughly
- // more than 20 but less than 50.
- logOpResponseCount("Creating a 100 MB file", base);
- base = assertWebResponsesInRange(base, 20, 50);
- getBandwidthGaugeUpdater().triggerUpdate(true);
- long totalBytesWritten = AzureMetricsTestUtil.getCurrentTotalBytesWritten(getInstrumentation());
- assertTrue("The total bytes written " + totalBytesWritten +
- " is pretty far from the expected range of around " + FILE_SIZE +
- " bytes plus a little overhead.",
- totalBytesWritten >= FILE_SIZE && totalBytesWritten < (FILE_SIZE * 2));
- long uploadRate = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(), WASB_UPLOAD_RATE);
- System.out.println("Upload rate: " + uploadRate + " bytes/second.");
- long uploadLatency = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),
- WASB_UPLOAD_LATENCY);
- System.out.println("Upload latency: " + uploadLatency);
- assertTrue("The upload latency " + uploadLatency +
- " should be greater than zero now that I've just uploaded a file.",
- uploadLatency > 0);
-
- // Read the file
- InputStream inputStream = fs.open(filePath);
- int count = 0;
- while (inputStream.read() >= 0) {
- count++;
- }
- inputStream.close();
- assertEquals(FILE_SIZE, count);
-
- // Again, exact number varies. At the time of writing this code
- // it takes 27 request/responses, so just assert a rough range between
- // 20 and 40.
- logOpResponseCount("Reading a 100 MB file", base);
- base = assertWebResponsesInRange(base, 20, 40);
- getBandwidthGaugeUpdater().triggerUpdate(false);
- long totalBytesRead = AzureMetricsTestUtil.getCurrentTotalBytesRead(getInstrumentation());
- assertEquals(FILE_SIZE, totalBytesRead);
- long downloadRate = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(), WASB_DOWNLOAD_RATE);
- System.out.println("Download rate: " + downloadRate + " bytes/second.");
- long downloadLatency = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),
- WASB_DOWNLOAD_LATENCY);
- System.out.println("Download latency: " + downloadLatency);
- assertTrue("The download latency " + downloadLatency +
- " should be greater than zero now that I've just downloaded a file.",
- downloadLatency > 0);
- }
-
- @Test
- public void testMetricsOnFileRename() throws Exception {
- long base = getBaseWebResponses();
-
- Path originalPath = new Path("/metricsTest_RenameStart");
- Path destinationPath = new Path("/metricsTest_RenameFinal");
-
- // Create an empty file
- assertEquals(0, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_FILES_CREATED));
- assertTrue(fs.createNewFile(originalPath));
- logOpResponseCount("Creating an empty file", base);
- base = assertWebResponsesInRange(base, 2, 20);
- assertEquals(1, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_FILES_CREATED));
-
- // Rename the file
- assertTrue(fs.rename(originalPath, destinationPath));
- // Varies: at the time of writing this code it takes 7 requests/responses.
- logOpResponseCount("Renaming a file", base);
- base = assertWebResponsesInRange(base, 2, 15);
-
- assertNoErrors();
- }
-
- @Test
- public void testMetricsOnFileExistsDelete() throws Exception {
- long base = getBaseWebResponses();
-
- Path filePath = new Path("/metricsTest_delete");
-
- // Check existence
- assertFalse(fs.exists(filePath));
- // At the time of writing this code it takes 2 requests/responses to
- // check existence, which seems excessive, plus the initial
- // container-check request.
- logOpResponseCount("Checking file existence for non-existent file", base);
- base = assertWebResponsesInRange(base, 1, 3);
-
- // Create an empty file
- assertTrue(fs.createNewFile(filePath));
- base = getCurrentWebResponses();
-
- // Check existence again
- assertTrue(fs.exists(filePath));
- logOpResponseCount("Checking file existence for existent file", base);
- base = assertWebResponsesInRange(base, 1, 2);
-
- // Delete the file
- assertEquals(0, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_FILES_DELETED));
- assertTrue(fs.delete(filePath, false));
- // At the time of writing this code it takes 4 requests/responses to
- // delete, which seems excessive. Check for range 1-4 for now.
- logOpResponseCount("Deleting a file", base);
- base = assertWebResponsesInRange(base, 1, 4);
- assertEquals(1, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_FILES_DELETED));
-
- assertNoErrors();
- }
-
- @Test
- public void testMetricsOnDirRename() throws Exception {
- long base = getBaseWebResponses();
-
- Path originalDirName = new Path("/metricsTestDirectory_RenameStart");
- Path innerFileName = new Path(originalDirName, "innerFile");
- Path destDirName = new Path("/metricsTestDirectory_RenameFinal");
-
- // Create an empty directory
- assertTrue(fs.mkdirs(originalDirName));
- base = getCurrentWebResponses();
-
- // Create an inner file
- assertTrue(fs.createNewFile(innerFileName));
- base = getCurrentWebResponses();
-
- // Rename the directory
- assertTrue(fs.rename(originalDirName, destDirName));
- // At the time of writing this code it takes 11 requests/responses
- // to rename the directory with one file. Check for range 1-20 for now.
- logOpResponseCount("Renaming a directory", base);
- base = assertWebResponsesInRange(base, 1, 20);
-
- assertNoErrors();
- }
-
- @Test
- public void testClientErrorMetrics() throws Exception {
- String fileName = "metricsTestFile_ClientError";
- Path filePath = new Path("/"+fileName);
- final int FILE_SIZE = 100;
- OutputStream outputStream = null;
- String leaseID = null;
- try {
- // Create a file
- outputStream = fs.create(filePath);
- leaseID = testAccount.acquireShortLease(fileName);
- try {
- outputStream.write(new byte[FILE_SIZE]);
- outputStream.close();
- assertTrue("Should've thrown", false);
- } catch (AzureException ex) {
- assertTrue("Unexpected exception: " + ex,
- ex.getMessage().contains("lease"));
- }
- assertEquals(1, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_CLIENT_ERRORS));
- assertEquals(0, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_SERVER_ERRORS));
- } finally {
- if (leaseID != null) {
- testAccount.releaseLease(leaseID, fileName);
- }
- IOUtils.closeStream(outputStream);
- }
- }
-
- private void logOpResponseCount(String opName, long base) {
- System.out.println(opName + " took " + (getCurrentWebResponses() - base) +
- " web responses to complete.");
- }
-
- /**
- * Gets (and asserts) the value of the wasb_web_responses counter just
- * after the creation of the file system object.
- */
- private long getBaseWebResponses() {
- // The number of requests should start at 0
- return assertWebResponsesEquals(0, 0);
- }
-
- /**
- * Gets the current value of the wasb_web_responses counter.
- */
- private long getCurrentWebResponses() {
- return AzureMetricsTestUtil.getCurrentWebResponses(getInstrumentation());
- }
-
- /**
- * Checks that the wasb_web_responses counter is at the given value.
- * @param base The base value (before the operation of interest).
- * @param expected The expected value for the operation of interest.
- * @return The new base value now.
- */
- private long assertWebResponsesEquals(long base, long expected) {
- assertCounter(WASB_WEB_RESPONSES, base + expected, getMyMetrics());
- return base + expected;
- }
-
- private void assertNoErrors() {
- assertEquals(0, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_CLIENT_ERRORS));
- assertEquals(0, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_SERVER_ERRORS));
- }
-
- /**
- * Checks that the wasb_web_responses counter is in the given range.
- * @param base The base value (before the operation of interest).
- * @param inclusiveLowerLimit The lower limit for what it should increase by.
- * @param inclusiveUpperLimit The upper limit for what it should increase by.
- * @return The new base value now.
- */
- private long assertWebResponsesInRange(long base,
- long inclusiveLowerLimit,
- long inclusiveUpperLimit) {
- long currentResponses = getCurrentWebResponses();
- long justOperation = currentResponses - base;
- assertTrue(String.format(
- "Web responses expected in range [%d, %d], but was %d.",
- inclusiveLowerLimit, inclusiveUpperLimit, justOperation),
- justOperation >= inclusiveLowerLimit &&
- justOperation <= inclusiveUpperLimit);
- return currentResponses;
- }
-
- /**
- * Gets the metrics for the file system object.
- * @return The metrics record.
- */
- private MetricsRecordBuilder getMyMetrics() {
- return getMetrics(getInstrumentation());
- }
-
- private AzureFileSystemInstrumentation getInstrumentation() {
- return ((NativeAzureFileSystem)fs).getInstrumentation();
- }
-
- /**
- * A matcher class for asserting that we got a tag with a given
- * value.
- */
- private static class TagMatcher extends TagExistsMatcher {
- private final String tagValue;
-
- public TagMatcher(String tagName, String tagValue) {
- super(tagName);
- this.tagValue = tagValue;
- }
-
- @Override
- public boolean matches(MetricsTag toMatch) {
- return toMatch.value().equals(tagValue);
- }
-
- @Override
- public void describeTo(Description desc) {
- super.describeTo(desc);
- desc.appendText(" with value " + tagValue);
- }
- }
-
- /**
- * A matcher class for asserting that we got a tag with any value.
- */
- private static class TagExistsMatcher extends BaseMatcher<MetricsTag> {
- private final String tagName;
-
- public TagExistsMatcher(String tagName) {
- this.tagName = tagName;
- }
-
- @Override
- public boolean matches(Object toMatch) {
- MetricsTag asTag = (MetricsTag)toMatch;
- return asTag.name().equals(tagName) && matches(asTag);
- }
-
- protected boolean matches(MetricsTag toMatch) {
- return true;
- }
-
- @Override
- public void describeTo(Description desc) {
- desc.appendText("Has tag " + tagName);
- }
- }
-
- /**
- * A matcher class for asserting that a long value is in a
- * given range.
- */
- private static class InRange extends BaseMatcher<Long> {
- private final long inclusiveLowerLimit;
- private final long inclusiveUpperLimit;
- private long obtained;
-
- public InRange(long inclusiveLowerLimit, long inclusiveUpperLimit) {
- this.inclusiveLowerLimit = inclusiveLowerLimit;
- this.inclusiveUpperLimit = inclusiveUpperLimit;
- }
-
- @Override
- public boolean matches(Object number) {
- obtained = (Long)number;
- return obtained >= inclusiveLowerLimit &&
- obtained <= inclusiveUpperLimit;
- }
-
- @Override
- public void describeTo(Description description) {
- description.appendText("Between " + inclusiveLowerLimit +
- " and " + inclusiveUpperLimit + " inclusively");
- }
- }
-}
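The web-response accounting used throughout the deleted test follows one pattern: snapshot the wasb_web_responses counter as a base, run a filesystem operation, then assert that the delta falls within an inclusive range and roll the base forward. Below is a minimal standalone sketch of that pattern in plain JUnit 4, with an AtomicLong standing in for AzureFileSystemInstrumentation; the class and method names are illustrative, not part of the patch.

import static org.junit.Assert.assertTrue;

import java.util.concurrent.atomic.AtomicLong;

import org.junit.Test;

public class WebResponseRangeAssertionSketch {

  // Stand-in for the wasb_web_responses counter (illustrative only).
  private final AtomicLong webResponses = new AtomicLong();

  /**
   * Asserts the counter grew by a value in [lower, upper] since base,
   * then returns the current value as the new base.
   */
  private long assertWebResponsesInRange(long base, long lower, long upper) {
    long current = webResponses.get();
    long delta = current - base;
    assertTrue(String.format(
        "Web responses expected in range [%d, %d], but was %d.",
        lower, upper, delta),
        delta >= lower && delta <= upper);
    return current;
  }

  @Test
  public void testRangeAccounting() {
    long base = webResponses.get();
    webResponses.addAndGet(3);   // simulate an operation issuing 3 requests
    base = assertWebResponsesInRange(base, 1, 5);
    webResponses.addAndGet(1);   // simulate a follow-up listing call
    assertWebResponsesInRange(base, 1, 1);
  }
}

Rolling the base forward after every assertion is what lets consecutive operations be measured independently against a single monotonically increasing counter, and the wide ranges keep the tests stable as the exact request counts drift between releases.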
[13/20] hadoop git commit: HADOOP-14553. Add (parallelized) integration tests to hadoop-azure. Contributed by Steve Loughran.
Posted by st...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java
deleted file mode 100644
index 8aad9e9..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java
+++ /dev/null
@@ -1,569 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.contract.ContractTestUtils;
-import org.apache.hadoop.io.retry.RetryUtils;
-import org.apache.http.Header;
-import org.apache.http.HttpResponse;
-import org.apache.http.HttpEntity;
-import org.apache.http.HttpStatus;
-import org.apache.http.StatusLine;
-import org.apache.http.ProtocolVersion;
-import org.apache.http.ParseException;
-import org.apache.http.HeaderElement;
-import org.apache.http.client.HttpClient;
-import org.apache.http.client.methods.HttpGet;
-import org.hamcrest.Description;
-import org.hamcrest.TypeSafeMatcher;
-import org.junit.Assume;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.mockito.ArgumentMatcher;
-import org.mockito.Mockito;
-
-import java.io.ByteArrayInputStream;
-import java.net.InetAddress;
-import java.net.UnknownHostException;
-import java.nio.charset.StandardCharsets;
-
-import static org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.KEY_USE_SECURE_MODE;
-import static org.mockito.Matchers.argThat;
-import static org.mockito.Mockito.atLeast;
-import static org.mockito.Mockito.times;
-
-/**
- * Test class to hold all WasbRemoteCallHelper tests.
- */
-public class TestWasbRemoteCallHelper
- extends AbstractWasbTestBase {
- public static final String EMPTY_STRING = "";
- private static final int INVALID_HTTP_STATUS_CODE_999 = 999;
-
- @Override
- protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
- Configuration conf = new Configuration();
- conf.set(NativeAzureFileSystem.KEY_AZURE_AUTHORIZATION, "true");
- conf.set(RemoteWasbAuthorizerImpl.KEY_REMOTE_AUTH_SERVICE_URLS, "http://localhost1/,http://localhost2/,http://localhost:8080");
- return AzureBlobStorageTestAccount.create(conf);
- }
-
- @Before
- public void beforeMethod() {
- boolean useSecureMode = fs.getConf().getBoolean(KEY_USE_SECURE_MODE, false);
- boolean useAuthorization = fs.getConf().getBoolean(NativeAzureFileSystem.KEY_AZURE_AUTHORIZATION, false);
- Assume.assumeTrue("Test valid when both SecureMode and Authorization are enabled .. skipping",
- useSecureMode && useAuthorization);
-
- Assume.assumeTrue(
- useSecureMode && useAuthorization
- );
- }
-
- @Rule
- public ExpectedException expectedEx = ExpectedException.none();
-
- /**
- * Test invalid status-code
- * @throws Throwable
- */
- @Test // (expected = WasbAuthorizationException.class)
- public void testInvalidStatusCode() throws Throwable {
-
- setupExpectations();
-
- // set up mocks
- HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
- HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class);
- Mockito.when(mockHttpClient.execute(Mockito.<HttpGet>any())).thenReturn(mockHttpResponse);
- Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(INVALID_HTTP_STATUS_CODE_999));
- // finished setting up mocks
-
- performop(mockHttpClient);
- }
-
- /**
- * Test invalid Content-Type
- * @throws Throwable
- */
- @Test // (expected = WasbAuthorizationException.class)
- public void testInvalidContentType() throws Throwable {
-
- setupExpectations();
-
- // set up mocks
- HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
- HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class);
- Mockito.when(mockHttpClient.execute(Mockito.<HttpGet>any())).thenReturn(mockHttpResponse);
- Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(HttpStatus.SC_OK));
- Mockito.when(mockHttpResponse.getFirstHeader("Content-Type"))
- .thenReturn(newHeader("Content-Type", "text/plain"));
- // finished setting up mocks
-
- performop(mockHttpClient);
- }
-
- /**
- * Test missing Content-Length
- * @throws Throwable
- */
- @Test // (expected = WasbAuthorizationException.class)
- public void testMissingContentLength() throws Throwable {
-
- setupExpectations();
-
- // set up mocks
- HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
- HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class);
- Mockito.when(mockHttpClient.execute(Mockito.<HttpGet>any())).thenReturn(mockHttpResponse);
- Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(HttpStatus.SC_OK));
- Mockito.when(mockHttpResponse.getFirstHeader("Content-Type"))
- .thenReturn(newHeader("Content-Type", "application/json"));
- // finished setting up mocks
-
- performop(mockHttpClient);
- }
-
- /**
- * Test Content-Length exceeds max
- * @throws Throwable
- */
- @Test // (expected = WasbAuthorizationException.class)
- public void testContentLengthExceedsMax() throws Throwable {
-
- setupExpectations();
-
- // set up mocks
- HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
- HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class);
- Mockito.when(mockHttpClient.execute(Mockito.<HttpGet>any())).thenReturn(mockHttpResponse);
- Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(HttpStatus.SC_OK));
- Mockito.when(mockHttpResponse.getFirstHeader("Content-Type"))
- .thenReturn(newHeader("Content-Type", "application/json"));
- Mockito.when(mockHttpResponse.getFirstHeader("Content-Length"))
- .thenReturn(newHeader("Content-Length", "2048"));
- // finished setting up mocks
-
- performop(mockHttpClient);
- }
-
- /**
- * Test invalid Content-Length value
- * @throws Throwable
- */
- @Test // (expected = WasbAuthorizationException.class)
- public void testInvalidContentLengthValue() throws Throwable {
-
- setupExpectations();
-
- // set up mocks
- HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
- HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class);
- Mockito.when(mockHttpClient.execute(Mockito.<HttpGet>any())).thenReturn(mockHttpResponse);
- Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(HttpStatus.SC_OK));
- Mockito.when(mockHttpResponse.getFirstHeader("Content-Type"))
- .thenReturn(newHeader("Content-Type", "application/json"));
- Mockito.when(mockHttpResponse.getFirstHeader("Content-Length"))
- .thenReturn(newHeader("Content-Length", "20abc48"));
- // finished setting up mocks
-
- performop(mockHttpClient);
- }
-
- /**
- * Test valid JSON response
- * @throws Throwable
- */
- @Test
- public void testValidJSONResponse() throws Throwable {
-
- // set up mocks
- HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
-
- HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class);
- HttpEntity mockHttpEntity = Mockito.mock(HttpEntity.class);
-
- Mockito.when(mockHttpClient.execute(Mockito.<HttpGet>any())).thenReturn(mockHttpResponse);
- Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(HttpStatus.SC_OK));
- Mockito.when(mockHttpResponse.getFirstHeader("Content-Type"))
- .thenReturn(newHeader("Content-Type", "application/json"));
- Mockito.when(mockHttpResponse.getFirstHeader("Content-Length"))
- .thenReturn(newHeader("Content-Length", "1024"));
- Mockito.when(mockHttpResponse.getEntity()).thenReturn(mockHttpEntity);
- Mockito.when(mockHttpEntity.getContent())
- .thenReturn(new ByteArrayInputStream(validJsonResponse().getBytes(StandardCharsets.UTF_8)))
- .thenReturn(new ByteArrayInputStream(validJsonResponse().getBytes(StandardCharsets.UTF_8)))
- .thenReturn(new ByteArrayInputStream(validJsonResponse().getBytes(StandardCharsets.UTF_8)));
- // finished setting up mocks
-
- performop(mockHttpClient);
- }
-
- /**
- * Test malformed JSON response
- * @throws Throwable
- */
- @Test // (expected = WasbAuthorizationException.class)
- public void testMalFormedJSONResponse() throws Throwable {
-
- expectedEx.expect(WasbAuthorizationException.class);
- expectedEx.expectMessage("com.fasterxml.jackson.core.JsonParseException: Unexpected end-of-input in FIELD_NAME");
-
- // set up mocks
- HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
-
- HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class);
- HttpEntity mockHttpEntity = Mockito.mock(HttpEntity.class);
-
- Mockito.when(mockHttpClient.execute(Mockito.<HttpGet>any())).thenReturn(mockHttpResponse);
- Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(HttpStatus.SC_OK));
- Mockito.when(mockHttpResponse.getFirstHeader("Content-Type"))
- .thenReturn(newHeader("Content-Type", "application/json"));
- Mockito.when(mockHttpResponse.getFirstHeader("Content-Length"))
- .thenReturn(newHeader("Content-Length", "1024"));
- Mockito.when(mockHttpResponse.getEntity()).thenReturn(mockHttpEntity);
- Mockito.when(mockHttpEntity.getContent())
- .thenReturn(new ByteArrayInputStream(malformedJsonResponse().getBytes(StandardCharsets.UTF_8)));
- // finished setting up mocks
-
- performop(mockHttpClient);
- }
-
- /**
- * Test valid JSON response failure response code
- * @throws Throwable
- */
- @Test // (expected = WasbAuthorizationException.class)
- public void testFailureCodeJSONResponse() throws Throwable {
-
- expectedEx.expect(WasbAuthorizationException.class);
- expectedEx.expectMessage("Remote authorization service encountered an error Unauthorized");
-
- // set up mocks
- HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
-
- HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class);
- HttpEntity mockHttpEntity = Mockito.mock(HttpEntity.class);
-
- Mockito.when(mockHttpClient.execute(Mockito.<HttpGet>any())).thenReturn(mockHttpResponse);
- Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(HttpStatus.SC_OK));
- Mockito.when(mockHttpResponse.getFirstHeader("Content-Type"))
- .thenReturn(newHeader("Content-Type", "application/json"));
- Mockito.when(mockHttpResponse.getFirstHeader("Content-Length"))
- .thenReturn(newHeader("Content-Length", "1024"));
- Mockito.when(mockHttpResponse.getEntity()).thenReturn(mockHttpEntity);
- Mockito.when(mockHttpEntity.getContent())
- .thenReturn(new ByteArrayInputStream(failureCodeJsonResponse().getBytes(StandardCharsets.UTF_8)));
- // finished setting up mocks
-
- performop(mockHttpClient);
- }
-
- @Test
- public void testWhenOneInstanceIsDown() throws Throwable {
-
- boolean isAuthorizationCachingEnabled = fs.getConf().getBoolean(CachingAuthorizer.KEY_AUTH_SERVICE_CACHING_ENABLE, false);
-
- // set up mocks
- HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
- HttpEntity mockHttpEntity = Mockito.mock(HttpEntity.class);
-
- HttpResponse mockHttpResponseService1 = Mockito.mock(HttpResponse.class);
- Mockito.when(mockHttpResponseService1.getStatusLine())
- .thenReturn(newStatusLine(HttpStatus.SC_INTERNAL_SERVER_ERROR));
- Mockito.when(mockHttpResponseService1.getFirstHeader("Content-Type"))
- .thenReturn(newHeader("Content-Type", "application/json"));
- Mockito.when(mockHttpResponseService1.getFirstHeader("Content-Length"))
- .thenReturn(newHeader("Content-Length", "1024"));
- Mockito.when(mockHttpResponseService1.getEntity())
- .thenReturn(mockHttpEntity);
-
- HttpResponse mockHttpResponseService2 = Mockito.mock(HttpResponse.class);
- Mockito.when(mockHttpResponseService2.getStatusLine())
- .thenReturn(newStatusLine(HttpStatus.SC_OK));
- Mockito.when(mockHttpResponseService2.getFirstHeader("Content-Type"))
- .thenReturn(newHeader("Content-Type", "application/json"));
- Mockito.when(mockHttpResponseService2.getFirstHeader("Content-Length"))
- .thenReturn(newHeader("Content-Length", "1024"));
- Mockito.when(mockHttpResponseService2.getEntity())
- .thenReturn(mockHttpEntity);
-
- HttpResponse mockHttpResponseServiceLocal = Mockito.mock(HttpResponse.class);
- Mockito.when(mockHttpResponseServiceLocal.getStatusLine())
- .thenReturn(newStatusLine(HttpStatus.SC_INTERNAL_SERVER_ERROR));
- Mockito.when(mockHttpResponseServiceLocal.getFirstHeader("Content-Type"))
- .thenReturn(newHeader("Content-Type", "application/json"));
- Mockito.when(mockHttpResponseServiceLocal.getFirstHeader("Content-Length"))
- .thenReturn(newHeader("Content-Length", "1024"));
- Mockito.when(mockHttpResponseServiceLocal.getEntity())
- .thenReturn(mockHttpEntity);
-
-
-
- class HttpGetForService1 extends ArgumentMatcher<HttpGet> {
- @Override public boolean matches(Object o) {
- return checkHttpGetMatchHost((HttpGet) o, "localhost1");
- }
- }
- class HttpGetForService2 extends ArgumentMatcher<HttpGet> {
- @Override public boolean matches(Object o) {
- return checkHttpGetMatchHost((HttpGet) o, "localhost2");
- }
- }
- class HttpGetForServiceLocal extends ArgumentMatcher<HttpGet> {
- @Override public boolean matches(Object o) {
- try {
- return checkHttpGetMatchHost((HttpGet) o, InetAddress.getLocalHost().getCanonicalHostName());
- } catch (UnknownHostException e) {
- return checkHttpGetMatchHost((HttpGet) o, "localhost");
- }
- }
- }
- Mockito.when(mockHttpClient.execute(argThat(new HttpGetForService1())))
- .thenReturn(mockHttpResponseService1);
- Mockito.when(mockHttpClient.execute(argThat(new HttpGetForService2())))
- .thenReturn(mockHttpResponseService2);
- Mockito.when(mockHttpClient.execute(argThat(new HttpGetForServiceLocal())))
- .thenReturn(mockHttpResponseServiceLocal);
-
- //Need 3 times because performop() does 3 fs operations.
- Mockito.when(mockHttpEntity.getContent())
- .thenReturn(new ByteArrayInputStream(validJsonResponse()
- .getBytes(StandardCharsets.UTF_8)))
- .thenReturn(new ByteArrayInputStream(validJsonResponse()
- .getBytes(StandardCharsets.UTF_8)))
- .thenReturn(new ByteArrayInputStream(validJsonResponse()
- .getBytes(StandardCharsets.UTF_8)));
- // finished setting up mocks
-
- performop(mockHttpClient);
-
- int expectedNumberOfInvocations = isAuthorizationCachingEnabled ? 1 : 2;
- Mockito.verify(mockHttpClient, times(expectedNumberOfInvocations)).execute(Mockito.argThat(new HttpGetForServiceLocal()));
- Mockito.verify(mockHttpClient, times(expectedNumberOfInvocations)).execute(Mockito.argThat(new HttpGetForService2()));
- }
-
- @Test
- public void testWhenServiceInstancesAreDown() throws Throwable {
- //expectedEx.expect(WasbAuthorizationException.class);
- // set up mocks
- HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
- HttpEntity mockHttpEntity = Mockito.mock(HttpEntity.class);
-
- HttpResponse mockHttpResponseService1 = Mockito.mock(HttpResponse.class);
- Mockito.when(mockHttpResponseService1.getStatusLine())
- .thenReturn(newStatusLine(HttpStatus.SC_INTERNAL_SERVER_ERROR));
- Mockito.when(mockHttpResponseService1.getFirstHeader("Content-Type"))
- .thenReturn(newHeader("Content-Type", "application/json"));
- Mockito.when(mockHttpResponseService1.getFirstHeader("Content-Length"))
- .thenReturn(newHeader("Content-Length", "1024"));
- Mockito.when(mockHttpResponseService1.getEntity())
- .thenReturn(mockHttpEntity);
-
- HttpResponse mockHttpResponseService2 = Mockito.mock(HttpResponse.class);
- Mockito.when(mockHttpResponseService2.getStatusLine())
- .thenReturn(newStatusLine(
- HttpStatus.SC_INTERNAL_SERVER_ERROR));
- Mockito.when(mockHttpResponseService2.getFirstHeader("Content-Type"))
- .thenReturn(newHeader("Content-Type", "application/json"));
- Mockito.when(mockHttpResponseService2.getFirstHeader("Content-Length"))
- .thenReturn(newHeader("Content-Length", "1024"));
- Mockito.when(mockHttpResponseService2.getEntity())
- .thenReturn(mockHttpEntity);
-
- HttpResponse mockHttpResponseService3 = Mockito.mock(HttpResponse.class);
- Mockito.when(mockHttpResponseService3.getStatusLine())
- .thenReturn(newStatusLine(
- HttpStatus.SC_INTERNAL_SERVER_ERROR));
- Mockito.when(mockHttpResponseService3.getFirstHeader("Content-Type"))
- .thenReturn(newHeader("Content-Type", "application/json"));
- Mockito.when(mockHttpResponseService3.getFirstHeader("Content-Length"))
- .thenReturn(newHeader("Content-Length", "1024"));
- Mockito.when(mockHttpResponseService3.getEntity())
- .thenReturn(mockHttpEntity);
-
- class HttpGetForService1 extends ArgumentMatcher<HttpGet> {
- @Override public boolean matches(Object o) {
- return checkHttpGetMatchHost((HttpGet) o, "localhost1");
- }
- }
- class HttpGetForService2 extends ArgumentMatcher<HttpGet> {
- @Override public boolean matches(Object o) {
- return checkHttpGetMatchHost((HttpGet) o, "localhost2");
- }
- }
- class HttpGetForService3 extends ArgumentMatcher<HttpGet> {
- @Override public boolean matches(Object o) {
- try {
- return checkHttpGetMatchHost((HttpGet) o, InetAddress.getLocalHost().getCanonicalHostName());
- } catch (UnknownHostException e) {
- return checkHttpGetMatchHost((HttpGet) o, "localhost");
- }
- }
- }
- Mockito.when(mockHttpClient.execute(argThat(new HttpGetForService1())))
- .thenReturn(mockHttpResponseService1);
- Mockito.when(mockHttpClient.execute(argThat(new HttpGetForService2())))
- .thenReturn(mockHttpResponseService2);
- Mockito.when(mockHttpClient.execute(argThat(new HttpGetForService3())))
- .thenReturn(mockHttpResponseService3);
-
- //Need 3 times because performop() does 3 fs operations.
- Mockito.when(mockHttpEntity.getContent())
- .thenReturn(new ByteArrayInputStream(
- validJsonResponse().getBytes(StandardCharsets.UTF_8)))
- .thenReturn(new ByteArrayInputStream(
- validJsonResponse().getBytes(StandardCharsets.UTF_8)))
- .thenReturn(new ByteArrayInputStream(
- validJsonResponse().getBytes(StandardCharsets.UTF_8)));
- // finished setting up mocks
- try {
- performop(mockHttpClient);
- } catch (WasbAuthorizationException e) {
- e.printStackTrace();
- Mockito.verify(mockHttpClient, atLeast(2))
- .execute(argThat(new HttpGetForService1()));
- Mockito.verify(mockHttpClient, atLeast(2))
- .execute(argThat(new HttpGetForService2()));
- Mockito.verify(mockHttpClient, atLeast(3))
- .execute(argThat(new HttpGetForService3()));
- Mockito.verify(mockHttpClient, times(7)).execute(Mockito.<HttpGet>any());
- }
- }
-
- private void setupExpectations() {
- expectedEx.expect(WasbAuthorizationException.class);
-
- class MatchesPattern extends TypeSafeMatcher<String> {
- private String pattern;
-
- MatchesPattern(String pattern) {
- this.pattern = pattern;
- }
-
- @Override protected boolean matchesSafely(String item) {
- return item.matches(pattern);
- }
-
- @Override public void describeTo(Description description) {
- description.appendText("matches pattern ").appendValue(pattern);
- }
-
- @Override protected void describeMismatchSafely(String item,
- Description mismatchDescription) {
- mismatchDescription.appendText("does not match");
- }
- }
-
- expectedEx.expectMessage(new MatchesPattern(
- "org\\.apache\\.hadoop\\.fs\\.azure\\.WasbRemoteCallException: "
- + "Encountered error while making remote call to "
- + "http:\\/\\/localhost1\\/,http:\\/\\/localhost2\\/,http:\\/\\/localhost:8080 retried 6 time\\(s\\)\\."));
- }
-
- private void performop(HttpClient mockHttpClient) throws Throwable {
-
- Path testPath = new Path("/", "test.dat");
-
- RemoteWasbAuthorizerImpl authorizer = new RemoteWasbAuthorizerImpl();
- authorizer.init(fs.getConf());
- WasbRemoteCallHelper mockWasbRemoteCallHelper = new WasbRemoteCallHelper(
- RetryUtils.getMultipleLinearRandomRetry(new Configuration(),
- EMPTY_STRING, true,
- EMPTY_STRING, "1000,3,10000,2"));
- mockWasbRemoteCallHelper.updateHttpClient(mockHttpClient);
- authorizer.updateWasbRemoteCallHelper(mockWasbRemoteCallHelper);
- fs.updateWasbAuthorizer(authorizer);
-
- fs.create(testPath);
- ContractTestUtils.assertPathExists(fs, "testPath was not created", testPath);
- fs.delete(testPath, false);
- }
-
- private String validJsonResponse() {
- return "{"
- + "\"responseCode\": 0,"
- + "\"authorizationResult\": true,"
- + "\"responseMessage\": \"Authorized\""
- + "}";
- }
-
- private String malformedJsonResponse() {
- return "{"
- + "\"responseCode\": 0,"
- + "\"authorizationResult\": true,"
- + "\"responseMessage\":";
- }
-
- private String failureCodeJsonResponse() {
- return "{"
- + "\"responseCode\": 1,"
- + "\"authorizationResult\": false,"
- + "\"responseMessage\": \"Unauthorized\""
- + "}";
- }
-
- private StatusLine newStatusLine(int statusCode) {
- return new StatusLine() {
- @Override
- public ProtocolVersion getProtocolVersion() {
- return new ProtocolVersion("HTTP", 1, 1);
- }
-
- @Override
- public int getStatusCode() {
- return statusCode;
- }
-
- @Override
- public String getReasonPhrase() {
- return "Reason Phrase";
- }
- };
- }
-
- private Header newHeader(String name, String value) {
- return new Header() {
- @Override
- public String getName() {
- return name;
- }
-
- @Override
- public String getValue() {
- return value;
- }
-
- @Override
- public HeaderElement[] getElements() throws ParseException {
- return new HeaderElement[0];
- }
- };
- }
-
- /** Check that a HttpGet request is with given remote host. */
- private static boolean checkHttpGetMatchHost(HttpGet g, String h) {
- return g != null && g.getURI().getHost().equals(h);
- }
-
-}
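The failover tests above all rest on one Mockito idiom: match each outgoing HttpGet by its target host, so that every authorization endpoint in the configured list can be served its own canned HttpResponse. Here is a self-contained sketch of that idiom in the same Mockito 1.x ArgumentMatcher style as the deleted test; the host names and probe URL are illustrative only.

import static org.mockito.Matchers.argThat;

import java.io.IOException;

import org.apache.http.HttpResponse;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpGet;
import org.mockito.ArgumentMatcher;
import org.mockito.Mockito;

public class HostMatcherSketch {

  /** Matches an HttpGet whose URI points at the given host. */
  static class HttpGetForHost extends ArgumentMatcher<HttpGet> {
    private final String host;

    HttpGetForHost(String host) {
      this.host = host;
    }

    @Override
    public boolean matches(Object o) {
      return o instanceof HttpGet
          && host.equals(((HttpGet) o).getURI().getHost());
    }
  }

  public static void main(String[] args) throws IOException {
    HttpClient client = Mockito.mock(HttpClient.class);
    HttpResponse dead = Mockito.mock(HttpResponse.class);
    HttpResponse alive = Mockito.mock(HttpResponse.class);

    // Give each endpoint in the retry list its own canned response.
    Mockito.when(client.execute(argThat(new HttpGetForHost("localhost1"))))
        .thenReturn(dead);
    Mockito.when(client.execute(argThat(new HttpGetForHost("localhost2"))))
        .thenReturn(alive);

    // A caller iterating over the endpoint list now sees per-host behavior,
    // which is how the tests simulate one instance being down.
    HttpResponse response =
        client.execute(new HttpGet("http://localhost2/probe"));
    System.out.println(response == alive);  // prints true
  }
}

Verification calls such as Mockito.verify(client, times(n)).execute(argThat(new HttpGetForHost(...))) then count how many times each endpoint was actually tried, which is how the tests assert the retry and failover order.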
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbUriAndConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbUriAndConfiguration.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbUriAndConfiguration.java
deleted file mode 100644
index 672ed9c..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbUriAndConfiguration.java
+++ /dev/null
@@ -1,617 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.junit.Assume.assumeNotNull;
-
-import java.io.ByteArrayInputStream;
-import java.io.DataInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.net.URI;
-import java.util.Date;
-import java.util.EnumSet;
-import java.io.File;
-
-import org.apache.hadoop.security.ProviderUtils;
-import org.apache.hadoop.security.alias.CredentialProvider;
-import org.apache.hadoop.security.alias.CredentialProviderFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.AbstractFileSystem;
-import org.apache.hadoop.fs.FileContext;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.UnsupportedFileSystemException;
-import org.apache.hadoop.fs.azure.AzureBlobStorageTestAccount.CreateOptions;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Assume;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import com.microsoft.azure.storage.blob.CloudBlobContainer;
-import com.microsoft.azure.storage.blob.CloudBlockBlob;
-
-public class TestWasbUriAndConfiguration {
-
- private static final int FILE_SIZE = 4096;
- private static final String PATH_DELIMITER = "/";
-
- protected String accountName;
- protected String accountKey;
- protected static Configuration conf = null;
- private boolean runningInSASMode = false;
- @Rule
- public final TemporaryFolder tempDir = new TemporaryFolder();
-
- private AzureBlobStorageTestAccount testAccount;
-
- @After
- public void tearDown() throws Exception {
- if (testAccount != null) {
- testAccount.cleanup();
- testAccount = null;
- }
- }
-
- @Before
- public void setMode() {
- runningInSASMode = AzureBlobStorageTestAccount.createTestConfiguration().
- getBoolean(AzureNativeFileSystemStore.KEY_USE_SECURE_MODE, false);
- }
-
- private boolean validateIOStreams(Path filePath) throws IOException {
- // Capture the file system from the test account.
- FileSystem fs = testAccount.getFileSystem();
- return validateIOStreams(fs, filePath);
- }
-
- private boolean validateIOStreams(FileSystem fs, Path filePath)
- throws IOException {
-
- // Create and write a file
- OutputStream outputStream = fs.create(filePath);
- outputStream.write(new byte[FILE_SIZE]);
- outputStream.close();
-
- // Return true if the count equals the file size.
- return (FILE_SIZE == readInputStream(fs, filePath));
- }
-
- private int readInputStream(Path filePath) throws IOException {
- // Capture the file system from the test account.
- FileSystem fs = testAccount.getFileSystem();
- return readInputStream(fs, filePath);
- }
-
- private int readInputStream(FileSystem fs, Path filePath) throws IOException {
- // Read the file
- InputStream inputStream = fs.open(filePath);
- int count = 0;
- while (inputStream.read() >= 0) {
- count++;
- }
- inputStream.close();
-
- // Return the number of bytes read.
- return count;
- }
-
- // Positive tests to exercise making a connection to an Azure account using
- // an account key.
- @Test
- public void testConnectUsingKey() throws Exception {
-
- testAccount = AzureBlobStorageTestAccount.create();
- assumeNotNull(testAccount);
-
- // Validate input and output on the connection.
- assertTrue(validateIOStreams(new Path("/wasb_scheme")));
- }
-
- @Test
- public void testConnectUsingSAS() throws Exception {
-
- Assume.assumeFalse(runningInSASMode);
- // Create the test account with SAS credentials.
- testAccount = AzureBlobStorageTestAccount.create("",
- EnumSet.of(CreateOptions.UseSas, CreateOptions.CreateContainer));
- assumeNotNull(testAccount);
- // Validate input and output on the connection.
- // NOTE: As of 4/15/2013, Azure Storage has a deficiency that prevents the
- // full scenario from working (CopyFromBlob doesn't work with SAS), so
- // just do a minor check until that is corrected.
- assertFalse(testAccount.getFileSystem().exists(new Path("/IDontExist")));
- //assertTrue(validateIOStreams(new Path("/sastest.txt")));
- }
-
- @Test
- public void testConnectUsingSASReadonly() throws Exception {
-
- Assume.assumeFalse(runningInSASMode);
- // Create the test account with SAS credentials.
- testAccount = AzureBlobStorageTestAccount.create("", EnumSet.of(
- CreateOptions.UseSas, CreateOptions.CreateContainer,
- CreateOptions.Readonly));
- assumeNotNull(testAccount);
-
- // Create a blob in there
- final String blobKey = "blobForReadonly";
- CloudBlobContainer container = testAccount.getRealContainer();
- CloudBlockBlob blob = container.getBlockBlobReference(blobKey);
- ByteArrayInputStream inputStream = new ByteArrayInputStream(new byte[] { 1,
- 2, 3 });
- blob.upload(inputStream, 3);
- inputStream.close();
-
- // Make sure we can read it from the file system
- Path filePath = new Path("/" + blobKey);
- FileSystem fs = testAccount.getFileSystem();
- assertTrue(fs.exists(filePath));
- byte[] obtained = new byte[3];
- DataInputStream obtainedInputStream = fs.open(filePath);
- obtainedInputStream.readFully(obtained);
- obtainedInputStream.close();
- assertEquals(3, obtained[2]);
- }
-
- @Test
- public void testConnectUsingAnonymous() throws Exception {
-
- // Create test account with anonymous credentials
- testAccount = AzureBlobStorageTestAccount.createAnonymous("testWasb.txt",
- FILE_SIZE);
- assumeNotNull(testAccount);
-
- // Read the file from the public folder using anonymous credentials.
- assertEquals(FILE_SIZE, readInputStream(new Path("/testWasb.txt")));
- }
-
- @Test
- public void testConnectToEmulator() throws Exception {
- testAccount = AzureBlobStorageTestAccount.createForEmulator();
- assumeNotNull(testAccount);
- assertTrue(validateIOStreams(new Path("/testFile")));
- }
-
- /**
- * Tests that we can connect to fully qualified accounts outside of
- * blob.core.windows.net
- */
- @Test
- public void testConnectToFullyQualifiedAccountMock() throws Exception {
- Configuration conf = new Configuration();
- AzureBlobStorageTestAccount.setMockAccountKey(conf,
- "mockAccount.mock.authority.net");
- AzureNativeFileSystemStore store = new AzureNativeFileSystemStore();
- MockStorageInterface mockStorage = new MockStorageInterface();
- store.setAzureStorageInteractionLayer(mockStorage);
- NativeAzureFileSystem fs = new NativeAzureFileSystem(store);
- fs.initialize(
- new URI("wasb://mockContainer@mockAccount.mock.authority.net"), conf);
- fs.createNewFile(new Path("/x"));
- assertTrue(mockStorage.getBackingStore().exists(
- "http://mockAccount.mock.authority.net/mockContainer/x"));
- fs.close();
- }
-
- public void testConnectToRoot() throws Exception {
-
- // Set up blob names.
- final String blobPrefix = String.format("wasbtests-%s-%tQ-blob",
- System.getProperty("user.name"), new Date());
- final String inblobName = blobPrefix + "_In" + ".txt";
- final String outblobName = blobPrefix + "_Out" + ".txt";
-
- // Create test account with default root access.
- testAccount = AzureBlobStorageTestAccount.createRoot(inblobName, FILE_SIZE);
- assumeNotNull(testAccount);
-
- // Read the file from the default container.
- assertEquals(FILE_SIZE, readInputStream(new Path(PATH_DELIMITER
- + inblobName)));
-
- try {
- // Capture file system.
- FileSystem fs = testAccount.getFileSystem();
-
- // Create output path and open an output stream to the root folder.
- Path outputPath = new Path(PATH_DELIMITER + outblobName);
- OutputStream outputStream = fs.create(outputPath);
- fail("Expected an AzureException when writing to root folder.");
- outputStream.write(new byte[FILE_SIZE]);
- outputStream.close();
- } catch (AzureException e) {
- assertTrue(true);
- } catch (Exception e) {
- String errMsg = String.format(
- "Expected AzureException but got %s instead.", e);
- assertTrue(errMsg, false);
- }
- }
-
- // Positive tests to exercise throttling I/O path. Connections are made to an
- // Azure account using account key.
- //
- public void testConnectWithThrottling() throws Exception {
-
- testAccount = AzureBlobStorageTestAccount.createThrottled();
-
- // Validate input and output on the connection.
- assertTrue(validateIOStreams(new Path("/wasb_scheme")));
- }
-
- /**
- * Creates a file and writes a single byte with the given value in it.
- */
- private static void writeSingleByte(FileSystem fs, Path testFile, int toWrite)
- throws Exception {
- OutputStream outputStream = fs.create(testFile);
- outputStream.write(toWrite);
- outputStream.close();
- }
-
- /**
- * Reads the file given and makes sure that it's a single-byte file with the
- * given value in it.
- */
- private static void assertSingleByteValue(FileSystem fs, Path testFile,
- int expectedValue) throws Exception {
- InputStream inputStream = fs.open(testFile);
- int byteRead = inputStream.read();
- assertTrue("File unexpectedly empty: " + testFile, byteRead >= 0);
- assertTrue("File has more than a single byte: " + testFile,
- inputStream.read() < 0);
- inputStream.close();
- assertEquals("Unxpected content in: " + testFile, expectedValue, byteRead);
- }
-
- @Test
- public void testMultipleContainers() throws Exception {
- AzureBlobStorageTestAccount firstAccount = AzureBlobStorageTestAccount
- .create("first"), secondAccount = AzureBlobStorageTestAccount
- .create("second");
- assumeNotNull(firstAccount);
- assumeNotNull(secondAccount);
- try {
- FileSystem firstFs = firstAccount.getFileSystem(),
- secondFs = secondAccount.getFileSystem();
- Path testFile = new Path("/testWasb");
- assertTrue(validateIOStreams(firstFs, testFile));
- assertTrue(validateIOStreams(secondFs, testFile));
- // Make sure that we're really dealing with two file systems here.
- writeSingleByte(firstFs, testFile, 5);
- writeSingleByte(secondFs, testFile, 7);
- assertSingleByteValue(firstFs, testFile, 5);
- assertSingleByteValue(secondFs, testFile, 7);
- } finally {
- firstAccount.cleanup();
- secondAccount.cleanup();
- }
- }
-
- @Test
- public void testDefaultKeyProvider() throws Exception {
- Configuration conf = new Configuration();
- String account = "testacct";
- String key = "testkey";
-
- conf.set(SimpleKeyProvider.KEY_ACCOUNT_KEY_PREFIX + account, key);
-
- String result = AzureNativeFileSystemStore.getAccountKeyFromConfiguration(
- account, conf);
- assertEquals(key, result);
- }
-
- @Test
- public void testCredsFromCredentialProvider() throws Exception {
-
- Assume.assumeFalse(runningInSASMode);
- String account = "testacct";
- String key = "testkey";
- // set up conf to have a cred provider
- final Configuration conf = new Configuration();
- final File file = tempDir.newFile("test.jks");
- final URI jks = ProviderUtils.nestURIForLocalJavaKeyStoreProvider(
- file.toURI());
- conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,
- jks.toString());
-
- provisionAccountKey(conf, account, key);
-
- // also add to configuration as clear text that should be overridden
- conf.set(SimpleKeyProvider.KEY_ACCOUNT_KEY_PREFIX + account,
- key + "cleartext");
-
- String result = AzureNativeFileSystemStore.getAccountKeyFromConfiguration(
- account, conf);
- // result should contain the credential provider key not the config key
- assertEquals("AccountKey incorrect.", key, result);
- }
-
- void provisionAccountKey(
- final Configuration conf, String account, String key) throws Exception {
- // add our creds to the provider
- final CredentialProvider provider =
- CredentialProviderFactory.getProviders(conf).get(0);
- provider.createCredentialEntry(
- SimpleKeyProvider.KEY_ACCOUNT_KEY_PREFIX + account, key.toCharArray());
- provider.flush();
- }
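
The same entry can be provisioned outside of tests with the credential CLI instead of the Java API; a sketch, assuming a local keystore path and with a placeholder account name:

    hadoop credential create fs.azure.account.key.youraccount.blob.core.windows.net \
      -provider jceks://file/home/user/azure.jceks
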
-
- @Test
- public void testValidKeyProvider() throws Exception {
- Configuration conf = new Configuration();
- String account = "testacct";
- String key = "testkey";
-
- conf.set(SimpleKeyProvider.KEY_ACCOUNT_KEY_PREFIX + account, key);
- conf.setClass("fs.azure.account.keyprovider." + account,
- SimpleKeyProvider.class, KeyProvider.class);
- String result = AzureNativeFileSystemStore.getAccountKeyFromConfiguration(
- account, conf);
- assertEquals(key, result);
- }
-
- @Test
- public void testInvalidKeyProviderNonexistantClass() throws Exception {
- Configuration conf = new Configuration();
- String account = "testacct";
-
- conf.set("fs.azure.account.keyprovider." + account,
- "org.apache.Nonexistant.Class");
- try {
- AzureNativeFileSystemStore.getAccountKeyFromConfiguration(account, conf);
- Assert.fail("Nonexistant key provider class should have thrown a "
- + "KeyProviderException");
- } catch (KeyProviderException e) {
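- // expected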
- }
- }
-
- @Test
- public void testInvalidKeyProviderWrongClass() throws Exception {
- Configuration conf = new Configuration();
- String account = "testacct";
-
- conf.set("fs.azure.account.keyprovider." + account, "java.lang.String");
- try {
- AzureNativeFileSystemStore.getAccountKeyFromConfiguration(account, conf);
- Assert.fail("Key provider class that doesn't implement KeyProvider "
- + "should have thrown a KeyProviderException");
- } catch (KeyProviderException e) {
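- // expected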
- }
- }
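
For contrast with these failure cases, a valid provider only has to implement the single-method KeyProvider interface. A minimal sketch of a hypothetical provider (the environment-variable scheme is purely illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.azure.KeyProvider;
    import org.apache.hadoop.fs.azure.KeyProviderException;

    public class EnvVarKeyProvider implements KeyProvider {
      @Override
      public String getStorageAccountKey(String accountName, Configuration conf)
          throws KeyProviderException {
        // Hypothetical convention: one environment variable per account.
        String key = System.getenv("AZURE_KEY_" + accountName);
        if (key == null) {
          throw new KeyProviderException("No key found for " + accountName);
        }
        return key;
      }
    }
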
-
- /**
- * Tests the case where the URI is specified with no authority, i.e.
- * wasb:///path/to/file.
- */
- @Test
- public void testNoUriAuthority() throws Exception {
- // For any combination of default FS being asv(s)/wasb(s)://c@a/ and
- // the actual URI being asv(s)/wasb(s):///, it should work.
-
- String[] wasbAliases = new String[] { "wasb", "wasbs" };
- for (String defaultScheme : wasbAliases) {
- for (String wantedScheme : wasbAliases) {
- testAccount = AzureBlobStorageTestAccount.createMock();
- Configuration conf = testAccount.getFileSystem().getConf();
- String authority = testAccount.getFileSystem().getUri().getAuthority();
- URI defaultUri = new URI(defaultScheme, authority, null, null, null);
- conf.set(FS_DEFAULT_NAME_KEY, defaultUri.toString());
- // Add references to file system implementations for wasb and wasbs.
- conf.addResource("azure-test.xml");
- URI wantedUri = new URI(wantedScheme + ":///random/path");
- NativeAzureFileSystem obtained = (NativeAzureFileSystem) FileSystem
- .get(wantedUri, conf);
- assertNotNull(obtained);
- assertEquals(new URI(wantedScheme, authority, null, null, null),
- obtained.getUri());
- // Make sure makeQualified works as expected
- Path qualified = obtained.makeQualified(new Path(wantedUri));
- assertEquals(new URI(wantedScheme, authority, wantedUri.getPath(),
- null, null), qualified.toUri());
- // Cleanup for the next iteration to not cache anything in FS
- testAccount.cleanup();
- FileSystem.closeAll();
- }
- }
- // If the default FS is not a WASB FS, then specifying a URI without
- // authority for the Azure file system should throw.
- testAccount = AzureBlobStorageTestAccount.createMock();
- Configuration conf = testAccount.getFileSystem().getConf();
- conf.set(FS_DEFAULT_NAME_KEY, "file:///");
- try {
- FileSystem.get(new URI("wasb:///random/path"), conf);
- fail("Should've thrown.");
- } catch (IllegalArgumentException e) {
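- // expected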
- }
- }
-
- @Test
- public void testWasbAsDefaultFileSystemHasNoPort() throws Exception {
- try {
- testAccount = AzureBlobStorageTestAccount.createMock();
- Configuration conf = testAccount.getFileSystem().getConf();
- String authority = testAccount.getFileSystem().getUri().getAuthority();
- URI defaultUri = new URI("wasb", authority, null, null, null);
- conf.set(FS_DEFAULT_NAME_KEY, defaultUri.toString());
- conf.addResource("azure-test.xml");
-
- FileSystem fs = FileSystem.get(conf);
- assertTrue(fs instanceof NativeAzureFileSystem);
- assertEquals(-1, fs.getUri().getPort());
-
- AbstractFileSystem afs = FileContext.getFileContext(conf)
- .getDefaultFileSystem();
- assertTrue(afs instanceof Wasb);
- assertEquals(-1, afs.getUri().getPort());
- } finally {
- testAccount.cleanup();
- FileSystem.closeAll();
- }
- }
-
- /**
- * Tests the case where the specified scheme is 'wasbs'.
- */
- @Test
- public void testAbstractFileSystemImplementationForWasbsScheme() throws Exception {
- try {
- testAccount = AzureBlobStorageTestAccount.createMock();
- Configuration conf = testAccount.getFileSystem().getConf();
- String authority = testAccount.getFileSystem().getUri().getAuthority();
- URI defaultUri = new URI("wasbs", authority, null, null, null);
- conf.set(FS_DEFAULT_NAME_KEY, defaultUri.toString());
- conf.set("fs.AbstractFileSystem.wasbs.impl", "org.apache.hadoop.fs.azure.Wasbs");
- conf.addResource("azure-test.xml");
-
- FileSystem fs = FileSystem.get(conf);
- assertTrue(fs instanceof NativeAzureFileSystem);
- assertEquals("wasbs", fs.getScheme());
-
- AbstractFileSystem afs = FileContext.getFileContext(conf)
- .getDefaultFileSystem();
- assertTrue(afs instanceof Wasbs);
- assertEquals(-1, afs.getUri().getPort());
- assertEquals("wasbs", afs.getUri().getScheme());
- } finally {
- testAccount.cleanup();
- FileSystem.closeAll();
- }
- }
-
- @Test
- public void testNoAbstractFileSystemImplementationSpecifiedForWasbsScheme() throws Exception {
- try {
- testAccount = AzureBlobStorageTestAccount.createMock();
- Configuration conf = testAccount.getFileSystem().getConf();
- String authority = testAccount.getFileSystem().getUri().getAuthority();
- URI defaultUri = new URI("wasbs", authority, null, null, null);
- conf.set(FS_DEFAULT_NAME_KEY, defaultUri.toString());
-
- FileSystem fs = FileSystem.get(conf);
- assertTrue(fs instanceof NativeAzureFileSystem);
- assertEquals("wasbs", fs.getScheme());
-
- // should throw if 'fs.AbstractFileSystem.wasbs.impl' is not specified
- try {
- FileContext.getFileContext(conf).getDefaultFileSystem();
- fail("Should've thrown.");
- } catch (UnsupportedFileSystemException e) {
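- // expected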
- }
-
- } finally {
- testAccount.cleanup();
- FileSystem.closeAll();
- }
- }
-
- @Test
- public void testCredentialProviderPathExclusions() throws Exception {
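- // Providers hosted on WASB itself must be excluded: resolving them
- // would require the very credentials they are meant to supply.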
- String providerPath =
- "user:///,jceks://wasb/user/hrt_qa/sqoopdbpasswd.jceks," +
- "jceks://hdfs@nn1.example.com/my/path/test.jceks";
- Configuration config = new Configuration();
- config.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,
- providerPath);
- String newPath = "user:///,jceks://hdfs@nn1.example.com/my/path/test.jceks";
-
- excludeAndTestExpectations(config, newPath);
- }
-
- @Test
- public void testExcludeAllProviderTypesFromConfig() throws Exception {
- String providerPath =
- "jceks://wasb/tmp/test.jceks," +
- "jceks://wasb@/my/path/test.jceks";
- Configuration config = new Configuration();
- config.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,
- providerPath);
- String newPath = null;
-
- excludeAndTestExpectations(config, newPath);
- }
-
- void excludeAndTestExpectations(Configuration config, String newPath)
- throws Exception {
- Configuration conf = ProviderUtils.excludeIncompatibleCredentialProviders(
- config, NativeAzureFileSystem.class);
- String effectivePath = conf.get(
- CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, null);
- assertEquals(newPath, effectivePath);
- }
-
- @Test
- public void testUserAgentConfig() throws Exception {
- // Set the user agent
- try {
- testAccount = AzureBlobStorageTestAccount.createMock();
- Configuration conf = testAccount.getFileSystem().getConf();
- String authority = testAccount.getFileSystem().getUri().getAuthority();
- URI defaultUri = new URI("wasbs", authority, null, null, null);
- conf.set(FS_DEFAULT_NAME_KEY, defaultUri.toString());
- conf.set("fs.AbstractFileSystem.wasbs.impl", "org.apache.hadoop.fs.azure.Wasbs");
-
- conf.set(AzureNativeFileSystemStore.USER_AGENT_ID_KEY, "TestClient");
-
- FileSystem fs = FileSystem.get(conf);
- AbstractFileSystem afs = FileContext.getFileContext(conf).getDefaultFileSystem();
-
- assertTrue(afs instanceof Wasbs);
- assertEquals(-1, afs.getUri().getPort());
- assertEquals("wasbs", afs.getUri().getScheme());
-
- } finally {
- testAccount.cleanup();
- FileSystem.closeAll();
- }
-
- // Unset the user agent
- try {
- testAccount = AzureBlobStorageTestAccount.createMock();
- Configuration conf = testAccount.getFileSystem().getConf();
- String authority = testAccount.getFileSystem().getUri().getAuthority();
- URI defaultUri = new URI("wasbs", authority, null, null, null);
- conf.set(FS_DEFAULT_NAME_KEY, defaultUri.toString());
- conf.set("fs.AbstractFileSystem.wasbs.impl", "org.apache.hadoop.fs.azure.Wasbs");
-
- conf.unset(AzureNativeFileSystemStore.USER_AGENT_ID_KEY);
-
- FileSystem fs = FileSystem.get(conf);
- AbstractFileSystem afs = FileContext.getFileContext(conf).getDefaultFileSystem();
- assertTrue(afs instanceof Wasbs);
- assertEquals(-1, afs.getUri().getPort());
- assertEquals("wasbs", afs.getUri().getScheme());
-
- } finally {
- testAccount.cleanup();
- FileSystem.closeAll();
- }
- }
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractAppend.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractAppend.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractAppend.java
new file mode 100644
index 0000000..fd21bd2
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractAppend.java
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure.contract;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.AbstractContractAppendTest;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.skip;
+
+/**
+ * Append contract tests, skipping the rename-while-appending case,
+ * which WASB does not support.
+ */
+
+public class ITestAzureNativeContractAppend extends AbstractContractAppendTest {
+
+ @Override
+ protected AbstractFSContract createContract(Configuration conf) {
+ return new NativeAzureFileSystemContract(conf);
+ }
+
+ @Override
+ public void testRenameFileBeingAppended() throws Throwable {
+ skip("Skipping as renaming an opened file is not supported");
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractCreate.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractCreate.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractCreate.java
new file mode 100644
index 0000000..0ac046a
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractCreate.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure.contract;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.AbstractContractCreateTest;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+
+/**
+ * Contract test.
+ */
+public class ITestAzureNativeContractCreate extends AbstractContractCreateTest {
+
+ @Override
+ protected AbstractFSContract createContract(Configuration conf) {
+ return new NativeAzureFileSystemContract(conf);
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractDelete.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractDelete.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractDelete.java
new file mode 100644
index 0000000..4c6dd48
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractDelete.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure.contract;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.AbstractContractDeleteTest;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+
+/**
+ * Contract test.
+ */
+public class ITestAzureNativeContractDelete extends AbstractContractDeleteTest {
+ @Override
+ protected AbstractFSContract createContract(Configuration conf) {
+ return new NativeAzureFileSystemContract(conf);
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractDistCp.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractDistCp.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractDistCp.java
new file mode 100644
index 0000000..7769570
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractDistCp.java
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure.contract;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.azure.integration.AzureTestConstants;
+import org.apache.hadoop.tools.contract.AbstractContractDistCpTest;
+
+import static org.apache.hadoop.fs.azure.integration.AzureTestUtils.assumeScaleTestsEnabled;
+
+/**
+ * Contract test suite covering WASB integration with DistCp.
+ */
+public class ITestAzureNativeContractDistCp extends AbstractContractDistCpTest {
+
+ @Override
+ protected int getTestTimeoutMillis() {
+ return AzureTestConstants.SCALE_TEST_TIMEOUT_MILLIS;
+ }
+
+ @Override
+ protected NativeAzureFileSystemContract createContract(Configuration conf) {
+ return new NativeAzureFileSystemContract(conf);
+ }
+
+ @Override
+ public void setup() throws Exception {
+ super.setup();
+ assumeScaleTestsEnabled(getContract().getConf());
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractGetFileStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractGetFileStatus.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractGetFileStatus.java
new file mode 100644
index 0000000..9c09c0d
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractGetFileStatus.java
@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure.contract;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.AbstractContractGetFileStatusTest;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+
+/**
+ * Contract test.
+ */
+public class ITestAzureNativeContractGetFileStatus
+ extends AbstractContractGetFileStatusTest {
+
+ @Override
+ protected AbstractFSContract createContract(Configuration conf) {
+ return new NativeAzureFileSystemContract(conf);
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractMkdir.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractMkdir.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractMkdir.java
new file mode 100644
index 0000000..71654b8
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractMkdir.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure.contract;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.AbstractContractMkdirTest;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+
+/**
+ * Contract test.
+ */
+public class ITestAzureNativeContractMkdir extends AbstractContractMkdirTest {
+ @Override
+ protected AbstractFSContract createContract(Configuration conf) {
+ return new NativeAzureFileSystemContract(conf);
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractOpen.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractOpen.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractOpen.java
new file mode 100644
index 0000000..0b174e6
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractOpen.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure.contract;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.AbstractContractOpenTest;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+
+/**
+ * Contract test.
+ */
+public class ITestAzureNativeContractOpen extends AbstractContractOpenTest {
+
+ @Override
+ protected AbstractFSContract createContract(Configuration conf) {
+ return new NativeAzureFileSystemContract(conf);
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractRename.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractRename.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractRename.java
new file mode 100644
index 0000000..474b874
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractRename.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure.contract;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.AbstractContractRenameTest;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+
+/**
+ * Contract test.
+ */
+public class ITestAzureNativeContractRename extends AbstractContractRenameTest {
+
+ @Override
+ protected AbstractFSContract createContract(Configuration conf) {
+ return new NativeAzureFileSystemContract(conf);
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractSeek.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractSeek.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractSeek.java
new file mode 100644
index 0000000..673d5f8
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractSeek.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure.contract;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.AbstractContractSeekTest;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+
+/**
+ * Contract test.
+ */
+public class ITestAzureNativeContractSeek extends AbstractContractSeekTest {
+
+ @Override
+ protected AbstractFSContract createContract(Configuration conf) {
+ return new NativeAzureFileSystemContract(conf);
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/NativeAzureFileSystemContract.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/NativeAzureFileSystemContract.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/NativeAzureFileSystemContract.java
index 28c13ea..a264aca 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/NativeAzureFileSystemContract.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/NativeAzureFileSystemContract.java
@@ -18,15 +18,21 @@
package org.apache.hadoop.fs.azure.contract;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.azure.integration.AzureTestUtils;
import org.apache.hadoop.fs.contract.AbstractBondedFSContract;
+/**
+ * Azure contract. Test paths are created using the Maven fork
+ * identifier, if defined, which guarantees that paths are unique
+ * for tests running in parallel.
+ */
public class NativeAzureFileSystemContract extends AbstractBondedFSContract {
public static final String CONTRACT_XML = "wasb.xml";
- protected NativeAzureFileSystemContract(Configuration conf) {
- super(conf);
- //insert the base features
+ public NativeAzureFileSystemContract(Configuration conf) {
+ super(conf); // insert the base features
addConfResource(CONTRACT_XML);
}
@@ -34,4 +40,9 @@ public class NativeAzureFileSystemContract extends AbstractBondedFSContract {
public String getScheme() {
return "wasb";
}
-}
\ No newline at end of file
+
+ @Override
+ public Path getTestPath() {
+ return AzureTestUtils.createTestPath(super.getTestPath());
+ }
+}
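
A minimal sketch of what a helper like the AzureTestUtils.createTestPath referenced above can do; the test.unique.fork.id property name is an assumption, mirroring the convention used by the other object-store test suites:

    // Inside a test-utility class.
    public static Path createTestPath(Path defaultPath) {
      // When surefire forks JVMs, each fork gets a unique id; rooting the
      // test path under it keeps parallel test runs from colliding.
      String forkId = System.getProperty("test.unique.fork.id");
      return forkId == null ? defaultPath : new Path("/" + forkId, "test");
    }
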
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractAppend.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractAppend.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractAppend.java
deleted file mode 100644
index 8a2341e..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractAppend.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure.contract;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractAppendTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.Test;
-import static org.apache.hadoop.fs.contract.ContractTestUtils.skip;
-
-public class TestAzureNativeContractAppend extends AbstractContractAppendTest {
- @Override
- protected AbstractFSContract createContract(Configuration conf) {
- return new NativeAzureFileSystemContract(conf);
- }
-
- @Override
- public void testRenameFileBeingAppended() throws Throwable {
- skip("Skipping as renaming an opened file is not supported");
- }
-}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractCreate.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractCreate.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractCreate.java
deleted file mode 100644
index 531552d..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractCreate.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure.contract;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractCreateTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-
-public class TestAzureNativeContractCreate extends AbstractContractCreateTest{
- @Override
- protected AbstractFSContract createContract(Configuration conf) {
- return new NativeAzureFileSystemContract(conf);
- }
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractDelete.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractDelete.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractDelete.java
deleted file mode 100644
index 5e5c13b..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractDelete.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure.contract;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractDeleteTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-
-public class TestAzureNativeContractDelete extends AbstractContractDeleteTest {
- @Override
- protected AbstractFSContract createContract(Configuration conf) {
- return new NativeAzureFileSystemContract(conf);
- }
-}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractDistCp.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractDistCp.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractDistCp.java
deleted file mode 100644
index a3750d4..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractDistCp.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure.contract;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.tools.contract.AbstractContractDistCpTest;
-
-/**
- * Contract test suite covering WASB integration with DistCp.
- */
-public class TestAzureNativeContractDistCp extends AbstractContractDistCpTest {
-
- @Override
- protected NativeAzureFileSystemContract createContract(Configuration conf) {
- return new NativeAzureFileSystemContract(conf);
- }
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractGetFileStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractGetFileStatus.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractGetFileStatus.java
deleted file mode 100644
index b0c59ee..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractGetFileStatus.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure.contract;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractGetFileStatusTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-
-public class TestAzureNativeContractGetFileStatus extends AbstractContractGetFileStatusTest {
- @Override
- protected AbstractFSContract createContract(Configuration conf) {
- return new NativeAzureFileSystemContract(conf);
- }
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractMkdir.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractMkdir.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractMkdir.java
deleted file mode 100644
index 36df041..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractMkdir.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure.contract;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractMkdirTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-
-public class TestAzureNativeContractMkdir extends AbstractContractMkdirTest {
- @Override
- protected AbstractFSContract createContract(Configuration conf) {
- return new NativeAzureFileSystemContract(conf);
- }
-}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractOpen.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractOpen.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractOpen.java
deleted file mode 100644
index d5147ac..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractOpen.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure.contract;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractOpenTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-
-public class TestAzureNativeContractOpen extends AbstractContractOpenTest {
- @Override
- protected AbstractFSContract createContract(Configuration conf) {
- return new NativeAzureFileSystemContract(conf);
- }
-}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractRename.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractRename.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractRename.java
deleted file mode 100644
index 4d8b2b5..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractRename.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure.contract;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractRenameTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-
-public class TestAzureNativeContractRename extends AbstractContractRenameTest {
- @Override
- protected AbstractFSContract createContract(Configuration conf) {
- return new NativeAzureFileSystemContract(conf);
- }
-}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractSeek.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractSeek.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractSeek.java
deleted file mode 100644
index 30046dc..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractSeek.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure.contract;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractSeekTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-
-public class TestAzureNativeContractSeek extends AbstractContractSeekTest{
- @Override
- protected AbstractFSContract createContract(Configuration conf) {
- return new NativeAzureFileSystemContract(conf);
- }
-}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/AbstractAzureScaleTest.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/AbstractAzureScaleTest.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/AbstractAzureScaleTest.java
new file mode 100644
index 0000000..062d073
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/AbstractAzureScaleTest.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure.integration;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.fs.azure.AbstractWasbTestBase;
+import org.apache.hadoop.fs.azure.AzureBlobStorageTestAccount;
+
+import static org.apache.hadoop.fs.azure.integration.AzureTestUtils.*;
+
+/**
+ * Scale tests are only executed if the scale profile
+ * is set; the setup method will check this and skip
+ * tests if not.
+ */
+public abstract class AbstractAzureScaleTest
+ extends AbstractWasbTestBase implements Sizes {
+
+ protected static final Logger LOG =
+ LoggerFactory.getLogger(AbstractAzureScaleTest.class);
+
+ @Override
+ protected int getTestTimeoutMillis() {
+ return AzureTestConstants.SCALE_TEST_TIMEOUT_MILLIS;
+ }
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ LOG.debug("Scale test operation count = {}", getOperationCount());
+ assumeScaleTestsEnabled(getConfiguration());
+ }
+
+ /**
+ * Create the test account.
+ * @return a test account
+ * @throws Exception on any failure to create the account.
+ */
+ protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+ return AzureBlobStorageTestAccount.create(createConfiguration());
+ }
+
+ protected long getOperationCount() {
+ return getConfiguration().getLong(KEY_OPERATION_COUNT,
+ DEFAULT_OPERATION_COUNT);
+ }
+}
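
A guard such as assumeScaleTestsEnabled can be as small as a JUnit Assume call. A sketch, assuming a boolean switch named fs.azure.scale.test.enabled:

    // Inside a test-utility class; uses org.junit.Assume.
    public static void assumeScaleTestsEnabled(Configuration conf) {
      // Skip, rather than fail, when the scale profile is not enabled.
      Assume.assumeTrue(
          "Scale tests disabled: set fs.azure.scale.test.enabled to run them",
          conf.getBoolean("fs.azure.scale.test.enabled", false));
    }
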
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestBlobDataValidation.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestBlobDataValidation.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestBlobDataValidation.java
new file mode 100644
index 0000000..0aa9393
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestBlobDataValidation.java
@@ -0,0 +1,244 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import static org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.KEY_CHECK_BLOCK_MD5;
+import static org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.KEY_STORE_BLOB_MD5;
+import static org.junit.Assume.assumeNotNull;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.HttpURLConnection;
+import java.util.Arrays;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.TestHookOperationContext;
+import org.apache.hadoop.fs.azure.integration.AzureTestUtils;
+
+import org.junit.After;
+import org.junit.Test;
+
+import com.microsoft.azure.storage.Constants;
+import com.microsoft.azure.storage.OperationContext;
+import com.microsoft.azure.storage.ResponseReceivedEvent;
+import com.microsoft.azure.storage.StorageErrorCodeStrings;
+import com.microsoft.azure.storage.StorageEvent;
+import com.microsoft.azure.storage.StorageException;
+import com.microsoft.azure.storage.blob.BlockEntry;
+import com.microsoft.azure.storage.blob.BlockSearchMode;
+import com.microsoft.azure.storage.blob.CloudBlockBlob;
+import com.microsoft.azure.storage.core.Base64;
+
+/**
+ * Test that we do proper data integrity validation with MD5 checks as
+ * configured.
+ */
+public class ITestBlobDataValidation extends AbstractWasbTestWithTimeout {
+ private AzureBlobStorageTestAccount testAccount;
+
+ @After
+ public void tearDown() throws Exception {
+ testAccount = AzureTestUtils.cleanupTestAccount(testAccount);
+ }
+
+ /**
+ * Test that by default we don't store the blob-level MD5.
+ */
+ @Test
+ public void testBlobMd5StoreOffByDefault() throws Exception {
+ testAccount = AzureBlobStorageTestAccount.create();
+ testStoreBlobMd5(false);
+ }
+
+ /**
+ * Test that we get blob-level MD5 storage and validation if we specify that
+ * in the configuration.
+ */
+ @Test
+ public void testStoreBlobMd5() throws Exception {
+ Configuration conf = new Configuration();
+ conf.setBoolean(KEY_STORE_BLOB_MD5, true);
+ testAccount = AzureBlobStorageTestAccount.create(conf);
+ testStoreBlobMd5(true);
+ }
+
+ /**
+ * Trims a suffix/prefix from the given string. For example, if
+ * s is "/xy" and toTrim is "/", this method returns "xy".
+ */
+ private static String trim(String s, String toTrim) {
+ return StringUtils.removeEnd(StringUtils.removeStart(s, toTrim),
+ toTrim);
+ }
+
+ private void testStoreBlobMd5(boolean expectMd5Stored) throws Exception {
+ assumeNotNull(testAccount);
+ // Write a test file.
+ NativeAzureFileSystem fs = testAccount.getFileSystem();
+ Path testFilePath = AzureTestUtils.pathForTests(fs,
+ methodName.getMethodName());
+ String testFileKey = trim(testFilePath.toUri().getPath(), "/");
+ OutputStream outStream = fs.create(testFilePath);
+ outStream.write(new byte[] { 5, 15 });
+ outStream.close();
+
+ // Check that we stored/didn't store the MD5 field as configured.
+ CloudBlockBlob blob = testAccount.getBlobReference(testFileKey);
+ blob.downloadAttributes();
+ String obtainedMd5 = blob.getProperties().getContentMD5();
+ if (expectMd5Stored) {
+ assertNotNull(obtainedMd5);
+ } else {
+ assertNull("Expected no MD5, found: " + obtainedMd5, obtainedMd5);
+ }
+
+ // Mess with the content so it doesn't match the MD5.
+ String newBlockId = Base64.encode(new byte[] { 55, 44, 33, 22 });
+ blob.uploadBlock(newBlockId,
+ new ByteArrayInputStream(new byte[] { 6, 45 }), 2);
+ blob.commitBlockList(Arrays.asList(new BlockEntry[] { new BlockEntry(
+ newBlockId, BlockSearchMode.UNCOMMITTED) }));
+
+ // Now read back the content. If we stored the MD5 for the blob content
+ // we should get a data corruption error.
+ InputStream inStream = fs.open(testFilePath);
+ try {
+ byte[] inBuf = new byte[100];
+ while (inStream.read(inBuf) > 0) {
+ // discard the data; only whether the read fails matters here
+ }
+ inStream.close();
+ if (expectMd5Stored) {
+ fail("Should've thrown because of data corruption.");
+ }
+ } catch (IOException ex) {
+ if (!expectMd5Stored) {
+ throw ex;
+ }
+ StorageException cause = (StorageException)ex.getCause();
+ assertNotNull(cause);
+ assertEquals("Unexpected cause: " + cause,
+ StorageErrorCodeStrings.INVALID_MD5, cause.getErrorCode());
+ }
+ }
+
+ /**
+ * Test that by default we check block-level MD5.
+ */
+ @Test
+ public void testCheckBlockMd5() throws Exception {
+ testAccount = AzureBlobStorageTestAccount.create();
+ testCheckBlockMd5(true);
+ }
+
+ /**
+ * Test that we don't check block-level MD5 if we specify that in the
+ * configuration.
+ */
+ @Test
+ public void testDontCheckBlockMd5() throws Exception {
+ Configuration conf = new Configuration();
+ conf.setBoolean(KEY_CHECK_BLOCK_MD5, false);
+ testAccount = AzureBlobStorageTestAccount.create(conf);
+ testCheckBlockMd5(false);
+ }
+
+ /**
+ * Connection inspector that checks whether the content MD5 field is set
+ * or left unset, as expected.
+ */
+ private static class ContentMD5Checker extends
+ StorageEvent<ResponseReceivedEvent> {
+ private final boolean expectMd5;
+
+ public ContentMD5Checker(boolean expectMd5) {
+ this.expectMd5 = expectMd5;
+ }
+
+ @Override
+ public void eventOccurred(ResponseReceivedEvent eventArg) {
+ HttpURLConnection connection = (HttpURLConnection) eventArg
+ .getConnectionObject();
+ if (isGetRange(connection)) {
+ checkObtainedMd5(connection
+ .getHeaderField(Constants.HeaderConstants.CONTENT_MD5));
+ } else if (isPutBlock(connection)) {
+ checkObtainedMd5(connection
+ .getRequestProperty(Constants.HeaderConstants.CONTENT_MD5));
+ }
+ }
+
+ private void checkObtainedMd5(String obtainedMd5) {
+ if (expectMd5) {
+ assertNotNull(obtainedMd5);
+ } else {
+ assertNull("Expected no MD5, found: " + obtainedMd5, obtainedMd5);
+ }
+ }
+
+ private static boolean isPutBlock(HttpURLConnection connection) {
+ return connection.getRequestMethod().equals("PUT")
+ && connection.getURL().getQuery() != null
+ && connection.getURL().getQuery().contains("blockid");
+ }
+
+ private static boolean isGetRange(HttpURLConnection connection) {
+ return connection.getRequestMethod().equals("GET")
+ && connection
+ .getHeaderField(Constants.HeaderConstants.STORAGE_RANGE_HEADER) != null;
+ }
+ }
+
+ private void testCheckBlockMd5(final boolean expectMd5Checked)
+ throws Exception {
+ assumeNotNull(testAccount);
+ Path testFilePath = new Path("/testFile");
+
+ // Add a hook to check that for GET/PUT requests we set/don't set
+ // the block-level MD5 field as configured. I tried to do clever
+ // testing by also messing with the raw data to see if we actually
+ // validate the data as expected, but the HttpURLConnection wasn't
+ // pluggable enough for me to do that.
+ testAccount.getFileSystem().getStore()
+ .addTestHookToOperationContext(new TestHookOperationContext() {
+ @Override
+ public OperationContext modifyOperationContext(
+ OperationContext original) {
+ original.getResponseReceivedEventHandler().addListener(
+ new ContentMD5Checker(expectMd5Checked));
+ return original;
+ }
+ });
+
+ OutputStream outStream = testAccount.getFileSystem().create(testFilePath);
+ outStream.write(new byte[] { 5, 15 });
+ outStream.close();
+
+ InputStream inStream = testAccount.getFileSystem().open(testFilePath);
+ byte[] inBuf = new byte[100];
+ while (inStream.read(inBuf) > 0) {
+ // drain the stream so the MD5 checker sees the GET responses
+ }
+ inStream.close();
+ }
+}
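
For reference, a minimal sketch of the configuration knobs these tests
exercise, mirroring what ITestBlobDataValidation sets up above. It assumes
code living in the org.apache.hadoop.fs.azure package (the KEY_* constants
are package-scoped) and a hypothetical wasb:// URI:

  Configuration conf = new Configuration();
  // store the full-blob Content-MD5 on upload (off by default)
  conf.setBoolean(AzureNativeFileSystemStore.KEY_STORE_BLOB_MD5, true);
  // validate per-block MD5 on GET/PUT (on by default)
  conf.setBoolean(AzureNativeFileSystemStore.KEY_CHECK_BLOCK_MD5, true);
  FileSystem fs = FileSystem.get(
      URI.create("wasb://container@account.blob.core.windows.net"), conf);
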
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestBlobTypeSpeedDifference.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestBlobTypeSpeedDifference.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestBlobTypeSpeedDifference.java
new file mode 100644
index 0000000..b46ad5b
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestBlobTypeSpeedDifference.java
@@ -0,0 +1,163 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.Arrays;
+import java.util.Date;
+
+import org.junit.Test;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.azure.integration.AzureTestUtils;
+import org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation;
+
+
+/**
+ * A simple benchmark to find out the difference in speed between block
+ * and page blobs.
+ */
+public class ITestBlobTypeSpeedDifference extends AbstractWasbTestBase {
+
+ @Override
+ protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+ return AzureBlobStorageTestAccount.create();
+ }
+
+ /**
+ * Writes the given number of bytes to the given stream, flushing every
+ * flushInterval bytes.
+ */
+ private static void writeTestFile(OutputStream writeStream,
+ long size, long flushInterval) throws IOException {
+ int bufferSize = (int) Math.min(1000, flushInterval);
+ byte[] buffer = new byte[bufferSize];
+ Arrays.fill(buffer, (byte) 7);
+ int bytesWritten = 0;
+ int bytesUnflushed = 0;
+ while (bytesWritten < size) {
+ int numberToWrite = (int) Math.min(bufferSize, size - bytesWritten);
+ writeStream.write(buffer, 0, numberToWrite);
+ bytesWritten += numberToWrite;
+ bytesUnflushed += numberToWrite;
+ if (bytesUnflushed >= flushInterval) {
+ writeStream.flush();
+ bytesUnflushed = 0;
+ }
+ }
+ }
+
+ private static class TestResult {
+ final long timeTakenInMs;
+ final long totalNumberOfRequests;
+
+ TestResult(long timeTakenInMs, long totalNumberOfRequests) {
+ this.timeTakenInMs = timeTakenInMs;
+ this.totalNumberOfRequests = totalNumberOfRequests;
+ }
+ }
+
+ /**
+ * Writes the given number of bytes to the given file, flushing every
+ * flushInterval bytes, and measures the time taken and request count.
+ */
+ private static TestResult writeTestFile(NativeAzureFileSystem fs, Path path,
+ long size, long flushInterval) throws IOException {
+ AzureFileSystemInstrumentation instrumentation =
+ fs.getInstrumentation();
+ long initialRequests = instrumentation.getCurrentWebResponses();
+ Date start = new Date();
+ OutputStream output = fs.create(path);
+ writeTestFile(output, size, flushInterval);
+ output.close();
+ long finalRequests = instrumentation.getCurrentWebResponses();
+ return new TestResult(new Date().getTime() - start.getTime(),
+ finalRequests - initialRequests);
+ }
+
+ /**
+ * Writes the given number of bytes to a block blob, flushing every
+ * flushInterval bytes, and measures the performance of the upload.
+ */
+ private static TestResult writeBlockBlobTestFile(NativeAzureFileSystem fs,
+ long size, long flushInterval) throws IOException {
+ return writeTestFile(fs, new Path("/blockBlob"), size, flushInterval);
+ }
+
+ /**
+ * Writes the given number of bytes to a page blob, flushing every
+ * flushInterval bytes, and measures the performance of the upload.
+ */
+ private static TestResult writePageBlobTestFile(NativeAzureFileSystem fs,
+ long size, long flushInterval) throws IOException {
+ Path testFile = AzureTestUtils.blobPathForTests(fs,
+ "writePageBlobTestFile");
+ return writeTestFile(fs,
+ testFile,
+ size, flushInterval);
+ }
+
+ /**
+ * Runs the benchmark over a small 10 KB file, flushing every 500 bytes.
+ */
+ @Test
+ public void testTenKbFileFrequentFlush() throws Exception {
+ testForSizeAndFlushInterval(getFileSystem(), 10 * 1000, 500);
+ }
+
+ /**
+ * Runs the benchmark for the given file size and flush frequency.
+ */
+ private static void testForSizeAndFlushInterval(NativeAzureFileSystem fs,
+ final long size, final long flushInterval) throws IOException {
+ for (int i = 0; i < 5; i++) {
+ TestResult pageBlobResults = writePageBlobTestFile(fs, size, flushInterval);
+ System.out.printf(
+ "Page blob upload took %d ms. Total number of requests: %d.\n",
+ pageBlobResults.timeTakenInMs, pageBlobResults.totalNumberOfRequests);
+ TestResult blockBlobResults = writeBlockBlobTestFile(fs, size, flushInterval);
+ System.out.printf(
+ "Block blob upload took %d ms. Total number of requests: %d.\n",
+ blockBlobResults.timeTakenInMs, blockBlobResults.totalNumberOfRequests);
+ }
+ }
+
+ /**
+ * Runs the benchmark for the given file size and flush frequency from the
+ * command line.
+ */
+ public static void main(String[] argv) throws Exception {
+ Configuration conf = new Configuration();
+ long size = 10 * 1000 * 1000;
+ long flushInterval = 2000;
+ if (argv.length > 0) {
+ size = Long.parseLong(argv[0]);
+ }
+ if (argv.length > 1) {
+ flushInterval = Long.parseLong(argv[1]);
+ }
+ testForSizeAndFlushInterval(
+ (NativeAzureFileSystem) FileSystem.get(conf),
+ size,
+ flushInterval);
+ }
+}
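
The main() entry point above takes the file size and the flush interval as
its two optional arguments; a minimal sketch of driving the benchmark
programmatically (assuming a wasb filesystem is configured as the default
FileSystem, with Azure credentials available):

  // ~1 MB file, flushing every 4 KB; the built-in defaults are
  // 10 MB and 2000 bytes respectively
  ITestBlobTypeSpeedDifference.main(new String[] {"1000000", "4096"});
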
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestBlockBlobInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestBlockBlobInputStream.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestBlockBlobInputStream.java
new file mode 100644
index 0000000..07a13df
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestBlockBlobInputStream.java
@@ -0,0 +1,874 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import java.io.EOFException;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.Random;
+import java.util.concurrent.Callable;
+
+import org.junit.FixMethodOrder;
+import org.junit.Test;
+import org.junit.runners.MethodSorters;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FSExceptionMessages;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.azure.integration.AbstractAzureScaleTest;
+import org.apache.hadoop.fs.azure.integration.AzureTestUtils;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.fs.contract.ContractTestUtils.NanoTimer;
+
+import static org.junit.Assume.assumeNotNull;
+
+import static org.apache.hadoop.test.LambdaTestUtils.*;
+
+/**
+ * Test semantics and performance of the original block blob input stream
+ * (KEY_INPUT_STREAM_VERSION=1) and the new
+ * <code>BlockBlobInputStream</code> (KEY_INPUT_STREAM_VERSION=2).
+ */
+@FixMethodOrder(MethodSorters.NAME_ASCENDING)
+public class ITestBlockBlobInputStream extends AbstractAzureScaleTest {
+ private static final Logger LOG = LoggerFactory.getLogger(
+ ITestBlockBlobInputStream.class);
+ private static final int KILOBYTE = 1024;
+ private static final int MEGABYTE = KILOBYTE * KILOBYTE;
+ private static final int TEST_FILE_SIZE = 6 * MEGABYTE;
+ private static final Path TEST_FILE_PATH = new Path(
+ "TestBlockBlobInputStream.txt");
+
+ private AzureBlobStorageTestAccount accountUsingInputStreamV1;
+ private AzureBlobStorageTestAccount accountUsingInputStreamV2;
+ private long testFileLength;
+
+ private FileStatus testFileStatus;
+ private Path hugefile;
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ Configuration conf = new Configuration();
+ conf.setInt(AzureNativeFileSystemStore.KEY_INPUT_STREAM_VERSION, 1);
+
+ accountUsingInputStreamV1 = AzureBlobStorageTestAccount.create(
+ "testblockblobinputstream",
+ EnumSet.of(AzureBlobStorageTestAccount.CreateOptions.CreateContainer),
+ conf,
+ true);
+
+ accountUsingInputStreamV2 = AzureBlobStorageTestAccount.create(
+ "testblockblobinputstream",
+ EnumSet.noneOf(AzureBlobStorageTestAccount.CreateOptions.class),
+ null,
+ true);
+
+ assumeNotNull(accountUsingInputStreamV1);
+ assumeNotNull(accountUsingInputStreamV2);
+ hugefile = fs.makeQualified(TEST_FILE_PATH);
+ try {
+ testFileStatus = fs.getFileStatus(TEST_FILE_PATH);
+ testFileLength = testFileStatus.getLen();
+ } catch (FileNotFoundException e) {
+ // file doesn't exist
+ testFileLength = 0;
+ }
+ }
+
+ @Override
+ protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+ Configuration conf = new Configuration();
+ conf.setInt(AzureNativeFileSystemStore.KEY_INPUT_STREAM_VERSION, 1);
+
+ accountUsingInputStreamV1 = AzureBlobStorageTestAccount.create(
+ "testblockblobinputstream",
+ EnumSet.of(AzureBlobStorageTestAccount.CreateOptions.CreateContainer),
+ conf,
+ true);
+
+ accountUsingInputStreamV2 = AzureBlobStorageTestAccount.create(
+ "testblockblobinputstream",
+ EnumSet.noneOf(AzureBlobStorageTestAccount.CreateOptions.class),
+ null,
+ true);
+
+ assumeNotNull(accountUsingInputStreamV1);
+ assumeNotNull(accountUsingInputStreamV2);
+ return accountUsingInputStreamV1;
+ }
+
+ /**
+ * Create a test file by repeating the characters in the alphabet.
+ * @throws IOException
+ */
+ private void createTestFileAndSetLength() throws IOException {
+ FileSystem fs = accountUsingInputStreamV1.getFileSystem();
+
+ // To reduce test run time, the test file can be reused.
+ if (fs.exists(TEST_FILE_PATH)) {
+ testFileStatus = fs.getFileStatus(TEST_FILE_PATH);
+ testFileLength = testFileStatus.getLen();
+ LOG.info("Reusing test file: {}", testFileStatus);
+ return;
+ }
+
+ int sizeOfAlphabet = ('z' - 'a' + 1);
+ byte[] buffer = new byte[sizeOfAlphabet * KILOBYTE];
+ char character = 'a';
+ for (int i = 0; i < buffer.length; i++) {
+ buffer[i] = (byte) character;
+ character = (character == 'z') ? 'a' : (char) ((int) character + 1);
+ }
+
+ LOG.info("Creating test file {} of size: {}", TEST_FILE_PATH,
+ TEST_FILE_SIZE);
+ ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer();
+
+ try(FSDataOutputStream outputStream = fs.create(TEST_FILE_PATH)) {
+ int bytesWritten = 0;
+ while (bytesWritten < TEST_FILE_SIZE) {
+ outputStream.write(buffer);
+ bytesWritten += buffer.length;
+ }
+ LOG.info("Closing stream {}", outputStream);
+ ContractTestUtils.NanoTimer closeTimer
+ = new ContractTestUtils.NanoTimer();
+ outputStream.close();
+ closeTimer.end("time to close() output stream");
+ }
+ timer.end("time to write %d KB", TEST_FILE_SIZE / 1024);
+ testFileLength = fs.getFileStatus(TEST_FILE_PATH).getLen();
+ }
+
+ void assumeHugeFileExists() throws IOException {
+ ContractTestUtils.assertPathExists(fs, "huge file not created", hugefile);
+ FileStatus status = fs.getFileStatus(hugefile);
+ ContractTestUtils.assertIsFile(hugefile, status);
+ assertTrue("File " + hugefile + " is empty", status.getLen() > 0);
+ }
+
+ /**
+ * Calculate megabits per second from the specified values for bytes and
+ * milliseconds.
+ * @param bytes The number of bytes.
+ * @param milliseconds The number of milliseconds.
+ * @return The number of megabits per second.
+ */
+ private static double toMbps(long bytes, long milliseconds) {
+ return bytes / 1000.0 * 8 / milliseconds;
+ }
+
+ @Test
+ public void test_0100_CreateHugeFile() throws IOException {
+ createTestFileAndSetLength();
+ }
+
+ @Test
+ public void test_0200_BasicReadTest() throws Exception {
+ assumeHugeFileExists();
+
+ try (
+ FSDataInputStream inputStreamV1
+ = accountUsingInputStreamV1.getFileSystem().open(TEST_FILE_PATH);
+
+ FSDataInputStream inputStreamV2
+ = accountUsingInputStreamV2.getFileSystem().open(TEST_FILE_PATH);
+ ) {
+ byte[] bufferV1 = new byte[3 * MEGABYTE];
+ byte[] bufferV2 = new byte[bufferV1.length];
+
+ // v1 forward seek and read a kilobyte into first kilobyte of bufferV1
+ inputStreamV1.seek(5 * MEGABYTE);
+ int numBytesReadV1 = inputStreamV1.read(bufferV1, 0, KILOBYTE);
+ assertEquals(KILOBYTE, numBytesReadV1);
+
+ // v2 forward seek and read a kilobyte into first kilobyte of bufferV2
+ inputStreamV2.seek(5 * MEGABYTE);
+ int numBytesReadV2 = inputStreamV2.read(bufferV2, 0, KILOBYTE);
+ assertEquals(KILOBYTE, numBytesReadV2);
+
+ assertArrayEquals(bufferV1, bufferV2);
+
+ int len = MEGABYTE;
+ int offset = bufferV1.length - len;
+
+ // v1 reverse seek and read a megabyte into last megabyte of bufferV1
+ inputStreamV1.seek(3 * MEGABYTE);
+ numBytesReadV1 = inputStreamV1.read(bufferV1, offset, len);
+ assertEquals(len, numBytesReadV1);
+
+ // v2 reverse seek and read a megabyte into last megabyte of bufferV2
+ inputStreamV2.seek(3 * MEGABYTE);
+ numBytesReadV2 = inputStreamV2.read(bufferV2, offset, len);
+ assertEquals(len, numBytesReadV2);
+
+ assertArrayEquals(bufferV1, bufferV2);
+ }
+ }
+
+ @Test
+ public void test_0201_RandomReadTest() throws Exception {
+ assumeHugeFileExists();
+
+ try (
+ FSDataInputStream inputStreamV1
+ = accountUsingInputStreamV1.getFileSystem().open(TEST_FILE_PATH);
+
+ FSDataInputStream inputStreamV2
+ = accountUsingInputStreamV2.getFileSystem().open(TEST_FILE_PATH);
+ ) {
+ final int bufferSize = 4 * KILOBYTE;
+ byte[] bufferV1 = new byte[bufferSize];
+ byte[] bufferV2 = new byte[bufferV1.length];
+
+ verifyConsistentReads(inputStreamV1, inputStreamV2, bufferV1, bufferV2);
+
+ inputStreamV1.seek(0);
+ inputStreamV2.seek(0);
+
+ verifyConsistentReads(inputStreamV1, inputStreamV2, bufferV1, bufferV2);
+
+ verifyConsistentReads(inputStreamV1, inputStreamV2, bufferV1, bufferV2);
+
+ int seekPosition = 2 * KILOBYTE;
+ inputStreamV1.seek(seekPosition);
+ inputStreamV2.seek(seekPosition);
+
+ inputStreamV1.seek(0);
+ inputStreamV2.seek(0);
+
+ verifyConsistentReads(inputStreamV1, inputStreamV2, bufferV1, bufferV2);
+
+ verifyConsistentReads(inputStreamV1, inputStreamV2, bufferV1, bufferV2);
+
+ verifyConsistentReads(inputStreamV1, inputStreamV2, bufferV1, bufferV2);
+
+ seekPosition = 5 * KILOBYTE;
+ inputStreamV1.seek(seekPosition);
+ inputStreamV2.seek(seekPosition);
+
+ verifyConsistentReads(inputStreamV1, inputStreamV2, bufferV1, bufferV2);
+
+ seekPosition = 10 * KILOBYTE;
+ inputStreamV1.seek(seekPosition);
+ inputStreamV2.seek(seekPosition);
+
+ verifyConsistentReads(inputStreamV1, inputStreamV2, bufferV1, bufferV2);
+
+ verifyConsistentReads(inputStreamV1, inputStreamV2, bufferV1, bufferV2);
+
+ seekPosition = 4100 * KILOBYTE;
+ inputStreamV1.seek(seekPosition);
+ inputStreamV2.seek(seekPosition);
+
+ verifyConsistentReads(inputStreamV1, inputStreamV2, bufferV1, bufferV2);
+ }
+ }
+
+ private void verifyConsistentReads(FSDataInputStream inputStreamV1,
+ FSDataInputStream inputStreamV2,
+ byte[] bufferV1,
+ byte[] bufferV2) throws IOException {
+ int size = bufferV1.length;
+ final int numBytesReadV1 = inputStreamV1.read(bufferV1, 0, size);
+ assertEquals("Bytes read from V1 stream", size, numBytesReadV1);
+
+ final int numBytesReadV2 = inputStreamV2.read(bufferV2, 0, size);
+ assertEquals("Bytes read from V2 stream", size, numBytesReadV2);
+
+ assertArrayEquals("Mismatch in read data", bufferV1, bufferV2);
+ }
+
+ /**
+ * Validates the implementation of InputStream.markSupported.
+ * @throws IOException
+ */
+ @Test
+ public void test_0301_MarkSupportedV1() throws IOException {
+ validateMarkSupported(accountUsingInputStreamV1.getFileSystem());
+ }
+
+ /**
+ * Validates the implementation of InputStream.markSupported.
+ * @throws IOException
+ */
+ @Test
+ public void test_0302_MarkSupportedV2() throws IOException {
+ validateMarkSupported(accountUsingInputStreamV2.getFileSystem());
+ }
+
+ private void validateMarkSupported(FileSystem fs) throws IOException {
+ assumeHugeFileExists();
+ try (FSDataInputStream inputStream = fs.open(TEST_FILE_PATH)) {
+ assertTrue("mark is not supported", inputStream.markSupported());
+ }
+ }
+
+ /**
+ * Validates the implementation of InputStream.mark and reset
+ * for version 1 of the block blob input stream.
+ * @throws Exception
+ */
+ @Test
+ public void test_0303_MarkAndResetV1() throws Exception {
+ validateMarkAndReset(accountUsingInputStreamV1.getFileSystem());
+ }
+
+ /**
+ * Validates the implementation of InputStream.mark and reset
+ * for version 2 of the block blob input stream.
+ * @throws Exception
+ */
+ @Test
+ public void test_0304_MarkAndResetV2() throws Exception {
+ validateMarkAndReset(accountUsingInputStreamV2.getFileSystem());
+ }
+
+ private void validateMarkAndReset(FileSystem fs) throws Exception {
+ assumeHugeFileExists();
+ try (FSDataInputStream inputStream = fs.open(TEST_FILE_PATH)) {
+ inputStream.mark(KILOBYTE - 1);
+
+ byte[] buffer = new byte[KILOBYTE];
+ int bytesRead = inputStream.read(buffer);
+ assertEquals(buffer.length, bytesRead);
+
+ inputStream.reset();
+ assertEquals("rest -> pos 0", 0, inputStream.getPos());
+
+ inputStream.mark(8 * KILOBYTE - 1);
+
+ buffer = new byte[8 * KILOBYTE];
+ bytesRead = inputStream.read(buffer);
+ assertEquals(buffer.length, bytesRead);
+
+ intercept(IOException.class,
+ "Resetting to invalid mark",
+ new Callable<FSDataInputStream>() {
+ @Override
+ public FSDataInputStream call() throws Exception {
+ inputStream.reset();
+ return inputStream;
+ }
+ }
+ );
+ }
+ }
+
+ /**
+ * Validates the implementation of Seekable.seekToNewSource, which should
+ * return false for version 1 of the block blob input stream.
+ * @throws IOException
+ */
+ @Test
+ public void test_0305_SeekToNewSourceV1() throws IOException {
+ validateSeekToNewSource(accountUsingInputStreamV1.getFileSystem());
+ }
+
+ /**
+ * Validates the implementation of Seekable.seekToNewSource, which should
+ * return false for version 2 of the block blob input stream.
+ * @throws IOException
+ */
+ @Test
+ public void test_0306_SeekToNewSourceV2() throws IOException {
+ validateSeekToNewSource(accountUsingInputStreamV2.getFileSystem());
+ }
+
+ private void validateSeekToNewSource(FileSystem fs) throws IOException {
+ assumeHugeFileExists();
+ try (FSDataInputStream inputStream = fs.open(TEST_FILE_PATH)) {
+ assertFalse(inputStream.seekToNewSource(0));
+ }
+ }
+
+ /**
+ * Validates the implementation of InputStream.skip and ensures there is no
+ * network I/O for version 1 of the block blob input stream.
+ * @throws Exception
+ */
+ @Test
+ public void test_0307_SkipBoundsV1() throws Exception {
+ validateSkipBounds(accountUsingInputStreamV1.getFileSystem());
+ }
+
+ /**
+ * Validates the implementation of InputStream.skip and ensures there is no
+ * network I/O for version 2 of the block blob input stream.
+ * @throws Exception
+ */
+ @Test
+ public void test_0308_SkipBoundsV2() throws Exception {
+ validateSkipBounds(accountUsingInputStreamV2.getFileSystem());
+ }
+
+ private void validateSkipBounds(FileSystem fs) throws Exception {
+ assumeHugeFileExists();
+ try (FSDataInputStream inputStream = fs.open(TEST_FILE_PATH)) {
+ NanoTimer timer = new NanoTimer();
+
+ long skipped = inputStream.skip(-1);
+ assertEquals(0, skipped);
+
+ skipped = inputStream.skip(0);
+ assertEquals(0, skipped);
+
+ assertTrue(testFileLength > 0);
+
+ skipped = inputStream.skip(testFileLength);
+ assertEquals(testFileLength, skipped);
+
+ intercept(EOFException.class,
+ new Callable<Long>() {
+ @Override
+ public Long call() throws Exception {
+ return inputStream.skip(1);
+ }
+ }
+ );
+ long elapsedTimeMs = timer.elapsedTimeMs();
+ assertTrue(
+ String.format(
+ "There should not be any network I/O (elapsedTimeMs=%1$d).",
+ elapsedTimeMs),
+ elapsedTimeMs < 20);
+ }
+ }
+
+ /**
+ * Validates the implementation of Seekable.seek and ensures there is no
+ * network I/O for forward seek.
+ * @throws Exception
+ */
+ @Test
+ public void test_0309_SeekBoundsV1() throws Exception {
+ validateSeekBounds(accountUsingInputStreamV1.getFileSystem());
+ }
+
+ /**
+ * Validates the implementation of Seekable.seek and ensures there is no
+ * network I/O for forward seek.
+ * @throws Exception
+ */
+ @Test
+ public void test_0310_SeekBoundsV2() throws Exception {
+ validateSeekBounds(accountUsingInputStreamV2.getFileSystem());
+ }
+
+ private void validateSeekBounds(FileSystem fs) throws Exception {
+ assumeHugeFileExists();
+ try (
+ FSDataInputStream inputStream = fs.open(TEST_FILE_PATH);
+ ) {
+ NanoTimer timer = new NanoTimer();
+
+ inputStream.seek(0);
+ assertEquals(0, inputStream.getPos());
+
+ intercept(EOFException.class,
+ FSExceptionMessages.NEGATIVE_SEEK,
+ new Callable<FSDataInputStream>() {
+ @Override
+ public FSDataInputStream call() throws Exception {
+ inputStream.seek(-1);
+ return inputStream;
+ }
+ }
+ );
+
+ assertTrue("Test file length only " + testFileLength, testFileLength > 0);
+ inputStream.seek(testFileLength);
+ assertEquals(testFileLength, inputStream.getPos());
+
+ intercept(EOFException.class,
+ FSExceptionMessages.CANNOT_SEEK_PAST_EOF,
+ new Callable<FSDataInputStream>() {
+ @Override
+ public FSDataInputStream call() throws Exception {
+ inputStream.seek(testFileLength + 1);
+ return inputStream;
+ }
+ }
+ );
+
+ long elapsedTimeMs = timer.elapsedTimeMs();
+ assertTrue(
+ String.format(
+ "There should not be any network I/O (elapsedTimeMs=%1$d).",
+ elapsedTimeMs),
+ elapsedTimeMs < 20);
+ }
+ }
+
+ /**
+ * Validates the implementation of Seekable.seek, Seekable.getPos,
+ * and InputStream.available.
+ * @throws Exception
+ */
+ @Test
+ public void test_0311_SeekAndAvailableAndPositionV1() throws Exception {
+ validateSeekAndAvailableAndPosition(
+ accountUsingInputStreamV1.getFileSystem());
+ }
+
+ /**
+ * Validates the implementation of Seekable.seek, Seekable.getPos,
+ * and InputStream.available.
+ * @throws Exception
+ */
+ @Test
+ public void test_0312_SeekAndAvailableAndPositionV2() throws Exception {
+ validateSeekAndAvailableAndPosition(
+ accountUsingInputStreamV2.getFileSystem());
+ }
+
+ private void validateSeekAndAvailableAndPosition(FileSystem fs)
+ throws Exception {
+ assumeHugeFileExists();
+ try (FSDataInputStream inputStream = fs.open(TEST_FILE_PATH)) {
+ byte[] expected1 = {(byte) 'a', (byte) 'b', (byte) 'c'};
+ byte[] expected2 = {(byte) 'd', (byte) 'e', (byte) 'f'};
+ byte[] expected3 = {(byte) 'b', (byte) 'c', (byte) 'd'};
+ byte[] expected4 = {(byte) 'g', (byte) 'h', (byte) 'i'};
+ byte[] buffer = new byte[3];
+
+ int bytesRead = inputStream.read(buffer);
+ assertEquals(buffer.length, bytesRead);
+ assertArrayEquals(expected1, buffer);
+ assertEquals(buffer.length, inputStream.getPos());
+ assertEquals(testFileLength - inputStream.getPos(),
+ inputStream.available());
+
+ bytesRead = inputStream.read(buffer);
+ assertEquals(buffer.length, bytesRead);
+ assertArrayEquals(expected2, buffer);
+ assertEquals(2 * buffer.length, inputStream.getPos());
+ assertEquals(testFileLength - inputStream.getPos(),
+ inputStream.available());
+
+ // reverse seek
+ int seekPos = 0;
+ inputStream.seek(seekPos);
+
+ bytesRead = inputStream.read(buffer);
+ assertEquals(buffer.length, bytesRead);
+ assertArrayEquals(expected1, buffer);
+ assertEquals(buffer.length + seekPos, inputStream.getPos());
+ assertEquals(testFileLength - inputStream.getPos(),
+ inputStream.available());
+
+ // reverse seek
+ seekPos = 1;
+ inputStream.seek(seekPos);
+
+ bytesRead = inputStream.read(buffer);
+ assertEquals(buffer.length, bytesRead);
+ assertArrayEquals(expected3, buffer);
+ assertEquals(buffer.length + seekPos, inputStream.getPos());
+ assertEquals(testFileLength - inputStream.getPos(),
+ inputStream.available());
+
+ // forward seek
+ seekPos = 6;
+ inputStream.seek(seekPos);
+
+ bytesRead = inputStream.read(buffer);
+ assertEquals(buffer.length, bytesRead);
+ assertArrayEquals(expected4, buffer);
+ assertEquals(buffer.length + seekPos, inputStream.getPos());
+ assertEquals(testFileLength - inputStream.getPos(),
+ inputStream.available());
+ }
+ }
+
+ /**
+ * Validates the implementation of InputStream.skip, Seekable.getPos,
+ * and InputStream.available.
+ * @throws IOException
+ */
+ @Test
+ public void test_0313_SkipAndAvailableAndPositionV1() throws IOException {
+ validateSkipAndAvailableAndPosition(
+ accountUsingInputStreamV1.getFileSystem());
+ }
+
+ /**
+ * Validates the implementation of InputStream.skip, Seekable.getPos,
+ * and InputStream.available.
+ * @throws IOException
+ */
+ @Test
+ public void test_0314_SkipAndAvailableAndPositionV2() throws IOException {
+ validateSkipAndAvailableAndPosition(
+ accountUsingInputStreamV2.getFileSystem());
+ }
+
+ private void validateSkipAndAvailableAndPosition(FileSystem fs)
+ throws IOException {
+ assumeHugeFileExists();
+ try (
+ FSDataInputStream inputStream = fs.open(TEST_FILE_PATH);
+ ) {
+ byte[] expected1 = {(byte) 'a', (byte) 'b', (byte) 'c'};
+ byte[] expected2 = {(byte) 'd', (byte) 'e', (byte) 'f'};
+ byte[] expected3 = {(byte) 'b', (byte) 'c', (byte) 'd'};
+ byte[] expected4 = {(byte) 'g', (byte) 'h', (byte) 'i'};
+
+ assertEquals(testFileLength, inputStream.available());
+ assertEquals(0, inputStream.getPos());
+
+ int n = 3;
+ long skipped = inputStream.skip(n);
+
+ assertEquals(skipped, inputStream.getPos());
+ assertEquals(testFileLength - inputStream.getPos(),
+ inputStream.available());
+ assertEquals(skipped, n);
+
+ byte[] buffer = new byte[3];
+ int bytesRead = inputStream.read(buffer);
+ assertEquals(buffer.length, bytesRead);
+ assertArrayEquals(expected2, buffer);
+ assertEquals(buffer.length + skipped, inputStream.getPos());
+ assertEquals(testFileLength - inputStream.getPos(),
+ inputStream.available());
+
+ // does skip still work after seek?
+ int seekPos = 1;
+ inputStream.seek(seekPos);
+
+ bytesRead = inputStream.read(buffer);
+ assertEquals(buffer.length, bytesRead);
+ assertArrayEquals(expected3, buffer);
+ assertEquals(buffer.length + seekPos, inputStream.getPos());
+ assertEquals(testFileLength - inputStream.getPos(),
+ inputStream.available());
+
+ long currentPosition = inputStream.getPos();
+ n = 2;
+ skipped = inputStream.skip(n);
+
+ assertEquals(currentPosition + skipped, inputStream.getPos());
+ assertEquals(testFileLength - inputStream.getPos(),
+ inputStream.available());
+ assertEquals(skipped, n);
+
+ bytesRead = inputStream.read(buffer);
+ assertEquals(buffer.length, bytesRead);
+ assertArrayEquals(expected4, buffer);
+ assertEquals(buffer.length + skipped + currentPosition,
+ inputStream.getPos());
+ assertEquals(testFileLength - inputStream.getPos(),
+ inputStream.available());
+ }
+ }
+
+ /**
+ * Ensures parity in the performance of sequential read for
+ * version 1 and version 2 of the block blob input stream.
+ * @throws IOException
+ */
+ @Test
+ public void test_0315_SequentialReadPerformance() throws IOException {
+ assumeHugeFileExists();
+ final int maxAttempts = 10;
+ final double maxAcceptableRatio = 1.01;
+ double v1ElapsedMs = 0, v2ElapsedMs = 0;
+ double ratio = Double.MAX_VALUE;
+ for (int i = 0; i < maxAttempts && ratio >= maxAcceptableRatio; i++) {
+ v1ElapsedMs = sequentialRead(1,
+ accountUsingInputStreamV1.getFileSystem(), false);
+ v2ElapsedMs = sequentialRead(2,
+ accountUsingInputStreamV2.getFileSystem(), false);
+ ratio = v2ElapsedMs / v1ElapsedMs;
+ LOG.info(String.format(
+ "v1ElapsedMs=%1$d, v2ElapsedMs=%2$d, ratio=%3$.2f",
+ (long) v1ElapsedMs,
+ (long) v2ElapsedMs,
+ ratio));
+ }
+ assertTrue(String.format(
+ "Performance of version 2 is not acceptable: v1ElapsedMs=%1$d,"
+ + " v2ElapsedMs=%2$d, ratio=%3$.2f",
+ (long) v1ElapsedMs,
+ (long) v2ElapsedMs,
+ ratio),
+ ratio < maxAcceptableRatio);
+ }
+
+ /**
+ * Ensures parity in the performance of sequential read after reverse seek for
+ * version 2 of the block blob input stream.
+ * @throws IOException
+ */
+ @Test
+ public void test_0316_SequentialReadAfterReverseSeekPerformanceV2()
+ throws IOException {
+ assumeHugeFileExists();
+ final int maxAttempts = 10;
+ final double maxAcceptableRatio = 1.01;
+ double beforeSeekElapsedMs = 0, afterSeekElapsedMs = 0;
+ double ratio = Double.MAX_VALUE;
+ for (int i = 0; i < maxAttempts && ratio >= maxAcceptableRatio; i++) {
+ beforeSeekElapsedMs = sequentialRead(2,
+ accountUsingInputStreamV2.getFileSystem(), false);
+ afterSeekElapsedMs = sequentialRead(2,
+ accountUsingInputStreamV2.getFileSystem(), true);
+ ratio = afterSeekElapsedMs / beforeSeekElapsedMs;
+ LOG.info(String.format(
+ "beforeSeekElapsedMs=%1$d, afterSeekElapsedMs=%2$d, ratio=%3$.2f",
+ (long) beforeSeekElapsedMs,
+ (long) afterSeekElapsedMs,
+ ratio));
+ }
+ assertTrue(String.format(
+ "Performance of version 2 after reverse seek is not acceptable:"
+ + " beforeSeekElapsedMs=%1$d, afterSeekElapsedMs=%2$d,"
+ + " ratio=%3$.2f",
+ (long) beforeSeekElapsedMs,
+ (long) afterSeekElapsedMs,
+ ratio),
+ ratio < maxAcceptableRatio);
+ }
+
+ private long sequentialRead(int version,
+ FileSystem fs,
+ boolean afterReverseSeek) throws IOException {
+ byte[] buffer = new byte[16 * KILOBYTE];
+ long totalBytesRead = 0;
+ long bytesRead = 0;
+
+ try(FSDataInputStream inputStream = fs.open(TEST_FILE_PATH)) {
+ if (afterReverseSeek) {
+ // Read a few megabytes forward first so that the seek(0) below is a
+ // genuine reverse seek. A plain while loop would never execute here,
+ // since bytesRead is still 0 on entry.
+ do {
+ bytesRead = inputStream.read(buffer);
+ totalBytesRead += bytesRead;
+ } while (bytesRead > 0 && totalBytesRead < 4 * MEGABYTE);
+ totalBytesRead = 0;
+ inputStream.seek(0);
+ }
+
+ NanoTimer timer = new NanoTimer();
+ while ((bytesRead = inputStream.read(buffer)) > 0) {
+ totalBytesRead += bytesRead;
+ }
+ long elapsedTimeMs = timer.elapsedTimeMs();
+
+ LOG.info(String.format(
+ "v%1$d: bytesRead=%2$d, elapsedMs=%3$d, Mbps=%4$.2f,"
+ + " afterReverseSeek=%5$s",
+ version,
+ totalBytesRead,
+ elapsedTimeMs,
+ toMbps(totalBytesRead, elapsedTimeMs),
+ afterReverseSeek));
+
+ assertEquals(testFileLength, totalBytesRead);
+ return elapsedTimeMs;
+ }
+ }
+
+ @Test
+ public void test_0317_RandomReadPerformance() throws IOException {
+ assumeHugeFileExists();
+ final int maxAttempts = 10;
+ final double maxAcceptableRatio = 0.10;
+ double v1ElapsedMs = 0, v2ElapsedMs = 0;
+ double ratio = Double.MAX_VALUE;
+ for (int i = 0; i < maxAttempts && ratio >= maxAcceptableRatio; i++) {
+ v1ElapsedMs = randomRead(1,
+ accountUsingInputStreamV1.getFileSystem());
+ v2ElapsedMs = randomRead(2,
+ accountUsingInputStreamV2.getFileSystem());
+ ratio = v2ElapsedMs / v1ElapsedMs;
+ LOG.info(String.format(
+ "v1ElapsedMs=%1$d, v2ElapsedMs=%2$d, ratio=%3$.2f",
+ (long) v1ElapsedMs,
+ (long) v2ElapsedMs,
+ ratio));
+ }
+ assertTrue(String.format(
+ "Performance of version 2 is not acceptable: v1ElapsedMs=%1$d,"
+ + " v2ElapsedMs=%2$d, ratio=%3$.2f",
+ (long) v1ElapsedMs,
+ (long) v2ElapsedMs,
+ ratio),
+ ratio < maxAcceptableRatio);
+ }
+
+ private long randomRead(int version, FileSystem fs) throws IOException {
+ assumeHugeFileExists();
+ final int minBytesToRead = 2 * MEGABYTE;
+ Random random = new Random();
+ byte[] buffer = new byte[8 * KILOBYTE];
+ long totalBytesRead = 0;
+ long bytesRead = 0;
+ try(FSDataInputStream inputStream = fs.open(TEST_FILE_PATH)) {
+ NanoTimer timer = new NanoTimer();
+
+ do {
+ bytesRead = inputStream.read(buffer);
+ totalBytesRead += bytesRead;
+ inputStream.seek(random.nextInt(
+ (int) (testFileLength - buffer.length)));
+ } while (bytesRead > 0 && totalBytesRead < minBytesToRead);
+
+ long elapsedTimeMs = timer.elapsedTimeMs();
+
+ LOG.info(String.format(
+ "v%1$d: totalBytesRead=%2$d, elapsedTimeMs=%3$d, Mbps=%4$.2f",
+ version,
+ totalBytesRead,
+ elapsedTimeMs,
+ toMbps(totalBytesRead, elapsedTimeMs)));
+
+ assertTrue(minBytesToRead <= totalBytesRead);
+
+ return elapsedTimeMs;
+ }
+ }
+
+ @Test
+ public void test_999_DeleteHugeFiles() throws IOException {
+ try {
+ NanoTimer timer = new NanoTimer();
+ NativeAzureFileSystem fs = getFileSystem();
+ fs.delete(TEST_FILE_PATH, false);
+ timer.end("time to delete %s", TEST_FILE_PATH);
+ } finally {
+ // clean up both test accounts
+ AzureTestUtils.cleanupTestAccount(accountUsingInputStreamV1);
+ AzureTestUtils.cleanupTestAccount(accountUsingInputStreamV2);
+ }
+ }
+
+}
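
For reference, the only switch between the two streams compared above is the
(package-scoped) KEY_INPUT_STREAM_VERSION setting; a minimal sketch, assuming
a hypothetical wasb:// URI and path:

  Configuration conf = new Configuration();
  // 1 = original stream, 2 = the new BlockBlobInputStream
  conf.setInt(AzureNativeFileSystemStore.KEY_INPUT_STREAM_VERSION, 2);
  FileSystem fs = FileSystem.get(
      URI.create("wasb://container@account.blob.core.windows.net"), conf);
  try (FSDataInputStream in = fs.open(new Path("/data.bin"))) {
    in.seek(5 * 1024 * 1024);      // forward seek: no network I/O expected
    in.read(new byte[4 * 1024]);   // data fetched lazily on read
  }
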
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestContainerChecks.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestContainerChecks.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestContainerChecks.java
new file mode 100644
index 0000000..cc3baf5
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestContainerChecks.java
@@ -0,0 +1,194 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import static org.junit.Assume.assumeNotNull;
+
+import java.io.FileNotFoundException;
+import java.util.EnumSet;
+import java.util.concurrent.Callable;
+
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.azure.AzureBlobStorageTestAccount.CreateOptions;
+import org.apache.hadoop.fs.azure.integration.AzureTestUtils;
+import org.apache.hadoop.test.LambdaTestUtils;
+
+import org.junit.After;
+import org.junit.Assume;
+import org.junit.Before;
+import org.junit.Test;
+
+import com.microsoft.azure.storage.blob.BlobOutputStream;
+import com.microsoft.azure.storage.blob.CloudBlobContainer;
+import com.microsoft.azure.storage.blob.CloudBlockBlob;
+
+/**
+ * Tests that WASB creates containers only if needed.
+ */
+public class ITestContainerChecks extends AbstractWasbTestWithTimeout {
+ private AzureBlobStorageTestAccount testAccount;
+ private boolean runningInSASMode = false;
+
+ @After
+ public void tearDown() throws Exception {
+ testAccount = AzureTestUtils.cleanup(testAccount);
+ }
+
+ @Before
+ public void setMode() {
+ runningInSASMode = AzureBlobStorageTestAccount.createTestConfiguration().
+ getBoolean(AzureNativeFileSystemStore.KEY_USE_SECURE_MODE, false);
+ }
+
+ @Test
+ public void testContainerExistAfterDoesNotExist() throws Exception {
+ testAccount = blobStorageTestAccount();
+ assumeNotNull(testAccount);
+ CloudBlobContainer container = testAccount.getRealContainer();
+ FileSystem fs = testAccount.getFileSystem();
+
+ // Starting off with the container not there
+ assertFalse(container.exists());
+
+ // A list shouldn't create the container and will set file system store
+ // state to DoesNotExist
+ try {
+ fs.listStatus(new Path("/"));
+ assertTrue("Should've thrown.", false);
+ } catch (FileNotFoundException ex) {
+ assertTrue("Unexpected exception: " + ex,
+ ex.getMessage().contains("does not exist."));
+ }
+ assertFalse(container.exists());
+
+ // Create a container outside of the WASB FileSystem
+ container.create();
+ // Add a file to the container outside of the WASB FileSystem
+ CloudBlockBlob blob = testAccount.getBlobReference("foo");
+ BlobOutputStream outputStream = blob.openOutputStream();
+ outputStream.write(new byte[10]);
+ outputStream.close();
+
+ // Make sure the file is visible
+ assertTrue(fs.exists(new Path("/foo")));
+ assertTrue(container.exists());
+ }
+
+ protected AzureBlobStorageTestAccount blobStorageTestAccount()
+ throws Exception {
+ return AzureBlobStorageTestAccount.create("",
+ EnumSet.noneOf(CreateOptions.class));
+ }
+
+ @Test
+ public void testContainerCreateAfterDoesNotExist() throws Exception {
+ testAccount = blobStorageTestAccount();
+ assumeNotNull(testAccount);
+ CloudBlobContainer container = testAccount.getRealContainer();
+ FileSystem fs = testAccount.getFileSystem();
+
+ // Starting off with the container not there
+ assertFalse(container.exists());
+
+ // A list shouldn't create the container and will set file system store
+ // state to DoesNotExist
+ try {
+ fs.listStatus(new Path("/"));
+ fail("Should've thrown.");
+ } catch (FileNotFoundException ex) {
+ assertTrue("Unexpected exception: " + ex,
+ ex.getMessage().contains("does not exist."));
+ }
+ assertFalse(container.exists());
+
+ // Create a container outside of the WASB FileSystem
+ container.create();
+
+ // Write should succeed
+ assertTrue(fs.createNewFile(new Path("/foo")));
+ assertTrue(container.exists());
+ }
+
+ @Test
+ public void testContainerCreateOnWrite() throws Exception {
+ testAccount = blobStorageTestAccount();
+ assumeNotNull(testAccount);
+ CloudBlobContainer container = testAccount.getRealContainer();
+ FileSystem fs = testAccount.getFileSystem();
+
+ // Starting off with the container not there
+ assertFalse(container.exists());
+
+ // A list shouldn't create the container.
+ try {
+ fs.listStatus(new Path("/"));
+ assertTrue("Should've thrown.", false);
+ } catch (FileNotFoundException ex) {
+ assertTrue("Unexpected exception: " + ex,
+ ex.getMessage().contains("does not exist."));
+ }
+ assertFalse(container.exists());
+
+ // Neither should a read.
+ Path foo = new Path("/testContainerCreateOnWrite-foo");
+ Path bar = new Path("/testContainerCreateOnWrite-bar");
+ LambdaTestUtils.intercept(FileNotFoundException.class,
+ new Callable<String>() {
+ @Override
+ public String call() throws Exception {
+ fs.open(foo).close();
+ return "Stream to " + foo;
+ }
+ }
+ );
+ assertFalse(container.exists());
+
+ // Neither should a rename
+ assertFalse(fs.rename(foo, bar));
+ assertFalse(container.exists());
+
+ // But a write should.
+ assertTrue(fs.createNewFile(foo));
+ assertTrue(container.exists());
+ }
+
+ @Test
+ public void testContainerChecksWithSas() throws Exception {
+
+ Assume.assumeFalse(runningInSASMode);
+ testAccount = AzureBlobStorageTestAccount.create("",
+ EnumSet.of(CreateOptions.UseSas));
+ assumeNotNull(testAccount);
+ CloudBlobContainer container = testAccount.getRealContainer();
+ FileSystem fs = testAccount.getFileSystem();
+
+ // The container shouldn't be there
+ assertFalse(container.exists());
+
+ // A write should just fail
+ try {
+ fs.createNewFile(new Path("/testContainerChecksWithSas-foo"));
+ assertFalse("Should've thrown.", true);
+ } catch (AzureException ex) {
+ }
+ assertFalse(container.exists());
+ }
+}
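
The contract these cases pin down: read-only operations (listStatus, open,
rename) never create the container, while the first write does. A condensed
sketch of that expectation, with hypothetical paths and the container
initially absent:

  fs.listStatus(new Path("/"));              // throws FileNotFoundException
  fs.rename(new Path("/a"), new Path("/b")); // returns false, container untouched
  fs.createNewFile(new Path("/a"));          // first write creates the container
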
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationExceptionHandling.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationExceptionHandling.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationExceptionHandling.java
new file mode 100644
index 0000000..a45dae4
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationExceptionHandling.java
@@ -0,0 +1,283 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import java.io.FileNotFoundException;
+
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.junit.After;
+import org.junit.Test;
+
+import static org.apache.hadoop.fs.azure.ExceptionHandlingTestHelper.*;
+
+/**
+ * Single threaded exception handling.
+ */
+public class ITestFileSystemOperationExceptionHandling
+ extends AbstractWasbTestBase {
+
+ private FSDataInputStream inputStream = null;
+
+ private Path testPath;
+ private Path testFolderPath;
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ testPath = path("testfile.dat");
+ testFolderPath = path("testfolder");
+ }
+
+ /**
+ * Helper method that creates an InputStream and then renames the
+ * underlying file, to validate exceptions for various scenarios.
+ */
+ private void setupInputStreamToTest(AzureBlobStorageTestAccount testAccount)
+ throws Exception {
+
+ FileSystem fs = testAccount.getFileSystem();
+
+ // Step 1: Create a file and write dummy data.
+ Path base = methodPath();
+ Path testFilePath1 = new Path(base, "test1.dat");
+ Path testFilePath2 = new Path(base, "test2.dat");
+ FSDataOutputStream outputStream = fs.create(testFilePath1);
+ String testString = "This is a test string";
+ outputStream.write(testString.getBytes());
+ outputStream.close();
+
+ // Step 2: Open a read stream on the file.
+ inputStream = fs.open(testFilePath1);
+
+ // Step 3: Rename the file
+ fs.rename(testFilePath1, testFilePath2);
+ }
+
+ /**
+ * Tests a basic single threaded read scenario for Page blobs.
+ */
+ @Test(expected=FileNotFoundException.class)
+ public void testSingleThreadedPageBlobReadScenario() throws Throwable {
+ AzureBlobStorageTestAccount testAccount = getPageBlobTestStorageAccount();
+ setupInputStreamToTest(testAccount);
+ byte[] readBuffer = new byte[512];
+ inputStream.read(readBuffer);
+ }
+
+ /**
+ * Tests a basic single threaded seek scenario for Page blobs.
+ */
+ @Test(expected=FileNotFoundException.class)
+ public void testSingleThreadedPageBlobSeekScenario() throws Throwable {
+ AzureBlobStorageTestAccount testAccount = getPageBlobTestStorageAccount();
+ setupInputStreamToTest(testAccount);
+ inputStream.seek(5);
+ }
+
+ /**
+ * Test a basic single thread seek scenario for Block blobs.
+ */
+ @Test(expected=FileNotFoundException.class)
+ public void testSingleThreadBlockBlobSeekScenario() throws Throwable {
+
+ AzureBlobStorageTestAccount testAccount = createTestAccount();
+ setupInputStreamToTest(testAccount);
+ inputStream.seek(5);
+ inputStream.read();
+ }
+
+ /**
+ * Tests a basic single threaded read scenario for Block blobs.
+ */
+ @Test(expected=FileNotFoundException.class)
+ public void testSingleThreadBlockBlobReadScenario() throws Throwable {
+ AzureBlobStorageTestAccount testAccount = createTestAccount();
+ setupInputStreamToTest(testAccount);
+ byte[] readBuffer = new byte[512];
+ inputStream.read(readBuffer);
+ }
+
+ /**
+ * Tests basic single threaded setPermission scenario.
+ */
+ @Test(expected = FileNotFoundException.class)
+ public void testSingleThreadedBlockBlobSetPermissionScenario() throws Throwable {
+
+ createEmptyFile(createTestAccount(), testPath);
+ fs.delete(testPath, true);
+ fs.setPermission(testPath,
+ new FsPermission(FsAction.EXECUTE, FsAction.READ, FsAction.READ));
+ }
+
+ /**
+ * Tests basic single threaded setPermission scenario.
+ */
+ @Test(expected = FileNotFoundException.class)
+ public void testSingleThreadedPageBlobSetPermissionScenario()
+ throws Throwable {
+ createEmptyFile(getPageBlobTestStorageAccount(), testPath);
+ fs.delete(testPath, true);
+ fs.setPermission(testPath,
+ new FsPermission(FsAction.EXECUTE, FsAction.READ, FsAction.READ));
+ }
+
+ /**
+ * Tests basic single threaded setOwner scenario.
+ */
+ @Test(expected = FileNotFoundException.class)
+ public void testSingleThreadedBlockBlobSetOwnerScenario() throws Throwable {
+
+ createEmptyFile(createTestAccount(), testPath);
+ fs.delete(testPath, true);
+ fs.setOwner(testPath, "testowner", "testgroup");
+ }
+
+ /**
+ * Tests basic single threaded setOwner scenario.
+ */
+ @Test(expected = FileNotFoundException.class)
+ public void testSingleThreadedPageBlobSetOwnerScenario() throws Throwable {
+ createEmptyFile(getPageBlobTestStorageAccount(),
+ testPath);
+ fs.delete(testPath, true);
+ fs.setOwner(testPath, "testowner", "testgroup");
+ }
+
+ /**
+ * Test basic single threaded listStatus scenario.
+ */
+ @Test(expected = FileNotFoundException.class)
+ public void testSingleThreadedBlockBlobListStatusScenario() throws Throwable {
+ createTestFolder(createTestAccount(),
+ testFolderPath);
+ fs.delete(testFolderPath, true);
+ fs.listStatus(testFolderPath);
+ }
+
+ /**
+ * Test basic single threaded listStatus scenario.
+ */
+ @Test(expected = FileNotFoundException.class)
+ public void testSingleThreadedPageBlobListStatusScenario() throws Throwable {
+ createTestFolder(getPageBlobTestStorageAccount(),
+ testFolderPath);
+ fs.delete(testFolderPath, true);
+ fs.listStatus(testFolderPath);
+ }
+
+ /**
+ * Test basic single threaded rename scenario.
+ */
+ @Test
+ public void testSingleThreadedBlockBlobRenameScenario() throws Throwable {
+
+ createEmptyFile(createTestAccount(),
+ testPath);
+ Path dstPath = new Path("dstFile.dat");
+ fs.delete(testPath, true);
+ boolean renameResult = fs.rename(testPath, dstPath);
+ assertFalse(renameResult);
+ }
+
+ /**
+ * Test basic single threaded rename scenario.
+ */
+ @Test
+ public void testSingleThreadedPageBlobRenameScenario() throws Throwable {
+
+ createEmptyFile(getPageBlobTestStorageAccount(),
+ testPath);
+ Path dstPath = new Path("dstFile.dat");
+ fs.delete(testPath, true);
+ boolean renameResult = fs.rename(testPath, dstPath);
+ assertFalse(renameResult);
+ }
+
+ /**
+ * Test basic single threaded delete scenario.
+ */
+ @Test
+ public void testSingleThreadedBlockBlobDeleteScenario() throws Throwable {
+
+ createEmptyFile(createTestAccount(),
+ testPath);
+ fs.delete(testPath, true);
+ boolean deleteResult = fs.delete(testPath, true);
+ assertFalse(deleteResult);
+ }
+
+ /**
+ * Test basic single threaded delete scenario.
+ */
+ @Test
+ public void testSingleThreadedPageBlobDeleteScenario() throws Throwable {
+
+ createEmptyFile(getPageBlobTestStorageAccount(),
+ testPath);
+ fs.delete(testPath, true);
+ boolean deleteResult = fs.delete(testPath, true);
+ assertFalse(deleteResult);
+ }
+
+ /**
+ * Test delete then open a file.
+ */
+ @Test(expected = FileNotFoundException.class)
+ public void testSingleThreadedBlockBlobOpenScenario() throws Throwable {
+
+ createEmptyFile(createTestAccount(),
+ testPath);
+ fs.delete(testPath, true);
+ inputStream = fs.open(testPath);
+ }
+
+ /**
+ * Test delete then open a file.
+ */
+ @Test(expected = FileNotFoundException.class)
+ public void testSingleThreadedPageBlobOpenScenario() throws Throwable {
+
+ createEmptyFile(getPageBlobTestStorageAccount(),
+ testPath);
+ fs.delete(testPath, true);
+ inputStream = fs.open(testPath);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ if (inputStream != null) {
+ inputStream.close();
+ }
+
+ ContractTestUtils.rm(fs, testPath, true, true);
+ super.tearDown();
+ }
+
+ @Override
+ protected AzureBlobStorageTestAccount createTestAccount()
+ throws Exception {
+ return AzureBlobStorageTestAccount.create();
+ }
+}
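
All of these cases follow the same delete-out-from-under pattern; a minimal
sketch of its shape (hypothetical path, helpers from
ExceptionHandlingTestHelper as statically imported above):

  Path p = new Path("/victim.dat");
  createEmptyFile(createTestAccount(), p);
  fs.delete(p, true);      // remove the blob behind the caller's back
  fs.open(p);              // metadata operations now raise FileNotFoundException,
                           // while rename/delete simply return false
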
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationExceptionMessage.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationExceptionMessage.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationExceptionMessage.java
new file mode 100644
index 0000000..6d5e72e
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationExceptionMessage.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import java.net.URI;
+import java.util.UUID;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.azure.integration.AzureTestUtils;
+import org.apache.hadoop.test.GenericTestUtils;
+
+import com.microsoft.azure.storage.CloudStorageAccount;
+import org.junit.Test;
+
+import static org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.NO_ACCESS_TO_CONTAINER_MSG;
+
+/**
+ * Test for error messages coming from SDK.
+ */
+public class ITestFileSystemOperationExceptionMessage
+ extends AbstractWasbTestWithTimeout {
+
+ @Test
+ public void testAnonymousCredentialExceptionMessage() throws Throwable {
+
+ Configuration conf = AzureBlobStorageTestAccount.createTestConfiguration();
+ CloudStorageAccount account =
+ AzureBlobStorageTestAccount.createTestAccount(conf);
+ AzureTestUtils.assume("No test account", account != null);
+
+ String testStorageAccount = conf.get("fs.azure.test.account.name");
+ conf = new Configuration();
+ conf.set("fs.AbstractFileSystem.wasb.impl",
+ "org.apache.hadoop.fs.azure.Wasb");
+ conf.set("fs.azure.skip.metrics", "true");
+
+ String testContainer = UUID.randomUUID().toString();
+ String wasbUri = String.format("wasb://%s@%s",
+ testContainer, testStorageAccount);
+
+ try (NativeAzureFileSystem filesystem = new NativeAzureFileSystem()) {
+ filesystem.initialize(new URI(wasbUri), conf);
+ fail("Expected an exception, got " + filesystem);
+ } catch (Exception ex) {
+
+ Throwable innerException = ex.getCause();
+ while (innerException != null
+ && !(innerException instanceof AzureException)) {
+ innerException = innerException.getCause();
+ }
+
+ if (innerException != null) {
+ GenericTestUtils.assertExceptionContains(String.format(
+ NO_ACCESS_TO_CONTAINER_MSG, testStorageAccount, testContainer),
+ ex);
+ } else {
+ fail("No inner azure exception");
+ }
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsExceptionHandlingMultiThreaded.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsExceptionHandlingMultiThreaded.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsExceptionHandlingMultiThreaded.java
new file mode 100644
index 0000000..175a9ec
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsExceptionHandlingMultiThreaded.java
@@ -0,0 +1,366 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import java.io.FileNotFoundException;
+
+import org.junit.Test;
+
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.io.IOUtils;
+
+import static org.apache.hadoop.fs.azure.ExceptionHandlingTestHelper.*;
+
+/**
+ * Multithreaded operations on FS, verify failures are as expected.
+ */
+public class ITestFileSystemOperationsExceptionHandlingMultiThreaded
+ extends AbstractWasbTestBase {
+
+ FSDataInputStream inputStream = null;
+
+ private Path testPath;
+ private Path testFolderPath;
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ testPath = path("testfile.dat");
+ testFolderPath = path("testfolder");
+ }
+
+ @Override
+ protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+ return AzureBlobStorageTestAccount.create();
+ }
+
+ @Override
+ public void tearDown() throws Exception {
+
+ IOUtils.closeStream(inputStream);
+ ContractTestUtils.rm(fs, testPath, true, false);
+ ContractTestUtils.rm(fs, testFolderPath, true, false);
+ super.tearDown();
+ }
+
+ /**
+ * Helper method to create an input stream for testing various scenarios.
+ */
+ private void getInputStreamToTest(FileSystem fs, Path testPath)
+ throws Throwable {
+
+ FSDataOutputStream outputStream = fs.create(testPath);
+ String testString = "This is a test string";
+ outputStream.write(testString.getBytes());
+ outputStream.close();
+
+ inputStream = fs.open(testPath);
+ }
+
+ /**
+ * Test to validate that the correct exception is thrown in the
+ * multithreaded read scenario for block blobs.
+ */
+ @Test(expected = FileNotFoundException.class)
+ public void testMultiThreadedBlockBlobReadScenario() throws Throwable {
+
+ AzureBlobStorageTestAccount testAccount = createTestAccount();
+ NativeAzureFileSystem fs = testAccount.getFileSystem();
+ Path base = methodPath();
+ Path testFilePath1 = new Path(base, "test1.dat");
+ Path renamePath = new Path(base, "test2.dat");
+ getInputStreamToTest(fs, testFilePath1);
+ Thread renameThread = new Thread(
+ new RenameThread(fs, testFilePath1, renamePath));
+ renameThread.start();
+
+ renameThread.join();
+
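+ // rename is complete: the source blob is gone, so this read is
+ // expected to raise FileNotFoundException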
+ byte[] readBuffer = new byte[512];
+ inputStream.read(readBuffer);
+ }
+
+ /**
+ * Test to validate that the correct exception is thrown in the
+ * multithreaded seek scenario for block blobs.
+ */
+ @Test(expected = FileNotFoundException.class)
+ public void testMultiThreadBlockBlobSeekScenario() throws Throwable {
+
+ Path base = methodPath();
+ Path testFilePath1 = new Path(base, "test1.dat");
+ Path renamePath = new Path(base, "test2.dat");
+
+ getInputStreamToTest(fs, testFilePath1);
+ Thread renameThread = new Thread(
+ new RenameThread(fs, testFilePath1, renamePath));
+ renameThread.start();
+
+ renameThread.join();
+
+ inputStream.seek(5);
+ inputStream.read();
+ }
+
+ /**
+ * Tests basic multi threaded setPermission scenario.
+ */
+ @Test(expected = FileNotFoundException.class)
+ public void testMultiThreadedPageBlobSetPermissionScenario()
+ throws Throwable {
+ createEmptyFile(
+ getPageBlobTestStorageAccount(),
+ testPath);
+ Thread t = new Thread(new DeleteThread(fs, testPath));
+ t.start();
+ while (t.isAlive()) {
+ fs.setPermission(testPath,
+ new FsPermission(FsAction.EXECUTE, FsAction.READ, FsAction.READ));
+ }
+ fs.setPermission(testPath,
+ new FsPermission(FsAction.EXECUTE, FsAction.READ, FsAction.READ));
+ }
+
+ /**
+ * Tests basic multi threaded setPermission scenario.
+ */
+ @Test(expected = FileNotFoundException.class)
+ public void testMultiThreadedBlockBlobSetPermissionScenario()
+ throws Throwable {
+ createEmptyFile(createTestAccount(),
+ testPath);
+ Thread t = new Thread(new DeleteThread(fs, testPath));
+ t.start();
+ while (t.isAlive()) {
+ fs.setPermission(testPath,
+ new FsPermission(FsAction.EXECUTE, FsAction.READ, FsAction.READ));
+ }
+ fs.setPermission(testPath,
+ new FsPermission(FsAction.EXECUTE, FsAction.READ, FsAction.READ));
+ }
+
+ /**
+ * Tests basic multi threaded open scenario for page blobs.
+ */
+ @Test(expected = FileNotFoundException.class)
+ public void testMultiThreadedPageBlobOpenScenario() throws Throwable {
+
+ createEmptyFile(getPageBlobTestStorageAccount(),
+ testPath);
+ Thread t = new Thread(new DeleteThread(fs, testPath));
+ t.start();
+ while (t.isAlive()) {
+ inputStream = fs.open(testPath);
+ inputStream.close();
+ }
+
+ inputStream = fs.open(testPath);
+ inputStream.close();
+ }
+
+ /**
+ * Tests basic multi threaded open scenario for block blobs.
+ */
+ @Test(expected = FileNotFoundException.class)
+ public void testMultiThreadedBlockBlobOpenScenario() throws Throwable {
+
+ createEmptyFile(createTestAccount(),
+ testPath);
+ Thread t = new Thread(new DeleteThread(fs, testPath));
+ t.start();
+
+ while (t.isAlive()) {
+ inputStream = fs.open(testPath);
+ inputStream.close();
+ }
+ inputStream = fs.open(testPath);
+ inputStream.close();
+ }
+
+ /**
+ * Tests basic multi threaded setOwner scenario.
+ */
+ @Test(expected = FileNotFoundException.class)
+ public void testMultiThreadedBlockBlobSetOwnerScenario() throws Throwable {
+
+ createEmptyFile(createTestAccount(), testPath);
+ Thread t = new Thread(new DeleteThread(fs, testPath));
+ t.start();
+ while (t.isAlive()) {
+ fs.setOwner(testPath, "testowner", "testgroup");
+ }
+ fs.setOwner(testPath, "testowner", "testgroup");
+ }
+
+ /**
+ * Tests basic multi threaded setOwner scenario.
+ */
+ @Test(expected = FileNotFoundException.class)
+ public void testMultiThreadedPageBlobSetOwnerScenario() throws Throwable {
+ createEmptyFile(
+ getPageBlobTestStorageAccount(),
+ testPath);
+ Thread t = new Thread(new DeleteThread(fs, testPath));
+ t.start();
+ while (t.isAlive()) {
+ fs.setOwner(testPath, "testowner", "testgroup");
+ }
+ fs.setOwner(testPath, "testowner", "testgroup");
+ }
+
+ /**
+ * Tests basic multi threaded listStatus scenario.
+ */
+ @Test(expected = FileNotFoundException.class)
+ public void testMultiThreadedBlockBlobListStatusScenario() throws Throwable {
+
+ createTestFolder(createTestAccount(),
+ testFolderPath);
+ Thread t = new Thread(new DeleteThread(fs, testFolderPath));
+ t.start();
+ while (t.isAlive()) {
+ fs.listStatus(testFolderPath);
+ }
+ fs.listStatus(testFolderPath);
+ }
+
+ /**
+ * Tests basic multi threaded listStatus scenario.
+ */
+ @Test(expected = FileNotFoundException.class)
+ public void testMultiThreadedPageBlobListStatusScenario() throws Throwable {
+
+ createTestFolder(
+ getPageBlobTestStorageAccount(),
+ testFolderPath);
+ Thread t = new Thread(new DeleteThread(fs, testFolderPath));
+ t.start();
+ while (t.isAlive()) {
+ fs.listStatus(testFolderPath);
+ }
+ fs.listStatus(testFolderPath);
+ }
+
+ /**
+ * Test to validate that the correct exception is thrown in the
+ * multithreaded read scenario for page blobs.
+ */
+ @Test(expected = FileNotFoundException.class)
+ public void testMultiThreadedPageBlobReadScenario() throws Throwable {
+
+ bindToTestAccount(getPageBlobTestStorageAccount());
+ Path base = methodPath();
+ Path testFilePath1 = new Path(base, "test1.dat");
+ Path renamePath = new Path(base, "test2.dat");
+
+ getInputStreamToTest(fs, testFilePath1);
+ Thread renameThread = new Thread(
+ new RenameThread(fs, testFilePath1, renamePath));
+ renameThread.start();
+
+ renameThread.join();
+ byte[] readBuffer = new byte[512];
+ inputStream.read(readBuffer);
+ }
+
+ /**
+ * Test to validate that the correct exception is thrown in the
+ * multithreaded seek scenario for page blobs.
+ */
+ @Test(expected = FileNotFoundException.class)
+ public void testMultiThreadedPageBlobSeekScenario() throws Throwable {
+
+ bindToTestAccount(getPageBlobTestStorageAccount());
+
+ Path base = methodPath();
+ Path testFilePath1 = new Path(base, "test1.dat");
+ Path renamePath = new Path(base, "test2.dat");
+
+ getInputStreamToTest(fs, testFilePath1);
+ Thread renameThread = new Thread(
+ new RenameThread(fs, testFilePath1, renamePath));
+ renameThread.start();
+
+ renameThread.join();
+ inputStream.seek(5);
+ }
+
+ /**
+ * Helper thread that just renames the test file.
+ */
+ private static class RenameThread implements Runnable {
+
+ private final FileSystem fs;
+ private final Path testPath;
+ private final Path renamePath;
+
+ RenameThread(FileSystem fs,
+ Path testPath,
+ Path renamePath) {
+ this.fs = fs;
+ this.testPath = testPath;
+ this.renamePath = renamePath;
+ }
+
+ @Override
+ public void run() {
+ try {
+ fs.rename(testPath, renamePath);
+ } catch (Exception e) {
+ // Swallowing the exception as the
+ // correctness of the test is controlled
+ // by the other thread
+ }
+ }
+ }
+
+ private static class DeleteThread implements Runnable {
+ private final FileSystem fs;
+ private final Path testPath;
+
+ DeleteThread(FileSystem fs, Path testPath) {
+ this.fs = fs;
+ this.testPath = testPath;
+ }
+
+ @Override
+ public void run() {
+ try {
+ fs.delete(testPath, true);
+ } catch (Exception e) {
+ // Swallowing the exception as the
+ // correctness of the test is controlled
+ // by the other thread
+ }
+ }
+ }
+}
---------------------------------------------------------------------
[10/20] hadoop git commit: HADOOP-14553. Add (parallelized)
integration tests to hadoop-azure Contributed by Steve Loughran
Posted by st...@apache.org.
HADOOP-14553. Add (parallelized) integration tests to hadoop-azure
Contributed by Steve Loughran
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2d2d97fa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2d2d97fa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2d2d97fa
Branch: refs/heads/trunk
Commit: 2d2d97fa7d4224369b3c13bc4a45e8cc9e29afb1
Parents: 11390c2
Author: Steve Loughran <st...@apache.org>
Authored: Fri Sep 15 17:03:01 2017 +0100
Committer: Steve Loughran <st...@apache.org>
Committed: Fri Sep 15 17:03:01 2017 +0100
----------------------------------------------------------------------
.../hadoop/fs/FileSystemContractBaseTest.java | 11 +-
.../fs/contract/AbstractContractOpenTest.java | 4 +-
.../fs/contract/AbstractContractSeekTest.java | 2 +-
hadoop-tools/hadoop-azure/pom.xml | 251 ++++++
.../fs/azure/AzureNativeFileSystemStore.java | 2 +-
.../hadoop-azure/src/site/markdown/index.md | 94 +-
.../src/site/markdown/testing_azure.md | 576 ++++++++++++
.../hadoop/fs/azure/AbstractWasbTestBase.java | 136 ++-
.../fs/azure/AbstractWasbTestWithTimeout.java | 73 ++
.../fs/azure/AzureBlobStorageTestAccount.java | 42 +-
.../azure/ITestAzureConcurrentOutOfBandIo.java | 179 ++++
...zureConcurrentOutOfBandIoWithSecureMode.java | 33 +
.../ITestAzureFileSystemErrorConditions.java | 243 +++++
.../fs/azure/ITestBlobDataValidation.java | 244 ++++++
.../fs/azure/ITestBlobTypeSpeedDifference.java | 163 ++++
.../fs/azure/ITestBlockBlobInputStream.java | 874 ++++++++++++++++++
.../hadoop/fs/azure/ITestContainerChecks.java | 194 ++++
...estFileSystemOperationExceptionHandling.java | 283 ++++++
...TestFileSystemOperationExceptionMessage.java | 79 ++
...perationsExceptionHandlingMultiThreaded.java | 366 ++++++++
.../ITestFileSystemOperationsWithThreads.java | 821 +++++++++++++++++
...stNativeAzureFSAuthWithBlobSpecificKeys.java | 40 +
.../ITestNativeAzureFSAuthorizationCaching.java | 53 ++
.../azure/ITestNativeAzureFSPageBlobLive.java | 43 +
.../azure/ITestNativeAzureFileSystemAppend.java | 350 ++++++++
...ativeAzureFileSystemAtomicRenameDirList.java | 55 ++
...veAzureFileSystemAuthorizationWithOwner.java | 122 +++
...ITestNativeAzureFileSystemClientLogging.java | 136 +++
...estNativeAzureFileSystemConcurrencyLive.java | 185 ++++
...stNativeAzureFileSystemContractEmulator.java | 65 ++
.../ITestNativeAzureFileSystemContractLive.java | 108 +++
...tiveAzureFileSystemContractPageBlobLive.java | 114 +++
.../azure/ITestNativeAzureFileSystemLive.java | 236 +++++
.../ITestOutOfBandAzureBlobOperationsLive.java | 185 ++++
.../ITestReadAndSeekPageBlobAfterWrite.java | 341 ++++++++
.../fs/azure/ITestWasbRemoteCallHelper.java | 568 ++++++++++++
.../fs/azure/ITestWasbUriAndConfiguration.java | 610 +++++++++++++
.../hadoop/fs/azure/MockWasbAuthorizerImpl.java | 103 ++-
.../fs/azure/NativeAzureFileSystemBaseTest.java | 115 ++-
.../hadoop/fs/azure/RunningLiveWasbTests.txt | 22 -
.../azure/TestAzureConcurrentOutOfBandIo.java | 195 -----
...zureConcurrentOutOfBandIoWithSecureMode.java | 50 --
.../TestAzureFileSystemErrorConditions.java | 244 ------
.../hadoop/fs/azure/TestBlobDataValidation.java | 237 -----
.../hadoop/fs/azure/TestBlobMetadata.java | 7 +-
.../fs/azure/TestBlobOperationDescriptor.java | 3 -
.../fs/azure/TestBlobTypeSpeedDifference.java | 160 ----
.../fs/azure/TestBlockBlobInputStream.java | 875 -------------------
.../fs/azure/TestClientThrottlingAnalyzer.java | 5 +-
.../hadoop/fs/azure/TestContainerChecks.java | 185 ----
...estFileSystemOperationExceptionHandling.java | 269 ------
...TestFileSystemOperationExceptionMessage.java | 79 --
...perationsExceptionHandlingMultiThreaded.java | 330 -------
.../TestFileSystemOperationsWithThreads.java | 821 -----------------
...stNativeAzureFSAuthWithBlobSpecificKeys.java | 44 -
.../TestNativeAzureFSAuthorizationCaching.java | 60 --
.../fs/azure/TestNativeAzureFSPageBlobLive.java | 43 -
.../azure/TestNativeAzureFileSystemAppend.java | 362 --------
...ativeAzureFileSystemAtomicRenameDirList.java | 50 --
.../TestNativeAzureFileSystemAuthorization.java | 53 +-
...veAzureFileSystemAuthorizationWithOwner.java | 122 ---
...TestNativeAzureFileSystemBlockLocations.java | 8 +-
.../TestNativeAzureFileSystemClientLogging.java | 140 ---
.../TestNativeAzureFileSystemConcurrency.java | 29 +-
...estNativeAzureFileSystemConcurrencyLive.java | 184 ----
...stNativeAzureFileSystemContractEmulator.java | 48 -
.../TestNativeAzureFileSystemContractLive.java | 80 --
...TestNativeAzureFileSystemContractMocked.java | 3 +
...tiveAzureFileSystemContractPageBlobLive.java | 93 --
.../TestNativeAzureFileSystemFileNameCheck.java | 28 +-
.../fs/azure/TestNativeAzureFileSystemLive.java | 242 -----
.../azure/TestNativeAzureFileSystemMocked.java | 4 +
.../TestNativeAzureFileSystemUploadLogic.java | 78 +-
.../azure/TestOutOfBandAzureBlobOperations.java | 8 +-
.../TestOutOfBandAzureBlobOperationsLive.java | 203 -----
.../TestReadAndSeekPageBlobAfterWrite.java | 355 --------
.../azure/TestShellDecryptionKeyProvider.java | 15 +-
.../apache/hadoop/fs/azure/TestWasbFsck.java | 9 +-
.../fs/azure/TestWasbRemoteCallHelper.java | 569 ------------
.../fs/azure/TestWasbUriAndConfiguration.java | 617 -------------
.../ITestAzureNativeContractAppend.java | 41 +
.../ITestAzureNativeContractCreate.java | 34 +
.../ITestAzureNativeContractDelete.java | 33 +
.../ITestAzureNativeContractDistCp.java | 47 +
.../ITestAzureNativeContractGetFileStatus.java | 35 +
.../contract/ITestAzureNativeContractMkdir.java | 33 +
.../contract/ITestAzureNativeContractOpen.java | 34 +
.../ITestAzureNativeContractRename.java | 34 +
.../contract/ITestAzureNativeContractSeek.java | 34 +
.../contract/NativeAzureFileSystemContract.java | 19 +-
.../contract/TestAzureNativeContractAppend.java | 37 -
.../contract/TestAzureNativeContractCreate.java | 30 -
.../contract/TestAzureNativeContractDelete.java | 30 -
.../contract/TestAzureNativeContractDistCp.java | 33 -
.../TestAzureNativeContractGetFileStatus.java | 30 -
.../contract/TestAzureNativeContractMkdir.java | 30 -
.../contract/TestAzureNativeContractOpen.java | 30 -
.../contract/TestAzureNativeContractRename.java | 30 -
.../contract/TestAzureNativeContractSeek.java | 30 -
.../integration/AbstractAzureScaleTest.java | 66 ++
.../azure/integration/AzureTestConstants.java | 180 ++++
.../fs/azure/integration/AzureTestUtils.java | 479 ++++++++++
.../integration/CleanupTestContainers.java | 87 ++
.../azure/integration/ITestAzureHugeFiles.java | 456 ++++++++++
.../hadoop/fs/azure/integration/Sizes.java | 43 +
.../ITestAzureFileSystemInstrumentation.java | 586 +++++++++++++
.../TestAzureFileSystemInstrumentation.java | 579 ------------
107 files changed, 10227 insertions(+), 7901 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
index b49dd53..a4ccee3 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
@@ -61,7 +61,16 @@ public abstract class FileSystemContractBaseTest {
protected byte[] data = dataset(getBlockSize() * 2, 0, 255);
@Rule
- public Timeout globalTimeout = new Timeout(30000);
+ public Timeout globalTimeout = new Timeout(getGlobalTimeout());
+
+ /**
+ * Get the timeout in milliseconds for each test case.
+ * @return a time in milliseconds.
+ */
+ protected int getGlobalTimeout() {
+ return 30 * 1000;
+ }
+
@Rule
public ExpectedException thrown = ExpectedException.none();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java
index f9b16f4..ccf188f 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java
@@ -122,7 +122,7 @@ public abstract class AbstractContractOpenTest extends AbstractFSContractTestBas
Path path = path("testopenfiletwice.txt");
byte[] block = dataset(TEST_FILE_LEN, 0, 255);
//this file now has a simple rule: offset => value
- createFile(getFileSystem(), path, false, block);
+ createFile(getFileSystem(), path, true, block);
//open first
FSDataInputStream instream1 = getFileSystem().open(path);
FSDataInputStream instream2 = null;
@@ -150,7 +150,7 @@ public abstract class AbstractContractOpenTest extends AbstractFSContractTestBas
int base = 0x40; // 64
byte[] block = dataset(len, base, base + len);
//this file now has a simple rule: offset => (value | 0x40)
- createFile(getFileSystem(), path, false, block);
+ createFile(getFileSystem(), path, true, block);
//open first
instream = getFileSystem().open(path);
assertEquals(base, instream.read());
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java
index 3e71682..7af3cb0 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java
@@ -341,7 +341,7 @@ public abstract class AbstractContractSeekTest extends AbstractFSContractTestBas
int filesize = 10 * 1024;
byte[] buf = dataset(filesize, 0, 255);
Path randomSeekFile = path("testrandomseeks.bin");
- createFile(getFileSystem(), randomSeekFile, false, buf);
+ createFile(getFileSystem(), randomSeekFile, true, buf);
Random r = new Random();
// Record the sequence of seeks and reads which trigger a failure.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/pom.xml b/hadoop-tools/hadoop-azure/pom.xml
index 0c5ac63..b479872 100644
--- a/hadoop-tools/hadoop-azure/pom.xml
+++ b/hadoop-tools/hadoop-azure/pom.xml
@@ -34,6 +34,15 @@
<properties>
<file.encoding>UTF-8</file.encoding>
<downloadSources>true</downloadSources>
+ <hadoop.tmp.dir>${project.build.directory}/test</hadoop.tmp.dir>
+ <!-- are scale tests enabled ? -->
+ <fs.azure.scale.test.enabled>unset</fs.azure.scale.test.enabled>
+ <!-- Size in MB of huge files. -->
+ <fs.azure.scale.test.huge.filesize>unset</fs.azure.scale.test.huge.filesize>
+ <!-- Size in MB of the partitions in huge file uploads. -->
+ <fs.azure.scale.test.huge.partitionsize>unset</fs.azure.scale.test.huge.partitionsize>
+ <!-- Timeout in seconds for scale tests.-->
+ <fs.azure.scale.test.timeout>7200</fs.azure.scale.test.timeout>
</properties>
<build>
@@ -224,4 +233,246 @@
</dependency>
</dependencies>
+
+ <profiles>
+ <profile>
+ <id>parallel-tests</id>
+ <activation>
+ <property>
+ <name>parallel-tests</name>
+ </property>
+ </activation>
+ <build>
+ <plugins>
+ <plugin>
+ <artifactId>maven-antrun-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>create-parallel-tests-dirs</id>
+ <phase>test-compile</phase>
+ <configuration>
+ <target>
+ <script language="javascript"><![CDATA[
+ var baseDirs = [
+ project.getProperty("test.build.data"),
+ project.getProperty("test.build.dir"),
+ project.getProperty("hadoop.tmp.dir")
+ ];
+ for (var i in baseDirs) {
+ for (var j = 1; j <= ${testsThreadCount}; ++j) {
+ var mkdir = project.createTask("mkdir");
+ mkdir.setDir(new java.io.File(baseDirs[i], j));
+ mkdir.perform();
+ }
+ }
+ ]]></script>
+ </target>
+ </configuration>
+ <goals>
+ <goal>run</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-surefire-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>default-test</id>
+ <goals>
+ <goal>test</goal>
+ </goals>
+ <configuration>
+ <forkCount>${testsThreadCount}</forkCount>
+ <reuseForks>false</reuseForks>
+ <argLine>${maven-surefire-plugin.argLine} -DminiClusterDedicatedDirs=true</argLine>
+ <forkedProcessTimeoutInSeconds>${fs.azure.scale.test.timeout}</forkedProcessTimeoutInSeconds>
+ <systemPropertyVariables>
+ <test.build.data>${test.build.data}/${surefire.forkNumber}</test.build.data>
+ <test.build.dir>${test.build.dir}/${surefire.forkNumber}</test.build.dir>
+ <hadoop.tmp.dir>${hadoop.tmp.dir}/${surefire.forkNumber}</hadoop.tmp.dir>
+ <test.unique.fork.id>fork-${surefire.forkNumber}</test.unique.fork.id>
+ <fs.azure.scale.test.enabled>${fs.azure.scale.test.enabled}</fs.azure.scale.test.enabled>
+ <fs.azure.scale.test.huge.filesize>${fs.azure.scale.test.huge.filesize}</fs.azure.scale.test.huge.filesize>
+ <fs.azure.scale.test.huge.partitionsize>${fs.azure.scale.test.huge.partitionsize}</fs.azure.scale.test.huge.partitionsize>
+ <fs.azure.scale.test.timeout>${fs.azure.scale.test.timeout}</fs.azure.scale.test.timeout>
+ </systemPropertyVariables>
+ <includes>
+ <include>**/Test*.java</include>
+ </includes>
+ <excludes>
+ <exclude>**/TestRollingWindowAverage*.java</exclude>
+ </excludes>
+ </configuration>
+ </execution>
+ <execution>
+ <id>serialized-test</id>
+ <goals>
+ <goal>test</goal>
+ </goals>
+ <configuration>
+ <forkCount>1</forkCount>
+ <reuseForks>false</reuseForks>
+ <argLine>${maven-surefire-plugin.argLine} -DminiClusterDedicatedDirs=true</argLine>
+ <forkedProcessTimeoutInSeconds>${fs.azure.scale.test.timeout}</forkedProcessTimeoutInSeconds>
+ <systemPropertyVariables>
+ <test.build.data>${test.build.data}/${surefire.forkNumber}</test.build.data>
+ <test.build.dir>${test.build.dir}/${surefire.forkNumber}</test.build.dir>
+ <hadoop.tmp.dir>${hadoop.tmp.dir}/${surefire.forkNumber}</hadoop.tmp.dir>
+ <test.unique.fork.id>fork-${surefire.forkNumber}</test.unique.fork.id>
+ <fs.azure.scale.test.enabled>${fs.azure.scale.test.enabled}</fs.azure.scale.test.enabled>
+ <fs.azure.scale.test.huge.filesize>${fs.azure.scale.test.huge.filesize}</fs.azure.scale.test.huge.filesize>
+ <fs.azure.scale.test.huge.partitionsize>${fs.azure.scale.test.huge.partitionsize}</fs.azure.scale.test.huge.partitionsize>
+ <fs.azure.scale.test.timeout>${fs.azure.scale.test.timeout}</fs.azure.scale.test.timeout>
+ </systemPropertyVariables>
+ <includes>
+ <include>**/TestRollingWindowAverage*.java</include>
+ </includes>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-failsafe-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>default-integration-test</id>
+ <goals>
+ <goal>integration-test</goal>
+ <goal>verify</goal>
+ </goals>
+ <configuration>
+ <forkCount>${testsThreadCount}</forkCount>
+ <reuseForks>false</reuseForks>
+ <argLine>${maven-surefire-plugin.argLine} -DminiClusterDedicatedDirs=true</argLine>
+ <forkedProcessTimeoutInSeconds>${fs.azure.scale.test.timeout}</forkedProcessTimeoutInSeconds>
+ <systemPropertyVariables>
+ <!-- Tell tests that they are being executed in parallel -->
+ <test.parallel.execution>true</test.parallel.execution>
+ <test.build.data>${test.build.data}/${surefire.forkNumber}</test.build.data>
+ <test.build.dir>${test.build.dir}/${surefire.forkNumber}</test.build.dir>
+ <hadoop.tmp.dir>${hadoop.tmp.dir}/${surefire.forkNumber}</hadoop.tmp.dir>
+
+ <!-- Due to a Maven quirk, setting this to just -->
+ <!-- surefire.forkNumber won't do the parameter -->
+ <!-- substitution. Putting a prefix in front of it like -->
+ <!-- "fork-" makes it work. -->
+ <test.unique.fork.id>fork-${surefire.forkNumber}</test.unique.fork.id>
+ <!-- Propagate scale parameters -->
+ <fs.azure.scale.test.enabled>${fs.azure.scale.test.enabled}</fs.azure.scale.test.enabled>
+ <fs.azure.scale.test.huge.filesize>${fs.azure.scale.test.huge.filesize}</fs.azure.scale.test.huge.filesize>
+ <fs.azure.scale.test.huge.partitionsize>${fs.azure.scale.test.huge.partitionsize}</fs.azure.scale.test.huge.partitionsize>
+ <fs.azure.scale.test.timeout>${fs.azure.scale.test.timeout}</fs.azure.scale.test.timeout>
+ </systemPropertyVariables>
+ <!-- Some tests cannot run in parallel. Tests that cover -->
+ <!-- access to the root directory must run in isolation -->
+ <!-- from anything else that could modify the bucket. -->
+ <!-- azure tests that cover multi-part upload must run in -->
+ <!-- isolation, because the file system is configured to -->
+ <!-- purge existing multi-part upload data on -->
+ <!-- initialization. MiniYARNCluster has not yet been -->
+ <!-- changed to handle parallel test execution gracefully. -->
+ <!-- Exclude all of these tests from parallel execution, -->
+ <!-- and instead run them sequentially in a separate -->
+ <!-- Surefire execution step later. -->
+ <includes>
+ <include>**/ITest*.java</include>
+ </includes>
+ <excludes>
+ <exclude>**/ITestFileSystemOperationsExceptionHandlingMultiThreaded.java</exclude>
+ <exclude>**/ITestFileSystemOperationsWithThreads.java</exclude>
+ <exclude>**/ITestOutOfBandAzureBlobOperationsLive.java</exclude>
+ <exclude>**/ITestNativeAzureFileSystemAuthorizationWithOwner.java</exclude>
+ <exclude>**/ITestNativeAzureFileSystemConcurrencyLive.java</exclude>
+ <exclude>**/ITestNativeAzureFileSystemLive.java</exclude>
+ <exclude>**/ITestNativeAzureFSPageBlobLive.java</exclude>
+ <exclude>**/ITestWasbRemoteCallHelper.java</exclude>
+ <exclude>**/ITestBlockBlobInputStream.java</exclude>
+ </excludes>
+ </configuration>
+ </execution>
+ <!-- Do a sequential run for tests that cannot handle -->
+ <!-- parallel execution. -->
+ <execution>
+ <id>sequential-integration-tests</id>
+ <goals>
+ <goal>integration-test</goal>
+ <goal>verify</goal>
+ </goals>
+ <configuration>
+ <forkedProcessTimeoutInSeconds>${fs.azure.scale.test.timeout}</forkedProcessTimeoutInSeconds>
+ <systemPropertyVariables>
+ <test.parallel.execution>false</test.parallel.execution>
+ <fs.azure.scale.test.enabled>${fs.azure.scale.test.enabled}</fs.azure.scale.test.enabled>
+ <fs.azure.scale.test.huge.filesize>${fs.azure.scale.test.huge.filesize}</fs.azure.scale.test.huge.filesize>
+ <fs.azure.scale.test.huge.partitionsize>${fs.azure.scale.test.huge.partitionsize}</fs.azure.scale.test.huge.partitionsize>
+ <fs.azure.scale.test.timeout>${fs.azure.scale.test.timeout}</fs.azure.scale.test.timeout>
+ </systemPropertyVariables>
+ <includes>
+ <include>**/ITestFileSystemOperationsExceptionHandlingMultiThreaded.java</include>
+ <include>**/ITestFileSystemOperationsWithThreads.java</include>
+ <include>**/ITestOutOfBandAzureBlobOperationsLive.java</include>
+ <include>**/ITestNativeAzureFileSystemAuthorizationWithOwner.java</include>
+ <include>**/ITestNativeAzureFileSystemConcurrencyLive.java</include>
+ <include>**/ITestNativeAzureFileSystemLive.java</include>
+ <include>**/ITestWasbRemoteCallHelper.java</include>
+ <include>**/ITestBlockBlobInputStream.java</include>
+ </includes>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+ </profile>
+ <profile>
+ <id>sequential-tests</id>
+ <activation>
+ <property>
+ <name>!parallel-tests</name>
+ </property>
+ </activation>
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-failsafe-plugin</artifactId>
+ <executions>
+ <execution>
+ <goals>
+ <goal>integration-test</goal>
+ <goal>verify</goal>
+ </goals>
+ <configuration>
+ <systemPropertyVariables>
+ <!-- Propagate scale parameters -->
+ <fs.azure.scale.test.enabled>${fs.azure.scale.test.enabled}</fs.azure.scale.test.enabled>
+ <fs.azure.scale.test.huge.filesize>${fs.azure.scale.test.huge.filesize}</fs.azure.scale.test.huge.filesize>
+ <fs.azure.scale.test.timeout>${fs.azure.scale.test.timeout}</fs.azure.scale.test.timeout>
+ </systemPropertyVariables>
+ <forkedProcessTimeoutInSeconds>${fs.azure.scale.test.timeout}</forkedProcessTimeoutInSeconds>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+ </profile>
+
+ <!-- Turn on scale tests-->
+ <profile>
+ <id>scale</id>
+ <activation>
+ <property>
+ <name>scale</name>
+ </property>
+ </activation>
+ <properties>
+ <fs.azure.scale.test.enabled>true</fs.azure.scale.test.enabled>
+ </properties>
+ </profile>
+ </profiles>
</project>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
index 639862f..f1031b4 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
@@ -346,7 +346,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
private String delegationToken;
/** The error message template when container is not accessible. */
- static final String NO_ACCESS_TO_CONTAINER_MSG = "No credentials found for "
+ public static final String NO_ACCESS_TO_CONTAINER_MSG = "No credentials found for "
+ "account %s in the configuration, and its container %s is not "
+ "accessible using anonymous credentials. Please check if the container "
+ "exists first. If it is not publicly available, you have to provide "
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/site/markdown/index.md
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/site/markdown/index.md b/hadoop-tools/hadoop-azure/src/site/markdown/index.md
index 466bf0b..876d7cc 100644
--- a/hadoop-tools/hadoop-azure/src/site/markdown/index.md
+++ b/hadoop-tools/hadoop-azure/src/site/markdown/index.md
@@ -519,96 +519,8 @@ The maximum number of entries that that cache can hold can be customized using t
<value>true</value>
</property>
```
-## Testing the hadoop-azure Module
-The hadoop-azure module includes a full suite of unit tests. Most of the tests
-will run without additional configuration by running `mvn test`. This includes
-tests against mocked storage, which is an in-memory emulation of Azure Storage.
-
-A selection of tests can run against the
-[Azure Storage Emulator](http://msdn.microsoft.com/en-us/library/azure/hh403989.aspx)
-which is a high-fidelity emulation of live Azure Storage. The emulator is
-sufficient for high-confidence testing. The emulator is a Windows executable
-that runs on a local machine.
-
-To use the emulator, install Azure SDK 2.3 and start the storage emulator. Then,
-edit `src/test/resources/azure-test.xml` and add the following property:
-
-```xml
-<property>
- <name>fs.azure.test.emulator</name>
- <value>true</value>
-</property>
-```
-
-There is a known issue when running tests with the emulator. You may see the
-following failure message:
-
- com.microsoft.windowsazure.storage.StorageException: The value for one of the HTTP headers is not in the correct format.
-
-To resolve this, restart the Azure Emulator. Ensure it v3.2 or later.
-
-It's also possible to run tests against a live Azure Storage account by saving a
-file to `src/test/resources/azure-auth-keys.xml` and setting
-the name of the storage account and its access key.
-
-For example:
-
-```xml
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<configuration>
- <property>
- <name>fs.azure.test.account.name</name>
- <value>{ACCOUNTNAME}.blob.core.windows.net</value>
- </property>
- <property>
- <name>fs.azure.account.key.{ACCOUNTNAME}.blob.core.windows.net</name>
- <value>{ACCOUNT ACCESS KEY}</value>
- </property>
-</configuration>
-```
-
-To run contract tests, set the WASB file system URI in `src/test/resources/azure-auth-keys.xml`
-and the account access key. For example:
-
-```xml
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<configuration>
- <property>
- <name>fs.contract.test.fs.wasb</name>
- <value>wasb://{CONTAINERNAME}@{ACCOUNTNAME}.blob.core.windows.net</value>
- <description>The name of the azure file system for testing.</description>
- </property>
- <property>
- <name>fs.azure.account.key.{ACCOUNTNAME}.blob.core.windows.net</name>
- <value>{ACCOUNT ACCESS KEY}</value>
- </property>
-</configuration>
-```
-
-Overall, to run all the tests using `mvn test`, a sample `azure-auth-keys.xml` is like following:
-
-```xml
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<configuration>
- <property>
- <name>fs.azure.test.account.name</name>
- <value>{ACCOUNTNAME}.blob.core.windows.net</value>
- </property>
- <property>
- <name>fs.azure.account.key.{ACCOUNTNAME}.blob.core.windows.net</name>
- <value>{ACCOUNT ACCESS KEY}</value>
- </property>
- <property>
- <name>fs.contract.test.fs.wasb</name>
- <value>wasb://{CONTAINERNAME}@{ACCOUNTNAME}.blob.core.windows.net</value>
- </property>
-</configuration>
-```
-
-DO NOT ADD `azure-auth-keys.xml` TO REVISION CONTROL. The keys to your Azure
-Storage account are a secret and must not be shared.
+## Further Reading
+* [Testing the Azure WASB client](testing_azure.html).
+* MSDN article, [Understanding Block Blobs, Append Blobs, and Page Blobs](https://docs.microsoft.com/en-us/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs)
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/site/markdown/testing_azure.md
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/site/markdown/testing_azure.md b/hadoop-tools/hadoop-azure/src/site/markdown/testing_azure.md
new file mode 100644
index 0000000..b58e68b
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/site/markdown/testing_azure.md
@@ -0,0 +1,576 @@
+<!---
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+
+# Testing the Azure WASB client
+
+<!-- MACRO{toc|fromDepth=0|toDepth=5} -->
+
+This module includes both unit tests, which can run in isolation without
+connecting to the Azure Storage service, and integration tests, which require a working
+connection to interact with a container. Unit test suites follow the naming
+convention `Test*.java`. Integration tests follow the naming convention
+`ITest*.java`.
+
+## Policy for submitting patches which affect the `hadoop-azure` module
+
+The Apache Jenkins infrastructure does not run any cloud integration tests,
+due to the need to keep credentials secure.
+
+### The submitter of any patch is required to run all the integration tests and declare which Azure region they used.
+
+This is important: **patches which do not include this declaration will be ignored**.
+
+This policy has proven to be the only mechanism to guarantee full regression
+testing of code changes. Why the declaration of region? Two reasons:
+
+1. It helps us identify regressions which only surface against specific endpoints.
+1. It forces the submitters to be more honest about their testing. It's easy
+to lie, "yes, I tested this". To say "yes, I tested this against Azure US-west"
+is a more specific lie and harder to make. And, if you get caught out: you
+lose all credibility with the project.
+
+You don't need to test from a VM within the Azure infrastructure; all you need
+are credentials.
+
+It's neither hard nor expensive to run the tests; if you can't,
+there's no guarantee your patch works. The reviewers have enough to do, and
+don't have the time to do these tests, especially as every failure simply
+makes for slow iterative development.
+
+Please: run the tests. And if you don't, we are sorry for declining your
+patch, but we have to.
+
+
+### What if there's an intermittent failure of a test?
+
+Some of the tests do fail intermittently, especially in parallel runs.
+If this happens, try to run the test on its own to see if the test succeeds.
+
+If it still fails, include this fact in your declaration. We know some tests
+are intermittently unreliable.
+
+### What if the tests are timing out or failing over my network connection?
+
+The tests are designed to be configurable for different
+timeouts. If you are seeing problems and this configuration isn't working,
+that's a sign that the configuration mechanism isn't complete. If it's happening
+in the production code, that could be a sign of a problem which may surface
+over long-haul connections. Please help us identify and fix these problems
+— especially as you are the one best placed to verify the fixes work.
+
+## Setting up the tests
+
+### Testing the `hadoop-azure` Module
+
+The `hadoop-azure` module includes a full suite of unit tests. Many of the tests
+will run without additional configuration by running `mvn test`. This includes
+tests against mocked storage, which is an in-memory emulation of Azure Storage.
+
+The integration tests are designed to test directly against an Azure storage
+service, and require an account and credentials in order to run.
+
+This is done by creating the file `src/test/resources/azure-auth-keys.xml`
+and setting the name of the storage account and its access key.
+
+For example:
+
+```xml
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<configuration>
+ <property>
+ <name>fs.azure.test.account.name</name>
+ <value>{ACCOUNTNAME}.blob.core.windows.net</value>
+ </property>
+ <property>
+ <name>fs.azure.account.key.{ACCOUNTNAME}.blob.core.windows.net</name>
+ <value>{ACCOUNT ACCESS KEY}</value>
+ </property>
+</configuration>
+```
+
+To run contract tests, set the WASB file system URI in `src/test/resources/azure-auth-keys.xml`
+and the account access key. For example:
+
+```xml
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<configuration>
+ <property>
+ <name>fs.contract.test.fs.wasb</name>
+ <value>wasb://{CONTAINERNAME}@{ACCOUNTNAME}.blob.core.windows.net</value>
+ <description>The name of the azure file system for testing.</description>
+ </property>
+ <property>
+ <name>fs.azure.account.key.{ACCOUNTNAME}.blob.core.windows.net</name>
+ <value>{ACCOUNT ACCESS KEY}</value>
+ </property>
+</configuration>
+```
+
+Overall, to run all the tests using `mvn test`, a sample `azure-auth-keys.xml` looks like the following:
+
+```xml
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<configuration>
+ <property>
+ <name>fs.azure.test.account.name</name>
+ <value>{ACCOUNTNAME}.blob.core.windows.net</value>
+ </property>
+ <property>
+ <name>fs.azure.account.key.{ACCOUNTNAME}.blob.core.windows.net</name>
+ <value>{ACCOUNT ACCESS KEY}</value>
+ </property>
+ <property>
+ <name>fs.contract.test.fs.wasb</name>
+ <value>wasb://{CONTAINERNAME}@{ACCOUNTNAME}.blob.core.windows.net</value>
+ </property>
+</configuration>
+```
+
+DO NOT ADD `azure-auth-keys.xml` TO REVISION CONTROL. The keys to your Azure
+Storage account are a secret and must not be shared.
+
+
+## Running the Tests
+
+After completing the configuration, execute the test run through Maven.
+
+```bash
+mvn -T 1C clean verify
+```
+
+It's also possible to execute multiple test suites in parallel by passing the
+`parallel-tests` property on the command line. The tests spend most of their
+time blocked on network I/O, so running in parallel tends to
+complete full test runs faster.
+
+```bash
+mvn -T 1C -Dparallel-tests clean verify
+```
+
+Some tests must run with exclusive access to the storage container, so even with the
+`parallel-tests` property, several test suites will run in serial in a separate
+Maven execution step after the parallel tests.
+
+By default, `parallel-tests` runs 4 test suites concurrently. This can be tuned
+by passing the `testsThreadCount` property.
+
+```bash
+mvn -T 1C -Dparallel-tests -DtestsThreadCount=8 clean verify
+```
+
+To run just unit tests, which do not require Azure connectivity or credentials,
+use any of the above invocations, but switch the goal to `test` instead of
+`verify`.
+
+```bash
+mvn -T 1C clean test
+
+mvn -T 1C -Dparallel-tests clean test
+
+mvn -T 1C -Dparallel-tests -DtestsThreadCount=8 clean test
+```
+
+To run only a specific named subset of tests, pass the `test` property for unit
+tests or the `it.test` property for integration tests.
+
+```bash
+mvn -T 1C clean test -Dtest=TestRollingWindowAverage
+
+mvn -T 1C clean verify -Dscale -Dit.test=ITestFileSystemOperationExceptionMessage -Dtest=none
+
+mvn -T 1C clean verify -Dtest=none -Dit.test=ITest*
+
+```
+
+Notes:
+
+1. When running a specific subset of tests, the patterns passed in `test`
+and `it.test` override the configuration of which tests need to run in isolation
+in a separate serial phase (mentioned above). This can cause unpredictable
+results, so the recommendation is to avoid passing `parallel-tests` in
+combination with `test` or `it.test`. If you know that you are specifying only
+tests that can run safely in parallel, then it will work. For wide patterns,
+like `ITest*` shown above, it may cause unpredictable test failures.
+
+2. The command line shell may try to expand the "*" and sometimes the "#" symbols
+in test patterns. In such situations, escape the character with a "\\" prefix.
+Example:
+
+ mvn -T 1C clean verify -Dtest=none -Dit.test=ITest\*
+
+
+## Viewing the results
+
+Integration test results and logs are stored in `target/failsafe-reports/`.
+An HTML report can be generated during site generation, or with the `surefire-report`
+plugin:
+
+```bash
+
+# for the unit tests
+mvn -T 1C surefire-report:report-only
+
+# for the integration tests
+mvn -T 1C surefire-report:failsafe-report-only
+
+# all reports for this module
+mvn -T 1C site:site
+```
+
+## Scale Tests
+
+There are a set of tests designed to measure the scalability and performance
+at scale of the filesystem client, *Scale Tests*. Tests include: creating
+and traversing directory trees, uploading large files, renaming them,
+deleting them, seeking through the files, performing random IO, and others.
+This makes them a foundational part of the benchmarking.
+
+By their very nature they are slow. And, as their execution time is often
+limited by bandwidth between the computer running the tests and the Azure endpoint,
+parallel execution does not speed these tests up.
+
+### Enabling the Scale Tests
+
+The tests are enabled if the `scale` property is set in the maven build;
+this can be done regardless of whether or not the parallel test profile
+is used:
+
+```bash
+mvn -T 1C verify -Dscale
+
+mvn -T 1C verify -Dparallel-tests -Dscale -DtestsThreadCount=8
+```
+
+The most bandwidth intensive tests (those which upload data) always run
+sequentially; those which are slow due to HTTPS setup costs or server-side
+actions are included in the set of parallelized tests.
+
+
+### Scale test tuning options
+
+
+Some of the tests can be tuned from the maven build or from the
+configuration file used to run the tests.
+
+```bash
+mvn -T 1C verify -Dparallel-tests -Dscale -DtestsThreadCount=8 -Dfs.azure.scale.test.huge.filesize=128M
+```
+
+The algorithm is as follows (a sketch in code appears after the list):
+
+1. The value is queried from the configuration file, using a default value if
+it is not set.
+1. The value is queried from the JVM System Properties, where it is passed
+down by maven.
+1. If the system property is null, an empty string, or it has the value `unset`,
+then the configuration value is used. The `unset` option is used to
+[work round a quirk in maven property propagation](http://stackoverflow.com/questions/7773134/null-versus-empty-arguments-in-maven).
+
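+As an illustration of that ordering, here is a minimal sketch in Java; the
+helper name and signature are assumptions for this example, not the actual
+`AzureTestUtils` API:
+
+```java
+// assumes org.apache.hadoop.conf.Configuration
+public static String getTestProperty(Configuration conf,
+    String key, String defaultValue) {
+  String confVal = conf.getTrimmed(key, defaultValue);  // 1: configuration file
+  String sysprop = System.getProperty(key);             // 2: maven/JVM property
+  // 3: null, empty or "unset" system properties fall back to the config value
+  return (sysprop == null || sysprop.isEmpty() || "unset".equals(sysprop))
+      ? confVal : sysprop;
+}
+```
+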
+Only a few properties can be set this way; more will be added.
+
+| Property | Meaning |
+|-----------|-------------|
+| `fs.azure.scale.test.huge.filesize`| Size for huge file uploads |
+| `fs.azure.scale.test.huge.partitionsize`| Size for partitions in huge file uploads |
+
+The file and partition sizes are numeric values with a k/m/g/t/p suffix depending
+on the desired size. For example: 128M, 128m, 2G, 2g, 4T or even 1P.
+
+#### Scale test configuration options
+
+Some scale tests perform multiple operations (such as creating many directories).
+
+The exact number of operations to perform is configurable in the option
+`scale.test.operation.count`.
+
+```xml
+<property>
+ <name>scale.test.operation.count</name>
+ <value>10</value>
+</property>
+```
+
+Larger values generate more load, and are recommended when testing locally,
+or in batch runs.
+
+Smaller values result in faster test runs, especially when the object
+store is a long way away.
+
+Operations which work on directories have a separate option: this controls
+the width and depth of tests creating recursive directories. Larger
+values create exponentially more directories, with consequent performance
+impact.
+
+```xml
+<property>
+ <name>scale.test.directory.count</name>
+ <value>2</value>
+</property>
+```
+
+DistCp tests targeting Azure support a configurable file size. The default is
+10 MB, but the configuration value is expressed in KB so that it can be tuned
+smaller to achieve faster test runs.
+
+```xml
+<property>
+ <name>scale.test.distcp.file.size.kb</name>
+ <value>10240</value>
+</property>
+```
+
+Azure-specific scale test properties are:
+
+##### `fs.azure.scale.test.huge.filesize`: size in MB for "Huge file tests".
+
+The Huge File tests validate Azure Storage's ability to handle large files; the
+property `fs.azure.scale.test.huge.filesize` declares the file size to use.
+
+```xml
+<property>
+ <name>fs.azure.scale.test.huge.filesize</name>
+ <value>200M</value>
+</property>
+```
+
+Tests at this scale are slow: they are best executed from hosts running in
+the cloud infrastructure where the storage endpoint is based.
+
+## Using the emulator
+
+A selection of tests can run against the
+[Azure Storage Emulator](http://msdn.microsoft.com/en-us/library/azure/hh403989.aspx)
+which is a high-fidelity emulation of live Azure Storage. The emulator is
+sufficient for high-confidence testing. The emulator is a Windows executable
+that runs on a local machine.
+
+To use the emulator, install Azure SDK 2.3 and start the storage emulator. Then,
+edit `src/test/resources/azure-test.xml` and add the following property:
+
+```xml
+<property>
+ <name>fs.azure.test.emulator</name>
+ <value>true</value>
+</property>
+```
+
+There is a known issue when running tests with the emulator. You may see the
+following failure message:
+
+ com.microsoft.windowsazure.storage.StorageException: The value for one of the HTTP headers is not in the correct format.
+
+To resolve this, restart the Azure Emulator. Ensure it is v3.2 or later.
+
+
+## Debugging Test failures
+
+Logging at debug level is the standard way to provide more diagnostics output;
+after setting this, rerun the tests:
+
+```properties
+log4j.logger.org.apache.hadoop.fs.azure=DEBUG
+```
+
+## Adding new tests
+
+New tests are always welcome. Bear in mind that we need to keep costs
+and test time down, which is done by
+
+* Not duplicating tests.
+* Being efficient in your use of Hadoop API calls.
+* Isolating large/slow tests into the "scale" test group.
+* Designing all tests to execute in parallel (where possible).
+* Adding new probes and predicates into existing tests, albeit carefully.
+
+*No duplication*: if an operation is tested elsewhere, don't repeat it. This
+applies as much for metadata operations as it does for bulk IO. If a new
+test case is added which completely obsoletes an existing test, it is OK
+to cut the previous one —after showing that coverage is not worsened.
+
+*Efficient*: prefer a single `getFileStatus()` call and examining the results,
+rather than calls to `exists()`, `isFile()`, etc., as sketched below.
+
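+A sketch of the difference, using the Hadoop `FileSystem` API with an
+illustrative path:
+
+```java
+// two probes mean two remote requests:
+//   assertTrue(fs.exists(path)); assertTrue(fs.isFile(path));
+// one getFileStatus() call is a single request with a richer failure message:
+FileStatus status = fs.getFileStatus(path);  // throws FileNotFoundException if absent
+assertTrue("not a file: " + status, status.isFile());
+```
+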
+*Fail with useful information:* provide as much diagnostics as possible
+on a failure. Using `org.apache.hadoop.fs.contract.ContractTestUtils` to make
+assertions about the state of a filesystem helps here.
+
+*Isolating Scale tests*. Any test doing large amounts of IO MUST extend the
+class `AbstractAzureScaleTest`, so that it only runs if `scale` is defined on a
+build, and supports test timeouts configurable by the user. Scale tests should also
+support configurability as to the actual size of objects/number of operations,
+so that behavior at different scale can be verified.
+
+*Designed for parallel execution*. A key need here is for each test suite to work
+on isolated parts of the filesystem. Subclasses of `AbstractWasbTestBase`
+SHOULD use the `path()`, `methodpath()` and `blobpath()` methods
+to build isolated paths. Tests MUST NOT assume that they have exclusive access
+to a container.
+
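+A minimal sketch of path isolation, assuming the helpers named above (the
+tests in this patch use `methodPath()`):
+
+```java
+// each test method works under its own unique base path
+Path base = methodPath();
+Path testFile = new Path(base, "test1.dat");  // safe under parallel execution
+```
+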
+*Extending existing tests where appropriate*. This recommendation goes
+against normal testing best practise of "test one thing per method".
+Because it is so slow to create directory trees or upload large files, we do
+not have that luxury. All the tests against real endpoints are integration
+tests where sharing test setup and teardown saves time and money.
+
+A standard way to do this is to extend existing tests with some extra predicates,
+rather than write new tests. When doing this, make sure that the new predicates
+fail with meaningful diagnostics, so any new problems can be easily debugged
+from test logs.
+
+
+### Requirements of new Tests
+
+
+This is what we expect from new tests; they're an extension of the normal
+Hadoop requirements, based on the need to work with remote servers whose
+use requires the presence of secret credentials, where tests may be slow,
+and where finding out why something failed from nothing but the test output
+is critical.
+
+#### Subclasses Existing Shared Base Classes
+
+There are a set of base classes which should be extended for Azure tests and
+integration tests.
+
+##### `org.apache.hadoop.fs.azure.AbstractWasbTestWithTimeout`
+
+This extends the junit `Assert` class with thread names and timeouts,
+the default timeout being set in `AzureTestConstants.AZURE_TEST_TIMEOUT` to
+ten minutes. The thread names are set to aid analyzing the stack trace of
+a test: a `jstack` call can be used to identify which test a thread belongs to.
+
+##### `org.apache.hadoop.fs.azure.AbstractWasbTestBase`
+
+The base class for tests which use `AzureBlobStorageTestAccount` to create
+mock or live Azure clients; in test teardown it tries to clean up store state.
+
+1. This class requires subclasses to implement `createTestAccount()` to create
+a mock or real test account.
+
+1. The configuration used to create a test account *should* be that from
+`createConfiguration()`; this can be extended in subclasses to tune the
+settings, as in the sketch below.
+
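+A minimal sketch of such a subclass, assuming only the contract described
+above; the class name and the tuned option are illustrative:
+
+```java
+public class ITestExampleWasbSuite extends AbstractWasbTestBase {
+
+  @Override
+  protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+    // a live account built from the configured credentials
+    return AzureBlobStorageTestAccount.create();
+  }
+
+  @Override
+  protected Configuration createConfiguration() {
+    // extend, rather than replace, the shared configuration
+    Configuration conf = super.createConfiguration();
+    conf.setBoolean("fs.azure.skip.metrics", true);
+    return conf;
+  }
+}
+```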
+
+##### `org.apache.hadoop.fs.azure.integration.AbstractAzureScaleTest`
+
+This extends `AbstractWasbTestBase` for scale tests; those tests which
+only run when `-Dscale` is used to select the "scale" profile.
+These tests have a timeout of 30 minutes, so as to support slow test runs.
+
+Having shared base classes helps reduce future maintenance. Please
+use them.
+
+#### Secure
+
+Don't ever log credentials. The credential tests go out of their way to
+not provide meaningful logs or assertion messages precisely to avoid this.
+
+#### Efficient in Time and Money
+
+This means being efficient in test setup/teardown and, ideally, making use of
+existing public datasets to save setup time and tester cost.
+
+
+The reference example is `ITestAzureHugeFiles`. This marks the test suite as
+`@FixMethodOrder(MethodSorters.NAME_ASCENDING)` then orders the test cases such
+that each test case expects the previous test to have completed (here: uploaded a file,
+renamed a file, ...). This provides for independent tests in the reports, yet still
+permits an ordered sequence of operations. Do note the use of `Assume.assume()`
+to detect when the preconditions for a single test case are not met; the
+tests are then skipped, rather than failing with a trace which is really a false alarm.
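+
+A condensed sketch of that structure, with illustrative names:
+
+```java
+import org.junit.Assume;
+import org.junit.FixMethodOrder;
+import org.junit.Test;
+import org.junit.runners.MethodSorters;
+
+@FixMethodOrder(MethodSorters.NAME_ASCENDING)
+public class ITestOrderedExample extends AbstractAzureScaleTest {
+
+  @Override
+  protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+    return AzureBlobStorageTestAccount.create();
+  }
+
+  @Test
+  public void test_010_create() throws Exception {
+    getFileSystem().create(path("stagefile")).close();
+  }
+
+  @Test
+  public void test_020_rename() throws Exception {
+    // skip, rather than fail, if the earlier stage did not run
+    Assume.assumeTrue("no staged file",
+        getFileSystem().exists(path("stagefile")));
+    assertTrue("rename failed",
+        getFileSystem().rename(path("stagefile"), path("renamed")));
+  }
+}
+```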
+
+
+### Works Over Long-haul Links
+
+As well as making file size and operation counts scalable, this includes
+making test timeouts adequate. The Scale tests make this configurable; it's
+hard coded to ten minutes in `AbstractAzureIntegrationTest()`; subclasses can
+change this by overriding `getTestTimeoutMillis()`.
+
+Equally importantly: support proxies, as some testers need them.
+
+
+### Provides Diagnostics and Timing Information
+
+1. Create logs, log things.
+1. You can use `AbstractWasbTestBase.describe(format-string, args)` here; it
+adds some newlines so the output is easier to spot.
+1. Use `ContractTestUtils.NanoTimer` to measure the duration of operations,
+and log the output, as in the sketch below.
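+
+A sketch of the timer in use; `testDir` is assumed to be a path in the test
+filesystem:
+
+```java
+ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer();
+getFileSystem().mkdirs(testDir);          // the operation being measured
+timer.end("time to mkdirs %s", testDir);  // ends the timer and logs the duration
+```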
+
+#### Fails Meaningfully
+
+The `ContractTestUtils` class contains a whole set of assertions for making
+statements about the expected state of a filesystem, e.g.
+`assertPathExists(FS, path)`, `assertPathDoesNotExist(FS, path)`, and others.
+These do their best to provide meaningful diagnostics on failures (e.g. directory
+listings, file status, ...), helping to make failures easier to understand.
+
+At the very least, *do not use `assertTrue()` or `assertFalse()` without
+including error messages*.
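+
+For example:
+
+```java
+// Weak: a failure reports nothing but "expected true".
+assertTrue(fs.exists(path));
+
+// Better: the failure text states what was expected, and where.
+assertTrue("expected a file at " + path, fs.exists(path));
+```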
+
+
+### Cleans Up Afterwards
+
+Keeps costs down.
+
+1. Do not only clean up if a test case completes successfully; test suite
+teardown must do it.
+1. That teardown code must check for the filesystem and other fields being
+null before the cleanup. Why? If test setup fails, the teardown methods still
+get called; see the sketch below.
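+
+A defensive teardown might look like this sketch, where `fs` and `testDir`
+are fields assigned during setup:
+
+```java
+@After
+public void tearDown() throws Exception {
+  // setup may have failed before these fields were ever assigned
+  if (fs != null) {
+    fs.delete(testDir, true);
+  }
+  IOUtils.closeStream(fs);  // null-safe close
+}
+```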
+
+### Works Reliably
+
+We really appreciate this; you will too.
+
+
+## Tips
+
+### How to keep your credentials really safe
+
+Although the `auth-keys.xml` file is marked as ignored in git and subversion,
+it is still in your source tree, and there's always a risk that it may
+creep out.
+
+You can avoid this by keeping your keys outside the source tree and
+using an absolute XInclude reference to it.
+
+```xml
+<configuration>
+
+  <include xmlns="http://www.w3.org/2001/XInclude"
+    href="file:///users/qe/.auth-keys.xml" />
+
+</configuration>
+```
+
+### Cleaning up Containers
+
+The Azure tests create containers with the prefix `"wasbtests-"` and delete
+them after the test runs. If a test run is interrupted, these containers
+may not get deleted. There is a special test case, `CleanupTestContainers`,
+which can be manually invoked to list and delete these:
+
+```bash
+mvn test -Dtest=CleanupTestContainers
+```
+
+This will delete the containers; the output log of the test run will
+provide the details and summary of the operation.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AbstractWasbTestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AbstractWasbTestBase.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AbstractWasbTestBase.java
index d04a19c..0d3a06c 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AbstractWasbTestBase.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AbstractWasbTestBase.java
@@ -18,15 +18,21 @@
package org.apache.hadoop.fs.azure;
-import static org.junit.Assume.assumeNotNull;
+import java.io.IOException;
-import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.conf.Configuration;
import org.junit.After;
import org.junit.Before;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.azure.integration.AzureTestConstants;
+import org.apache.hadoop.io.IOUtils;
+
+import static org.junit.Assume.assumeNotNull;
+import static org.apache.hadoop.fs.azure.integration.AzureTestUtils.*;
+
/**
* Abstract test class that provides basic setup and teardown of testing Azure
* Storage account. Each subclass defines a different set of test cases to run
@@ -34,41 +40,137 @@ import org.slf4j.LoggerFactory;
* to run those tests. The returned account might integrate with Azure Storage
* directly or it might be a mock implementation.
*/
-public abstract class AbstractWasbTestBase {
+public abstract class AbstractWasbTestBase extends AbstractWasbTestWithTimeout
+ implements AzureTestConstants {
protected static final Logger LOG =
LoggerFactory.getLogger(AbstractWasbTestBase.class);
- @VisibleForTesting
protected NativeAzureFileSystem fs;
- private AzureBlobStorageTestAccount testAccount;
+ protected AzureBlobStorageTestAccount testAccount;
@Before
public void setUp() throws Exception {
- testAccount = createTestAccount();
- if (testAccount != null) {
- fs = testAccount.getFileSystem();
- }
- assumeNotNull(testAccount);
+ AzureBlobStorageTestAccount account = createTestAccount();
+ assumeNotNull(account);
+ bindToTestAccount(account);
}
@After
public void tearDown() throws Exception {
- if (testAccount != null) {
- testAccount.cleanup();
- testAccount = null;
- fs = null;
- }
+ describe("closing test account and filesystem");
+ testAccount = cleanupTestAccount(testAccount);
+ IOUtils.closeStream(fs);
+ fs = null;
}
- public Configuration getConfiguration() {
- return new Configuration();
+ /**
+ * Create the configuration to use when creating a test account.
+ * Subclasses can override this to tune the test account configuration.
+ * @return a configuration.
+ */
+ public Configuration createConfiguration() {
+ return AzureBlobStorageTestAccount.createTestConfiguration();
}
+ /**
+ * Create the test account.
+ * Subclasses must implement this.
+ * @return the test account.
+ * @throws Exception
+ */
protected abstract AzureBlobStorageTestAccount createTestAccount()
throws Exception;
+ /**
+ * Get the test account.
+ * @return the current test account.
+ */
protected AzureBlobStorageTestAccount getTestAccount() {
return testAccount;
}
+
+ /**
+ * Get the filesystem
+ * @return the current filesystem.
+ */
+ protected NativeAzureFileSystem getFileSystem() {
+ return fs;
+ }
+
+ /**
+ * Get the configuration used to create the filesystem
+ * @return the configuration of the test FS
+ */
+ protected Configuration getConfiguration() {
+ return getFileSystem().getConf();
+ }
+
+ /**
+ * Bind to a new test account; closing any existing one.
+ * This updates the test account returned in {@link #getTestAccount()}
+ * and the filesystem in {@link #getFileSystem()}.
+ * @param account new test account
+ */
+ protected void bindToTestAccount(AzureBlobStorageTestAccount account) {
+ // clean any existing test account
+ cleanupTestAccount(testAccount);
+ IOUtils.closeStream(fs);
+ testAccount = account;
+ if (testAccount != null) {
+ fs = testAccount.getFileSystem();
+ }
+ }
+
+ /**
+ * Return a path to a blob which will be unique for this fork.
+ * @param filepath filepath
+ * @return a path under the default blob directory
+ * @throws IOException
+ */
+ protected Path blobPath(String filepath) throws IOException {
+ return blobPathForTests(getFileSystem(), filepath);
+ }
+
+ /**
+ * Create a path under the test path provided by
+ * the FS contract.
+ * @param filepath path string in
+ * @return a path qualified by the test filesystem
+ * @throws IOException IO problems
+ */
+ protected Path path(String filepath) throws IOException {
+ return pathForTests(getFileSystem(), filepath);
+ }
+
+ /**
+ * Return a path bonded to this method name, unique to this fork during
+ * parallel execution.
+ * @return a method name unique to (fork, method).
+ * @throws IOException IO problems
+ */
+ protected Path methodPath() throws IOException {
+ return path(methodName.getMethodName());
+ }
+
+ /**
+ * Return a blob path bonded to this method name, unique to this fork during
+ * parallel execution.
+ * @return a method name unique to (fork, method).
+ * @throws IOException IO problems
+ */
+ protected Path methodBlobPath() throws IOException {
+ return blobPath(methodName.getMethodName());
+ }
+
+ /**
+ * Describe a test in the logs.
+ * @param text text to print
+ * @param args arguments to format in the printing
+ */
+ protected void describe(String text, Object... args) {
+ LOG.info("\n\n{}: {}\n",
+ methodName.getMethodName(),
+ String.format(text, args));
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AbstractWasbTestWithTimeout.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AbstractWasbTestWithTimeout.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AbstractWasbTestWithTimeout.java
new file mode 100644
index 0000000..b7076a4
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AbstractWasbTestWithTimeout.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.rules.TestName;
+import org.junit.rules.Timeout;
+
+import org.apache.hadoop.fs.azure.integration.AzureTestConstants;
+
+/**
+ * Base class for any Wasb test with timeouts & named threads.
+ * This class does not attempt to bind to Azure.
+ */
+public class AbstractWasbTestWithTimeout extends Assert {
+
+ /**
+ * The name of the current method.
+ */
+ @Rule
+ public TestName methodName = new TestName();
+ /**
+ * Set the timeout for every test.
+ * This is driven by the value returned by {@link #getTestTimeoutMillis()}.
+ */
+ @Rule
+ public Timeout testTimeout = new Timeout(getTestTimeoutMillis());
+
+ /**
+ * Name the junit thread for the class. This will overridden
+ * before the individual test methods are run.
+ */
+ @BeforeClass
+ public static void nameTestThread() {
+ Thread.currentThread().setName("JUnit");
+ }
+
+ /**
+ * Name the thread to the current test method.
+ */
+ @Before
+ public void nameThread() {
+ Thread.currentThread().setName("JUnit-" + methodName.getMethodName());
+ }
+
+ /**
+ * Override point: the test timeout in milliseconds.
+ * @return a timeout in milliseconds
+ */
+ protected int getTestTimeoutMillis() {
+ return AzureTestConstants.AZURE_TEST_TIMEOUT;
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AzureBlobStorageTestAccount.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AzureBlobStorageTestAccount.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AzureBlobStorageTestAccount.java
index 7fa59ce..5b36c87 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AzureBlobStorageTestAccount.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AzureBlobStorageTestAccount.java
@@ -21,12 +21,15 @@ package org.apache.hadoop.fs.azure;
import com.microsoft.azure.storage.*;
import com.microsoft.azure.storage.blob.*;
import com.microsoft.azure.storage.core.Base64;
-import org.apache.commons.configuration2.SubsetConfiguration;
+import org.junit.Assert;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import org.apache.commons.configuration2.SubsetConfiguration;
+
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.azure.integration.AzureTestConstants;
import org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation;
import org.apache.hadoop.fs.azure.metrics.AzureFileSystemMetricsSystem;
import org.apache.hadoop.metrics2.AbstractMetric;
@@ -35,6 +38,8 @@ import org.apache.hadoop.metrics2.MetricsSink;
import org.apache.hadoop.metrics2.MetricsTag;
import org.apache.hadoop.metrics2.impl.TestMetricsConfig;
+import java.io.File;
+import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.*;
@@ -46,10 +51,10 @@ import static org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.KEY_USE_SECU
/**
* Helper class to create WASB file systems backed by either a mock in-memory
- * implementation or a real Azure Storage account. See RunningLiveWasbTests.txt
- * for instructions on how to connect to a real Azure Storage account.
+ * implementation or a real Azure Storage account.
*/
-public final class AzureBlobStorageTestAccount {
+public final class AzureBlobStorageTestAccount implements AutoCloseable,
+ AzureTestConstants {
private static final Logger LOG = LoggerFactory.getLogger(
AzureBlobStorageTestAccount.class);
@@ -166,6 +171,7 @@ public final class AzureBlobStorageTestAccount {
return new Path("/" + DEFAULT_PAGE_BLOB_DIRECTORY);
}
+ @Deprecated
public static Path pageBlobPath(String fileName) {
return new Path(pageBlobPath(), fileName);
}
@@ -201,6 +207,9 @@ public final class AzureBlobStorageTestAccount {
* @return
*/
private boolean wasGeneratedByMe(MetricsRecord currentRecord) {
+ Assert.assertNotNull("null filesystem", fs);
+ Assert.assertNotNull("null filesystemn instance ID",
+ fs.getInstrumentation().getFileSystemInstanceId());
String myFsId = fs.getInstrumentation().getFileSystemInstanceId().toString();
for (MetricsTag currentTag : currentRecord.tags()) {
if (currentTag.name().equalsIgnoreCase("wasbFileSystemId")) {
@@ -247,13 +256,16 @@ public final class AzureBlobStorageTestAccount {
getBlobReference(blobKey).releaseLease(accessCondition);
}
- private static void saveMetricsConfigFile() {
+ private static void saveMetricsConfigFile() throws IOException {
if (!metricsConfigSaved) {
+ String testFilename = TestMetricsConfig.getTestFilename(
+ "hadoop-metrics2-azure-file-system");
+ File dest = new File(testFilename).getCanonicalFile();
+ dest.getParentFile().mkdirs();
new org.apache.hadoop.metrics2.impl.ConfigBuilder()
.add("azure-file-system.sink.azuretestcollector.class",
StandardCollector.class.getName())
- .save(TestMetricsConfig.getTestFilename(
- "hadoop-metrics2-azure-file-system.properties"));
+ .save(testFilename);
metricsConfigSaved = true;
}
}
@@ -314,9 +326,8 @@ public final class AzureBlobStorageTestAccount {
Configuration conf = createTestConfiguration();
if (!conf.getBoolean(USE_EMULATOR_PROPERTY_NAME, false)) {
// Not configured to test against the storage emulator.
- LOG.warn("Skipping emulator Azure test because configuration doesn't "
- + "indicate that it's running. Please see RunningLiveWasbTests.txt "
- + "for guidance.");
+ LOG.warn("Skipping emulator Azure test because configuration "
+ + "doesn't indicate that it's running.");
return null;
}
CloudStorageAccount account =
@@ -482,8 +493,7 @@ public final class AzureBlobStorageTestAccount {
credentials = StorageCredentialsAnonymous.ANONYMOUS;
} else {
LOG.warn("Skipping live Azure test because of missing key for"
- + " account '" + accountName + "'. "
- + "Please see RunningLiveWasbTests.txt for guidance.");
+ + " account '" + accountName + "'.");
return null;
}
} else {
@@ -517,8 +527,7 @@ public final class AzureBlobStorageTestAccount {
throws URISyntaxException, KeyProviderException {
String testAccountName = conf.get(TEST_ACCOUNT_NAME_PROPERTY_NAME);
if (testAccountName == null) {
- LOG.warn("Skipping live Azure test because of missing test account. "
- + "Please see RunningLiveWasbTests.txt for guidance.");
+ LOG.warn("Skipping live Azure test because of missing test account");
return null;
}
return createStorageAccount(testAccountName, conf, false);
@@ -863,6 +872,11 @@ public final class AzureBlobStorageTestAccount {
}
}
+ @Override
+ public void close() throws Exception {
+ cleanup();
+ }
+
public NativeAzureFileSystem getFileSystem() {
return fs;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestAzureConcurrentOutOfBandIo.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestAzureConcurrentOutOfBandIo.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestAzureConcurrentOutOfBandIo.java
new file mode 100644
index 0000000..7e733dc
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestAzureConcurrentOutOfBandIo.java
@@ -0,0 +1,179 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.Arrays;
+
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.fs.azure.integration.AzureTestUtils;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.PermissionStatus;
+
+/**
+ * Handle OOB IO into a shared container.
+ */
+public class ITestAzureConcurrentOutOfBandIo extends AbstractWasbTestBase {
+
+ private static final Logger LOG =
+ LoggerFactory.getLogger(ITestAzureConcurrentOutOfBandIo.class);
+
+ // Class constants.
+ static final int DOWNLOAD_BLOCK_SIZE = 8 * 1024 * 1024;
+ static final int UPLOAD_BLOCK_SIZE = 4 * 1024 * 1024;
+ static final int BLOB_SIZE = 32 * 1024 * 1024;
+
+ // Number of blocks to be written before flush.
+ static final int NUMBER_OF_BLOCKS = 2;
+
+ @Override
+ protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+ return AzureBlobStorageTestAccount.createOutOfBandStore(
+ UPLOAD_BLOCK_SIZE, DOWNLOAD_BLOCK_SIZE);
+ }
+
+ class DataBlockWriter implements Runnable {
+
+ Thread runner;
+ AzureBlobStorageTestAccount writerStorageAccount;
+ String key;
+ boolean done = false;
+
+ /**
+ * Constructor captures the test account.
+ *
+ * @param testAccount
+ */
+ public DataBlockWriter(AzureBlobStorageTestAccount testAccount, String key) {
+ writerStorageAccount = testAccount;
+ this.key = key;
+ }
+
+ /**
+ * Start writing blocks to Azure storage.
+ */
+ public void startWriting() {
+ runner = new Thread(this); // Create the block writer thread.
+ runner.start(); // Start the block writer thread.
+ }
+
+ /**
+ * Stop writing blocks to Azure storage.
+ */
+ public void stopWriting() {
+ done = true;
+ }
+
+ /**
+ * Implementation of the runnable interface. The run method is a tight loop
+ * which repeatedly updates the blob with a 4 MB block.
+ */
+ public void run() {
+ byte[] dataBlockWrite = new byte[UPLOAD_BLOCK_SIZE];
+
+ OutputStream outputStream = null;
+
+ try {
+ for (int i = 0; !done; i++) {
+ // Write two 4 MB blocks to the blob.
+ //
+ outputStream = writerStorageAccount.getStore().storefile(
+ key,
+ new PermissionStatus("", "", FsPermission.getDefault()),
+ key);
+
+ Arrays.fill(dataBlockWrite, (byte) (i % 256));
+ for (int j = 0; j < NUMBER_OF_BLOCKS; j++) {
+ outputStream.write(dataBlockWrite);
+ }
+
+ outputStream.flush();
+ outputStream.close();
+ }
+ } catch (AzureException e) {
+ LOG.error("DatablockWriter thread encountered a storage exception."
+ + e.getMessage(), e);
+ } catch (IOException e) {
+ LOG.error("DatablockWriter thread encountered an I/O exception."
+ + e.getMessage(), e);
+ }
+ }
+ }
+
+ @Test
+ public void testReadOOBWrites() throws Exception {
+
+ byte[] dataBlockWrite = new byte[UPLOAD_BLOCK_SIZE];
+ byte[] dataBlockRead = new byte[UPLOAD_BLOCK_SIZE];
+
+ // Write to blob to make sure it exists.
+ //
+ // Write five 4 MB blocks to the blob. To ensure there is data in the blob before
+ // reading. This eliminates the race between the reader and writer threads.
+ String key = "WASB_String" + AzureTestUtils.getForkID() + ".txt";
+ OutputStream outputStream = testAccount.getStore().storefile(
+ key,
+ new PermissionStatus("", "", FsPermission.getDefault()),
+ key);
+ Arrays.fill(dataBlockWrite, (byte) 255);
+ for (int i = 0; i < NUMBER_OF_BLOCKS; i++) {
+ outputStream.write(dataBlockWrite);
+ }
+
+ outputStream.flush();
+ outputStream.close();
+
+ // Start writing blocks to Azure store using the DataBlockWriter thread.
+ DataBlockWriter writeBlockTask = new DataBlockWriter(testAccount, key);
+ writeBlockTask.startWriting();
+ int count = 0;
+
+ for (int i = 0; i < 5; i++) {
+ try(InputStream inputStream = testAccount.getStore().retrieve(key)) {
+ count = 0;
+ int c = 0;
+
+ while (c >= 0) {
+ c = inputStream.read(dataBlockRead, 0, UPLOAD_BLOCK_SIZE);
+ if (c < 0) {
+ break;
+ }
+
+ // Counting the number of bytes.
+ count += c;
+ }
+ } catch (IOException e) {
+ LOG.error("read failure", e);
+ fail("read failure: " + e);
+ }
+ }
+
+ // Stop writing blocks.
+ writeBlockTask.stopWriting();
+
+ // Validate that a block was read.
+ assertEquals(NUMBER_OF_BLOCKS * UPLOAD_BLOCK_SIZE, count);
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestAzureConcurrentOutOfBandIoWithSecureMode.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestAzureConcurrentOutOfBandIoWithSecureMode.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestAzureConcurrentOutOfBandIoWithSecureMode.java
new file mode 100644
index 0000000..2b0ea56
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestAzureConcurrentOutOfBandIoWithSecureMode.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+/**
+ * Extends ITestAzureConcurrentOutOfBandIo in order to run testReadOOBWrites with secure mode
+ * (fs.azure.secure.mode) both enabled and disabled.
+ */
+public class ITestAzureConcurrentOutOfBandIoWithSecureMode
+ extends ITestAzureConcurrentOutOfBandIo {
+
+ @Override
+ protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+ return AzureBlobStorageTestAccount.createOutOfBandStore(
+ UPLOAD_BLOCK_SIZE, DOWNLOAD_BLOCK_SIZE, true);
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestAzureFileSystemErrorConditions.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestAzureFileSystemErrorConditions.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestAzureFileSystemErrorConditions.java
new file mode 100644
index 0000000..49e6730
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestAzureFileSystemErrorConditions.java
@@ -0,0 +1,243 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.HttpURLConnection;
+import java.net.URI;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.concurrent.Callable;
+
+import com.microsoft.azure.storage.OperationContext;
+import com.microsoft.azure.storage.SendingRequestEvent;
+import com.microsoft.azure.storage.StorageEvent;
+import org.junit.Test;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.TestHookOperationContext;
+import org.apache.hadoop.test.GenericTestUtils;
+
+import static org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.NO_ACCESS_TO_CONTAINER_MSG;
+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
+import static org.junit.Assume.assumeNotNull;
+
+/**
+ * Error handling.
+ */
+public class ITestAzureFileSystemErrorConditions extends
+ AbstractWasbTestWithTimeout {
+ private static final int ALL_THREE_FILE_SIZE = 1024;
+
+ @Test
+ public void testNoInitialize() throws Exception {
+ intercept(AssertionError.class,
+ new Callable<FileMetadata>() {
+ @Override
+ public FileMetadata call() throws Exception {
+ return new AzureNativeFileSystemStore()
+ .retrieveMetadata("foo");
+ }
+ });
+ }
+
+ /**
+ * Try accessing an unauthorized or non-existent (treated the same) container
+ * from WASB.
+ */
+ @Test
+ public void testAccessUnauthorizedPublicContainer() throws Exception {
+ final String container = "nonExistentContainer";
+ final String account = "hopefullyNonExistentAccount";
+ Path noAccessPath = new Path(
+ "wasb://" + container + "@" + account + "/someFile");
+ NativeAzureFileSystem.suppressRetryPolicy();
+ try {
+ FileSystem.get(noAccessPath.toUri(), new Configuration())
+ .open(noAccessPath);
+ assertTrue("Should've thrown.", false);
+ } catch (AzureException ex) {
+ GenericTestUtils.assertExceptionContains(
+ String.format(NO_ACCESS_TO_CONTAINER_MSG, account, container), ex);
+ } finally {
+ NativeAzureFileSystem.resumeRetryPolicy();
+ }
+ }
+
+ @Test
+ public void testAccessContainerWithWrongVersion() throws Exception {
+ AzureNativeFileSystemStore store = new AzureNativeFileSystemStore();
+ MockStorageInterface mockStorage = new MockStorageInterface();
+ store.setAzureStorageInteractionLayer(mockStorage);
+ try (FileSystem fs = new NativeAzureFileSystem(store)) {
+ Configuration conf = new Configuration();
+ AzureBlobStorageTestAccount.setMockAccountKey(conf);
+ HashMap<String, String> metadata = new HashMap<String, String>();
+ metadata.put(AzureNativeFileSystemStore.VERSION_METADATA_KEY,
+ "2090-04-05"); // It's from the future!
+ mockStorage.addPreExistingContainer(
+ AzureBlobStorageTestAccount.getMockContainerUri(), metadata);
+
+ AzureException ex = intercept(AzureException.class,
+ new Callable<FileStatus[]>() {
+ @Override
+ public FileStatus[] call() throws Exception {
+ fs.initialize(new URI(AzureBlobStorageTestAccount.MOCK_WASB_URI),
+ conf);
+ return fs.listStatus(new Path("/"));
+ }
+ });
+ GenericTestUtils.assertExceptionContains(
+ "unsupported version: 2090-04-05.", ex);
+ }
+ }
+
+ private interface ConnectionRecognizer {
+ boolean isTargetConnection(HttpURLConnection connection);
+ }
+
+ private class TransientErrorInjector extends StorageEvent<SendingRequestEvent> {
+ private final ConnectionRecognizer connectionRecognizer;
+ private boolean injectedErrorOnce = false;
+
+ public TransientErrorInjector(ConnectionRecognizer connectionRecognizer) {
+ this.connectionRecognizer = connectionRecognizer;
+ }
+
+ @Override
+ public void eventOccurred(SendingRequestEvent eventArg) {
+ HttpURLConnection connection
+ = (HttpURLConnection) eventArg.getConnectionObject();
+ if (!connectionRecognizer.isTargetConnection(connection)) {
+ return;
+ }
+ if (!injectedErrorOnce) {
+ connection.setReadTimeout(1);
+ connection.disconnect();
+ injectedErrorOnce = true;
+ }
+ }
+ }
+
+ private void injectTransientError(NativeAzureFileSystem fs,
+ final ConnectionRecognizer connectionRecognizer) {
+ fs.getStore().addTestHookToOperationContext(new TestHookOperationContext() {
+ @Override
+ public OperationContext modifyOperationContext(OperationContext original) {
+ original.getSendingRequestEventHandler().addListener(
+ new TransientErrorInjector(connectionRecognizer));
+ return original;
+ }
+ });
+ }
+
+ @Test
+ public void testTransientErrorOnDelete() throws Exception {
+ // Need to do this test against a live storage account
+ AzureBlobStorageTestAccount testAccount =
+ AzureBlobStorageTestAccount.create();
+ assumeNotNull(testAccount);
+ try {
+ NativeAzureFileSystem fs = testAccount.getFileSystem();
+ injectTransientError(fs, new ConnectionRecognizer() {
+ @Override
+ public boolean isTargetConnection(HttpURLConnection connection) {
+ return connection.getRequestMethod().equals("DELETE");
+ }
+ });
+ Path testFile = new Path("/a/b");
+ assertTrue(fs.createNewFile(testFile));
+ assertTrue(fs.rename(testFile, new Path("/x")));
+ } finally {
+ testAccount.cleanup();
+ }
+ }
+
+ private void writeAllThreeFile(NativeAzureFileSystem fs, Path testFile)
+ throws IOException {
+ byte[] buffer = new byte[ALL_THREE_FILE_SIZE];
+ Arrays.fill(buffer, (byte) 3);
+ try(OutputStream stream = fs.create(testFile)) {
+ stream.write(buffer);
+ }
+ }
+
+ private void readAllThreeFile(NativeAzureFileSystem fs, Path testFile)
+ throws IOException {
+ byte[] buffer = new byte[ALL_THREE_FILE_SIZE];
+ InputStream inStream = fs.open(testFile);
+ assertEquals(buffer.length,
+ inStream.read(buffer, 0, buffer.length));
+ inStream.close();
+ for (int i = 0; i < buffer.length; i++) {
+ assertEquals(3, buffer[i]);
+ }
+ }
+
+ @Test
+ public void testTransientErrorOnCommitBlockList() throws Exception {
+ // Need to do this test against a live storage account
+ AzureBlobStorageTestAccount testAccount =
+ AzureBlobStorageTestAccount.create();
+ assumeNotNull(testAccount);
+ try {
+ NativeAzureFileSystem fs = testAccount.getFileSystem();
+ injectTransientError(fs, new ConnectionRecognizer() {
+ @Override
+ public boolean isTargetConnection(HttpURLConnection connection) {
+ return connection.getRequestMethod().equals("PUT")
+ && connection.getURL().getQuery() != null
+ && connection.getURL().getQuery().contains("blocklist");
+ }
+ });
+ Path testFile = new Path("/a/b");
+ writeAllThreeFile(fs, testFile);
+ readAllThreeFile(fs, testFile);
+ } finally {
+ testAccount.cleanup();
+ }
+ }
+
+ @Test
+ public void testTransientErrorOnRead() throws Exception {
+ // Need to do this test against a live storage account
+ AzureBlobStorageTestAccount testAccount =
+ AzureBlobStorageTestAccount.create();
+ assumeNotNull(testAccount);
+ try {
+ NativeAzureFileSystem fs = testAccount.getFileSystem();
+ Path testFile = new Path("/a/b");
+ writeAllThreeFile(fs, testFile);
+ injectTransientError(fs, new ConnectionRecognizer() {
+ @Override
+ public boolean isTargetConnection(HttpURLConnection connection) {
+ return connection.getRequestMethod().equals("GET");
+ }
+ });
+ readAllThreeFile(fs, testFile);
+ } finally {
+ testAccount.cleanup();
+ }
+ }
+}
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org
[17/20] hadoop git commit: HADOOP-14553. Add (parallelized)
integration tests to hadoop-azure Contributed by Steve Loughran
Posted by st...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemLive.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemLive.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemLive.java
new file mode 100644
index 0000000..f969968
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemLive.java
@@ -0,0 +1,236 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import java.util.concurrent.CountDownLatch;
+
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+
+import org.junit.Test;
+
+import com.microsoft.azure.storage.StorageException;
+
+/**
+ * Tests the Native Azure file system (WASB) against an actual blob store.
+ */
+public class ITestNativeAzureFileSystemLive extends
+ NativeAzureFileSystemBaseTest {
+
+ @Override
+ protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+ return AzureBlobStorageTestAccount.create();
+ }
+
+ @Test
+ public void testLazyRenamePendingCanOverwriteExistingFile()
+ throws Exception {
+ final String srcFile = "srcFile";
+ final String dstFile = "dstFile";
+ Path srcPath = path(srcFile);
+ FSDataOutputStream srcStream = fs.create(srcPath);
+ assertTrue(fs.exists(srcPath));
+ Path dstPath = path(dstFile);
+ FSDataOutputStream dstStream = fs.create(dstPath);
+ assertTrue(fs.exists(dstPath));
+ NativeAzureFileSystem nfs = fs;
+ final String fullSrcKey = nfs.pathToKey(nfs.makeAbsolute(srcPath));
+ final String fullDstKey = nfs.pathToKey(nfs.makeAbsolute(dstPath));
+ nfs.getStoreInterface().rename(fullSrcKey, fullDstKey, true, null);
+ assertTrue(fs.exists(dstPath));
+ assertFalse(fs.exists(srcPath));
+ IOUtils.cleanupWithLogger(null, srcStream);
+ IOUtils.cleanupWithLogger(null, dstStream);
+ }
+ /**
+ * Tests fs.delete() function to delete a blob when another blob is holding a
+ * lease on it. Delete if called without a lease should fail if another process
+ * is holding a lease and throw appropriate exception
+ * This is a scenario that would happen in HMaster startup when it tries to
+ * clean up the temp dirs while the HMaster process which was killed earlier
+ * held lease on the blob when doing some DDL operation
+ */
+ @Test
+ public void testDeleteThrowsExceptionWithLeaseExistsErrorMessage()
+ throws Exception {
+ LOG.info("Starting test");
+ // Create the file
+ Path path = methodPath();
+ fs.create(path);
+ assertPathExists("test file", path);
+ NativeAzureFileSystem nfs = fs;
+ final String fullKey = nfs.pathToKey(nfs.makeAbsolute(path));
+ final AzureNativeFileSystemStore store = nfs.getStore();
+
+ // Acquire the lease on the file in a background thread
+ final CountDownLatch leaseAttemptComplete = new CountDownLatch(1);
+ final CountDownLatch beginningDeleteAttempt = new CountDownLatch(1);
+ Thread t = new Thread() {
+ @Override
+ public void run() {
+ // Acquire the lease and then signal the main test thread.
+ SelfRenewingLease lease = null;
+ try {
+ lease = store.acquireLease(fullKey);
+ LOG.info("Lease acquired: " + lease.getLeaseID());
+ } catch (AzureException e) {
+ LOG.warn("Lease acqusition thread unable to acquire lease", e);
+ } finally {
+ leaseAttemptComplete.countDown();
+ }
+
+ // Wait for the main test thread to signal it will attempt the delete.
+ try {
+ beginningDeleteAttempt.await();
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ }
+
+ // Keep holding the lease past the lease acquisition retry interval, so
+ // the test covers the case of delete retrying to acquire the lease.
+ try {
+ Thread.sleep(SelfRenewingLease.LEASE_ACQUIRE_RETRY_INTERVAL * 3);
+ } catch (InterruptedException ex) {
+ Thread.currentThread().interrupt();
+ }
+
+ try {
+ if (lease != null){
+ LOG.info("Freeing lease");
+ lease.free();
+ }
+ } catch (StorageException se) {
+ LOG.warn("Unable to free lease.", se);
+ }
+ }
+ };
+
+ // Start the background thread and wait for it to signal the lease is held.
+ t.start();
+ try {
+ leaseAttemptComplete.await();
+ } catch (InterruptedException ex) {
+ Thread.currentThread().interrupt();
+ }
+
+ // Try to delete the same file
+ beginningDeleteAttempt.countDown();
+ store.delete(fullKey);
+
+ // At this point file SHOULD BE DELETED
+ assertPathDoesNotExist("Leased path", path);
+ }
+
+ /**
+ * Check that isPageBlobKey works as expected. This assumes that
+ * in the test configuration, the list of supported page blob directories
+ * only includes "pageBlobs". That's why this test is made specific
+ * to this subclass.
+ */
+ @Test
+ public void testIsPageBlobKey() {
+ AzureNativeFileSystemStore store = fs.getStore();
+
+ // Use literal strings so it's easier to understand the tests.
+ // In case the constant changes, we want to know about it so we can update this test.
+ assertEquals(AzureBlobStorageTestAccount.DEFAULT_PAGE_BLOB_DIRECTORY, "pageBlobs");
+
+ // URI prefix for test environment.
+ String uriPrefix = "file:///";
+
+ // negative tests
+ String[] negativeKeys = { "", "/", "bar", "bar/", "bar/pageBlobs", "bar/pageBlobs/foo",
+ "bar/pageBlobs/foo/", "/pageBlobs/", "/pageBlobs", "pageBlobs", "pageBlobsxyz/" };
+ for (String s : negativeKeys) {
+ assertFalse(store.isPageBlobKey(s));
+ assertFalse(store.isPageBlobKey(uriPrefix + s));
+ }
+
+ // positive tests
+ String[] positiveKeys = { "pageBlobs/", "pageBlobs/foo/", "pageBlobs/foo/bar/" };
+ for (String s : positiveKeys) {
+ assertTrue(store.isPageBlobKey(s));
+ assertTrue(store.isPageBlobKey(uriPrefix + s));
+ }
+ }
+
+ /**
+ * Test that isAtomicRenameKey() works as expected.
+ */
+ @Test
+ public void testIsAtomicRenameKey() {
+
+ AzureNativeFileSystemStore store = fs.getStore();
+
+ // We want to know if the default configuration changes so we can fix
+ // this test.
+ assertEquals(AzureBlobStorageTestAccount.DEFAULT_ATOMIC_RENAME_DIRECTORIES,
+ "/atomicRenameDir1,/atomicRenameDir2");
+
+ // URI prefix for test environment.
+ String uriPrefix = "file:///";
+
+ // negative tests
+ String[] negativeKeys = { "", "/", "bar", "bar/", "bar/hbase",
+ "bar/hbase/foo", "bar/hbase/foo/", "/hbase/", "/hbase", "hbase",
+ "hbasexyz/", "foo/atomicRenameDir1/"};
+ for (String s : negativeKeys) {
+ assertFalse(store.isAtomicRenameKey(s));
+ assertFalse(store.isAtomicRenameKey(uriPrefix + s));
+ }
+
+ // Positive tests. The directories for atomic rename are /hbase
+ // plus the ones in the configuration (DEFAULT_ATOMIC_RENAME_DIRECTORIES
+ // for this test).
+ String[] positiveKeys = { "hbase/", "hbase/foo/", "hbase/foo/bar/",
+ "atomicRenameDir1/foo/", "atomicRenameDir2/bar/"};
+ for (String s : positiveKeys) {
+ assertTrue(store.isAtomicRenameKey(s));
+ assertTrue(store.isAtomicRenameKey(uriPrefix + s));
+ }
+ }
+
+ /**
+ * Tests fs.mkdir() function to create a target blob while another thread
+ * is holding the lease on the blob. mkdir should not fail since the blob
+ * already exists.
+ * This is a scenario that would happen in HBase distributed log splitting.
+ * Multiple threads will try to create and update "recovered.edits" folder
+ * under the same path.
+ */
+ @Test
+ public void testMkdirOnExistingFolderWithLease() throws Exception {
+ SelfRenewingLease lease;
+ // Create the folder
+ Path path = methodPath();
+ fs.mkdirs(path);
+ NativeAzureFileSystem nfs = fs;
+ String fullKey = nfs.pathToKey(nfs.makeAbsolute(path));
+ AzureNativeFileSystemStore store = nfs.getStore();
+ // Acquire the lease on the folder
+ lease = store.acquireLease(fullKey);
+ assertNotNull("lease ID", lease.getLeaseID() != null);
+ // Try to create the same folder
+ store.storeEmptyFolder(fullKey,
+ nfs.createPermissionStatus(FsPermission.getDirDefault()));
+ lease.free();
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestOutOfBandAzureBlobOperationsLive.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestOutOfBandAzureBlobOperationsLive.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestOutOfBandAzureBlobOperationsLive.java
new file mode 100644
index 0000000..b63aaf0
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestOutOfBandAzureBlobOperationsLive.java
@@ -0,0 +1,185 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.Test;
+
+import com.microsoft.azure.storage.blob.BlobOutputStream;
+import com.microsoft.azure.storage.blob.CloudBlockBlob;
+
+/**
+ * Live blob operations.
+ */
+public class ITestOutOfBandAzureBlobOperationsLive extends AbstractWasbTestBase {
+
+ @Override
+ protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+ return AzureBlobStorageTestAccount.create();
+ }
+
+ // scenario for this particular test described at MONARCH-HADOOP-764
+ // creating a file out-of-band would confuse mkdirs("<oobfilesUncleFolder>")
+ // eg oob creation of "user/<name>/testFolder/a/input/file"
+ // Then wasb creation of "user/<name>/testFolder/a/output" fails
+ @Test
+ public void outOfBandFolder_uncleMkdirs() throws Exception {
+
+ // NOTE: manual use of CloudBlockBlob targets working directory explicitly.
+ // WASB driver methods prepend working directory implicitly.
+ String workingDir = "user/"
+ + UserGroupInformation.getCurrentUser().getShortUserName() + "/";
+
+ CloudBlockBlob blob = testAccount.getBlobReference(workingDir
+ + "testFolder1/a/input/file");
+ BlobOutputStream s = blob.openOutputStream();
+ s.close();
+ assertTrue(fs.exists(new Path("testFolder1/a/input/file")));
+
+ Path targetFolder = new Path("testFolder1/a/output");
+ assertTrue(fs.mkdirs(targetFolder));
+ }
+
+ // scenario for this particular test described at MONARCH-HADOOP-764
+ @Test
+ public void outOfBandFolder_parentDelete() throws Exception {
+
+ // NOTE: manual use of CloudBlockBlob targets working directory explicitly.
+ // WASB driver methods prepend working directory implicitly.
+ String workingDir = "user/"
+ + UserGroupInformation.getCurrentUser().getShortUserName() + "/";
+ CloudBlockBlob blob = testAccount.getBlobReference(workingDir
+ + "testFolder2/a/input/file");
+ BlobOutputStream s = blob.openOutputStream();
+ s.close();
+ assertTrue(fs.exists(new Path("testFolder2/a/input/file")));
+
+ Path targetFolder = new Path("testFolder2/a/input");
+ assertTrue(fs.delete(targetFolder, true));
+ }
+
+ @Test
+ public void outOfBandFolder_rootFileDelete() throws Exception {
+
+ CloudBlockBlob blob = testAccount.getBlobReference("fileY");
+ BlobOutputStream s = blob.openOutputStream();
+ s.close();
+ assertTrue(fs.exists(new Path("/fileY")));
+ assertTrue(fs.delete(new Path("/fileY"), true));
+ }
+
+ @Test
+ public void outOfBandFolder_firstLevelFolderDelete() throws Exception {
+
+ CloudBlockBlob blob = testAccount.getBlobReference("folderW/file");
+ BlobOutputStream s = blob.openOutputStream();
+ s.close();
+ assertTrue(fs.exists(new Path("/folderW")));
+ assertTrue(fs.exists(new Path("/folderW/file")));
+ assertTrue(fs.delete(new Path("/folderW"), true));
+ }
+
+ // scenario for this particular test described at MONARCH-HADOOP-764
+ @Test
+ public void outOfBandFolder_siblingCreate() throws Exception {
+
+ // NOTE: manual use of CloudBlockBlob targets working directory explicitly.
+ // WASB driver methods prepend working directory implicitly.
+ String workingDir = "user/"
+ + UserGroupInformation.getCurrentUser().getShortUserName() + "/";
+ CloudBlockBlob blob = testAccount.getBlobReference(workingDir
+ + "testFolder3/a/input/file");
+ BlobOutputStream s = blob.openOutputStream();
+ s.close();
+ assertTrue(fs.exists(new Path("testFolder3/a/input/file")));
+
+ Path targetFile = new Path("testFolder3/a/input/file2");
+ FSDataOutputStream s2 = fs.create(targetFile);
+ s2.close();
+ }
+
+ // scenario for this particular test described at MONARCH-HADOOP-764
+ // creating a new file in the root folder
+ @Test
+ public void outOfBandFolder_create_rootDir() throws Exception {
+ Path targetFile = new Path("/newInRoot");
+ FSDataOutputStream s2 = fs.create(targetFile);
+ s2.close();
+ }
+
+ // scenario for this particular test described at MONARCH-HADOOP-764
+ @Test
+ public void outOfBandFolder_rename() throws Exception {
+
+ // NOTE: manual use of CloudBlockBlob targets working directory explicitly.
+ // WASB driver methods prepend working directory implicitly.
+ String workingDir = "user/"
+ + UserGroupInformation.getCurrentUser().getShortUserName() + "/";
+ CloudBlockBlob blob = testAccount.getBlobReference(workingDir
+ + "testFolder4/a/input/file");
+ BlobOutputStream s = blob.openOutputStream();
+ s.close();
+
+ Path srcFilePath = new Path("testFolder4/a/input/file");
+ assertTrue(fs.exists(srcFilePath));
+
+ Path destFilePath = new Path("testFolder4/a/input/file2");
+ fs.rename(srcFilePath, destFilePath);
+ }
+
+ // Verify that you can rename a file which is the only file in an implicit folder in the
+ // WASB file system.
+ // scenario for this particular test described at MONARCH-HADOOP-892
+ @Test
+ public void outOfBandSingleFile_rename() throws Exception {
+
+ // NOTE: manual use of CloudBlockBlob targets working directory explicitly.
+ // WASB driver methods prepend working directory implicitly.
+ String workingDir = "user/" + UserGroupInformation.getCurrentUser().getShortUserName() + "/";
+ CloudBlockBlob blob = testAccount.getBlobReference(workingDir + "testFolder5/a/input/file");
+ BlobOutputStream s = blob.openOutputStream();
+ s.close();
+
+ Path srcFilePath = new Path("testFolder5/a/input/file");
+ assertTrue(fs.exists(srcFilePath));
+
+ Path destFilePath = new Path("testFolder5/file2");
+ fs.rename(srcFilePath, destFilePath);
+ }
+
+ // WASB must force explicit parent directories in create, delete, mkdirs, rename.
+ // scenario for this particular test described at MONARCH-HADOOP-764
+ @Test
+ public void outOfBandFolder_rename_rootLevelFiles() throws Exception {
+
+ // NOTE: manual use of CloudBlockBlob targets working directory explicitly.
+ // WASB driver methods prepend working directory implicitly.
+ CloudBlockBlob blob = testAccount.getBlobReference("fileX");
+ BlobOutputStream s = blob.openOutputStream();
+ s.close();
+
+ Path srcFilePath = new Path("/fileX");
+ assertTrue(fs.exists(srcFilePath));
+
+ Path destFilePath = new Path("/fileXrename");
+ fs.rename(srcFilePath, destFilePath);
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestReadAndSeekPageBlobAfterWrite.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestReadAndSeekPageBlobAfterWrite.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestReadAndSeekPageBlobAfterWrite.java
new file mode 100644
index 0000000..f2af116
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestReadAndSeekPageBlobAfterWrite.java
@@ -0,0 +1,341 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.Arrays;
+import java.util.Random;
+
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.azure.integration.AbstractAzureScaleTest;
+import org.apache.hadoop.util.Time;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.hadoop.fs.azure.integration.AzureTestUtils.*;
+
+/**
+ * Write data into a page blob and verify you can read back all of it
+ * or just a part of it.
+ */
+public class ITestReadAndSeekPageBlobAfterWrite extends AbstractAzureScaleTest {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(ITestReadAndSeekPageBlobAfterWrite.class);
+
+ private FileSystem fs;
+ private byte[] randomData;
+
+ // Page blob physical page size
+ private static final int PAGE_SIZE = PageBlobFormatHelpers.PAGE_SIZE;
+
+ // Size of data on page (excluding header)
+ private static final int PAGE_DATA_SIZE = PAGE_SIZE - PageBlobFormatHelpers.PAGE_HEADER_SIZE;
+ private static final int MAX_BYTES = 33554432; // maximum bytes in a file that we'll test
+ private static final int MAX_PAGES = MAX_BYTES / PAGE_SIZE; // maximum number of pages we'll test
+ private Random rand = new Random();
+
+ // A key with a prefix under /pageBlobs, which for the test file system will
+ // force use of a page blob.
+ private static final String KEY = "/pageBlobs/file.dat";
+
+ // path of page blob file to read and write
+ private Path blobPath;
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ fs = getTestAccount().getFileSystem();
+ // Make sure we are using an integral number of pages.
+ assertEquals(0, MAX_BYTES % PAGE_SIZE);
+
+ // load an in-memory array of random data
+ randomData = new byte[PAGE_SIZE * MAX_PAGES];
+ rand.nextBytes(randomData);
+
+ blobPath = blobPath("ITestReadAndSeekPageBlobAfterWrite");
+ }
+
+ @Override
+ public void tearDown() throws Exception {
+ deleteQuietly(fs, blobPath, true);
+ super.tearDown();
+ }
+
+ /**
+ * Make sure the file name (key) is a page blob file name. If anybody changes that,
+ * we need to come back and update this test class.
+ */
+ @Test
+ public void testIsPageBlobFileName() {
+ AzureNativeFileSystemStore store = ((NativeAzureFileSystem) fs).getStore();
+ String[] a = blobPath.toUri().getPath().split("/");
+ String key2 = a[1] + "/";
+ assertTrue("Not a page blob: " + blobPath, store.isPageBlobKey(key2));
+ }
+
+ /**
+ * For a set of different file sizes, write some random data to a page blob,
+ * read it back, and compare that what was read is the same as what was written.
+ */
+ @Test
+ public void testReadAfterWriteRandomData() throws IOException {
+
+ // local shorthand
+ final int pds = PAGE_DATA_SIZE;
+
+ // Test for sizes at and near page boundaries
+ int[] dataSizes = {
+
+ // on first page
+ 0, 1, 2, 3,
+
+ // Near first physical page boundary (because the implementation
+ // stores PDS + the page header size bytes on each page).
+ pds - 1, pds, pds + 1, pds + 2, pds + 3,
+
+ // near second physical page boundary
+ (2 * pds) - 1, (2 * pds), (2 * pds) + 1, (2 * pds) + 2, (2 * pds) + 3,
+
+ // near tenth physical page boundary
+ (10 * pds) - 1, (10 * pds), (10 * pds) + 1, (10 * pds) + 2, (10 * pds) + 3,
+
+ // test one big size, >> 4MB (an internal buffer size in the code)
+ MAX_BYTES
+ };
+
+ for (int i : dataSizes) {
+ testReadAfterWriteRandomData(i);
+ }
+ }
+
+ private void testReadAfterWriteRandomData(int size) throws IOException {
+ writeRandomData(size);
+ readRandomDataAndVerify(size);
+ }
+
+ /**
+ * Read "size" bytes of data and verify that what was read and what was written
+ * are the same.
+ */
+ private void readRandomDataAndVerify(int size) throws AzureException, IOException {
+ byte[] b = new byte[size];
+ FSDataInputStream stream = fs.open(blobPath);
+ int bytesRead = stream.read(b);
+ stream.close();
+ assertEquals(size, bytesRead);
+
+ // compare the data read to the data written
+ assertTrue(comparePrefix(randomData, b, size));
+ }
+
+ // return true if the beginning "size" values of the arrays are the same
+ private boolean comparePrefix(byte[] a, byte[] b, int size) {
+ if (a.length < size || b.length < size) {
+ return false;
+ }
+ for (int i = 0; i < size; i++) {
+ if (a[i] != b[i]) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ // Write a specified amount of random data to the file path for this test class.
+ private void writeRandomData(int size) throws IOException {
+ OutputStream output = fs.create(blobPath);
+ output.write(randomData, 0, size);
+ output.close();
+ }
+
+ /**
+ * Write data to a page blob, open it, seek, and then read a range of data.
+ * Then verify that the data read from that range matches the data originally written.
+ */
+ @Test
+ public void testPageBlobSeekAndReadAfterWrite() throws IOException {
+ writeRandomData(PAGE_SIZE * MAX_PAGES);
+ int recordSize = 100;
+ byte[] b = new byte[recordSize];
+
+ try (FSDataInputStream stream = fs.open(blobPath)) {
+ // Seek to a boundary around the middle of the 6th page
+ int seekPosition = 5 * PAGE_SIZE + 250;
+ stream.seek(seekPosition);
+
+ // Read a record's worth of bytes and verify results
+ int bytesRead = stream.read(b);
+ verifyReadRandomData(b, bytesRead, seekPosition, recordSize);
+
+ // Seek to another spot and read a record greater than a page
+ seekPosition = 10 * PAGE_SIZE + 250;
+ stream.seek(seekPosition);
+ recordSize = 1000;
+ b = new byte[recordSize];
+ bytesRead = stream.read(b);
+ verifyReadRandomData(b, bytesRead, seekPosition, recordSize);
+
+ // Read the last 100 bytes of the file
+ recordSize = 100;
+ seekPosition = PAGE_SIZE * MAX_PAGES - recordSize;
+ stream.seek(seekPosition);
+ b = new byte[recordSize];
+ bytesRead = stream.read(b);
+ verifyReadRandomData(b, bytesRead, seekPosition, recordSize);
+
+ // Read past the end of the file; we should get back only partial data.
+ recordSize = 100;
+ seekPosition = PAGE_SIZE * MAX_PAGES - recordSize + 50;
+ stream.seek(seekPosition);
+ b = new byte[recordSize];
+ bytesRead = stream.read(b);
+ assertEquals(50, bytesRead);
+
+ // compare last 50 bytes written with those read
+ byte[] tail = Arrays.copyOfRange(randomData, seekPosition, randomData.length);
+ assertTrue(comparePrefix(tail, b, 50));
+ }
+ }
+
+ // Verify that reading a record of data after seeking gives the expected data.
+ private void verifyReadRandomData(byte[] b, int bytesRead, int seekPosition, int recordSize) {
+ byte[] originalRecordData =
+ Arrays.copyOfRange(randomData, seekPosition, seekPosition + recordSize + 1);
+ assertEquals(recordSize, bytesRead);
+ assertTrue(comparePrefix(originalRecordData, b, recordSize));
+ }
+
+ // Test many small flushed writes interspersed with periodic hflush calls.
+ // For manual testing, increase NUM_WRITES to a large number.
+ // The goal for a long-running manual test is to make sure that it finishes
+ // and the close() call does not time out. It also facilitates debugging into
+ // hflush/hsync.
+ @Test
+ public void testManySmallWritesWithHFlush() throws IOException {
+ writeAndReadOneFile(50, 100, 20);
+ }
+
+ /**
+ * Write a total of numWrites * recordLength bytes to a file, read it back,
+ * and check to make sure what was read is the same as what was written.
+ * The syncInterval is the number of writes after which to call hflush to
+ * force the data to storage.
+ */
+ private void writeAndReadOneFile(int numWrites,
+ int recordLength, int syncInterval) throws IOException {
+
+ // A lower bound on the time we expect a single write to Azure
+ // storage to take.
+ final long MINIMUM_EXPECTED_TIME = 20;
+ LOG.info("Writing " + numWrites * recordLength + " bytes to " + blobPath.getName());
+ FSDataOutputStream output = fs.create(blobPath);
+ int writesSinceHFlush = 0;
+ try {
+
+ // Do a flush and hflush to exercise the empty-write-queue case in
+ // PageBlobOutputStream, and with it the concurrent execution gates.
+ output.flush();
+ output.hflush();
+ for (int i = 0; i < numWrites; i++) {
+ output.write(randomData, i * recordLength, recordLength);
+ writesSinceHFlush++;
+ output.flush();
+ if ((i % syncInterval) == 0) {
+ output.hflush();
+ writesSinceHFlush = 0;
+ }
+ }
+ } finally {
+ long start = Time.monotonicNow();
+ output.close();
+ long end = Time.monotonicNow();
+ LOG.debug("close duration = " + (end - start) + " msec.");
+ if (writesSinceHFlush > 0) {
+ assertTrue(String.format(
+ "close duration with >= 1 pending write is %d, less than minimum expected of %d",
+ end - start, MINIMUM_EXPECTED_TIME),
+ end - start >= MINIMUM_EXPECTED_TIME);
+ }
+ }
+
+ // Read the data back and check it.
+ FSDataInputStream stream = fs.open(blobPath);
+ int SIZE = numWrites * recordLength;
+ byte[] b = new byte[SIZE];
+ try {
+ stream.seek(0);
+ stream.read(b, 0, SIZE);
+ verifyReadRandomData(b, SIZE, 0, SIZE);
+ } finally {
+ stream.close();
+ }
+
+ // delete the file
+ fs.delete(blobPath, false);
+ }
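+
+ // Semantics assumed by the timing assertion above: flush() may return
+ // immediately, while hflush() (the Syncable contract) should not return
+ // until buffered data is visible to new readers -- for a page blob,
+ // until the pending pages have been uploaded. close() therefore pays
+ // that upload cost for any writes made since the last hflush().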
+
+ // Test writing to a large file repeatedly as a stress test.
+ // Set the repetitions to a larger number for manual testing
+ // for a longer stress run.
+ @Test
+ public void testLargeFileStress() throws IOException {
+ int numWrites = 32;
+ int recordSize = 1024 * 1024;
+ int syncInterval = 10;
+ int repetitions = 1;
+ for (int i = 0; i < repetitions; i++) {
+ writeAndReadOneFile(numWrites, recordSize, syncInterval);
+ }
+ }
+
+ // Write to a file repeatedly to verify that it extends.
+ // The page blob file should start out at 128MB and finish at 256MB.
+ @Test
+ public void testFileSizeExtension() throws IOException {
+ final int writeSize = 1024 * 1024;
+ final int numWrites = 129;
+ final byte dataByte = 5;
+ byte[] data = new byte[writeSize];
+ Arrays.fill(data, dataByte);
+ try (FSDataOutputStream output = fs.create(blobPath)) {
+ for (int i = 0; i < numWrites; i++) {
+ output.write(data);
+ output.hflush();
+ LOG.debug("total writes = " + (i + 1));
+ }
+ }
+
+ // Show that we wrote more than the default page blob file size.
+ assertTrue(numWrites * writeSize > PageBlobOutputStream.PAGE_BLOB_MIN_SIZE);
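+ // (129 writes of 1 MB just crosses the assumed 128 MB initial
+ // allocation, which is what forces the extension under test.)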
+
+ // Verify we can list the new size. That will prove we expanded the file.
+ FileStatus[] status = fs.listStatus(blobPath);
+ assertEquals("File size hasn't changed " + status,
+ numWrites * writeSize, status[0].getLen());
+ LOG.debug("Total bytes written to " + blobPath + " = " + status[0].getLen());
+ fs.delete(blobPath, false);
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestWasbRemoteCallHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestWasbRemoteCallHelper.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestWasbRemoteCallHelper.java
new file mode 100644
index 0000000..062bc36
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestWasbRemoteCallHelper.java
@@ -0,0 +1,568 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.io.retry.RetryUtils;
+import org.apache.http.Header;
+import org.apache.http.HttpResponse;
+import org.apache.http.HttpEntity;
+import org.apache.http.HttpStatus;
+import org.apache.http.StatusLine;
+import org.apache.http.ProtocolVersion;
+import org.apache.http.ParseException;
+import org.apache.http.HeaderElement;
+import org.apache.http.client.HttpClient;
+import org.apache.http.client.methods.HttpGet;
+import org.hamcrest.Description;
+import org.hamcrest.TypeSafeMatcher;
+import org.junit.Assume;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+import org.mockito.ArgumentMatcher;
+import org.mockito.Mockito;
+
+import java.io.ByteArrayInputStream;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.nio.charset.StandardCharsets;
+
+import static org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.KEY_USE_SECURE_MODE;
+import static org.mockito.Matchers.argThat;
+import static org.mockito.Mockito.atLeast;
+import static org.mockito.Mockito.times;
+
+/**
+ * Test class to hold all WasbRemoteCallHelper tests.
+ */
+public class ITestWasbRemoteCallHelper
+ extends AbstractWasbTestBase {
+ public static final String EMPTY_STRING = "";
+ private static final int INVALID_HTTP_STATUS_CODE_999 = 999;
+
+ @Override
+ protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+ Configuration conf = new Configuration();
+ conf.set(NativeAzureFileSystem.KEY_AZURE_AUTHORIZATION, "true");
+ conf.set(RemoteWasbAuthorizerImpl.KEY_REMOTE_AUTH_SERVICE_URLS, "http://localhost1/,http://localhost2/,http://localhost:8080");
+ return AzureBlobStorageTestAccount.create(conf);
+ }
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ boolean useSecureMode = fs.getConf().getBoolean(KEY_USE_SECURE_MODE, false);
+ boolean useAuthorization = fs.getConf()
+ .getBoolean(NativeAzureFileSystem.KEY_AZURE_AUTHORIZATION, false);
+ Assume.assumeTrue("Test valid when both SecureMode and Authorization are enabled .. skipping",
+ useSecureMode && useAuthorization);
+ }
+
+ @Rule
+ public ExpectedException expectedEx = ExpectedException.none();
+
+ /**
+ * Test invalid status-code.
+ * @throws Throwable
+ */
+ @Test // (expected = WasbAuthorizationException.class)
+ public void testInvalidStatusCode() throws Throwable {
+
+ setupExpectations();
+
+ // set up mocks
+ HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
+ HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class);
+ Mockito.when(mockHttpClient.execute(Mockito.<HttpGet>any()))
+ .thenReturn(mockHttpResponse);
+ Mockito.when(mockHttpResponse.getStatusLine())
+ .thenReturn(newStatusLine(INVALID_HTTP_STATUS_CODE_999));
+ // finished setting up mocks
+
+ performop(mockHttpClient);
+ }
+
+ /**
+ * Test invalid Content-Type.
+ * @throws Throwable
+ */
+ @Test // (expected = WasbAuthorizationException.class)
+ public void testInvalidContentType() throws Throwable {
+
+ setupExpectations();
+
+ // set up mocks
+ HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
+ HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class);
+ Mockito.when(mockHttpClient.execute(Mockito.<HttpGet>any())).thenReturn(mockHttpResponse);
+ Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(HttpStatus.SC_OK));
+ Mockito.when(mockHttpResponse.getFirstHeader("Content-Type"))
+ .thenReturn(newHeader("Content-Type", "text/plain"));
+ // finished setting up mocks
+
+ performop(mockHttpClient);
+ }
+
+ /**
+ * Test missing Content-Length.
+ * @throws Throwable
+ */
+ @Test // (expected = WasbAuthorizationException.class)
+ public void testMissingContentLength() throws Throwable {
+
+ setupExpectations();
+
+ // set up mocks
+ HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
+ HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class);
+ Mockito.when(mockHttpClient.execute(Mockito.<HttpGet>any())).thenReturn(mockHttpResponse);
+ Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(HttpStatus.SC_OK));
+ Mockito.when(mockHttpResponse.getFirstHeader("Content-Type"))
+ .thenReturn(newHeader("Content-Type", "application/json"));
+ // finished setting up mocks
+
+ performop(mockHttpClient);
+ }
+
+ /**
+ * Test Content-Length exceeds max.
+ * @throws Throwable
+ */
+ @Test // (expected = WasbAuthorizationException.class)
+ public void testContentLengthExceedsMax() throws Throwable {
+
+ setupExpectations();
+
+ // set up mocks
+ HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
+ HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class);
+ Mockito.when(mockHttpClient.execute(Mockito.<HttpGet>any())).thenReturn(mockHttpResponse);
+ Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(HttpStatus.SC_OK));
+ Mockito.when(mockHttpResponse.getFirstHeader("Content-Type"))
+ .thenReturn(newHeader("Content-Type", "application/json"));
+ Mockito.when(mockHttpResponse.getFirstHeader("Content-Length"))
+ .thenReturn(newHeader("Content-Length", "2048"));
+ // finished setting up mocks
+
+ performop(mockHttpClient);
+ }
+
+ /**
+ * Test invalid Content-Length value
+ * @throws Throwable
+ */
+ @Test // (expected = WasbAuthorizationException.class)
+ public void testInvalidContentLengthValue() throws Throwable {
+
+ setupExpectations();
+
+ // set up mocks
+ HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
+ HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class);
+ Mockito.when(mockHttpClient.execute(Mockito.<HttpGet>any())).thenReturn(mockHttpResponse);
+ Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(HttpStatus.SC_OK));
+ Mockito.when(mockHttpResponse.getFirstHeader("Content-Type"))
+ .thenReturn(newHeader("Content-Type", "application/json"));
+ Mockito.when(mockHttpResponse.getFirstHeader("Content-Length"))
+ .thenReturn(newHeader("Content-Length", "20abc48"));
+ // finished setting up mocks
+
+ performop(mockHttpClient);
+ }
+
+ /**
+ * Test valid JSON response.
+ * @throws Throwable
+ */
+ @Test
+ public void testValidJSONResponse() throws Throwable {
+
+ // set up mocks
+ HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
+
+ HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class);
+ HttpEntity mockHttpEntity = Mockito.mock(HttpEntity.class);
+
+ Mockito.when(mockHttpClient.execute(Mockito.<HttpGet>any())).thenReturn(mockHttpResponse);
+ Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(HttpStatus.SC_OK));
+ Mockito.when(mockHttpResponse.getFirstHeader("Content-Type"))
+ .thenReturn(newHeader("Content-Type", "application/json"));
+ Mockito.when(mockHttpResponse.getFirstHeader("Content-Length"))
+ .thenReturn(newHeader("Content-Length", "1024"));
+ Mockito.when(mockHttpResponse.getEntity()).thenReturn(mockHttpEntity);
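+ // One fresh stream per expected request: a ByteArrayInputStream is
+ // consumed by a single read-through, so it cannot be replayed for the
+ // next call.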
+ Mockito.when(mockHttpEntity.getContent())
+ .thenReturn(new ByteArrayInputStream(validJsonResponse().getBytes(StandardCharsets.UTF_8)))
+ .thenReturn(new ByteArrayInputStream(validJsonResponse().getBytes(StandardCharsets.UTF_8)))
+ .thenReturn(new ByteArrayInputStream(validJsonResponse().getBytes(StandardCharsets.UTF_8)));
+ // finished setting up mocks
+
+ performop(mockHttpClient);
+ }
+
+ /**
+ * Test malformed JSON response.
+ * @throws Throwable
+ */
+ @Test // (expected = WasbAuthorizationException.class)
+ public void testMalFormedJSONResponse() throws Throwable {
+
+ expectedEx.expect(WasbAuthorizationException.class);
+ expectedEx.expectMessage("com.fasterxml.jackson.core.JsonParseException: Unexpected end-of-input in FIELD_NAME");
+
+ // set up mocks
+ HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
+
+ HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class);
+ HttpEntity mockHttpEntity = Mockito.mock(HttpEntity.class);
+
+ Mockito.when(mockHttpClient.execute(Mockito.<HttpGet>any())).thenReturn(mockHttpResponse);
+ Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(HttpStatus.SC_OK));
+ Mockito.when(mockHttpResponse.getFirstHeader("Content-Type"))
+ .thenReturn(newHeader("Content-Type", "application/json"));
+ Mockito.when(mockHttpResponse.getFirstHeader("Content-Length"))
+ .thenReturn(newHeader("Content-Length", "1024"));
+ Mockito.when(mockHttpResponse.getEntity()).thenReturn(mockHttpEntity);
+ Mockito.when(mockHttpEntity.getContent())
+ .thenReturn(new ByteArrayInputStream(malformedJsonResponse().getBytes(StandardCharsets.UTF_8)));
+ // finished setting up mocks
+
+ performop(mockHttpClient);
+ }
+
+ /**
+ * Test valid JSON response failure response code.
+ * @throws Throwable
+ */
+ @Test // (expected = WasbAuthorizationException.class)
+ public void testFailureCodeJSONResponse() throws Throwable {
+
+ expectedEx.expect(WasbAuthorizationException.class);
+ expectedEx.expectMessage("Remote authorization service encountered an error Unauthorized");
+
+ // set up mocks
+ HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
+
+ HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class);
+ HttpEntity mockHttpEntity = Mockito.mock(HttpEntity.class);
+
+ Mockito.when(mockHttpClient.execute(Mockito.<HttpGet>any())).thenReturn(mockHttpResponse);
+ Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(HttpStatus.SC_OK));
+ Mockito.when(mockHttpResponse.getFirstHeader("Content-Type"))
+ .thenReturn(newHeader("Content-Type", "application/json"));
+ Mockito.when(mockHttpResponse.getFirstHeader("Content-Length"))
+ .thenReturn(newHeader("Content-Length", "1024"));
+ Mockito.when(mockHttpResponse.getEntity()).thenReturn(mockHttpEntity);
+ Mockito.when(mockHttpEntity.getContent())
+ .thenReturn(new ByteArrayInputStream(failureCodeJsonResponse().getBytes(StandardCharsets.UTF_8)));
+ // finished setting up mocks
+
+ performop(mockHttpClient);
+ }
+
+ @Test
+ public void testWhenOneInstanceIsDown() throws Throwable {
+
+ boolean isAuthorizationCachingEnabled = fs.getConf().getBoolean(CachingAuthorizer.KEY_AUTH_SERVICE_CACHING_ENABLE, false);
+
+ // set up mocks
+ HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
+ HttpEntity mockHttpEntity = Mockito.mock(HttpEntity.class);
+
+ HttpResponse mockHttpResponseService1 = Mockito.mock(HttpResponse.class);
+ Mockito.when(mockHttpResponseService1.getStatusLine())
+ .thenReturn(newStatusLine(HttpStatus.SC_INTERNAL_SERVER_ERROR));
+ Mockito.when(mockHttpResponseService1.getFirstHeader("Content-Type"))
+ .thenReturn(newHeader("Content-Type", "application/json"));
+ Mockito.when(mockHttpResponseService1.getFirstHeader("Content-Length"))
+ .thenReturn(newHeader("Content-Length", "1024"));
+ Mockito.when(mockHttpResponseService1.getEntity())
+ .thenReturn(mockHttpEntity);
+
+ HttpResponse mockHttpResponseService2 = Mockito.mock(HttpResponse.class);
+ Mockito.when(mockHttpResponseService2.getStatusLine())
+ .thenReturn(newStatusLine(HttpStatus.SC_OK));
+ Mockito.when(mockHttpResponseService2.getFirstHeader("Content-Type"))
+ .thenReturn(newHeader("Content-Type", "application/json"));
+ Mockito.when(mockHttpResponseService2.getFirstHeader("Content-Length"))
+ .thenReturn(newHeader("Content-Length", "1024"));
+ Mockito.when(mockHttpResponseService2.getEntity())
+ .thenReturn(mockHttpEntity);
+
+ HttpResponse mockHttpResponseServiceLocal = Mockito.mock(HttpResponse.class);
+ Mockito.when(mockHttpResponseServiceLocal.getStatusLine())
+ .thenReturn(newStatusLine(HttpStatus.SC_INTERNAL_SERVER_ERROR));
+ Mockito.when(mockHttpResponseServiceLocal.getFirstHeader("Content-Type"))
+ .thenReturn(newHeader("Content-Type", "application/json"));
+ Mockito.when(mockHttpResponseServiceLocal.getFirstHeader("Content-Length"))
+ .thenReturn(newHeader("Content-Length", "1024"));
+ Mockito.when(mockHttpResponseServiceLocal.getEntity())
+ .thenReturn(mockHttpEntity);
+
+ class HttpGetForService1 extends ArgumentMatcher<HttpGet> {
+ @Override public boolean matches(Object o) {
+ return checkHttpGetMatchHost((HttpGet) o, "localhost1");
+ }
+ }
+ class HttpGetForService2 extends ArgumentMatcher<HttpGet> {
+ @Override public boolean matches(Object o) {
+ return checkHttpGetMatchHost((HttpGet) o, "localhost2");
+ }
+ }
+ class HttpGetForServiceLocal extends ArgumentMatcher<HttpGet> {
+ @Override public boolean matches(Object o) {
+ try {
+ return checkHttpGetMatchHost((HttpGet) o, InetAddress.getLocalHost().getCanonicalHostName());
+ } catch (UnknownHostException e) {
+ return checkHttpGetMatchHost((HttpGet) o, "localhost");
+ }
+ }
+ }
+ Mockito.when(mockHttpClient.execute(argThat(new HttpGetForService1())))
+ .thenReturn(mockHttpResponseService1);
+ Mockito.when(mockHttpClient.execute(argThat(new HttpGetForService2())))
+ .thenReturn(mockHttpResponseService2);
+ Mockito.when(mockHttpClient.execute(argThat(new HttpGetForServiceLocal())))
+ .thenReturn(mockHttpResponseServiceLocal);
+
+ // Need 2 times because performop() does 2 fs operations.
+ Mockito.when(mockHttpEntity.getContent())
+ .thenReturn(new ByteArrayInputStream(validJsonResponse()
+ .getBytes(StandardCharsets.UTF_8)))
+ .thenReturn(new ByteArrayInputStream(validJsonResponse()
+ .getBytes(StandardCharsets.UTF_8)))
+ .thenReturn(new ByteArrayInputStream(validJsonResponse()
+ .getBytes(StandardCharsets.UTF_8)));
+ // finished setting up mocks
+
+ performop(mockHttpClient);
+
+ int expectedNumberOfInvocations = isAuthorizationCachingEnabled ? 1 : 2;
+ Mockito.verify(mockHttpClient, times(expectedNumberOfInvocations)).execute(Mockito.argThat(new HttpGetForServiceLocal()));
+ Mockito.verify(mockHttpClient, times(expectedNumberOfInvocations)).execute(Mockito.argThat(new HttpGetForService2()));
+ }
+
+ @Test
+ public void testWhenServiceInstancesAreDown() throws Throwable {
+ //expectedEx.expect(WasbAuthorizationException.class);
+ // set up mocks
+ HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
+ HttpEntity mockHttpEntity = Mockito.mock(HttpEntity.class);
+
+ HttpResponse mockHttpResponseService1 = Mockito.mock(HttpResponse.class);
+ Mockito.when(mockHttpResponseService1.getStatusLine())
+ .thenReturn(newStatusLine(HttpStatus.SC_INTERNAL_SERVER_ERROR));
+ Mockito.when(mockHttpResponseService1.getFirstHeader("Content-Type"))
+ .thenReturn(newHeader("Content-Type", "application/json"));
+ Mockito.when(mockHttpResponseService1.getFirstHeader("Content-Length"))
+ .thenReturn(newHeader("Content-Length", "1024"));
+ Mockito.when(mockHttpResponseService1.getEntity())
+ .thenReturn(mockHttpEntity);
+
+ HttpResponse mockHttpResponseService2 = Mockito.mock(HttpResponse.class);
+ Mockito.when(mockHttpResponseService2.getStatusLine())
+ .thenReturn(newStatusLine(
+ HttpStatus.SC_INTERNAL_SERVER_ERROR));
+ Mockito.when(mockHttpResponseService2.getFirstHeader("Content-Type"))
+ .thenReturn(newHeader("Content-Type", "application/json"));
+ Mockito.when(mockHttpResponseService2.getFirstHeader("Content-Length"))
+ .thenReturn(newHeader("Content-Length", "1024"));
+ Mockito.when(mockHttpResponseService2.getEntity())
+ .thenReturn(mockHttpEntity);
+
+ HttpResponse mockHttpResponseService3 = Mockito.mock(HttpResponse.class);
+ Mockito.when(mockHttpResponseService3.getStatusLine())
+ .thenReturn(newStatusLine(
+ HttpStatus.SC_INTERNAL_SERVER_ERROR));
+ Mockito.when(mockHttpResponseService3.getFirstHeader("Content-Type"))
+ .thenReturn(newHeader("Content-Type", "application/json"));
+ Mockito.when(mockHttpResponseService3.getFirstHeader("Content-Length"))
+ .thenReturn(newHeader("Content-Length", "1024"));
+ Mockito.when(mockHttpResponseService3.getEntity())
+ .thenReturn(mockHttpEntity);
+
+ class HttpGetForService1 extends ArgumentMatcher<HttpGet> {
+ @Override public boolean matches(Object o) {
+ return checkHttpGetMatchHost((HttpGet) o, "localhost1");
+ }
+ }
+ class HttpGetForService2 extends ArgumentMatcher<HttpGet> {
+ @Override public boolean matches(Object o) {
+ return checkHttpGetMatchHost((HttpGet) o, "localhost2");
+ }
+ }
+ class HttpGetForService3 extends ArgumentMatcher<HttpGet> {
+ @Override public boolean matches(Object o) {
+ try {
+ return checkHttpGetMatchHost((HttpGet) o, InetAddress.getLocalHost().getCanonicalHostName());
+ } catch (UnknownHostException e) {
+ return checkHttpGetMatchHost((HttpGet) o, "localhost");
+ }
+ }
+ }
+ Mockito.when(mockHttpClient.execute(argThat(new HttpGetForService1())))
+ .thenReturn(mockHttpResponseService1);
+ Mockito.when(mockHttpClient.execute(argThat(new HttpGetForService2())))
+ .thenReturn(mockHttpResponseService2);
+ Mockito.when(mockHttpClient.execute(argThat(new HttpGetForService3())))
+ .thenReturn(mockHttpResponseService3);
+
+ // Need 3 times because performop() does 3 fs operations.
+ Mockito.when(mockHttpEntity.getContent())
+ .thenReturn(new ByteArrayInputStream(
+ validJsonResponse().getBytes(StandardCharsets.UTF_8)))
+ .thenReturn(new ByteArrayInputStream(
+ validJsonResponse().getBytes(StandardCharsets.UTF_8)))
+ .thenReturn(new ByteArrayInputStream(
+ validJsonResponse().getBytes(StandardCharsets.UTF_8)));
+ // finished setting up mocks
+ try {
+ performop(mockHttpClient);
+ } catch (WasbAuthorizationException e) {
+ e.printStackTrace();
+ Mockito.verify(mockHttpClient, atLeast(2))
+ .execute(argThat(new HttpGetForService1()));
+ Mockito.verify(mockHttpClient, atLeast(2))
+ .execute(argThat(new HttpGetForService2()));
+ Mockito.verify(mockHttpClient, atLeast(3))
+ .execute(argThat(new HttpGetForService3()));
+ Mockito.verify(mockHttpClient, times(7)).execute(Mockito.<HttpGet>any());
+ }
+ }
+
+ private void setupExpectations() {
+ expectedEx.expect(WasbAuthorizationException.class);
+
+ class MatchesPattern extends TypeSafeMatcher<String> {
+ private String pattern;
+
+ MatchesPattern(String pattern) {
+ this.pattern = pattern;
+ }
+
+ @Override protected boolean matchesSafely(String item) {
+ return item.matches(pattern);
+ }
+
+ @Override public void describeTo(Description description) {
+ description.appendText("matches pattern ").appendValue(pattern);
+ }
+
+ @Override protected void describeMismatchSafely(String item,
+ Description mismatchDescription) {
+ mismatchDescription.appendText("does not match");
+ }
+ }
+
+ expectedEx.expectMessage(new MatchesPattern(
+ "org\\.apache\\.hadoop\\.fs\\.azure\\.WasbRemoteCallException: "
+ + "Encountered error while making remote call to "
+ + "http:\\/\\/localhost1\\/,http:\\/\\/localhost2\\/,http:\\/\\/localhost:8080 retried 6 time\\(s\\)\\."));
+ }
+
+ private void performop(HttpClient mockHttpClient) throws Throwable {
+
+ Path testPath = new Path("/", "test.dat");
+
+ RemoteWasbAuthorizerImpl authorizer = new RemoteWasbAuthorizerImpl();
+ authorizer.init(fs.getConf());
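+ // The spec string below is assumed to follow the
+ // MultipleLinearRandomRetry format of "sleepMillis,retries" pairs:
+ // "1000,3,10000,2" = three retries ~1 second apart, then two more
+ // ~10 seconds apart.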
+ WasbRemoteCallHelper mockWasbRemoteCallHelper = new WasbRemoteCallHelper(
+ RetryUtils.getMultipleLinearRandomRetry(new Configuration(),
+ EMPTY_STRING, true,
+ EMPTY_STRING, "1000,3,10000,2"));
+ mockWasbRemoteCallHelper.updateHttpClient(mockHttpClient);
+ authorizer.updateWasbRemoteCallHelper(mockWasbRemoteCallHelper);
+ fs.updateWasbAuthorizer(authorizer);
+
+ fs.create(testPath);
+ ContractTestUtils.assertPathExists(fs, "testPath was not created", testPath);
+ fs.delete(testPath, false);
+ }
+
+ private String validJsonResponse() {
+ return "{"
+ + "\"responseCode\": 0,"
+ + "\"authorizationResult\": true,"
+ + "\"responseMessage\": \"Authorized\""
+ + "}";
+ }
+
+ private String malformedJsonResponse() {
+ return "{"
+ + "\"responseCode\": 0,"
+ + "\"authorizationResult\": true,"
+ + "\"responseMessage\":";
+ }
+
+ private String failureCodeJsonResponse() {
+ return "{"
+ + "\"responseCode\": 1,"
+ + "\"authorizationResult\": false,"
+ + "\"responseMessage\": \"Unauthorized\""
+ + "}";
+ }
+
+ private StatusLine newStatusLine(int statusCode) {
+ return new StatusLine() {
+ @Override
+ public ProtocolVersion getProtocolVersion() {
+ return new ProtocolVersion("HTTP", 1, 1);
+ }
+
+ @Override
+ public int getStatusCode() {
+ return statusCode;
+ }
+
+ @Override
+ public String getReasonPhrase() {
+ return "Reason Phrase";
+ }
+ };
+ }
+
+ private Header newHeader(String name, String value) {
+ return new Header() {
+ @Override
+ public String getName() {
+ return name;
+ }
+
+ @Override
+ public String getValue() {
+ return value;
+ }
+
+ @Override
+ public HeaderElement[] getElements() throws ParseException {
+ return new HeaderElement[0];
+ }
+ };
+ }
+
+ /** Check whether an HttpGet request targets the given remote host. */
+ private static boolean checkHttpGetMatchHost(HttpGet g, String h) {
+ return g != null && g.getURI().getHost().equals(h);
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestWasbUriAndConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestWasbUriAndConfiguration.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestWasbUriAndConfiguration.java
new file mode 100644
index 0000000..bee0220
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestWasbUriAndConfiguration.java
@@ -0,0 +1,610 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
+import static org.junit.Assume.assumeNotNull;
+
+import java.io.ByteArrayInputStream;
+import java.io.DataInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.URI;
+import java.util.Date;
+import java.util.EnumSet;
+import java.io.File;
+
+import org.apache.hadoop.fs.azure.integration.AzureTestUtils;
+import org.apache.hadoop.security.ProviderUtils;
+import org.apache.hadoop.security.alias.CredentialProvider;
+import org.apache.hadoop.security.alias.CredentialProviderFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.AbstractFileSystem;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
+import org.apache.hadoop.fs.azure.AzureBlobStorageTestAccount.CreateOptions;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Assume;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import com.microsoft.azure.storage.blob.CloudBlobContainer;
+import com.microsoft.azure.storage.blob.CloudBlockBlob;
+
+public class ITestWasbUriAndConfiguration extends AbstractWasbTestWithTimeout {
+
+ private static final int FILE_SIZE = 4096;
+ private static final String PATH_DELIMITER = "/";
+
+ protected String accountName;
+ protected String accountKey;
+ protected static Configuration conf = null;
+ private boolean runningInSASMode = false;
+ @Rule
+ public final TemporaryFolder tempDir = new TemporaryFolder();
+
+ private AzureBlobStorageTestAccount testAccount;
+
+ @After
+ public void tearDown() throws Exception {
+ testAccount = AzureTestUtils.cleanupTestAccount(testAccount);
+ }
+
+ @Before
+ public void setMode() {
+ runningInSASMode = AzureBlobStorageTestAccount.createTestConfiguration().
+ getBoolean(AzureNativeFileSystemStore.KEY_USE_SECURE_MODE, false);
+ }
+
+ private boolean validateIOStreams(Path filePath) throws IOException {
+ // Capture the file system from the test account.
+ FileSystem fs = testAccount.getFileSystem();
+ return validateIOStreams(fs, filePath);
+ }
+
+ private boolean validateIOStreams(FileSystem fs, Path filePath)
+ throws IOException {
+
+ // Create and write a file
+ OutputStream outputStream = fs.create(filePath);
+ outputStream.write(new byte[FILE_SIZE]);
+ outputStream.close();
+
+ // Return true if the count matches the file size.
+ return (FILE_SIZE == readInputStream(fs, filePath));
+ }
+
+ private int readInputStream(Path filePath) throws IOException {
+ // Capture the file system from the test account.
+ FileSystem fs = testAccount.getFileSystem();
+ return readInputStream(fs, filePath);
+ }
+
+ private int readInputStream(FileSystem fs, Path filePath) throws IOException {
+ // Read the file
+ InputStream inputStream = fs.open(filePath);
+ int count = 0;
+ while (inputStream.read() >= 0) {
+ count++;
+ }
+ inputStream.close();
+
+ // Return the number of bytes read.
+ return count;
+ }
+
+ // Positive test to exercise making a connection to an Azure account
+ // using an account key.
+ @Test
+ public void testConnectUsingKey() throws Exception {
+
+ testAccount = AzureBlobStorageTestAccount.create();
+ assumeNotNull(testAccount);
+
+ // Validate input and output on the connection.
+ assertTrue(validateIOStreams(new Path("/wasb_scheme")));
+ }
+
+ @Test
+ public void testConnectUsingSAS() throws Exception {
+
+ Assume.assumeFalse(runningInSASMode);
+ // Create the test account with SAS credentials.
+ testAccount = AzureBlobStorageTestAccount.create("",
+ EnumSet.of(CreateOptions.UseSas, CreateOptions.CreateContainer));
+ assumeNotNull(testAccount);
+ // Validate input and output on the connection.
+ // NOTE: As of 4/15/2013, Azure Storage has a deficiency that prevents the
+ // full scenario from working (CopyFromBlob doesn't work with SAS), so
+ // just do a minor check until that is corrected.
+ assertFalse(testAccount.getFileSystem().exists(new Path("/IDontExist")));
+ //assertTrue(validateIOStreams(new Path("/sastest.txt")));
+ }
+
+ @Test
+ public void testConnectUsingSASReadonly() throws Exception {
+
+ Assume.assumeFalse(runningInSASMode);
+ // Create the test account with SAS credentials.
+ testAccount = AzureBlobStorageTestAccount.create("", EnumSet.of(
+ CreateOptions.UseSas, CreateOptions.CreateContainer,
+ CreateOptions.Readonly));
+ assumeNotNull(testAccount);
+
+ // Create a blob in there
+ final String blobKey = "blobForReadonly";
+ CloudBlobContainer container = testAccount.getRealContainer();
+ CloudBlockBlob blob = container.getBlockBlobReference(blobKey);
+ ByteArrayInputStream inputStream = new ByteArrayInputStream(new byte[] { 1,
+ 2, 3 });
+ blob.upload(inputStream, 3);
+ inputStream.close();
+
+ // Make sure we can read it from the file system
+ Path filePath = new Path("/" + blobKey);
+ FileSystem fs = testAccount.getFileSystem();
+ assertTrue(fs.exists(filePath));
+ byte[] obtained = new byte[3];
+ DataInputStream obtainedInputStream = fs.open(filePath);
+ obtainedInputStream.readFully(obtained);
+ obtainedInputStream.close();
+ assertEquals(3, obtained[2]);
+ }
+
+ @Test
+ public void testConnectUsingAnonymous() throws Exception {
+
+ // Create test account with anonymous credentials
+ testAccount = AzureBlobStorageTestAccount.createAnonymous("testWasb.txt",
+ FILE_SIZE);
+ assumeNotNull(testAccount);
+
+ // Read the file from the public folder using anonymous credentials.
+ assertEquals(FILE_SIZE, readInputStream(new Path("/testWasb.txt")));
+ }
+
+ @Test
+ public void testConnectToEmulator() throws Exception {
+ testAccount = AzureBlobStorageTestAccount.createForEmulator();
+ assumeNotNull(testAccount);
+ assertTrue(validateIOStreams(new Path("/testFile")));
+ }
+
+ /**
+ * Tests that we can connect to fully qualified accounts outside of
+ * blob.core.windows.net
+ */
+ @Test
+ public void testConnectToFullyQualifiedAccountMock() throws Exception {
+ Configuration conf = new Configuration();
+ AzureBlobStorageTestAccount.setMockAccountKey(conf,
+ "mockAccount.mock.authority.net");
+ AzureNativeFileSystemStore store = new AzureNativeFileSystemStore();
+ MockStorageInterface mockStorage = new MockStorageInterface();
+ store.setAzureStorageInteractionLayer(mockStorage);
+ NativeAzureFileSystem fs = new NativeAzureFileSystem(store);
+ fs.initialize(
+ new URI("wasb://mockContainer@mockAccount.mock.authority.net"), conf);
+ fs.createNewFile(new Path("/x"));
+ assertTrue(mockStorage.getBackingStore().exists(
+ "http://mockAccount.mock.authority.net/mockContainer/x"));
+ fs.close();
+ }
+
+ @Test
+ public void testConnectToRoot() throws Exception {
+
+ // Set up blob names.
+ final String blobPrefix = String.format("wasbtests-%s-%tQ-blob",
+ System.getProperty("user.name"), new Date());
+ final String inblobName = blobPrefix + "_In" + ".txt";
+ final String outblobName = blobPrefix + "_Out" + ".txt";
+
+ // Create test account with default root access.
+ testAccount = AzureBlobStorageTestAccount.createRoot(inblobName, FILE_SIZE);
+ assumeNotNull(testAccount);
+
+ // Read the file from the default container.
+ assertEquals(FILE_SIZE, readInputStream(new Path(PATH_DELIMITER
+ + inblobName)));
+
+ try {
+ // Capture file system.
+ FileSystem fs = testAccount.getFileSystem();
+
+ // Create output path and open an output stream to the root folder.
+ Path outputPath = new Path(PATH_DELIMITER + outblobName);
+ OutputStream outputStream = fs.create(outputPath);
+ outputStream.write(new byte[FILE_SIZE]);
+ outputStream.close();
+ fail("Expected an AzureException when writing to root folder.");
+ } catch (AzureException e) {
+ // expected: writing to the root folder is not allowed
+ } catch (Exception e) {
+ fail(String.format(
+ "Expected AzureException but got %s instead.", e));
+ }
+ }
+
+ // Positive test to exercise the throttling I/O path. Connections are
+ // made to an Azure account using an account key.
+ public void testConnectWithThrottling() throws Exception {
+
+ testAccount = AzureBlobStorageTestAccount.createThrottled();
+
+ // Validate input and output on the connection.
+ assertTrue(validateIOStreams(new Path("/wasb_scheme")));
+ }
+
+ /**
+ * Creates a file and writes a single byte with the given value in it.
+ */
+ private static void writeSingleByte(FileSystem fs, Path testFile, int toWrite)
+ throws Exception {
+ OutputStream outputStream = fs.create(testFile);
+ outputStream.write(toWrite);
+ outputStream.close();
+ }
+
+ /**
+ * Reads the file given and makes sure that it's a single-byte file with the
+ * given value in it.
+ */
+ private static void assertSingleByteValue(FileSystem fs, Path testFile,
+ int expectedValue) throws Exception {
+ InputStream inputStream = fs.open(testFile);
+ int byteRead = inputStream.read();
+ assertTrue("File unexpectedly empty: " + testFile, byteRead >= 0);
+ assertTrue("File has more than a single byte: " + testFile,
+ inputStream.read() < 0);
+ inputStream.close();
+ assertEquals("Unxpected content in: " + testFile, expectedValue, byteRead);
+ }
+
+ @Test
+ public void testMultipleContainers() throws Exception {
+ AzureBlobStorageTestAccount firstAccount =
+ AzureBlobStorageTestAccount.create("first");
+ AzureBlobStorageTestAccount secondAccount =
+ AzureBlobStorageTestAccount.create("second");
+ assumeNotNull(firstAccount);
+ assumeNotNull(secondAccount);
+ try {
+ FileSystem firstFs = firstAccount.getFileSystem(),
+ secondFs = secondAccount.getFileSystem();
+ Path testFile = new Path("/testWasb");
+ assertTrue(validateIOStreams(firstFs, testFile));
+ assertTrue(validateIOStreams(secondFs, testFile));
+ // Make sure that we're really dealing with two file systems here.
+ writeSingleByte(firstFs, testFile, 5);
+ writeSingleByte(secondFs, testFile, 7);
+ assertSingleByteValue(firstFs, testFile, 5);
+ assertSingleByteValue(secondFs, testFile, 7);
+ } finally {
+ firstAccount.cleanup();
+ secondAccount.cleanup();
+ }
+ }
+
+ @Test
+ public void testDefaultKeyProvider() throws Exception {
+ Configuration conf = new Configuration();
+ String account = "testacct";
+ String key = "testkey";
+
+ conf.set(SimpleKeyProvider.KEY_ACCOUNT_KEY_PREFIX + account, key);
+
+ String result = AzureNativeFileSystemStore.getAccountKeyFromConfiguration(
+ account, conf);
+ assertEquals(key, result);
+ }
+
+ @Test
+ public void testCredsFromCredentialProvider() throws Exception {
+
+ Assume.assumeFalse(runningInSASMode);
+ String account = "testacct";
+ String key = "testkey";
+ // set up conf to have a cred provider
+ final Configuration conf = new Configuration();
+ final File file = tempDir.newFile("test.jks");
+ final URI jks = ProviderUtils.nestURIForLocalJavaKeyStoreProvider(
+ file.toURI());
+ conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,
+ jks.toString());
+
+ provisionAccountKey(conf, account, key);
+
+ // also add to configuration as clear text that should be overridden
+ conf.set(SimpleKeyProvider.KEY_ACCOUNT_KEY_PREFIX + account,
+ key + "cleartext");
+
+ String result = AzureNativeFileSystemStore.getAccountKeyFromConfiguration(
+ account, conf);
+ // result should contain the credential provider key not the config key
+ assertEquals("AccountKey incorrect.", key, result);
+ }
+
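+ // Resolution order assumed in the assertion above:
+ // Configuration.getPassword(name) consults the providers on
+ // hadoop.security.credential.provider.path before falling back to the
+ // clear-text value, e.g.
+ //   char[] key = conf.getPassword(
+ //       SimpleKeyProvider.KEY_ACCOUNT_KEY_PREFIX + account);
+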
+ void provisionAccountKey(
+ final Configuration conf, String account, String key) throws Exception {
+ // add our creds to the provider
+ final CredentialProvider provider =
+ CredentialProviderFactory.getProviders(conf).get(0);
+ provider.createCredentialEntry(
+ SimpleKeyProvider.KEY_ACCOUNT_KEY_PREFIX + account, key.toCharArray());
+ provider.flush();
+ }
+
+ @Test
+ public void testValidKeyProvider() throws Exception {
+ Configuration conf = new Configuration();
+ String account = "testacct";
+ String key = "testkey";
+
+ conf.set(SimpleKeyProvider.KEY_ACCOUNT_KEY_PREFIX + account, key);
+ conf.setClass("fs.azure.account.keyprovider." + account,
+ SimpleKeyProvider.class, KeyProvider.class);
+ String result = AzureNativeFileSystemStore.getAccountKeyFromConfiguration(
+ account, conf);
+ assertEquals(key, result);
+ }
+
+ @Test
+ public void testInvalidKeyProviderNonexistantClass() throws Exception {
+ Configuration conf = new Configuration();
+ String account = "testacct";
+
+ conf.set("fs.azure.account.keyprovider." + account,
+ "org.apache.Nonexistant.Class");
+ try {
+ AzureNativeFileSystemStore.getAccountKeyFromConfiguration(account, conf);
+ Assert.fail("Nonexistant key provider class should have thrown a "
+ + "KeyProviderException");
+ } catch (KeyProviderException e) {
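+ // expected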
+ }
+ }
+
+ @Test
+ public void testInvalidKeyProviderWrongClass() throws Exception {
+ Configuration conf = new Configuration();
+ String account = "testacct";
+
+ conf.set("fs.azure.account.keyprovider." + account, "java.lang.String");
+ try {
+ AzureNativeFileSystemStore.getAccountKeyFromConfiguration(account, conf);
+ Assert.fail("Key provider class that doesn't implement KeyProvider "
+ + "should have thrown a KeyProviderException");
+ } catch (KeyProviderException e) {
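+ // expected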
+ }
+ }
+
+ /**
+ * Tests the cases when the URI is specified with no authority, i.e.
+ * wasb:///path/to/file.
+ */
+ @Test
+ public void testNoUriAuthority() throws Exception {
+ // For any combination of default FS being asv(s)/wasb(s)://c@a/ and
+ // the actual URI being asv(s)/wasb(s):///, it should work.
+
+ String[] wasbAliases = new String[] { "wasb", "wasbs" };
+ for (String defaultScheme : wasbAliases) {
+ for (String wantedScheme : wasbAliases) {
+ testAccount = AzureBlobStorageTestAccount.createMock();
+ Configuration conf = testAccount.getFileSystem().getConf();
+ String authority = testAccount.getFileSystem().getUri().getAuthority();
+ URI defaultUri = new URI(defaultScheme, authority, null, null, null);
+ conf.set(FS_DEFAULT_NAME_KEY, defaultUri.toString());
+ // Add references to file system implementations for wasb and wasbs.
+ conf.addResource("azure-test.xml");
+ URI wantedUri = new URI(wantedScheme + ":///random/path");
+ NativeAzureFileSystem obtained = (NativeAzureFileSystem) FileSystem
+ .get(wantedUri, conf);
+ assertNotNull(obtained);
+ assertEquals(new URI(wantedScheme, authority, null, null, null),
+ obtained.getUri());
+ // Make sure makeQualified works as expected
+ Path qualified = obtained.makeQualified(new Path(wantedUri));
+ assertEquals(new URI(wantedScheme, authority, wantedUri.getPath(),
+ null, null), qualified.toUri());
+ // Cleanup for the next iteration to not cache anything in FS
+ testAccount.cleanup();
+ FileSystem.closeAll();
+ }
+ }
+ // If the default FS is not a WASB FS, then specifying a URI without
+ // authority for the Azure file system should throw.
+ testAccount = AzureBlobStorageTestAccount.createMock();
+ Configuration conf = testAccount.getFileSystem().getConf();
+ conf.set(FS_DEFAULT_NAME_KEY, "file:///");
+ try {
+ FileSystem.get(new URI("wasb:///random/path"), conf);
+ fail("Should've thrown.");
+ } catch (IllegalArgumentException e) {
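+ // expected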
+ }
+ }
+
+ @Test
+ public void testWasbAsDefaultFileSystemHasNoPort() throws Exception {
+ try {
+ testAccount = AzureBlobStorageTestAccount.createMock();
+ Configuration conf = testAccount.getFileSystem().getConf();
+ String authority = testAccount.getFileSystem().getUri().getAuthority();
+ URI defaultUri = new URI("wasb", authority, null, null, null);
+ conf.set(FS_DEFAULT_NAME_KEY, defaultUri.toString());
+ conf.addResource("azure-test.xml");
+
+ FileSystem fs = FileSystem.get(conf);
+ assertTrue(fs instanceof NativeAzureFileSystem);
+ assertEquals(-1, fs.getUri().getPort());
+
+ AbstractFileSystem afs = FileContext.getFileContext(conf)
+ .getDefaultFileSystem();
+ assertTrue(afs instanceof Wasb);
+ assertEquals(-1, afs.getUri().getPort());
+ } finally {
+ testAccount.cleanup();
+ FileSystem.closeAll();
+ }
+ }
+
+ /**
+ * Tests the cases when the scheme specified is 'wasbs'.
+ */
+ @Test
+ public void testAbstractFileSystemImplementationForWasbsScheme() throws Exception {
+ try {
+ testAccount = AzureBlobStorageTestAccount.createMock();
+ Configuration conf = testAccount.getFileSystem().getConf();
+ String authority = testAccount.getFileSystem().getUri().getAuthority();
+ URI defaultUri = new URI("wasbs", authority, null, null, null);
+ conf.set(FS_DEFAULT_NAME_KEY, defaultUri.toString());
+ conf.set("fs.AbstractFileSystem.wasbs.impl", "org.apache.hadoop.fs.azure.Wasbs");
+ conf.addResource("azure-test.xml");
+
+ FileSystem fs = FileSystem.get(conf);
+ assertTrue(fs instanceof NativeAzureFileSystem);
+ assertEquals("wasbs", fs.getScheme());
+
+ AbstractFileSystem afs = FileContext.getFileContext(conf)
+ .getDefaultFileSystem();
+ assertTrue(afs instanceof Wasbs);
+ assertEquals(-1, afs.getUri().getPort());
+ assertEquals("wasbs", afs.getUri().getScheme());
+ } finally {
+ testAccount.cleanup();
+ FileSystem.closeAll();
+ }
+ }
+
+ @Test
+ public void testNoAbstractFileSystemImplementationSpecifiedForWasbsScheme() throws Exception {
+ try {
+ testAccount = AzureBlobStorageTestAccount.createMock();
+ Configuration conf = testAccount.getFileSystem().getConf();
+ String authority = testAccount.getFileSystem().getUri().getAuthority();
+ URI defaultUri = new URI("wasbs", authority, null, null, null);
+ conf.set(FS_DEFAULT_NAME_KEY, defaultUri.toString());
+
+ FileSystem fs = FileSystem.get(conf);
+ assertTrue(fs instanceof NativeAzureFileSystem);
+ assertEquals("wasbs", fs.getScheme());
+
+ // should throw if 'fs.AbstractFileSystem.wasbs.impl' is not specified
+ try {
+ FileContext.getFileContext(conf).getDefaultFileSystem();
+ fail("Should've thrown.");
+ } catch (UnsupportedFileSystemException e) {
+ // expected
+ }
+
+ } finally {
+ testAccount.cleanup();
+ FileSystem.closeAll();
+ }
+ }
+
+ @Test
+ public void testCredentialProviderPathExclusions() throws Exception {
+ String providerPath =
+ "user:///,jceks://wasb/user/hrt_qa/sqoopdbpasswd.jceks," +
+ "jceks://hdfs@nn1.example.com/my/path/test.jceks";
+ Configuration config = new Configuration();
+ config.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,
+ providerPath);
+ String newPath = "user:///,jceks://hdfs@nn1.example.com/my/path/test.jceks";
+
+ excludeAndTestExpectations(config, newPath);
+ }
+
+ @Test
+ public void testExcludeAllProviderTypesFromConfig() throws Exception {
+ String providerPath =
+ "jceks://wasb/tmp/test.jceks," +
+ "jceks://wasb@/my/path/test.jceks";
+ Configuration config = new Configuration();
+ config.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,
+ providerPath);
+ String newPath = null;
+
+ excludeAndTestExpectations(config, newPath);
+ }
+
+ void excludeAndTestExpectations(Configuration config, String newPath)
+ throws Exception {
+ Configuration conf = ProviderUtils.excludeIncompatibleCredentialProviders(
+ config, NativeAzureFileSystem.class);
+ String effectivePath = conf.get(
+ CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, null);
+ assertEquals(newPath, effectivePath);
+ }
+
+ @Test
+ public void testUserAgentConfig() throws Exception {
+ // Set the user agent
+ try {
+ testAccount = AzureBlobStorageTestAccount.createMock();
+ Configuration conf = testAccount.getFileSystem().getConf();
+ String authority = testAccount.getFileSystem().getUri().getAuthority();
+ URI defaultUri = new URI("wasbs", authority, null, null, null);
+ conf.set(FS_DEFAULT_NAME_KEY, defaultUri.toString());
+ conf.set("fs.AbstractFileSystem.wasbs.impl", "org.apache.hadoop.fs.azure.Wasbs");
+
+ conf.set(AzureNativeFileSystemStore.USER_AGENT_ID_KEY, "TestClient");
+
+ FileSystem fs = FileSystem.get(conf);
+ AbstractFileSystem afs = FileContext.getFileContext(conf).getDefaultFileSystem();
+
+ assertTrue(afs instanceof Wasbs);
+ assertEquals(-1, afs.getUri().getPort());
+ assertEquals("wasbs", afs.getUri().getScheme());
+
+ } finally {
+ testAccount.cleanup();
+ FileSystem.closeAll();
+ }
+
+ // Unset the user agent
+ try {
+ testAccount = AzureBlobStorageTestAccount.createMock();
+ Configuration conf = testAccount.getFileSystem().getConf();
+ String authority = testAccount.getFileSystem().getUri().getAuthority();
+ URI defaultUri = new URI("wasbs", authority, null, null, null);
+ conf.set(FS_DEFAULT_NAME_KEY, defaultUri.toString());
+ conf.set("fs.AbstractFileSystem.wasbs.impl", "org.apache.hadoop.fs.azure.Wasbs");
+
+ conf.unset(AzureNativeFileSystemStore.USER_AGENT_ID_KEY);
+
+ FileSystem fs = FileSystem.get(conf);
+ AbstractFileSystem afs = FileContext.getFileContext(conf).getDefaultFileSystem();
+ assertTrue(afs instanceof Wasbs);
+ assertEquals(-1, afs.getUri().getPort());
+ assertEquals("wasbs", afs.getUri().getScheme());
+
+ } finally {
+ testAccount.cleanup();
+ FileSystem.closeAll();
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockWasbAuthorizerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockWasbAuthorizerImpl.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockWasbAuthorizerImpl.java
index 9fbab49..7354499 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockWasbAuthorizerImpl.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockWasbAuthorizerImpl.java
@@ -38,11 +38,12 @@ public class MockWasbAuthorizerImpl implements WasbAuthorizerInterface {
private boolean performOwnerMatch;
private CachingAuthorizer<CachedAuthorizerEntry, Boolean> cache;
- // The full qualified URL to the root directory
+ // The fully qualified URL to the root directory
private String qualifiedPrefixUrl;
public MockWasbAuthorizerImpl(NativeAzureFileSystem fs) {
- qualifiedPrefixUrl = new Path("/").makeQualified(fs.getUri(), fs.getWorkingDirectory())
+ qualifiedPrefixUrl = new Path("/").makeQualified(fs.getUri(),
+ fs.getWorkingDirectory())
.toString().replaceAll("/$", "");
cache = new CachingAuthorizer<>(TimeUnit.MINUTES.convert(5L, TimeUnit.MINUTES), "AUTHORIZATION");
}
@@ -64,19 +65,23 @@ public class MockWasbAuthorizerImpl implements WasbAuthorizerInterface {
public void addAuthRule(String wasbAbsolutePath,
String accessType, boolean access) {
- wasbAbsolutePath = qualifiedPrefixUrl + wasbAbsolutePath;
- AuthorizationComponent component = wasbAbsolutePath.endsWith("*")
- ? new AuthorizationComponent("^" + wasbAbsolutePath.replace("*", ".*"), accessType)
+ wasbAbsolutePath = qualifiedPrefixUrl + wasbAbsolutePath;
+ AuthorizationComponent component = wasbAbsolutePath.endsWith("*")
+ ? new AuthorizationComponent("^" + wasbAbsolutePath.replace("*", ".*"),
+ accessType)
: new AuthorizationComponent(wasbAbsolutePath, accessType);
this.authRules.put(component, access);
}
@Override
- public boolean authorize(String wasbAbsolutePath, String accessType, String owner)
+ public boolean authorize(String wasbAbsolutePath,
+ String accessType,
+ String owner)
throws WasbAuthorizationException {
- if (wasbAbsolutePath.endsWith(NativeAzureFileSystem.FolderRenamePending.SUFFIX)) {
+ if (wasbAbsolutePath.endsWith(
+ NativeAzureFileSystem.FolderRenamePending.SUFFIX)) {
return true;
}
@@ -108,20 +113,23 @@ public class MockWasbAuthorizerImpl implements WasbAuthorizerInterface {
// In case of root("/"), owner match does not happen because owner is returned as empty string.
// we try to force owner match just for the purpose of tests to make sure all operations work seamlessly with owner.
if (this.performOwnerMatch
- && StringUtils.equalsIgnoreCase(wasbAbsolutePath, qualifiedPrefixUrl + "/")) {
+ && StringUtils.equalsIgnoreCase(wasbAbsolutePath,
+ qualifiedPrefixUrl + "/")) {
owner = currentUserShortName;
}
boolean shouldEvaluateOwnerAccess = owner != null && !owner.isEmpty()
- && this.performOwnerMatch;
+ && this.performOwnerMatch;
- boolean isOwnerMatch = StringUtils.equalsIgnoreCase(currentUserShortName, owner);
+ boolean isOwnerMatch = StringUtils.equalsIgnoreCase(currentUserShortName,
+ owner);
AuthorizationComponent component =
new AuthorizationComponent(wasbAbsolutePath, accessType);
if (authRules.containsKey(component)) {
- return shouldEvaluateOwnerAccess ? isOwnerMatch && authRules.get(component) : authRules.get(component);
+ return shouldEvaluateOwnerAccess ? isOwnerMatch && authRules.get(
+ component) : authRules.get(component);
} else {
// Regex-pattern match if we don't have a straight match
for (Map.Entry<AuthorizationComponent, Boolean> entry : authRules.entrySet()) {
@@ -129,8 +137,11 @@ public class MockWasbAuthorizerImpl implements WasbAuthorizerInterface {
String keyPath = key.getWasbAbsolutePath();
String keyAccess = key.getAccessType();
- if (keyPath.endsWith("*") && Pattern.matches(keyPath, wasbAbsolutePath) && keyAccess.equals(accessType)) {
- return shouldEvaluateOwnerAccess ? isOwnerMatch && entry.getValue() : entry.getValue();
+ if (keyPath.endsWith("*") && Pattern.matches(keyPath, wasbAbsolutePath)
+ && keyAccess.equals(accessType)) {
+ return shouldEvaluateOwnerAccess
+ ? isOwnerMatch && entry.getValue()
+ : entry.getValue();
}
}
return false;
@@ -141,47 +152,47 @@ public class MockWasbAuthorizerImpl implements WasbAuthorizerInterface {
authRules.clear();
cache.clear();
}
-}
-class AuthorizationComponent {
+ private static class AuthorizationComponent {
- private String wasbAbsolutePath;
- private String accessType;
+ private final String wasbAbsolutePath;
+ private final String accessType;
- public AuthorizationComponent(String wasbAbsolutePath,
- String accessType) {
- this.wasbAbsolutePath = wasbAbsolutePath;
- this.accessType = accessType;
- }
+ AuthorizationComponent(String wasbAbsolutePath,
+ String accessType) {
+ this.wasbAbsolutePath = wasbAbsolutePath;
+ this.accessType = accessType;
+ }
- @Override
- public int hashCode() {
- return this.wasbAbsolutePath.hashCode() ^ this.accessType.hashCode();
- }
+ @Override
+ public int hashCode() {
+ return this.wasbAbsolutePath.hashCode() ^ this.accessType.hashCode();
+ }
- @Override
- public boolean equals(Object obj) {
+ @Override
+ public boolean equals(Object obj) {
- if (obj == this) {
- return true;
- }
+ if (obj == this) {
+ return true;
+ }
- if (obj == null
- || !(obj instanceof AuthorizationComponent)) {
- return false;
- }
+ if (obj == null
+ || !(obj instanceof AuthorizationComponent)) {
+ return false;
+ }
- return ((AuthorizationComponent)obj).
- getWasbAbsolutePath().equals(this.wasbAbsolutePath)
- && ((AuthorizationComponent)obj).
- getAccessType().equals(this.accessType);
- }
+ return ((AuthorizationComponent) obj).
+ getWasbAbsolutePath().equals(this.wasbAbsolutePath)
+ && ((AuthorizationComponent) obj).
+ getAccessType().equals(this.accessType);
+ }
- public String getWasbAbsolutePath() {
- return this.wasbAbsolutePath;
- }
+ public String getWasbAbsolutePath() {
+ return this.wasbAbsolutePath;
+ }
- public String getAccessType() {
- return accessType;
+ public String getAccessType() {
+ return accessType;
+ }
}
-}
\ No newline at end of file
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java
index 177477c..726b504 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java
@@ -18,12 +18,6 @@
package org.apache.hadoop.fs.azure;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.FileNotFoundException;
@@ -47,16 +41,18 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.Test;
-import org.apache.hadoop.fs.azure.AzureException;
import org.apache.hadoop.fs.azure.NativeAzureFileSystem.FolderRenamePending;
import com.microsoft.azure.storage.AccessCondition;
import com.microsoft.azure.storage.StorageException;
import com.microsoft.azure.storage.blob.CloudBlob;
+import static org.apache.hadoop.test.GenericTestUtils.*;
+
/*
* Tests the Native Azure file system (WASB) against an actual blob store if
* provided in the environment.
@@ -71,15 +67,46 @@ public abstract class NativeAzureFileSystemBaseTest
private final long modifiedTimeErrorMargin = 5 * 1000; // Give it +/-5 seconds
public static final Log LOG = LogFactory.getLog(NativeAzureFileSystemBaseTest.class);
+ protected NativeAzureFileSystem fs;
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ fs = getFileSystem();
+ }
+
+ /**
+ * Assert that a path does not exist.
+ *
+ * @param message message to include in the assertion failure message
+ * @param path path in the filesystem
+ * @throws IOException IO problems
+ */
+ public void assertPathDoesNotExist(String message,
+ Path path) throws IOException {
+ ContractTestUtils.assertPathDoesNotExist(fs, message, path);
+ }
+
+ /**
+ * Assert that a path exists.
+ *
+ * @param message message to include in the assertion failure message
+ * @param path path in the filesystem
+ * @throws IOException IO problems
+ */
+ public void assertPathExists(String message,
+ Path path) throws IOException {
+ ContractTestUtils.assertPathExists(fs, message, path);
+ }
@Test
public void testCheckingNonExistentOneLetterFile() throws Exception {
- assertFalse(fs.exists(new Path("/a")));
+ assertPathDoesNotExist("one letter file", new Path("/a"));
}
@Test
public void testStoreRetrieveFile() throws Exception {
- Path testFile = new Path("unit-test-file");
+ Path testFile = methodPath();
writeString(testFile, "Testing");
assertTrue(fs.exists(testFile));
FileStatus status = fs.getFileStatus(testFile);
@@ -93,7 +120,7 @@ public abstract class NativeAzureFileSystemBaseTest
@Test
public void testStoreDeleteFolder() throws Exception {
- Path testFolder = new Path("storeDeleteFolder");
+ Path testFolder = methodPath();
assertFalse(fs.exists(testFolder));
assertTrue(fs.mkdirs(testFolder));
assertTrue(fs.exists(testFolder));
@@ -105,22 +132,22 @@ public abstract class NativeAzureFileSystemBaseTest
assertEquals(new FsPermission((short) 0755), status.getPermission());
Path innerFile = new Path(testFolder, "innerFile");
assertTrue(fs.createNewFile(innerFile));
- assertTrue(fs.exists(innerFile));
+ assertPathExists("inner file", innerFile);
assertTrue(fs.delete(testFolder, true));
- assertFalse(fs.exists(innerFile));
- assertFalse(fs.exists(testFolder));
+ assertPathDoesNotExist("inner file", innerFile);
+ assertPathDoesNotExist("testFolder", testFolder);
}
@Test
public void testFileOwnership() throws Exception {
- Path testFile = new Path("ownershipTestFile");
+ Path testFile = methodPath();
writeString(testFile, "Testing");
testOwnership(testFile);
}
@Test
public void testFolderOwnership() throws Exception {
- Path testFolder = new Path("ownershipTestFolder");
+ Path testFolder = methodPath();
fs.mkdirs(testFolder);
testOwnership(testFolder);
}
@@ -147,7 +174,7 @@ public abstract class NativeAzureFileSystemBaseTest
@Test
public void testFilePermissions() throws Exception {
- Path testFile = new Path("permissionTestFile");
+ Path testFile = methodPath();
FsPermission permission = FsPermission.createImmutable((short) 644);
createEmptyFile(testFile, permission);
FileStatus ret = fs.getFileStatus(testFile);
@@ -157,7 +184,7 @@ public abstract class NativeAzureFileSystemBaseTest
@Test
public void testFolderPermissions() throws Exception {
- Path testFolder = new Path("permissionTestFolder");
+ Path testFolder = methodPath();
FsPermission permission = FsPermission.createImmutable((short) 644);
fs.mkdirs(testFolder, permission);
FileStatus ret = fs.getFileStatus(testFolder);
@@ -176,9 +203,9 @@ public abstract class NativeAzureFileSystemBaseTest
createEmptyFile(testFile, permission);
FsPermission rootPerm = fs.getFileStatus(firstDir.getParent()).getPermission();
FsPermission inheritPerm = FsPermission.createImmutable((short)(rootPerm.toShort() | 0300));
- assertTrue(fs.exists(testFile));
- assertTrue(fs.exists(firstDir));
- assertTrue(fs.exists(middleDir));
+ assertPathExists("test file", testFile);
+ assertPathExists("firstDir", firstDir);
+ assertPathExists("middleDir", middleDir);
// verify that the indirectly created directory inherited its permissions from the root directory
FileStatus directoryStatus = fs.getFileStatus(middleDir);
assertTrue(directoryStatus.isDirectory());
@@ -188,7 +215,7 @@ public abstract class NativeAzureFileSystemBaseTest
assertFalse(fileStatus.isDirectory());
assertEqualsIgnoreStickyBit(umaskedPermission, fileStatus.getPermission());
assertTrue(fs.delete(firstDir, true));
- assertFalse(fs.exists(testFile));
+ assertPathDoesNotExist("deleted file", testFile);
// An alternative test scenario would've been to delete the file first,
// and then check for the existence of the upper folders still. But that
@@ -264,7 +291,7 @@ public abstract class NativeAzureFileSystemBaseTest
assertTrue(fs.delete(new Path("deep"), true));
}
- private static enum RenameFolderVariation {
+ private enum RenameFolderVariation {
CreateFolderAndInnerFile, CreateJustInnerFile, CreateJustFolder
}
@@ -303,10 +330,10 @@ public abstract class NativeAzureFileSystemBaseTest
localFs.delete(localFilePath, true);
try {
writeString(localFs, localFilePath, "Testing");
- Path dstPath = new Path("copiedFromLocal");
+ Path dstPath = methodPath();
assertTrue(FileUtil.copy(localFs, localFilePath, fs, dstPath, false,
fs.getConf()));
- assertTrue(fs.exists(dstPath));
+ assertPathExists("coied from local", dstPath);
assertEquals("Testing", readString(fs, dstPath));
fs.delete(dstPath, true);
} finally {
@@ -423,32 +450,32 @@ public abstract class NativeAzureFileSystemBaseTest
@Test
public void testReadingDirectoryAsFile() throws Exception {
- Path dir = new Path("/x");
+ Path dir = methodPath();
assertTrue(fs.mkdirs(dir));
try {
fs.open(dir).close();
assertTrue("Should've thrown", false);
} catch (FileNotFoundException ex) {
- assertEquals("/x is a directory not a file.", ex.getMessage());
+ assertExceptionContains("a directory not a file.", ex);
}
}
@Test
public void testCreatingFileOverDirectory() throws Exception {
- Path dir = new Path("/x");
+ Path dir = methodPath();
assertTrue(fs.mkdirs(dir));
try {
fs.create(dir).close();
assertTrue("Should've thrown", false);
} catch (IOException ex) {
- assertEquals("Cannot create file /x; already exists as a directory.",
- ex.getMessage());
+ assertExceptionContains("Cannot create file", ex);
+ assertExceptionContains("already exists as a directory", ex);
}
}
@Test
public void testInputStreamReadWithZeroSizeBuffer() throws Exception {
- Path newFile = new Path("zeroSizeRead");
+ Path newFile = methodPath();
OutputStream output = fs.create(newFile);
output.write(10);
output.close();
@@ -460,7 +487,7 @@ public abstract class NativeAzureFileSystemBaseTest
@Test
public void testInputStreamReadWithBufferReturnsMinusOneOnEof() throws Exception {
- Path newFile = new Path("eofRead");
+ Path newFile = methodPath();
OutputStream output = fs.create(newFile);
output.write(10);
output.close();
@@ -482,7 +509,7 @@ public abstract class NativeAzureFileSystemBaseTest
@Test
public void testInputStreamReadWithBufferReturnsMinusOneOnEofForLargeBuffer() throws Exception {
- Path newFile = new Path("eofRead2");
+ Path newFile = methodPath();
OutputStream output = fs.create(newFile);
byte[] outputBuff = new byte[97331];
for(int i = 0; i < outputBuff.length; ++i) {
@@ -508,7 +535,7 @@ public abstract class NativeAzureFileSystemBaseTest
@Test
public void testInputStreamReadIntReturnsMinusOneOnEof() throws Exception {
- Path newFile = new Path("eofRead3");
+ Path newFile = methodPath();
OutputStream output = fs.create(newFile);
output.write(10);
output.close();
@@ -525,7 +552,7 @@ public abstract class NativeAzureFileSystemBaseTest
@Test
public void testSetPermissionOnFile() throws Exception {
- Path newFile = new Path("testPermission");
+ Path newFile = methodPath();
OutputStream output = fs.create(newFile);
output.write(13);
output.close();
@@ -540,14 +567,14 @@ public abstract class NativeAzureFileSystemBaseTest
// Don't check the file length for page blobs. Only block blobs
// provide the actual length of bytes written.
- if (!(this instanceof TestNativeAzureFSPageBlobLive)) {
+ if (!(this instanceof ITestNativeAzureFSPageBlobLive)) {
assertEquals(1, newStatus.getLen());
}
}
@Test
public void testSetPermissionOnFolder() throws Exception {
- Path newFolder = new Path("testPermission");
+ Path newFolder = methodPath();
assertTrue(fs.mkdirs(newFolder));
FsPermission newPermission = new FsPermission((short) 0600);
fs.setPermission(newFolder, newPermission);
@@ -559,7 +586,7 @@ public abstract class NativeAzureFileSystemBaseTest
@Test
public void testSetOwnerOnFile() throws Exception {
- Path newFile = new Path("testOwner");
+ Path newFile = methodPath();
OutputStream output = fs.create(newFile);
output.write(13);
output.close();
@@ -571,7 +598,7 @@ public abstract class NativeAzureFileSystemBaseTest
// File length is only reported to be the size of bytes written to the file for block blobs.
// So only check it for block blobs, not page blobs.
- if (!(this instanceof TestNativeAzureFSPageBlobLive)) {
+ if (!(this instanceof ITestNativeAzureFSPageBlobLive)) {
assertEquals(1, newStatus.getLen());
}
fs.setOwner(newFile, null, "newGroup");
@@ -583,7 +610,7 @@ public abstract class NativeAzureFileSystemBaseTest
@Test
public void testSetOwnerOnFolder() throws Exception {
- Path newFolder = new Path("testOwner");
+ Path newFolder = methodPath();
assertTrue(fs.mkdirs(newFolder));
fs.setOwner(newFolder, "newUser", null);
FileStatus newStatus = fs.getFileStatus(newFolder);
@@ -594,21 +621,21 @@ public abstract class NativeAzureFileSystemBaseTest
@Test
public void testModifiedTimeForFile() throws Exception {
- Path testFile = new Path("testFile");
+ Path testFile = methodPath();
fs.create(testFile).close();
testModifiedTime(testFile);
}
@Test
public void testModifiedTimeForFolder() throws Exception {
- Path testFolder = new Path("testFolder");
+ Path testFolder = methodPath();
assertTrue(fs.mkdirs(testFolder));
testModifiedTime(testFolder);
}
@Test
public void testFolderLastModifiedTime() throws Exception {
- Path parentFolder = new Path("testFolder");
+ Path parentFolder = methodPath();
Path innerFile = new Path(parentFolder, "innerfile");
assertTrue(fs.mkdirs(parentFolder));
@@ -983,7 +1010,7 @@ public abstract class NativeAzureFileSystemBaseTest
// Make sure rename pending file is gone.
FileStatus[] listed = fs.listStatus(new Path("/"));
- assertEquals(1, listed.length);
+ assertEquals("Pending directory still found", 1, listed.length);
assertTrue(listed[0].isDirectory());
}
@@ -1681,7 +1708,7 @@ public abstract class NativeAzureFileSystemBaseTest
assertTrue("Unanticipated exception", false);
}
} else {
- assertTrue("Unknown thread name", false);
+ fail("Unknown thread name");
}
LOG.info(name + " is exiting.");
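
The recurring edit in this file swaps hard-coded paths such as new Path("testFile") for methodPath(), so test cases running in parallel against the same container each work under a distinct path. methodPath() itself is defined outside this hunk; the following is a plausible sketch of such a helper built on JUnit's TestName rule — an assumption, as the real base class may derive the path differently.

import org.apache.hadoop.fs.Path;
import org.junit.Rule;
import org.junit.rules.TestName;

public abstract class PerMethodPathSketch {
  // JUnit 4 rule exposing the currently running test method's name.
  @Rule
  public final TestName methodName = new TestName();

  /**
   * Sketch of a methodPath() helper: one unique path per test method,
   * e.g. testStoreRetrieveFile -> /testStoreRetrieveFile, so parallel
   * runs sharing a container never collide on a hard-coded name.
   */
  protected Path methodPath() {
    return new Path("/" + methodName.getMethodName());
  }
}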
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/RunningLiveWasbTests.txt
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/RunningLiveWasbTests.txt b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/RunningLiveWasbTests.txt
deleted file mode 100644
index 54ba4d8..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/RunningLiveWasbTests.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-========================================================================
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-=========================================================================
-
-In order to run Windows Azure Storage Blob (WASB) unit tests against a live
-Azure Storage account, you need to provide test account details in a configuration
-file called azure-test.xml. See hadoop-tools/hadoop-azure/README.txt for details
-on configuration, and how to run the tests.
\ No newline at end of file
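
The deleted README above directed contributors to an azure-test.xml resource for live-account credentials. As a rough sketch of how such a resource is picked up through the Hadoop Configuration mechanism — the account name is a placeholder, and the exact key-property pattern should be checked against the current hadoop-azure documentation:

import org.apache.hadoop.conf.Configuration;

public class LoadAzureTestConf {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // azure-test.xml must be on the test classpath for this lookup to work.
    conf.addResource("azure-test.xml");
    // Assumed key pattern for a WASB storage account's access key.
    String key = conf.get(
        "fs.azure.account.key.youraccount.blob.core.windows.net");
    System.out.println(key == null
        ? "No live-test credentials configured; live tests would be skipped."
        : "Live-test credentials found.");
  }
}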
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureConcurrentOutOfBandIo.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureConcurrentOutOfBandIo.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureConcurrentOutOfBandIo.java
deleted file mode 100644
index a10a366..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureConcurrentOutOfBandIo.java
+++ /dev/null
@@ -1,195 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
-import static org.junit.Assume.assumeNotNull;
-
-import java.io.*;
-import java.util.Arrays;
-
-import org.apache.hadoop.fs.azure.AzureException;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.fs.permission.PermissionStatus;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-public class TestAzureConcurrentOutOfBandIo {
-
- // Class constants.
- static final int DOWNLOAD_BLOCK_SIZE = 8 * 1024 * 1024;
- static final int UPLOAD_BLOCK_SIZE = 4 * 1024 * 1024;
- static final int BLOB_SIZE = 32 * 1024 * 1024;
-
- // Number of blocks to be written before flush.
- static final int NUMBER_OF_BLOCKS = 2;
-
- protected AzureBlobStorageTestAccount testAccount;
-
- // Overridden TestCase methods.
- @Before
- public void setUp() throws Exception {
- testAccount = AzureBlobStorageTestAccount.createOutOfBandStore(
- UPLOAD_BLOCK_SIZE, DOWNLOAD_BLOCK_SIZE);
- assumeNotNull(testAccount);
- }
-
- @After
- public void tearDown() throws Exception {
- if (testAccount != null) {
- testAccount.cleanup();
- testAccount = null;
- }
- }
-
- class DataBlockWriter implements Runnable {
-
- Thread runner;
- AzureBlobStorageTestAccount writerStorageAccount;
- String key;
- boolean done = false;
-
- /**
- * Constructor captures the test account.
- *
- * @param testAccount
- */
- public DataBlockWriter(AzureBlobStorageTestAccount testAccount, String key) {
- writerStorageAccount = testAccount;
- this.key = key;
- }
-
- /**
- * Start writing blocks to Azure storage.
- */
- public void startWriting() {
- runner = new Thread(this); // Create the block writer thread.
- runner.start(); // Start the block writer thread.
- }
-
- /**
- * Stop writing blocks to Azure storage.
- */
- public void stopWriting() {
- done = true;
- }
-
- /**
- * Implementation of the runnable interface. The run method is a tight loop
- * which repeatedly updates the blob with a 4 MB block.
- */
- public void run() {
- byte[] dataBlockWrite = new byte[UPLOAD_BLOCK_SIZE];
-
- OutputStream outputStream = null;
-
- try {
- for (int i = 0; !done; i++) {
- // Write two 4 MB blocks to the blob.
- //
- outputStream = writerStorageAccount.getStore().storefile(
- key,
- new PermissionStatus("", "", FsPermission.getDefault()),
- key);
-
- Arrays.fill(dataBlockWrite, (byte) (i % 256));
- for (int j = 0; j < NUMBER_OF_BLOCKS; j++) {
- outputStream.write(dataBlockWrite);
- }
-
- outputStream.flush();
- outputStream.close();
- }
- } catch (AzureException e) {
- System.out
- .println("DatablockWriter thread encountered a storage exception."
- + e.getMessage());
- } catch (IOException e) {
- System.out
- .println("DatablockWriter thread encountered an I/O exception."
- + e.getMessage());
- }
- }
- }
-
- @Test
- public void testReadOOBWrites() throws Exception {
-
- byte[] dataBlockWrite = new byte[UPLOAD_BLOCK_SIZE];
- byte[] dataBlockRead = new byte[UPLOAD_BLOCK_SIZE];
-
- // Write to blob to make sure it exists.
- //
- // Write five 4 MB blocks to the blob. To ensure there is data in the blob before
- // reading. This eliminates the race between the reader and writer threads.
- OutputStream outputStream = testAccount.getStore().storefile(
- "WASB_String.txt",
- new PermissionStatus("", "", FsPermission.getDefault()),
- "WASB_String.txt");
- Arrays.fill(dataBlockWrite, (byte) 255);
- for (int i = 0; i < NUMBER_OF_BLOCKS; i++) {
- outputStream.write(dataBlockWrite);
- }
-
- outputStream.flush();
- outputStream.close();
-
- // Start writing blocks to Azure store using the DataBlockWriter thread.
- DataBlockWriter writeBlockTask = new DataBlockWriter(testAccount,
- "WASB_String.txt");
- writeBlockTask.startWriting();
- int count = 0;
- InputStream inputStream = null;
-
- for (int i = 0; i < 5; i++) {
- try {
- inputStream = testAccount.getStore().retrieve("WASB_String.txt");
- count = 0;
- int c = 0;
-
- while (c >= 0) {
- c = inputStream.read(dataBlockRead, 0, UPLOAD_BLOCK_SIZE);
- if (c < 0) {
- break;
- }
-
- // Counting the number of bytes.
- count += c;
- }
- } catch (IOException e) {
- System.out.println(e.getCause().toString());
- e.printStackTrace();
- fail();
- }
-
- // Close the stream.
- if (null != inputStream){
- inputStream.close();
- }
- }
-
- // Stop writing blocks.
- writeBlockTask.stopWriting();
-
- // Validate that a block was read.
- assertEquals(NUMBER_OF_BLOCKS * UPLOAD_BLOCK_SIZE, count);
- }
-}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureConcurrentOutOfBandIoWithSecureMode.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureConcurrentOutOfBandIoWithSecureMode.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureConcurrentOutOfBandIoWithSecureMode.java
deleted file mode 100644
index 687b785..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureConcurrentOutOfBandIoWithSecureMode.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.fs.permission.PermissionStatus;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.io.DataInputStream;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.Arrays;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
-import static org.junit.Assume.assumeNotNull;
-
-/**
- * Extends TestAzureConcurrentOutOfBandIo in order to run testReadOOBWrites with secure mode
- * (fs.azure.secure.mode) both enabled and disabled.
- */
-public class TestAzureConcurrentOutOfBandIoWithSecureMode extends TestAzureConcurrentOutOfBandIo {
-
- // Overridden TestCase methods.
- @Before
- @Override
- public void setUp() throws Exception {
- testAccount = AzureBlobStorageTestAccount.createOutOfBandStore(
- UPLOAD_BLOCK_SIZE, DOWNLOAD_BLOCK_SIZE, true);
- assumeNotNull(testAccount);
- }
-}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureFileSystemErrorConditions.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureFileSystemErrorConditions.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureFileSystemErrorConditions.java
deleted file mode 100644
index c985224..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureFileSystemErrorConditions.java
+++ /dev/null
@@ -1,244 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import static org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.NO_ACCESS_TO_CONTAINER_MSG;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assume.assumeNotNull;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.net.HttpURLConnection;
-import java.net.URI;
-import java.util.Arrays;
-import java.util.HashMap;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.TestHookOperationContext;
-import org.apache.hadoop.test.GenericTestUtils;
-
-import org.junit.Test;
-
-import com.microsoft.azure.storage.OperationContext;
-import com.microsoft.azure.storage.SendingRequestEvent;
-import com.microsoft.azure.storage.StorageEvent;
-
-public class TestAzureFileSystemErrorConditions {
- private static final int ALL_THREE_FILE_SIZE = 1024;
-
- @Test
- public void testNoInitialize() throws Exception {
- AzureNativeFileSystemStore store = new AzureNativeFileSystemStore();
- boolean passed = false;
- try {
- store.retrieveMetadata("foo");
- passed = true;
- } catch (AssertionError e) {
- }
- assertFalse(
- "Doing an operation on the store should throw if not initalized.",
- passed);
- }
-
- /**
- * Try accessing an unauthorized or non-existent (treated the same) container
- * from WASB.
- */
- @Test
- public void testAccessUnauthorizedPublicContainer() throws Exception {
- final String container = "nonExistentContainer";
- final String account = "hopefullyNonExistentAccount";
- Path noAccessPath = new Path(
- "wasb://" + container + "@" + account + "/someFile");
- NativeAzureFileSystem.suppressRetryPolicy();
- try {
- FileSystem.get(noAccessPath.toUri(), new Configuration())
- .open(noAccessPath);
- assertTrue("Should've thrown.", false);
- } catch (AzureException ex) {
- GenericTestUtils.assertExceptionContains(
- String.format(NO_ACCESS_TO_CONTAINER_MSG, account, container), ex);
- } finally {
- NativeAzureFileSystem.resumeRetryPolicy();
- }
- }
-
- @Test
- public void testAccessContainerWithWrongVersion() throws Exception {
- AzureNativeFileSystemStore store = new AzureNativeFileSystemStore();
- MockStorageInterface mockStorage = new MockStorageInterface();
- store.setAzureStorageInteractionLayer(mockStorage);
- FileSystem fs = new NativeAzureFileSystem(store);
- try {
- Configuration conf = new Configuration();
- AzureBlobStorageTestAccount.setMockAccountKey(conf);
- HashMap<String, String> metadata = new HashMap<String, String>();
- metadata.put(AzureNativeFileSystemStore.VERSION_METADATA_KEY,
- "2090-04-05"); // It's from the future!
- mockStorage.addPreExistingContainer(
- AzureBlobStorageTestAccount.getMockContainerUri(), metadata);
-
- boolean passed = false;
- try {
- fs.initialize(new URI(AzureBlobStorageTestAccount.MOCK_WASB_URI), conf);
- fs.listStatus(new Path("/"));
- passed = true;
- } catch (AzureException ex) {
- assertTrue("Unexpected exception message: " + ex,
- ex.getMessage().contains("unsupported version: 2090-04-05."));
- }
- assertFalse("Should've thrown an exception because of the wrong version.",
- passed);
- } finally {
- fs.close();
- }
- }
-
- private interface ConnectionRecognizer {
- boolean isTargetConnection(HttpURLConnection connection);
- }
-
- private class TransientErrorInjector extends StorageEvent<SendingRequestEvent> {
- final ConnectionRecognizer connectionRecognizer;
- private boolean injectedErrorOnce = false;
-
- public TransientErrorInjector(ConnectionRecognizer connectionRecognizer) {
- this.connectionRecognizer = connectionRecognizer;
- }
-
- @Override
- public void eventOccurred(SendingRequestEvent eventArg) {
- HttpURLConnection connection = (HttpURLConnection)eventArg.getConnectionObject();
- if (!connectionRecognizer.isTargetConnection(connection)) {
- return;
- }
- if (!injectedErrorOnce) {
- connection.setReadTimeout(1);
- connection.disconnect();
- injectedErrorOnce = true;
- }
- }
- }
-
- private void injectTransientError(NativeAzureFileSystem fs,
- final ConnectionRecognizer connectionRecognizer) {
- fs.getStore().addTestHookToOperationContext(new TestHookOperationContext() {
- @Override
- public OperationContext modifyOperationContext(OperationContext original) {
- original.getSendingRequestEventHandler().addListener(
- new TransientErrorInjector(connectionRecognizer));
- return original;
- }
- });
- }
-
- @Test
- public void testTransientErrorOnDelete() throws Exception {
- // Need to do this test against a live storage account
- AzureBlobStorageTestAccount testAccount =
- AzureBlobStorageTestAccount.create();
- assumeNotNull(testAccount);
- try {
- NativeAzureFileSystem fs = testAccount.getFileSystem();
- injectTransientError(fs, new ConnectionRecognizer() {
- @Override
- public boolean isTargetConnection(HttpURLConnection connection) {
- return connection.getRequestMethod().equals("DELETE");
- }
- });
- Path testFile = new Path("/a/b");
- assertTrue(fs.createNewFile(testFile));
- assertTrue(fs.rename(testFile, new Path("/x")));
- } finally {
- testAccount.cleanup();
- }
- }
-
- private void writeAllThreeFile(NativeAzureFileSystem fs, Path testFile)
- throws IOException {
- byte[] buffer = new byte[ALL_THREE_FILE_SIZE];
- Arrays.fill(buffer, (byte)3);
- OutputStream stream = fs.create(testFile);
- stream.write(buffer);
- stream.close();
- }
-
- private void readAllThreeFile(NativeAzureFileSystem fs, Path testFile)
- throws IOException {
- byte[] buffer = new byte[ALL_THREE_FILE_SIZE];
- InputStream inStream = fs.open(testFile);
- assertEquals(buffer.length,
- inStream.read(buffer, 0, buffer.length));
- inStream.close();
- for (int i = 0; i < buffer.length; i++) {
- assertEquals(3, buffer[i]);
- }
- }
-
- @Test
- public void testTransientErrorOnCommitBlockList() throws Exception {
- // Need to do this test against a live storage account
- AzureBlobStorageTestAccount testAccount =
- AzureBlobStorageTestAccount.create();
- assumeNotNull(testAccount);
- try {
- NativeAzureFileSystem fs = testAccount.getFileSystem();
- injectTransientError(fs, new ConnectionRecognizer() {
- @Override
- public boolean isTargetConnection(HttpURLConnection connection) {
- return connection.getRequestMethod().equals("PUT")
- && connection.getURL().getQuery() != null
- && connection.getURL().getQuery().contains("blocklist");
- }
- });
- Path testFile = new Path("/a/b");
- writeAllThreeFile(fs, testFile);
- readAllThreeFile(fs, testFile);
- } finally {
- testAccount.cleanup();
- }
- }
-
- @Test
- public void testTransientErrorOnRead() throws Exception {
- // Need to do this test against a live storage account
- AzureBlobStorageTestAccount testAccount =
- AzureBlobStorageTestAccount.create();
- assumeNotNull(testAccount);
- try {
- NativeAzureFileSystem fs = testAccount.getFileSystem();
- Path testFile = new Path("/a/b");
- writeAllThreeFile(fs, testFile);
- injectTransientError(fs, new ConnectionRecognizer() {
- @Override
- public boolean isTargetConnection(HttpURLConnection connection) {
- return connection.getRequestMethod().equals("GET");
- }
- });
- readAllThreeFile(fs, testFile);
- } finally {
- testAccount.cleanup();
- }
- }
-}
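
Stripped of the Azure SDK plumbing, the TransientErrorInjector above implements a one-shot fault: fail the first matching request, then let every later one through, so the client's retry path is exercised exactly once. A distilled sketch of that pattern (the names here are illustrative, not from the patch):

import java.util.concurrent.atomic.AtomicBoolean;

public class OneShotFaultSketch {
  // Flips to true after the first injection, so later calls pass through.
  private final AtomicBoolean injected = new AtomicBoolean(false);

  /** Throws on the first call only; all subsequent calls succeed. */
  public void maybeFail() {
    if (injected.compareAndSet(false, true)) {
      throw new RuntimeException("injected transient error");
    }
  }

  public static void main(String[] args) {
    OneShotFaultSketch fault = new OneShotFaultSketch();
    for (int attempt = 1; attempt <= 3; attempt++) {
      try {
        fault.maybeFail();
        System.out.println("attempt " + attempt + " succeeded");
      } catch (RuntimeException e) {
        System.out.println("attempt " + attempt + " hit " + e.getMessage());
      }
    }
  }
}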
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobDataValidation.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobDataValidation.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobDataValidation.java
deleted file mode 100644
index ea17b62..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobDataValidation.java
+++ /dev/null
@@ -1,237 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import static org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.KEY_CHECK_BLOCK_MD5;
-import static org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.KEY_STORE_BLOB_MD5;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.fail;
-import static org.junit.Assume.assumeNotNull;
-
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.net.HttpURLConnection;
-import java.util.Arrays;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.TestHookOperationContext;
-import org.junit.After;
-import org.junit.Test;
-
-import com.microsoft.azure.storage.Constants;
-import com.microsoft.azure.storage.OperationContext;
-import com.microsoft.azure.storage.ResponseReceivedEvent;
-import com.microsoft.azure.storage.StorageErrorCodeStrings;
-import com.microsoft.azure.storage.StorageEvent;
-import com.microsoft.azure.storage.StorageException;
-import com.microsoft.azure.storage.blob.BlockEntry;
-import com.microsoft.azure.storage.blob.BlockSearchMode;
-import com.microsoft.azure.storage.blob.CloudBlockBlob;
-import com.microsoft.azure.storage.core.Base64;
-
-/**
- * Test that we do proper data integrity validation with MD5 checks as
- * configured.
- */
-public class TestBlobDataValidation {
- private AzureBlobStorageTestAccount testAccount;
-
- @After
- public void tearDown() throws Exception {
- if (testAccount != null) {
- testAccount.cleanup();
- testAccount = null;
- }
- }
-
- /**
- * Test that by default we don't store the blob-level MD5.
- */
- @Test
- public void testBlobMd5StoreOffByDefault() throws Exception {
- testAccount = AzureBlobStorageTestAccount.create();
- testStoreBlobMd5(false);
- }
-
- /**
- * Test that we get blob-level MD5 storage and validation if we specify that
- * in the configuration.
- */
- @Test
- public void testStoreBlobMd5() throws Exception {
- Configuration conf = new Configuration();
- conf.setBoolean(KEY_STORE_BLOB_MD5, true);
- testAccount = AzureBlobStorageTestAccount.create(conf);
- testStoreBlobMd5(true);
- }
-
- private void testStoreBlobMd5(boolean expectMd5Stored) throws Exception {
- assumeNotNull(testAccount);
- // Write a test file.
- String testFileKey = "testFile";
- Path testFilePath = new Path("/" + testFileKey);
- OutputStream outStream = testAccount.getFileSystem().create(testFilePath);
- outStream.write(new byte[] { 5, 15 });
- outStream.close();
-
- // Check that we stored/didn't store the MD5 field as configured.
- CloudBlockBlob blob = testAccount.getBlobReference(testFileKey);
- blob.downloadAttributes();
- String obtainedMd5 = blob.getProperties().getContentMD5();
- if (expectMd5Stored) {
- assertNotNull(obtainedMd5);
- } else {
- assertNull("Expected no MD5, found: " + obtainedMd5, obtainedMd5);
- }
-
- // Mess with the content so it doesn't match the MD5.
- String newBlockId = Base64.encode(new byte[] { 55, 44, 33, 22 });
- blob.uploadBlock(newBlockId,
- new ByteArrayInputStream(new byte[] { 6, 45 }), 2);
- blob.commitBlockList(Arrays.asList(new BlockEntry[] { new BlockEntry(
- newBlockId, BlockSearchMode.UNCOMMITTED) }));
-
- // Now read back the content. If we stored the MD5 for the blob content
- // we should get a data corruption error.
- InputStream inStream = testAccount.getFileSystem().open(testFilePath);
- try {
- byte[] inBuf = new byte[100];
- while (inStream.read(inBuf) > 0){
- //nothing;
- }
- inStream.close();
- if (expectMd5Stored) {
- fail("Should've thrown because of data corruption.");
- }
- } catch (IOException ex) {
- if (!expectMd5Stored) {
- throw ex;
- }
- StorageException cause = (StorageException)ex.getCause();
- assertNotNull(cause);
- assertEquals("Unexpected cause: " + cause,
- StorageErrorCodeStrings.INVALID_MD5, cause.getErrorCode());
- }
- }
-
- /**
- * Test that by default we check block-level MD5.
- */
- @Test
- public void testCheckBlockMd5() throws Exception {
- testAccount = AzureBlobStorageTestAccount.create();
- testCheckBlockMd5(true);
- }
-
- /**
- * Test that we don't check block-level MD5 if we specify that in the
- * configuration.
- */
- @Test
- public void testDontCheckBlockMd5() throws Exception {
- Configuration conf = new Configuration();
- conf.setBoolean(KEY_CHECK_BLOCK_MD5, false);
- testAccount = AzureBlobStorageTestAccount.create(conf);
- testCheckBlockMd5(false);
- }
-
- /**
- * Connection inspector to check that MD5 fields for content is set/not set as
- * expected.
- */
- private static class ContentMD5Checker extends
- StorageEvent<ResponseReceivedEvent> {
- private final boolean expectMd5;
-
- public ContentMD5Checker(boolean expectMd5) {
- this.expectMd5 = expectMd5;
- }
-
- @Override
- public void eventOccurred(ResponseReceivedEvent eventArg) {
- HttpURLConnection connection = (HttpURLConnection) eventArg
- .getConnectionObject();
- if (isGetRange(connection)) {
- checkObtainedMd5(connection
- .getHeaderField(Constants.HeaderConstants.CONTENT_MD5));
- } else if (isPutBlock(connection)) {
- checkObtainedMd5(connection
- .getRequestProperty(Constants.HeaderConstants.CONTENT_MD5));
- }
- }
-
- private void checkObtainedMd5(String obtainedMd5) {
- if (expectMd5) {
- assertNotNull(obtainedMd5);
- } else {
- assertNull("Expected no MD5, found: " + obtainedMd5, obtainedMd5);
- }
- }
-
- private static boolean isPutBlock(HttpURLConnection connection) {
- return connection.getRequestMethod().equals("PUT")
- && connection.getURL().getQuery() != null
- && connection.getURL().getQuery().contains("blockid");
- }
-
- private static boolean isGetRange(HttpURLConnection connection) {
- return connection.getRequestMethod().equals("GET")
- && connection
- .getHeaderField(Constants.HeaderConstants.STORAGE_RANGE_HEADER) != null;
- }
- }
-
- private void testCheckBlockMd5(final boolean expectMd5Checked)
- throws Exception {
- assumeNotNull(testAccount);
- Path testFilePath = new Path("/testFile");
-
- // Add a hook to check that for GET/PUT requests we set/don't set
- // the block-level MD5 field as configured. I tried to do clever
- // testing by also messing with the raw data to see if we actually
- // validate the data as expected, but the HttpURLConnection wasn't
- // pluggable enough for me to do that.
- testAccount.getFileSystem().getStore()
- .addTestHookToOperationContext(new TestHookOperationContext() {
- @Override
- public OperationContext modifyOperationContext(
- OperationContext original) {
- original.getResponseReceivedEventHandler().addListener(
- new ContentMD5Checker(expectMd5Checked));
- return original;
- }
- });
-
- OutputStream outStream = testAccount.getFileSystem().create(testFilePath);
- outStream.write(new byte[] { 5, 15 });
- outStream.close();
-
- InputStream inStream = testAccount.getFileSystem().open(testFilePath);
- byte[] inBuf = new byte[100];
- while (inStream.read(inBuf) > 0){
- //nothing;
- }
- inStream.close();
- }
-}
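
For reference, the two scenarios in the deleted test above differ only in configuration. A compact sketch of building those configurations follows; the constant names come from the static imports in the hunk, and per the tests above the defaults are to skip blob-level MD5 storage and to perform block-level MD5 checks. The sketch sits in the same package as the test so the constants resolve.

package org.apache.hadoop.fs.azure;

import org.apache.hadoop.conf.Configuration;

import static org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.KEY_CHECK_BLOCK_MD5;
import static org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.KEY_STORE_BLOB_MD5;

public class Md5ConfigSketch {
  /**
   * Build a Configuration matching the scenarios exercised above:
   * storeBlobMd5 defaults to false, block-level checking defaults to true.
   */
  public static Configuration md5Conf(boolean storeBlobMd5,
      boolean checkBlockMd5) {
    Configuration conf = new Configuration();
    conf.setBoolean(KEY_STORE_BLOB_MD5, storeBlobMd5);
    conf.setBoolean(KEY_CHECK_BLOCK_MD5, checkBlockMd5);
    return conf;
  }
}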
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobMetadata.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobMetadata.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobMetadata.java
index 6c49926..30c1028 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobMetadata.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobMetadata.java
@@ -18,11 +18,6 @@
package org.apache.hadoop.fs.azure;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-
import java.io.Closeable;
import java.io.IOException;
import java.net.URI;
@@ -42,7 +37,7 @@ import org.junit.Test;
/**
* Tests that we put the correct metadata on blobs created through WASB.
*/
-public class TestBlobMetadata {
+public class TestBlobMetadata extends AbstractWasbTestWithTimeout {
private AzureBlobStorageTestAccount testAccount;
private FileSystem fs;
private InMemoryBlockBlobStore backingStore;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobOperationDescriptor.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobOperationDescriptor.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobOperationDescriptor.java
index 07d4ebc..aca5f81 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobOperationDescriptor.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobOperationDescriptor.java
@@ -33,9 +33,6 @@ import org.junit.Test;
import java.net.HttpURLConnection;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertEquals;
-
/**
* Tests for <code>BlobOperationDescriptor</code>.
*/
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobTypeSpeedDifference.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobTypeSpeedDifference.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobTypeSpeedDifference.java
deleted file mode 100644
index afb16ef..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobTypeSpeedDifference.java
+++ /dev/null
@@ -1,160 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import java.io.*;
-import java.util.*;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.*;
-import org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation;
-
-import junit.framework.*;
-
-import org.junit.Test;
-
-
-/**
- * A simple benchmark to find out the difference in speed between block
- * and page blobs.
- */
-public class TestBlobTypeSpeedDifference extends TestCase {
- /**
- * Writes data to the given stream of the given size, flushing every
- * x bytes.
- */
- private static void writeTestFile(OutputStream writeStream,
- long size, long flushInterval) throws IOException {
- int bufferSize = (int) Math.min(1000, flushInterval);
- byte[] buffer = new byte[bufferSize];
- Arrays.fill(buffer, (byte) 7);
- int bytesWritten = 0;
- int bytesUnflushed = 0;
- while (bytesWritten < size) {
- int numberToWrite = (int) Math.min(bufferSize, size - bytesWritten);
- writeStream.write(buffer, 0, numberToWrite);
- bytesWritten += numberToWrite;
- bytesUnflushed += numberToWrite;
- if (bytesUnflushed >= flushInterval) {
- writeStream.flush();
- bytesUnflushed = 0;
- }
- }
- }
-
- private static class TestResult {
- final long timeTakenInMs;
- final long totalNumberOfRequests;
-
- TestResult(long timeTakenInMs, long totalNumberOfRequests) {
- this.timeTakenInMs = timeTakenInMs;
- this.totalNumberOfRequests = totalNumberOfRequests;
- }
- }
-
- /**
- * Writes data to the given file of the given size, flushing every
- * x bytes. Measure performance of that and return it.
- */
- private static TestResult writeTestFile(NativeAzureFileSystem fs, Path path,
- long size, long flushInterval) throws IOException {
- AzureFileSystemInstrumentation instrumentation =
- fs.getInstrumentation();
- long initialRequests = instrumentation.getCurrentWebResponses();
- Date start = new Date();
- OutputStream output = fs.create(path);
- writeTestFile(output, size, flushInterval);
- output.close();
- long finalRequests = instrumentation.getCurrentWebResponses();
- return new TestResult(new Date().getTime() - start.getTime(),
- finalRequests - initialRequests);
- }
-
- /**
- * Writes data to a block blob of the given size, flushing every
- * x bytes. Measure performance of that and return it.
- */
- private static TestResult writeBlockBlobTestFile(NativeAzureFileSystem fs,
- long size, long flushInterval) throws IOException {
- return writeTestFile(fs, new Path("/blockBlob"), size, flushInterval);
- }
-
- /**
- * Writes data to a page blob of the given size, flushing every
- * x bytes. Measure performance of that and return it.
- */
- private static TestResult writePageBlobTestFile(NativeAzureFileSystem fs,
- long size, long flushInterval) throws IOException {
- return writeTestFile(fs,
- AzureBlobStorageTestAccount.pageBlobPath("pageBlob"),
- size, flushInterval);
- }
-
- /**
- * Runs the benchmark over a small 10 KB file, flushing every 500 bytes.
- */
- @Test
- public void testTenKbFileFrequentFlush() throws Exception {
- AzureBlobStorageTestAccount testAccount =
- AzureBlobStorageTestAccount.create();
- if (testAccount == null) {
- return;
- }
- try {
- testForSizeAndFlushInterval(testAccount.getFileSystem(), 10 * 1000, 500);
- } finally {
- testAccount.cleanup();
- }
- }
-
- /**
- * Runs the benchmark for the given file size and flush frequency.
- */
- private static void testForSizeAndFlushInterval(NativeAzureFileSystem fs,
- final long size, final long flushInterval) throws IOException {
- for (int i = 0; i < 5; i++) {
- TestResult pageBlobResults = writePageBlobTestFile(fs, size, flushInterval);
- System.out.printf(
- "Page blob upload took %d ms. Total number of requests: %d.\n",
- pageBlobResults.timeTakenInMs, pageBlobResults.totalNumberOfRequests);
- TestResult blockBlobResults = writeBlockBlobTestFile(fs, size, flushInterval);
- System.out.printf(
- "Block blob upload took %d ms. Total number of requests: %d.\n",
- blockBlobResults.timeTakenInMs, blockBlobResults.totalNumberOfRequests);
- }
- }
-
- /**
- * Runs the benchmark for the given file size and flush frequency from the
- * command line.
- */
- public static void main(String argv[]) throws Exception {
- Configuration conf = new Configuration();
- long size = 10 * 1000 * 1000;
- long flushInterval = 2000;
- if (argv.length > 0) {
- size = Long.parseLong(argv[0]);
- }
- if (argv.length > 1) {
- flushInterval = Long.parseLong(argv[1]);
- }
- testForSizeAndFlushInterval((NativeAzureFileSystem)FileSystem.get(conf),
- size, flushInterval);
- }
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlockBlobInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlockBlobInputStream.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlockBlobInputStream.java
deleted file mode 100644
index 0ae4012..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlockBlobInputStream.java
+++ /dev/null
@@ -1,875 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import java.io.EOFException;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.util.EnumSet;
-import java.util.Random;
-import java.util.concurrent.Callable;
-
-import org.junit.FixMethodOrder;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-import org.junit.runners.MethodSorters;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FSExceptionMessages;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.contract.ContractTestUtils;
-import org.apache.hadoop.fs.contract.ContractTestUtils.NanoTimer;
-
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assume.assumeNotNull;
-
-import static org.apache.hadoop.test.LambdaTestUtils.*;
-
-/**
- * Test semantics and performance of the original block blob input stream
- * (KEY_INPUT_STREAM_VERSION=1) and the new
- * <code>BlockBlobInputStream</code> (KEY_INPUT_STREAM_VERSION=2).
- */
-@FixMethodOrder(MethodSorters.NAME_ASCENDING)
-
-public class TestBlockBlobInputStream extends AbstractWasbTestBase {
- private static final Logger LOG = LoggerFactory.getLogger(
- TestBlockBlobInputStream.class);
- private static final int KILOBYTE = 1024;
- private static final int MEGABYTE = KILOBYTE * KILOBYTE;
- private static final int TEST_FILE_SIZE = 6 * MEGABYTE;
- private static final Path TEST_FILE_PATH = new Path(
- "TestBlockBlobInputStream.txt");
-
- private AzureBlobStorageTestAccount accountUsingInputStreamV1;
- private AzureBlobStorageTestAccount accountUsingInputStreamV2;
- private long testFileLength;
-
- /**
- * Long test timeout.
- */
- @Rule
- public Timeout testTimeout = new Timeout(10 * 60 * 1000);
- private FileStatus testFileStatus;
- private Path hugefile;
-
- @Override
- public void setUp() throws Exception {
- super.setUp();
- Configuration conf = new Configuration();
- conf.setInt(AzureNativeFileSystemStore.KEY_INPUT_STREAM_VERSION, 1);
-
- accountUsingInputStreamV1 = AzureBlobStorageTestAccount.create(
- "testblockblobinputstream",
- EnumSet.of(AzureBlobStorageTestAccount.CreateOptions.CreateContainer),
- conf,
- true);
-
- accountUsingInputStreamV2 = AzureBlobStorageTestAccount.create(
- "testblockblobinputstream",
- EnumSet.noneOf(AzureBlobStorageTestAccount.CreateOptions.class),
- null,
- true);
-
- assumeNotNull(accountUsingInputStreamV1);
- assumeNotNull(accountUsingInputStreamV2);
- hugefile = fs.makeQualified(TEST_FILE_PATH);
- try {
- testFileStatus = fs.getFileStatus(TEST_FILE_PATH);
- testFileLength = testFileStatus.getLen();
- } catch (FileNotFoundException e) {
- // file doesn't exist
- testFileLength = 0;
- }
- }
-
- @Override
- protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
- Configuration conf = new Configuration();
- conf.setInt(AzureNativeFileSystemStore.KEY_INPUT_STREAM_VERSION, 1);
-
- accountUsingInputStreamV1 = AzureBlobStorageTestAccount.create(
- "testblockblobinputstream",
- EnumSet.of(AzureBlobStorageTestAccount.CreateOptions.CreateContainer),
- conf,
- true);
-
- accountUsingInputStreamV2 = AzureBlobStorageTestAccount.create(
- "testblockblobinputstream",
- EnumSet.noneOf(AzureBlobStorageTestAccount.CreateOptions.class),
- null,
- true);
-
- assumeNotNull(accountUsingInputStreamV1);
- assumeNotNull(accountUsingInputStreamV2);
- return accountUsingInputStreamV1;
- }
-
- /**
- * Create a test file by repeating the characters in the alphabet.
- * @throws IOException
- */
- private void createTestFileAndSetLength() throws IOException {
- FileSystem fs = accountUsingInputStreamV1.getFileSystem();
-
- // To reduce test run time, the test file can be reused.
- if (fs.exists(TEST_FILE_PATH)) {
- testFileStatus = fs.getFileStatus(TEST_FILE_PATH);
- testFileLength = testFileStatus.getLen();
- LOG.info("Reusing test file: {}", testFileStatus);
- return;
- }
-
- int sizeOfAlphabet = ('z' - 'a' + 1);
- byte[] buffer = new byte[26 * KILOBYTE];
- char character = 'a';
- for (int i = 0; i < buffer.length; i++) {
- buffer[i] = (byte) character;
- character = (character == 'z') ? 'a' : (char) ((int) character + 1);
- }
-
- LOG.info("Creating test file {} of size: {}", TEST_FILE_PATH,
- TEST_FILE_SIZE);
- ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer();
-
- try(FSDataOutputStream outputStream = fs.create(TEST_FILE_PATH)) {
- int bytesWritten = 0;
- while (bytesWritten < TEST_FILE_SIZE) {
- outputStream.write(buffer);
- bytesWritten += buffer.length;
- }
- LOG.info("Closing stream {}", outputStream);
- ContractTestUtils.NanoTimer closeTimer
- = new ContractTestUtils.NanoTimer();
- outputStream.close();
- closeTimer.end("time to close() output stream");
- }
- timer.end("time to write %d KB", TEST_FILE_SIZE / 1024);
- testFileLength = fs.getFileStatus(TEST_FILE_PATH).getLen();
- }
-
- void assumeHugeFileExists() throws IOException {
- ContractTestUtils.assertPathExists(fs, "huge file not created", hugefile);
- FileStatus status = fs.getFileStatus(hugefile);
- ContractTestUtils.assertIsFile(hugefile, status);
- assertTrue("File " + hugefile + " is empty", status.getLen() > 0);
- }
-
- /**
- * Calculate megabits per second from the specified values for bytes and
- * milliseconds.
- * @param bytes The number of bytes.
- * @param milliseconds The number of milliseconds.
- * @return The number of megabits per second.
- */
- private static double toMbps(long bytes, long milliseconds) {
- return bytes / 1000.0 * 8 / milliseconds;
- }
-
- @Test
- public void test_0100_CreateHugeFile() throws IOException {
- createTestFileAndSetLength();
- }
-
- @Test
- public void test_0200_BasicReadTest() throws Exception {
- assumeHugeFileExists();
-
- try (
- FSDataInputStream inputStreamV1
- = accountUsingInputStreamV1.getFileSystem().open(TEST_FILE_PATH);
-
- FSDataInputStream inputStreamV2
- = accountUsingInputStreamV2.getFileSystem().open(TEST_FILE_PATH);
- ) {
- byte[] bufferV1 = new byte[3 * MEGABYTE];
- byte[] bufferV2 = new byte[bufferV1.length];
-
- // v1 forward seek and read a kilobyte into first kilobyte of bufferV1
- inputStreamV1.seek(5 * MEGABYTE);
- int numBytesReadV1 = inputStreamV1.read(bufferV1, 0, KILOBYTE);
- assertEquals(KILOBYTE, numBytesReadV1);
-
- // v2 forward seek and read a kilobyte into first kilobyte of bufferV2
- inputStreamV2.seek(5 * MEGABYTE);
- int numBytesReadV2 = inputStreamV2.read(bufferV2, 0, KILOBYTE);
- assertEquals(KILOBYTE, numBytesReadV2);
-
- assertArrayEquals(bufferV1, bufferV2);
-
- int len = MEGABYTE;
- int offset = bufferV1.length - len;
-
- // v1 reverse seek and read a megabyte into last megabyte of bufferV1
- inputStreamV1.seek(3 * MEGABYTE);
- numBytesReadV1 = inputStreamV1.read(bufferV1, offset, len);
- assertEquals(len, numBytesReadV1);
-
- // v2 reverse seek and read a megabyte into last megabyte of bufferV2
- inputStreamV2.seek(3 * MEGABYTE);
- numBytesReadV2 = inputStreamV2.read(bufferV2, offset, len);
- assertEquals(len, numBytesReadV2);
-
- assertArrayEquals(bufferV1, bufferV2);
- }
- }
-
- @Test
- public void test_0201_RandomReadTest() throws Exception {
- assumeHugeFileExists();
-
- try (
- FSDataInputStream inputStreamV1
- = accountUsingInputStreamV1.getFileSystem().open(TEST_FILE_PATH);
-
- FSDataInputStream inputStreamV2
- = accountUsingInputStreamV2.getFileSystem().open(TEST_FILE_PATH);
- ) {
- final int bufferSize = 4 * KILOBYTE;
- byte[] bufferV1 = new byte[bufferSize];
- byte[] bufferV2 = new byte[bufferV1.length];
-
- verifyConsistentReads(inputStreamV1, inputStreamV2, bufferV1, bufferV2);
-
- inputStreamV1.seek(0);
- inputStreamV2.seek(0);
-
- verifyConsistentReads(inputStreamV1, inputStreamV2, bufferV1, bufferV2);
-
- verifyConsistentReads(inputStreamV1, inputStreamV2, bufferV1, bufferV2);
-
- int seekPosition = 2 * KILOBYTE;
- inputStreamV1.seek(seekPosition);
- inputStreamV2.seek(seekPosition);
-
- inputStreamV1.seek(0);
- inputStreamV2.seek(0);
-
- verifyConsistentReads(inputStreamV1, inputStreamV2, bufferV1, bufferV2);
-
- verifyConsistentReads(inputStreamV1, inputStreamV2, bufferV1, bufferV2);
-
- verifyConsistentReads(inputStreamV1, inputStreamV2, bufferV1, bufferV2);
-
- seekPosition = 5 * KILOBYTE;
- inputStreamV1.seek(seekPosition);
- inputStreamV2.seek(seekPosition);
-
- verifyConsistentReads(inputStreamV1, inputStreamV2, bufferV1, bufferV2);
-
- seekPosition = 10 * KILOBYTE;
- inputStreamV1.seek(seekPosition);
- inputStreamV2.seek(seekPosition);
-
- verifyConsistentReads(inputStreamV1, inputStreamV2, bufferV1, bufferV2);
-
- verifyConsistentReads(inputStreamV1, inputStreamV2, bufferV1, bufferV2);
-
- seekPosition = 4100 * KILOBYTE;
- inputStreamV1.seek(seekPosition);
- inputStreamV2.seek(seekPosition);
-
- verifyConsistentReads(inputStreamV1, inputStreamV2, bufferV1, bufferV2);
- }
- }
-
- private void verifyConsistentReads(FSDataInputStream inputStreamV1,
- FSDataInputStream inputStreamV2,
- byte[] bufferV1,
- byte[] bufferV2) throws IOException {
- int size = bufferV1.length;
- final int numBytesReadV1 = inputStreamV1.read(bufferV1, 0, size);
- assertEquals("Bytes read from V1 stream", size, numBytesReadV1);
-
- final int numBytesReadV2 = inputStreamV2.read(bufferV2, 0, size);
- assertEquals("Bytes read from V2 stream", size, numBytesReadV2);
-
- assertArrayEquals("Mismatch in read data", bufferV1, bufferV2);
- }
-
- /**
- * Validates the implementation of InputStream.markSupported.
- * @throws IOException
- */
- @Test
- public void test_0301_MarkSupportedV1() throws IOException {
- validateMarkSupported(accountUsingInputStreamV1.getFileSystem());
- }
-
- /**
- * Validates the implementation of InputStream.markSupported.
- * @throws IOException
- */
- @Test
- public void test_0302_MarkSupportedV2() throws IOException {
- validateMarkSupported(accountUsingInputStreamV2.getFileSystem());
- }
-
- private void validateMarkSupported(FileSystem fs) throws IOException {
- assumeHugeFileExists();
- try (FSDataInputStream inputStream = fs.open(TEST_FILE_PATH)) {
- assertTrue("mark is not supported", inputStream.markSupported());
- }
- }
-
- /**
- * Validates the implementation of InputStream.mark and reset
- * for version 1 of the block blob input stream.
- * @throws Exception
- */
- @Test
- public void test_0303_MarkAndResetV1() throws Exception {
- validateMarkAndReset(accountUsingInputStreamV1.getFileSystem());
- }
-
- /**
- * Validates the implementation of InputStream.mark and reset
- * for version 2 of the block blob input stream.
- * @throws Exception
- */
- @Test
- public void test_0304_MarkAndResetV2() throws Exception {
- validateMarkAndReset(accountUsingInputStreamV2.getFileSystem());
- }
-
- private void validateMarkAndReset(FileSystem fs) throws Exception {
- assumeHugeFileExists();
- try (FSDataInputStream inputStream = fs.open(TEST_FILE_PATH)) {
- inputStream.mark(KILOBYTE - 1);
-
- byte[] buffer = new byte[KILOBYTE];
- int bytesRead = inputStream.read(buffer);
- assertEquals(buffer.length, bytesRead);
-
- inputStream.reset();
- assertEquals("rest -> pos 0", 0, inputStream.getPos());
-
- inputStream.mark(8 * KILOBYTE - 1);
-
- buffer = new byte[8 * KILOBYTE];
- bytesRead = inputStream.read(buffer);
- assertEquals(buffer.length, bytesRead);
-
- intercept(IOException.class,
- "Resetting to invalid mark",
- new Callable<FSDataInputStream>() {
- @Override
- public FSDataInputStream call() throws Exception {
- inputStream.reset();
- return inputStream;
- }
- }
- );
- }
- }
-
- /**
- * Validates the implementation of Seekable.seekToNewSource, which should
- * return false for version 1 of the block blob input stream.
- * @throws IOException
- */
- @Test
- public void test_0305_SeekToNewSourceV1() throws IOException {
- validateSeekToNewSource(accountUsingInputStreamV1.getFileSystem());
- }
-
- /**
- * Validates the implementation of Seekable.seekToNewSource, which should
- * return false for version 2 of the block blob input stream.
- * @throws IOException
- */
- @Test
- public void test_0306_SeekToNewSourceV2() throws IOException {
- validateSeekToNewSource(accountUsingInputStreamV2.getFileSystem());
- }
-
- private void validateSeekToNewSource(FileSystem fs) throws IOException {
- assumeHugeFileExists();
- try (FSDataInputStream inputStream = fs.open(TEST_FILE_PATH)) {
- assertFalse(inputStream.seekToNewSource(0));
- }
- }
-
- /**
- * Validates the implementation of InputStream.skip and ensures there is no
- * network I/O for version 1 of the block blob input stream.
- * @throws Exception
- */
- @Test
- public void test_0307_SkipBoundsV1() throws Exception {
- validateSkipBounds(accountUsingInputStreamV1.getFileSystem());
- }
-
- /**
- * Validates the implementation of InputStream.skip and ensures there is no
- * network I/O for version 2 of the block blob input stream.
- * @throws Exception
- */
- @Test
- public void test_0308_SkipBoundsV2() throws Exception {
- validateSkipBounds(accountUsingInputStreamV2.getFileSystem());
- }
-
- private void validateSkipBounds(FileSystem fs) throws Exception {
- assumeHugeFileExists();
- try (FSDataInputStream inputStream = fs.open(TEST_FILE_PATH)) {
- NanoTimer timer = new NanoTimer();
-
- long skipped = inputStream.skip(-1);
- assertEquals(0, skipped);
-
- skipped = inputStream.skip(0);
- assertEquals(0, skipped);
-
- assertTrue(testFileLength > 0);
-
- skipped = inputStream.skip(testFileLength);
- assertEquals(testFileLength, skipped);
-
- intercept(EOFException.class,
- new Callable<Long>() {
- @Override
- public Long call() throws Exception {
- return inputStream.skip(1);
- }
- }
- );
- long elapsedTimeMs = timer.elapsedTimeMs();
- assertTrue(
- String.format(
- "There should not be any network I/O (elapsedTimeMs=%1$d).",
- elapsedTimeMs),
- elapsedTimeMs < 20);
- }
- }
-
- /**
- * Validates the implementation of Seekable.seek and ensures there is no
- * network I/O for forward seek.
- * @throws Exception
- */
- @Test
- public void test_0309_SeekBoundsV1() throws Exception {
- validateSeekBounds(accountUsingInputStreamV1.getFileSystem());
- }
-
- /**
- * Validates the implementation of Seekable.seek and ensures there is no
- * network I/O for forward seek.
- * @throws Exception
- */
- @Test
- public void test_0310_SeekBoundsV2() throws Exception {
- validateSeekBounds(accountUsingInputStreamV2.getFileSystem());
- }
-
- private void validateSeekBounds(FileSystem fs) throws Exception {
- assumeHugeFileExists();
- try (
- FSDataInputStream inputStream = fs.open(TEST_FILE_PATH);
- ) {
- NanoTimer timer = new NanoTimer();
-
- inputStream.seek(0);
- assertEquals(0, inputStream.getPos());
-
- intercept(EOFException.class,
- FSExceptionMessages.NEGATIVE_SEEK,
- new Callable<FSDataInputStream>() {
- @Override
- public FSDataInputStream call() throws Exception {
- inputStream.seek(-1);
- return inputStream;
- }
- }
- );
-
- assertTrue("Test file length only " + testFileLength, testFileLength > 0);
- inputStream.seek(testFileLength);
- assertEquals(testFileLength, inputStream.getPos());
-
- intercept(EOFException.class,
- FSExceptionMessages.CANNOT_SEEK_PAST_EOF,
- new Callable<FSDataInputStream>() {
- @Override
- public FSDataInputStream call() throws Exception {
- inputStream.seek(testFileLength + 1);
- return inputStream;
- }
- }
- );
-
- long elapsedTimeMs = timer.elapsedTimeMs();
- assertTrue(
- String.format(
- "There should not be any network I/O (elapsedTimeMs=%1$d).",
- elapsedTimeMs),
- elapsedTimeMs < 20);
- }
- }
-
- /**
- * Validates the implementation of Seekable.seek, Seekable.getPos,
- * and InputStream.available.
- * @throws Exception
- */
- @Test
- public void test_0311_SeekAndAvailableAndPositionV1() throws Exception {
- validateSeekAndAvailableAndPosition(
- accountUsingInputStreamV1.getFileSystem());
- }
-
- /**
- * Validates the implementation of Seekable.seek, Seekable.getPos,
- * and InputStream.available.
- * @throws Exception
- */
- @Test
- public void test_0312_SeekAndAvailableAndPositionV2() throws Exception {
- validateSeekAndAvailableAndPosition(
- accountUsingInputStreamV2.getFileSystem());
- }
-
- private void validateSeekAndAvailableAndPosition(FileSystem fs)
- throws Exception {
- assumeHugeFileExists();
- try (FSDataInputStream inputStream = fs.open(TEST_FILE_PATH)) {
- byte[] expected1 = {(byte) 'a', (byte) 'b', (byte) 'c'};
- byte[] expected2 = {(byte) 'd', (byte) 'e', (byte) 'f'};
- byte[] expected3 = {(byte) 'b', (byte) 'c', (byte) 'd'};
- byte[] expected4 = {(byte) 'g', (byte) 'h', (byte) 'i'};
- byte[] buffer = new byte[3];
-
- int bytesRead = inputStream.read(buffer);
- assertEquals(buffer.length, bytesRead);
- assertArrayEquals(expected1, buffer);
- assertEquals(buffer.length, inputStream.getPos());
- assertEquals(testFileLength - inputStream.getPos(),
- inputStream.available());
-
- bytesRead = inputStream.read(buffer);
- assertEquals(buffer.length, bytesRead);
- assertArrayEquals(expected2, buffer);
- assertEquals(2 * buffer.length, inputStream.getPos());
- assertEquals(testFileLength - inputStream.getPos(),
- inputStream.available());
-
- // reverse seek
- int seekPos = 0;
- inputStream.seek(seekPos);
-
- bytesRead = inputStream.read(buffer);
- assertEquals(buffer.length, bytesRead);
- assertArrayEquals(expected1, buffer);
- assertEquals(buffer.length + seekPos, inputStream.getPos());
- assertEquals(testFileLength - inputStream.getPos(),
- inputStream.available());
-
- // reverse seek
- seekPos = 1;
- inputStream.seek(seekPos);
-
- bytesRead = inputStream.read(buffer);
- assertEquals(buffer.length, bytesRead);
- assertArrayEquals(expected3, buffer);
- assertEquals(buffer.length + seekPos, inputStream.getPos());
- assertEquals(testFileLength - inputStream.getPos(),
- inputStream.available());
-
- // forward seek
- seekPos = 6;
- inputStream.seek(seekPos);
-
- bytesRead = inputStream.read(buffer);
- assertEquals(buffer.length, bytesRead);
- assertArrayEquals(expected4, buffer);
- assertEquals(buffer.length + seekPos, inputStream.getPos());
- assertEquals(testFileLength - inputStream.getPos(),
- inputStream.available());
- }
- }
-
- /**
- * Validates the implementation of InputStream.skip, Seekable.getPos,
- * and InputStream.available.
- * @throws IOException
- */
- @Test
- public void test_0313_SkipAndAvailableAndPositionV1() throws IOException {
- validateSkipAndAvailableAndPosition(
- accountUsingInputStreamV1.getFileSystem());
- }
-
- /**
- * Validates the implementation of InputStream.skip, Seekable.getPos,
- * and InputStream.available.
- * @throws IOException
- */
- @Test
- public void test_0314_SkipAndAvailableAndPositionV2() throws IOException {
- validateSkipAndAvailableAndPosition(
- accountUsingInputStreamV2.getFileSystem());
- }
-
- private void validateSkipAndAvailableAndPosition(FileSystem fs)
- throws IOException {
- assumeHugeFileExists();
- try (
- FSDataInputStream inputStream = fs.open(TEST_FILE_PATH);
- ) {
- byte[] expected1 = {(byte) 'a', (byte) 'b', (byte) 'c'};
- byte[] expected2 = {(byte) 'd', (byte) 'e', (byte) 'f'};
- byte[] expected3 = {(byte) 'b', (byte) 'c', (byte) 'd'};
- byte[] expected4 = {(byte) 'g', (byte) 'h', (byte) 'i'};
-
- assertEquals(testFileLength, inputStream.available());
- assertEquals(0, inputStream.getPos());
-
- int n = 3;
- long skipped = inputStream.skip(n);
-
- assertEquals(skipped, inputStream.getPos());
- assertEquals(testFileLength - inputStream.getPos(),
- inputStream.available());
- assertEquals(skipped, n);
-
- byte[] buffer = new byte[3];
- int bytesRead = inputStream.read(buffer);
- assertEquals(buffer.length, bytesRead);
- assertArrayEquals(expected2, buffer);
- assertEquals(buffer.length + skipped, inputStream.getPos());
- assertEquals(testFileLength - inputStream.getPos(),
- inputStream.available());
-
- // does skip still work after seek?
- int seekPos = 1;
- inputStream.seek(seekPos);
-
- bytesRead = inputStream.read(buffer);
- assertEquals(buffer.length, bytesRead);
- assertArrayEquals(expected3, buffer);
- assertEquals(buffer.length + seekPos, inputStream.getPos());
- assertEquals(testFileLength - inputStream.getPos(),
- inputStream.available());
-
- long currentPosition = inputStream.getPos();
- n = 2;
- skipped = inputStream.skip(n);
-
- assertEquals(currentPosition + skipped, inputStream.getPos());
- assertEquals(testFileLength - inputStream.getPos(),
- inputStream.available());
- assertEquals(skipped, n);
-
- bytesRead = inputStream.read(buffer);
- assertEquals(buffer.length, bytesRead);
- assertArrayEquals(expected4, buffer);
- assertEquals(buffer.length + skipped + currentPosition,
- inputStream.getPos());
- assertEquals(testFileLength - inputStream.getPos(),
- inputStream.available());
- }
- }
-
- /**
- * Ensures parity in the performance of sequential read for
- * version 1 and version 2 of the block blob input stream.
- * @throws IOException
- */
- @Test
- public void test_0315_SequentialReadPerformance() throws IOException {
- assumeHugeFileExists();
- final int maxAttempts = 10;
- final double maxAcceptableRatio = 1.01;
- double v1ElapsedMs = 0, v2ElapsedMs = 0;
- double ratio = Double.MAX_VALUE;
- for (int i = 0; i < maxAttempts && ratio >= maxAcceptableRatio; i++) {
- v1ElapsedMs = sequentialRead(1,
- accountUsingInputStreamV1.getFileSystem(), false);
- v2ElapsedMs = sequentialRead(2,
- accountUsingInputStreamV2.getFileSystem(), false);
- ratio = v2ElapsedMs / v1ElapsedMs;
- LOG.info(String.format(
- "v1ElapsedMs=%1$d, v2ElapsedMs=%2$d, ratio=%3$.2f",
- (long) v1ElapsedMs,
- (long) v2ElapsedMs,
- ratio));
- }
- assertTrue(String.format(
- "Performance of version 2 is not acceptable: v1ElapsedMs=%1$d,"
- + " v2ElapsedMs=%2$d, ratio=%3$.2f",
- (long) v1ElapsedMs,
- (long) v2ElapsedMs,
- ratio),
- ratio < maxAcceptableRatio);
- }
-
- /**
- * Ensures parity in the performance of sequential read after reverse seek for
- * version 2 of the block blob input stream.
- * @throws IOException
- */
- @Test
- public void test_0316_SequentialReadAfterReverseSeekPerformanceV2()
- throws IOException {
- assumeHugeFileExists();
- final int maxAttempts = 10;
- final double maxAcceptableRatio = 1.01;
- double beforeSeekElapsedMs = 0, afterSeekElapsedMs = 0;
- double ratio = Double.MAX_VALUE;
- for (int i = 0; i < maxAttempts && ratio >= maxAcceptableRatio; i++) {
- beforeSeekElapsedMs = sequentialRead(2,
- accountUsingInputStreamV2.getFileSystem(), false);
- afterSeekElapsedMs = sequentialRead(2,
- accountUsingInputStreamV2.getFileSystem(), true);
- ratio = afterSeekElapsedMs / beforeSeekElapsedMs;
- LOG.info(String.format(
- "beforeSeekElapsedMs=%1$d, afterSeekElapsedMs=%2$d, ratio=%3$.2f",
- (long) beforeSeekElapsedMs,
- (long) afterSeekElapsedMs,
- ratio));
- }
- assertTrue(String.format(
- "Performance of version 2 after reverse seek is not acceptable:"
- + " beforeSeekElapsedMs=%1$d, afterSeekElapsedMs=%2$d,"
- + " ratio=%3$.2f",
- (long) beforeSeekElapsedMs,
- (long) afterSeekElapsedMs,
- ratio),
- ratio < maxAcceptableRatio);
- }
-
- private long sequentialRead(int version,
- FileSystem fs,
- boolean afterReverseSeek) throws IOException {
- byte[] buffer = new byte[16 * KILOBYTE];
- long totalBytesRead = 0;
- long bytesRead = 0;
-
- try (FSDataInputStream inputStream = fs.open(TEST_FILE_PATH)) {
- if (afterReverseSeek) {
- // Prime the stream with ~4 MB of reads before seeking back to 0.
- do {
- bytesRead = inputStream.read(buffer);
- totalBytesRead += bytesRead;
- } while (bytesRead > 0 && totalBytesRead < 4 * MEGABYTE);
- totalBytesRead = 0;
- inputStream.seek(0);
- }
-
- NanoTimer timer = new NanoTimer();
- while ((bytesRead = inputStream.read(buffer)) > 0) {
- totalBytesRead += bytesRead;
- }
- long elapsedTimeMs = timer.elapsedTimeMs();
-
- LOG.info(String.format(
- "v%1$d: bytesRead=%2$d, elapsedMs=%3$d, Mbps=%4$.2f,"
- + " afterReverseSeek=%5$s",
- version,
- totalBytesRead,
- elapsedTimeMs,
- toMbps(totalBytesRead, elapsedTimeMs),
- afterReverseSeek));
-
- assertEquals(testFileLength, totalBytesRead);
- return elapsedTimeMs;
- }
- }
-
- @Test
- public void test_0317_RandomReadPerformance() throws IOException {
- assumeHugeFileExists();
- final int maxAttempts = 10;
- final double maxAcceptableRatio = 0.10;
- double v1ElapsedMs = 0, v2ElapsedMs = 0;
- double ratio = Double.MAX_VALUE;
- for (int i = 0; i < maxAttempts && ratio >= maxAcceptableRatio; i++) {
- v1ElapsedMs = randomRead(1,
- accountUsingInputStreamV1.getFileSystem());
- v2ElapsedMs = randomRead(2,
- accountUsingInputStreamV2.getFileSystem());
- ratio = v2ElapsedMs / v1ElapsedMs;
- LOG.info(String.format(
- "v1ElapsedMs=%1$d, v2ElapsedMs=%2$d, ratio=%3$.2f",
- (long) v1ElapsedMs,
- (long) v2ElapsedMs,
- ratio));
- }
- assertTrue(String.format(
- "Performance of version 2 is not acceptable: v1ElapsedMs=%1$d,"
- + " v2ElapsedMs=%2$d, ratio=%3$.2f",
- (long) v1ElapsedMs,
- (long) v2ElapsedMs,
- ratio),
- ratio < maxAcceptableRatio);
- }
-
- private long randomRead(int version, FileSystem fs) throws IOException {
- assumeHugeFileExists();
- final int minBytesToRead = 2 * MEGABYTE;
- Random random = new Random();
- byte[] buffer = new byte[8 * KILOBYTE];
- long totalBytesRead = 0;
- long bytesRead = 0;
- try (FSDataInputStream inputStream = fs.open(TEST_FILE_PATH)) {
- NanoTimer timer = new NanoTimer();
-
- do {
- bytesRead = inputStream.read(buffer);
- totalBytesRead += bytesRead;
- inputStream.seek(random.nextInt(
- (int) (testFileLength - buffer.length)));
- } while (bytesRead > 0 && totalBytesRead < minBytesToRead);
-
- long elapsedTimeMs = timer.elapsedTimeMs();
-
- LOG.info(String.format(
- "v%1$d: totalBytesRead=%2$d, elapsedTimeMs=%3$d, Mbps=%4$.2f",
- version,
- totalBytesRead,
- elapsedTimeMs,
- toMbps(totalBytesRead, elapsedTimeMs)));
-
- assertTrue(minBytesToRead <= totalBytesRead);
-
- return elapsedTimeMs;
- }
- }
-
- @Test
- public void test_999_DeleteHugeFiles() throws IOException {
- ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer();
- fs.delete(TEST_FILE_PATH, false);
- timer.end("time to delete %s", TEST_FILE_PATH);
- }
-
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestClientThrottlingAnalyzer.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestClientThrottlingAnalyzer.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestClientThrottlingAnalyzer.java
index 307e5af..c2496d7 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestClientThrottlingAnalyzer.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestClientThrottlingAnalyzer.java
@@ -21,13 +21,10 @@ package org.apache.hadoop.fs.azure;
import org.apache.hadoop.fs.contract.ContractTestUtils.NanoTimer;
import org.junit.Test;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertEquals;
-
/**
* Tests for <code>ClientThrottlingAnalyzer</code>.
*/
-public class TestClientThrottlingAnalyzer {
+public class TestClientThrottlingAnalyzer extends AbstractWasbTestWithTimeout {
private static final int ANALYSIS_PERIOD = 1000;
private static final int ANALYSIS_PERIOD_PLUS_10_PERCENT = ANALYSIS_PERIOD
+ ANALYSIS_PERIOD / 10;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java
deleted file mode 100644
index 8aad9e9..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java
+++ /dev/null
@@ -1,569 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.contract.ContractTestUtils;
-import org.apache.hadoop.io.retry.RetryUtils;
-import org.apache.http.Header;
-import org.apache.http.HttpResponse;
-import org.apache.http.HttpEntity;
-import org.apache.http.HttpStatus;
-import org.apache.http.StatusLine;
-import org.apache.http.ProtocolVersion;
-import org.apache.http.ParseException;
-import org.apache.http.HeaderElement;
-import org.apache.http.client.HttpClient;
-import org.apache.http.client.methods.HttpGet;
-import org.hamcrest.Description;
-import org.hamcrest.TypeSafeMatcher;
-import org.junit.Assume;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.mockito.ArgumentMatcher;
-import org.mockito.Mockito;
-
-import java.io.ByteArrayInputStream;
-import java.net.InetAddress;
-import java.net.UnknownHostException;
-import java.nio.charset.StandardCharsets;
-
-import static org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.KEY_USE_SECURE_MODE;
-import static org.mockito.Matchers.argThat;
-import static org.mockito.Mockito.atLeast;
-import static org.mockito.Mockito.times;
-
-/**
- * Test class to hold all WasbRemoteCallHelper tests.
- */
-public class TestWasbRemoteCallHelper
- extends AbstractWasbTestBase {
- public static final String EMPTY_STRING = "";
- private static final int INVALID_HTTP_STATUS_CODE_999 = 999;
-
- @Override
- protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
- Configuration conf = new Configuration();
- conf.set(NativeAzureFileSystem.KEY_AZURE_AUTHORIZATION, "true");
- conf.set(RemoteWasbAuthorizerImpl.KEY_REMOTE_AUTH_SERVICE_URLS, "http://localhost1/,http://localhost2/,http://localhost:8080");
- return AzureBlobStorageTestAccount.create(conf);
- }
-
- @Before
- public void beforeMethod() {
- boolean useSecureMode = fs.getConf().getBoolean(KEY_USE_SECURE_MODE, false);
- boolean useAuthorization = fs.getConf().getBoolean(NativeAzureFileSystem.KEY_AZURE_AUTHORIZATION, false);
- Assume.assumeTrue("Test valid when both SecureMode and Authorization are enabled .. skipping",
- useSecureMode && useAuthorization);
-
- Assume.assumeTrue(
- useSecureMode && useAuthorization
- );
- }
-
- @Rule
- public ExpectedException expectedEx = ExpectedException.none();
-
- /**
- * Test invalid status-code
- * @throws Throwable
- */
- @Test // (expected = WasbAuthorizationException.class)
- public void testInvalidStatusCode() throws Throwable {
-
- setupExpectations();
-
- // set up mocks
- HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
- HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class);
- Mockito.when(mockHttpClient.execute(Mockito.<HttpGet>any())).thenReturn(mockHttpResponse);
- Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(INVALID_HTTP_STATUS_CODE_999));
- // finished setting up mocks
-
- performop(mockHttpClient);
- }
-
- /**
- * Test invalid Content-Type
- * @throws Throwable
- */
- @Test // (expected = WasbAuthorizationException.class)
- public void testInvalidContentType() throws Throwable {
-
- setupExpectations();
-
- // set up mocks
- HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
- HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class);
- Mockito.when(mockHttpClient.execute(Mockito.<HttpGet>any())).thenReturn(mockHttpResponse);
- Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(HttpStatus.SC_OK));
- Mockito.when(mockHttpResponse.getFirstHeader("Content-Type"))
- .thenReturn(newHeader("Content-Type", "text/plain"));
- // finished setting up mocks
-
- performop(mockHttpClient);
- }
-
- /**
- * Test missing Content-Length
- * @throws Throwable
- */
- @Test // (expected = WasbAuthorizationException.class)
- public void testMissingContentLength() throws Throwable {
-
- setupExpectations();
-
- // set up mocks
- HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
- HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class);
- Mockito.when(mockHttpClient.execute(Mockito.<HttpGet>any())).thenReturn(mockHttpResponse);
- Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(HttpStatus.SC_OK));
- Mockito.when(mockHttpResponse.getFirstHeader("Content-Type"))
- .thenReturn(newHeader("Content-Type", "application/json"));
- // finished setting up mocks
-
- performop(mockHttpClient);
- }
-
- /**
- * Test Content-Length exceeds max
- * @throws Throwable
- */
- @Test // (expected = WasbAuthorizationException.class)
- public void testContentLengthExceedsMax() throws Throwable {
-
- setupExpectations();
-
- // set up mocks
- HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
- HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class);
- Mockito.when(mockHttpClient.execute(Mockito.<HttpGet>any())).thenReturn(mockHttpResponse);
- Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(HttpStatus.SC_OK));
- Mockito.when(mockHttpResponse.getFirstHeader("Content-Type"))
- .thenReturn(newHeader("Content-Type", "application/json"));
- Mockito.when(mockHttpResponse.getFirstHeader("Content-Length"))
- .thenReturn(newHeader("Content-Length", "2048"));
- // finished setting up mocks
-
- performop(mockHttpClient);
- }
-
- /**
- * Test invalid Content-Length value
- * @throws Throwable
- */
- @Test // (expected = WasbAuthorizationException.class)
- public void testInvalidContentLengthValue() throws Throwable {
-
- setupExpectations();
-
- // set up mocks
- HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
- HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class);
- Mockito.when(mockHttpClient.execute(Mockito.<HttpGet>any())).thenReturn(mockHttpResponse);
- Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(HttpStatus.SC_OK));
- Mockito.when(mockHttpResponse.getFirstHeader("Content-Type"))
- .thenReturn(newHeader("Content-Type", "application/json"));
- Mockito.when(mockHttpResponse.getFirstHeader("Content-Length"))
- .thenReturn(newHeader("Content-Length", "20abc48"));
- // finished setting up mocks
-
- performop(mockHttpClient);
- }
-
- /**
- * Test valid JSON response
- * @throws Throwable
- */
- @Test
- public void testValidJSONResponse() throws Throwable {
-
- // set up mocks
- HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
-
- HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class);
- HttpEntity mockHttpEntity = Mockito.mock(HttpEntity.class);
-
- Mockito.when(mockHttpClient.execute(Mockito.<HttpGet>any())).thenReturn(mockHttpResponse);
- Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(HttpStatus.SC_OK));
- Mockito.when(mockHttpResponse.getFirstHeader("Content-Type"))
- .thenReturn(newHeader("Content-Type", "application/json"));
- Mockito.when(mockHttpResponse.getFirstHeader("Content-Length"))
- .thenReturn(newHeader("Content-Length", "1024"));
- Mockito.when(mockHttpResponse.getEntity()).thenReturn(mockHttpEntity);
- Mockito.when(mockHttpEntity.getContent())
- .thenReturn(new ByteArrayInputStream(validJsonResponse().getBytes(StandardCharsets.UTF_8)))
- .thenReturn(new ByteArrayInputStream(validJsonResponse().getBytes(StandardCharsets.UTF_8)))
- .thenReturn(new ByteArrayInputStream(validJsonResponse().getBytes(StandardCharsets.UTF_8)));
- // finished setting up mocks
-
- performop(mockHttpClient);
- }
-
- /**
- * Test malformed JSON response
- * @throws Throwable
- */
- @Test // (expected = WasbAuthorizationException.class)
- public void testMalFormedJSONResponse() throws Throwable {
-
- expectedEx.expect(WasbAuthorizationException.class);
- expectedEx.expectMessage("com.fasterxml.jackson.core.JsonParseException: Unexpected end-of-input in FIELD_NAME");
-
- // set up mocks
- HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
-
- HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class);
- HttpEntity mockHttpEntity = Mockito.mock(HttpEntity.class);
-
- Mockito.when(mockHttpClient.execute(Mockito.<HttpGet>any())).thenReturn(mockHttpResponse);
- Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(HttpStatus.SC_OK));
- Mockito.when(mockHttpResponse.getFirstHeader("Content-Type"))
- .thenReturn(newHeader("Content-Type", "application/json"));
- Mockito.when(mockHttpResponse.getFirstHeader("Content-Length"))
- .thenReturn(newHeader("Content-Length", "1024"));
- Mockito.when(mockHttpResponse.getEntity()).thenReturn(mockHttpEntity);
- Mockito.when(mockHttpEntity.getContent())
- .thenReturn(new ByteArrayInputStream(malformedJsonResponse().getBytes(StandardCharsets.UTF_8)));
- // finished setting up mocks
-
- performop(mockHttpClient);
- }
-
- /**
- * Test valid JSON response failure response code
- * @throws Throwable
- */
- @Test // (expected = WasbAuthorizationException.class)
- public void testFailureCodeJSONResponse() throws Throwable {
-
- expectedEx.expect(WasbAuthorizationException.class);
- expectedEx.expectMessage("Remote authorization service encountered an error Unauthorized");
-
- // set up mocks
- HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
-
- HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class);
- HttpEntity mockHttpEntity = Mockito.mock(HttpEntity.class);
-
- Mockito.when(mockHttpClient.execute(Mockito.<HttpGet>any())).thenReturn(mockHttpResponse);
- Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(HttpStatus.SC_OK));
- Mockito.when(mockHttpResponse.getFirstHeader("Content-Type"))
- .thenReturn(newHeader("Content-Type", "application/json"));
- Mockito.when(mockHttpResponse.getFirstHeader("Content-Length"))
- .thenReturn(newHeader("Content-Length", "1024"));
- Mockito.when(mockHttpResponse.getEntity()).thenReturn(mockHttpEntity);
- Mockito.when(mockHttpEntity.getContent())
- .thenReturn(new ByteArrayInputStream(failureCodeJsonResponse().getBytes(StandardCharsets.UTF_8)));
- // finished setting up mocks
-
- performop(mockHttpClient);
- }
-
- @Test
- public void testWhenOneInstanceIsDown() throws Throwable {
-
- boolean isAuthorizationCachingEnabled = fs.getConf().getBoolean(CachingAuthorizer.KEY_AUTH_SERVICE_CACHING_ENABLE, false);
-
- // set up mocks
- HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
- HttpEntity mockHttpEntity = Mockito.mock(HttpEntity.class);
-
- HttpResponse mockHttpResponseService1 = Mockito.mock(HttpResponse.class);
- Mockito.when(mockHttpResponseService1.getStatusLine())
- .thenReturn(newStatusLine(HttpStatus.SC_INTERNAL_SERVER_ERROR));
- Mockito.when(mockHttpResponseService1.getFirstHeader("Content-Type"))
- .thenReturn(newHeader("Content-Type", "application/json"));
- Mockito.when(mockHttpResponseService1.getFirstHeader("Content-Length"))
- .thenReturn(newHeader("Content-Length", "1024"));
- Mockito.when(mockHttpResponseService1.getEntity())
- .thenReturn(mockHttpEntity);
-
- HttpResponse mockHttpResponseService2 = Mockito.mock(HttpResponse.class);
- Mockito.when(mockHttpResponseService2.getStatusLine())
- .thenReturn(newStatusLine(HttpStatus.SC_OK));
- Mockito.when(mockHttpResponseService2.getFirstHeader("Content-Type"))
- .thenReturn(newHeader("Content-Type", "application/json"));
- Mockito.when(mockHttpResponseService2.getFirstHeader("Content-Length"))
- .thenReturn(newHeader("Content-Length", "1024"));
- Mockito.when(mockHttpResponseService2.getEntity())
- .thenReturn(mockHttpEntity);
-
- HttpResponse mockHttpResponseServiceLocal = Mockito.mock(HttpResponse.class);
- Mockito.when(mockHttpResponseServiceLocal.getStatusLine())
- .thenReturn(newStatusLine(HttpStatus.SC_INTERNAL_SERVER_ERROR));
- Mockito.when(mockHttpResponseServiceLocal.getFirstHeader("Content-Type"))
- .thenReturn(newHeader("Content-Type", "application/json"));
- Mockito.when(mockHttpResponseServiceLocal.getFirstHeader("Content-Length"))
- .thenReturn(newHeader("Content-Length", "1024"));
- Mockito.when(mockHttpResponseServiceLocal.getEntity())
- .thenReturn(mockHttpEntity);
-
- class HttpGetForService1 extends ArgumentMatcher<HttpGet> {
- @Override public boolean matches(Object o) {
- return checkHttpGetMatchHost((HttpGet) o, "localhost1");
- }
- }
- class HttpGetForService2 extends ArgumentMatcher<HttpGet> {
- @Override public boolean matches(Object o) {
- return checkHttpGetMatchHost((HttpGet) o, "localhost2");
- }
- }
- class HttpGetForServiceLocal extends ArgumentMatcher<HttpGet> {
- @Override public boolean matches(Object o) {
- try {
- return checkHttpGetMatchHost((HttpGet) o, InetAddress.getLocalHost().getCanonicalHostName());
- } catch (UnknownHostException e) {
- return checkHttpGetMatchHost((HttpGet) o, "localhost");
- }
- }
- }
- Mockito.when(mockHttpClient.execute(argThat(new HttpGetForService1())))
- .thenReturn(mockHttpResponseService1);
- Mockito.when(mockHttpClient.execute(argThat(new HttpGetForService2())))
- .thenReturn(mockHttpResponseService2);
- Mockito.when(mockHttpClient.execute(argThat(new HttpGetForServiceLocal())))
- .thenReturn(mockHttpResponseServiceLocal);
-
- // Need 3 times because performop() does 3 fs operations.
- Mockito.when(mockHttpEntity.getContent())
- .thenReturn(new ByteArrayInputStream(validJsonResponse()
- .getBytes(StandardCharsets.UTF_8)))
- .thenReturn(new ByteArrayInputStream(validJsonResponse()
- .getBytes(StandardCharsets.UTF_8)))
- .thenReturn(new ByteArrayInputStream(validJsonResponse()
- .getBytes(StandardCharsets.UTF_8)));
- // finished setting up mocks
-
- performop(mockHttpClient);
-
- int expectedNumberOfInvocations = isAuthorizationCachingEnabled ? 1 : 2;
- Mockito.verify(mockHttpClient, times(expectedNumberOfInvocations)).execute(Mockito.argThat(new HttpGetForServiceLocal()));
- Mockito.verify(mockHttpClient, times(expectedNumberOfInvocations)).execute(Mockito.argThat(new HttpGetForService2()));
- }
-
- @Test
- public void testWhenServiceInstancesAreDown() throws Throwable {
- //expectedEx.expect(WasbAuthorizationException.class);
- // set up mocks
- HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
- HttpEntity mockHttpEntity = Mockito.mock(HttpEntity.class);
-
- HttpResponse mockHttpResponseService1 = Mockito.mock(HttpResponse.class);
- Mockito.when(mockHttpResponseService1.getStatusLine())
- .thenReturn(newStatusLine(HttpStatus.SC_INTERNAL_SERVER_ERROR));
- Mockito.when(mockHttpResponseService1.getFirstHeader("Content-Type"))
- .thenReturn(newHeader("Content-Type", "application/json"));
- Mockito.when(mockHttpResponseService1.getFirstHeader("Content-Length"))
- .thenReturn(newHeader("Content-Length", "1024"));
- Mockito.when(mockHttpResponseService1.getEntity())
- .thenReturn(mockHttpEntity);
-
- HttpResponse mockHttpResponseService2 = Mockito.mock(HttpResponse.class);
- Mockito.when(mockHttpResponseService2.getStatusLine())
- .thenReturn(newStatusLine(
- HttpStatus.SC_INTERNAL_SERVER_ERROR));
- Mockito.when(mockHttpResponseService2.getFirstHeader("Content-Type"))
- .thenReturn(newHeader("Content-Type", "application/json"));
- Mockito.when(mockHttpResponseService2.getFirstHeader("Content-Length"))
- .thenReturn(newHeader("Content-Length", "1024"));
- Mockito.when(mockHttpResponseService2.getEntity())
- .thenReturn(mockHttpEntity);
-
- HttpResponse mockHttpResponseService3 = Mockito.mock(HttpResponse.class);
- Mockito.when(mockHttpResponseService3.getStatusLine())
- .thenReturn(newStatusLine(
- HttpStatus.SC_INTERNAL_SERVER_ERROR));
- Mockito.when(mockHttpResponseService3.getFirstHeader("Content-Type"))
- .thenReturn(newHeader("Content-Type", "application/json"));
- Mockito.when(mockHttpResponseService3.getFirstHeader("Content-Length"))
- .thenReturn(newHeader("Content-Length", "1024"));
- Mockito.when(mockHttpResponseService3.getEntity())
- .thenReturn(mockHttpEntity);
-
- class HttpGetForService1 extends ArgumentMatcher<HttpGet> {
- @Override public boolean matches(Object o) {
- return checkHttpGetMatchHost((HttpGet) o, "localhost1");
- }
- }
- class HttpGetForService2 extends ArgumentMatcher<HttpGet> {
- @Override public boolean matches(Object o) {
- return checkHttpGetMatchHost((HttpGet) o, "localhost2");
- }
- }
- class HttpGetForService3 extends ArgumentMatcher<HttpGet> {
- @Override public boolean matches(Object o) {
- try {
- return checkHttpGetMatchHost((HttpGet) o, InetAddress.getLocalHost().getCanonicalHostName());
- } catch (UnknownHostException e) {
- return checkHttpGetMatchHost((HttpGet) o, "localhost");
- }
- }
- }
- Mockito.when(mockHttpClient.execute(argThat(new HttpGetForService1())))
- .thenReturn(mockHttpResponseService1);
- Mockito.when(mockHttpClient.execute(argThat(new HttpGetForService2())))
- .thenReturn(mockHttpResponseService2);
- Mockito.when(mockHttpClient.execute(argThat(new HttpGetForService3())))
- .thenReturn(mockHttpResponseService3);
-
- //Need 3 times because performop() does 3 fs operations.
- Mockito.when(mockHttpEntity.getContent())
- .thenReturn(new ByteArrayInputStream(
- validJsonResponse().getBytes(StandardCharsets.UTF_8)))
- .thenReturn(new ByteArrayInputStream(
- validJsonResponse().getBytes(StandardCharsets.UTF_8)))
- .thenReturn(new ByteArrayInputStream(
- validJsonResponse().getBytes(StandardCharsets.UTF_8)));
- // finished setting up mocks
- try {
- performop(mockHttpClient);
- } catch (WasbAuthorizationException e) {
- e.printStackTrace();
- Mockito.verify(mockHttpClient, atLeast(2))
- .execute(argThat(new HttpGetForService1()));
- Mockito.verify(mockHttpClient, atLeast(2))
- .execute(argThat(new HttpGetForService2()));
- Mockito.verify(mockHttpClient, atLeast(3))
- .execute(argThat(new HttpGetForService3()));
- Mockito.verify(mockHttpClient, times(7)).execute(Mockito.<HttpGet>any());
- }
- }
-
- private void setupExpectations() {
- expectedEx.expect(WasbAuthorizationException.class);
-
- class MatchesPattern extends TypeSafeMatcher<String> {
- private String pattern;
-
- MatchesPattern(String pattern) {
- this.pattern = pattern;
- }
-
- @Override protected boolean matchesSafely(String item) {
- return item.matches(pattern);
- }
-
- @Override public void describeTo(Description description) {
- description.appendText("matches pattern ").appendValue(pattern);
- }
-
- @Override protected void describeMismatchSafely(String item,
- Description mismatchDescription) {
- mismatchDescription.appendText("does not match");
- }
- }
-
- expectedEx.expectMessage(new MatchesPattern(
- "org\\.apache\\.hadoop\\.fs\\.azure\\.WasbRemoteCallException: "
- + "Encountered error while making remote call to "
- + "http:\\/\\/localhost1\\/,http:\\/\\/localhost2\\/,http:\\/\\/localhost:8080 retried 6 time\\(s\\)\\."));
- }
-
- private void performop(HttpClient mockHttpClient) throws Throwable {
-
- Path testPath = new Path("/", "test.dat");
-
- RemoteWasbAuthorizerImpl authorizer = new RemoteWasbAuthorizerImpl();
- authorizer.init(fs.getConf());
- WasbRemoteCallHelper mockWasbRemoteCallHelper = new WasbRemoteCallHelper(
- RetryUtils.getMultipleLinearRandomRetry(new Configuration(),
- EMPTY_STRING, true,
- EMPTY_STRING, "1000,3,10000,2"));
- mockWasbRemoteCallHelper.updateHttpClient(mockHttpClient);
- authorizer.updateWasbRemoteCallHelper(mockWasbRemoteCallHelper);
- fs.updateWasbAuthorizer(authorizer);
-
- fs.create(testPath);
- ContractTestUtils.assertPathExists(fs, "testPath was not created", testPath);
- fs.delete(testPath, false);
- }
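On a hedged reading, the retry specification string passed above follows the pair format of Hadoop's MultipleLinearRandomRetry, i.e. alternating (sleepTimeMs, numberOfRetries) values:

    // Assumed interpretation of "1000,3,10000,2": up to 3 retries sleeping
    // ~1000 ms between attempts, then up to 2 more sleeping ~10000 ms.
    RetryPolicy policy = RetryUtils.getMultipleLinearRandomRetry(
        new Configuration(), "", true, "", "1000,3,10000,2");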
-
- private String validJsonResponse() {
- return "{"
- + "\"responseCode\": 0,"
- + "\"authorizationResult\": true,"
- + "\"responseMessage\": \"Authorized\""
- + "}";
- }
-
- private String malformedJsonResponse() {
- return "{"
- + "\"responseCode\": 0,"
- + "\"authorizationResult\": true,"
- + "\"responseMessage\":";
- }
-
- private String failureCodeJsonResponse() {
- return "{"
- + "\"responseCode\": 1,"
- + "\"authorizationResult\": false,"
- + "\"responseMessage\": \"Unauthorized\""
- + "}";
- }
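Pretty-printed, the success payload above reads:

    {
      "responseCode": 0,
      "authorizationResult": true,
      "responseMessage": "Authorized"
    }

The failure variant sets responseCode to 1, authorizationResult to false, and responseMessage to "Unauthorized", which testFailureCodeJSONResponse expects to surface as a WasbAuthorizationException.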
-
- private StatusLine newStatusLine(int statusCode) {
- return new StatusLine() {
- @Override
- public ProtocolVersion getProtocolVersion() {
- return new ProtocolVersion("HTTP", 1, 1);
- }
-
- @Override
- public int getStatusCode() {
- return statusCode;
- }
-
- @Override
- public String getReasonPhrase() {
- return "Reason Phrase";
- }
- };
- }
-
- private Header newHeader(String name, String value) {
- return new Header() {
- @Override
- public String getName() {
- return name;
- }
-
- @Override
- public String getValue() {
- return value;
- }
-
- @Override
- public HeaderElement[] getElements() throws ParseException {
- return new HeaderElement[0];
- }
- };
- }
-
- /** Check that an HttpGet request targets the given remote host. */
- private static boolean checkHttpGetMatchHost(HttpGet g, String h) {
- return g != null && g.getURI().getHost().equals(h);
- }
-
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbUriAndConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbUriAndConfiguration.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbUriAndConfiguration.java
deleted file mode 100644
index 672ed9c..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbUriAndConfiguration.java
+++ /dev/null
@@ -1,617 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.junit.Assume.assumeNotNull;
-
-import java.io.ByteArrayInputStream;
-import java.io.DataInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.net.URI;
-import java.util.Date;
-import java.util.EnumSet;
-import java.io.File;
-
-import org.apache.hadoop.security.ProviderUtils;
-import org.apache.hadoop.security.alias.CredentialProvider;
-import org.apache.hadoop.security.alias.CredentialProviderFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.AbstractFileSystem;
-import org.apache.hadoop.fs.FileContext;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.UnsupportedFileSystemException;
-import org.apache.hadoop.fs.azure.AzureBlobStorageTestAccount.CreateOptions;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Assume;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import com.microsoft.azure.storage.blob.CloudBlobContainer;
-import com.microsoft.azure.storage.blob.CloudBlockBlob;
-
-public class TestWasbUriAndConfiguration {
-
- private static final int FILE_SIZE = 4096;
- private static final String PATH_DELIMITER = "/";
-
- protected String accountName;
- protected String accountKey;
- protected static Configuration conf = null;
- private boolean runningInSASMode = false;
- @Rule
- public final TemporaryFolder tempDir = new TemporaryFolder();
-
- private AzureBlobStorageTestAccount testAccount;
-
- @After
- public void tearDown() throws Exception {
- if (testAccount != null) {
- testAccount.cleanup();
- testAccount = null;
- }
- }
-
- @Before
- public void setMode() {
- runningInSASMode = AzureBlobStorageTestAccount.createTestConfiguration().
- getBoolean(AzureNativeFileSystemStore.KEY_USE_SECURE_MODE, false);
- }
-
- private boolean validateIOStreams(Path filePath) throws IOException {
- // Capture the file system from the test account.
- FileSystem fs = testAccount.getFileSystem();
- return validateIOStreams(fs, filePath);
- }
-
- private boolean validateIOStreams(FileSystem fs, Path filePath)
- throws IOException {
-
- // Create and write a file
- OutputStream outputStream = fs.create(filePath);
- outputStream.write(new byte[FILE_SIZE]);
- outputStream.close();
-
- // Return true if the byte count read back equals the file size.
- return (FILE_SIZE == readInputStream(fs, filePath));
- }
-
- private int readInputStream(Path filePath) throws IOException {
- // Capture the file system from the test account.
- FileSystem fs = testAccount.getFileSystem();
- return readInputStream(fs, filePath);
- }
-
- private int readInputStream(FileSystem fs, Path filePath) throws IOException {
- // Read the file
- InputStream inputStream = fs.open(filePath);
- int count = 0;
- while (inputStream.read() >= 0) {
- count++;
- }
- inputStream.close();
-
- // Return the number of bytes read.
- return count;
- }
-
- // Positive tests to exercise making a connection to an Azure account using
- // an account key.
- @Test
- public void testConnectUsingKey() throws Exception {
-
- testAccount = AzureBlobStorageTestAccount.create();
- assumeNotNull(testAccount);
-
- // Validate input and output on the connection.
- assertTrue(validateIOStreams(new Path("/wasb_scheme")));
- }
-
- @Test
- public void testConnectUsingSAS() throws Exception {
-
- Assume.assumeFalse(runningInSASMode);
- // Create the test account with SAS credentials.
- testAccount = AzureBlobStorageTestAccount.create("",
- EnumSet.of(CreateOptions.UseSas, CreateOptions.CreateContainer));
- assumeNotNull(testAccount);
- // Validate input and output on the connection.
- // NOTE: As of 4/15/2013, Azure Storage has a deficiency that prevents the
- // full scenario from working (CopyFromBlob doesn't work with SAS), so
- // just do a minor check until that is corrected.
- assertFalse(testAccount.getFileSystem().exists(new Path("/IDontExist")));
- //assertTrue(validateIOStreams(new Path("/sastest.txt")));
- }
-
- @Test
- public void testConnectUsingSASReadonly() throws Exception {
-
- Assume.assumeFalse(runningInSASMode);
- // Create the test account with SAS credentials.
- testAccount = AzureBlobStorageTestAccount.create("", EnumSet.of(
- CreateOptions.UseSas, CreateOptions.CreateContainer,
- CreateOptions.Readonly));
- assumeNotNull(testAccount);
-
- // Create a blob in there
- final String blobKey = "blobForReadonly";
- CloudBlobContainer container = testAccount.getRealContainer();
- CloudBlockBlob blob = container.getBlockBlobReference(blobKey);
- ByteArrayInputStream inputStream = new ByteArrayInputStream(new byte[] { 1,
- 2, 3 });
- blob.upload(inputStream, 3);
- inputStream.close();
-
- // Make sure we can read it from the file system
- Path filePath = new Path("/" + blobKey);
- FileSystem fs = testAccount.getFileSystem();
- assertTrue(fs.exists(filePath));
- byte[] obtained = new byte[3];
- DataInputStream obtainedInputStream = fs.open(filePath);
- obtainedInputStream.readFully(obtained);
- obtainedInputStream.close();
- assertEquals(3, obtained[2]);
- }
-
- @Test
- public void testConnectUsingAnonymous() throws Exception {
-
- // Create test account with anonymous credentials
- testAccount = AzureBlobStorageTestAccount.createAnonymous("testWasb.txt",
- FILE_SIZE);
- assumeNotNull(testAccount);
-
- // Read the file from the public folder using anonymous credentials.
- assertEquals(FILE_SIZE, readInputStream(new Path("/testWasb.txt")));
- }
-
- @Test
- public void testConnectToEmulator() throws Exception {
- testAccount = AzureBlobStorageTestAccount.createForEmulator();
- assumeNotNull(testAccount);
- assertTrue(validateIOStreams(new Path("/testFile")));
- }
-
- /**
- * Tests that we can connect to fully qualified accounts outside of
- * blob.core.windows.net
- */
- @Test
- public void testConnectToFullyQualifiedAccountMock() throws Exception {
- Configuration conf = new Configuration();
- AzureBlobStorageTestAccount.setMockAccountKey(conf,
- "mockAccount.mock.authority.net");
- AzureNativeFileSystemStore store = new AzureNativeFileSystemStore();
- MockStorageInterface mockStorage = new MockStorageInterface();
- store.setAzureStorageInteractionLayer(mockStorage);
- NativeAzureFileSystem fs = new NativeAzureFileSystem(store);
- fs.initialize(
- new URI("wasb://mockContainer@mockAccount.mock.authority.net"), conf);
- fs.createNewFile(new Path("/x"));
- assertTrue(mockStorage.getBackingStore().exists(
- "http://mockAccount.mock.authority.net/mockContainer/x"));
- fs.close();
- }
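As an illustrative note derived from the assertion above, the mock makes the store's URI translation visible:

    // wasb://mockContainer@mockAccount.mock.authority.net/x
    //   is resolved against the backing store as
    // http://mockAccount.mock.authority.net/mockContainer/x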
-
- public void testConnectToRoot() throws Exception {
-
- // Set up blob names.
- final String blobPrefix = String.format("wasbtests-%s-%tQ-blob",
- System.getProperty("user.name"), new Date());
- final String inblobName = blobPrefix + "_In" + ".txt";
- final String outblobName = blobPrefix + "_Out" + ".txt";
-
- // Create test account with default root access.
- testAccount = AzureBlobStorageTestAccount.createRoot(inblobName, FILE_SIZE);
- assumeNotNull(testAccount);
-
- // Read the file from the default container.
- assertEquals(FILE_SIZE, readInputStream(new Path(PATH_DELIMITER
- + inblobName)));
-
- try {
- // Capture file system.
- FileSystem fs = testAccount.getFileSystem();
-
- // Create output path and open an output stream to the root folder.
- Path outputPath = new Path(PATH_DELIMITER + outblobName);
- OutputStream outputStream = fs.create(outputPath);
- outputStream.write(new byte[FILE_SIZE]);
- outputStream.close();
- fail("Expected an AzureException when writing to the root folder.");
- } catch (AzureException e) {
- // Expected: writes to the root folder are rejected.
- } catch (Exception e) {
- fail(String.format("Expected AzureException but got %s instead.", e));
- }
- }
-
- // Positive tests to exercise the throttling I/O path. Connections are made
- // to an Azure account using an account key.
- //
- public void testConnectWithThrottling() throws Exception {
-
- testAccount = AzureBlobStorageTestAccount.createThrottled();
-
- // Validate input and output on the connection.
- assertTrue(validateIOStreams(new Path("/wasb_scheme")));
- }
-
- /**
- * Creates a file and writes a single byte with the given value in it.
- */
- private static void writeSingleByte(FileSystem fs, Path testFile, int toWrite)
- throws Exception {
- OutputStream outputStream = fs.create(testFile);
- outputStream.write(toWrite);
- outputStream.close();
- }
-
- /**
- * Reads the file given and makes sure that it's a single-byte file with the
- * given value in it.
- */
- private static void assertSingleByteValue(FileSystem fs, Path testFile,
- int expectedValue) throws Exception {
- InputStream inputStream = fs.open(testFile);
- int byteRead = inputStream.read();
- assertTrue("File unexpectedly empty: " + testFile, byteRead >= 0);
- assertTrue("File has more than a single byte: " + testFile,
- inputStream.read() < 0);
- inputStream.close();
- assertEquals("Unxpected content in: " + testFile, expectedValue, byteRead);
- }
-
- @Test
- public void testMultipleContainers() throws Exception {
- AzureBlobStorageTestAccount firstAccount =
- AzureBlobStorageTestAccount.create("first");
- AzureBlobStorageTestAccount secondAccount =
- AzureBlobStorageTestAccount.create("second");
- assumeNotNull(firstAccount);
- assumeNotNull(secondAccount);
- try {
- FileSystem firstFs = firstAccount.getFileSystem(),
- secondFs = secondAccount.getFileSystem();
- Path testFile = new Path("/testWasb");
- assertTrue(validateIOStreams(firstFs, testFile));
- assertTrue(validateIOStreams(secondFs, testFile));
- // Make sure that we're really dealing with two file systems here.
- writeSingleByte(firstFs, testFile, 5);
- writeSingleByte(secondFs, testFile, 7);
- assertSingleByteValue(firstFs, testFile, 5);
- assertSingleByteValue(secondFs, testFile, 7);
- } finally {
- firstAccount.cleanup();
- secondAccount.cleanup();
- }
- }
-
- @Test
- public void testDefaultKeyProvider() throws Exception {
- Configuration conf = new Configuration();
- String account = "testacct";
- String key = "testkey";
-
- conf.set(SimpleKeyProvider.KEY_ACCOUNT_KEY_PREFIX + account, key);
-
- String result = AzureNativeFileSystemStore.getAccountKeyFromConfiguration(
- account, conf);
- assertEquals(key, result);
- }
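The same lookup can be written without the constant; a minimal sketch, assuming SimpleKeyProvider.KEY_ACCOUNT_KEY_PREFIX resolves to "fs.azure.account.key." (an assumption the test itself avoids by using the constant):

    Configuration conf = new Configuration();
    // Assumed literal form of KEY_ACCOUNT_KEY_PREFIX + account:
    conf.set("fs.azure.account.key.testacct", "testkey");
    // getAccountKeyFromConfiguration("testacct", conf) then returns "testkey".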
-
- @Test
- public void testCredsFromCredentialProvider() throws Exception {
-
- Assume.assumeFalse(runningInSASMode);
- String account = "testacct";
- String key = "testkey";
- // set up conf to have a cred provider
- final Configuration conf = new Configuration();
- final File file = tempDir.newFile("test.jks");
- final URI jks = ProviderUtils.nestURIForLocalJavaKeyStoreProvider(
- file.toURI());
- conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,
- jks.toString());
-
- provisionAccountKey(conf, account, key);
-
- // also add to configuration as clear text that should be overridden
- conf.set(SimpleKeyProvider.KEY_ACCOUNT_KEY_PREFIX + account,
- key + "cleartext");
-
- String result = AzureNativeFileSystemStore.getAccountKeyFromConfiguration(
- account, conf);
- // result should contain the credential provider key not the config key
- assertEquals("AccountKey incorrect.", key, result);
- }
-
- void provisionAccountKey(
- final Configuration conf, String account, String key) throws Exception {
- // add our creds to the provider
- final CredentialProvider provider =
- CredentialProviderFactory.getProviders(conf).get(0);
- provider.createCredentialEntry(
- SimpleKeyProvider.KEY_ACCOUNT_KEY_PREFIX + account, key.toCharArray());
- provider.flush();
- }
-
- @Test
- public void testValidKeyProvider() throws Exception {
- Configuration conf = new Configuration();
- String account = "testacct";
- String key = "testkey";
-
- conf.set(SimpleKeyProvider.KEY_ACCOUNT_KEY_PREFIX + account, key);
- conf.setClass("fs.azure.account.keyprovider." + account,
- SimpleKeyProvider.class, KeyProvider.class);
- String result = AzureNativeFileSystemStore.getAccountKeyFromConfiguration(
- account, conf);
- assertEquals(key, result);
- }
-
- @Test
- public void testInvalidKeyProviderNonexistantClass() throws Exception {
- Configuration conf = new Configuration();
- String account = "testacct";
-
- conf.set("fs.azure.account.keyprovider." + account,
- "org.apache.Nonexistant.Class");
- try {
- AzureNativeFileSystemStore.getAccountKeyFromConfiguration(account, conf);
- Assert.fail("Nonexistant key provider class should have thrown a "
- + "KeyProviderException");
- } catch (KeyProviderException e) {
- }
- }
-
- @Test
- public void testInvalidKeyProviderWrongClass() throws Exception {
- Configuration conf = new Configuration();
- String account = "testacct";
-
- conf.set("fs.azure.account.keyprovider." + account, "java.lang.String");
- try {
- AzureNativeFileSystemStore.getAccountKeyFromConfiguration(account, conf);
- Assert.fail("Key provider class that doesn't implement KeyProvider "
- + "should have thrown a KeyProviderException");
- } catch (KeyProviderException e) {
- }
- }
-
- /**
- * Tests the cases when the URI is specified with no authority, i.e.
- * wasb:///path/to/file.
- */
- @Test
- public void testNoUriAuthority() throws Exception {
- // For any combination of default FS being asv(s)/wasb(s)://c@a/ and
- // the actual URI being asv(s)/wasb(s):///, it should work.
-
- String[] wasbAliases = new String[] { "wasb", "wasbs" };
- for (String defaultScheme : wasbAliases) {
- for (String wantedScheme : wasbAliases) {
- testAccount = AzureBlobStorageTestAccount.createMock();
- Configuration conf = testAccount.getFileSystem().getConf();
- String authority = testAccount.getFileSystem().getUri().getAuthority();
- URI defaultUri = new URI(defaultScheme, authority, null, null, null);
- conf.set(FS_DEFAULT_NAME_KEY, defaultUri.toString());
- // Add references to file system implementations for wasb and wasbs.
- conf.addResource("azure-test.xml");
- URI wantedUri = new URI(wantedScheme + ":///random/path");
- NativeAzureFileSystem obtained = (NativeAzureFileSystem) FileSystem
- .get(wantedUri, conf);
- assertNotNull(obtained);
- assertEquals(new URI(wantedScheme, authority, null, null, null),
- obtained.getUri());
- // Make sure makeQualified works as expected
- Path qualified = obtained.makeQualified(new Path(wantedUri));
- assertEquals(new URI(wantedScheme, authority, wantedUri.getPath(),
- null, null), qualified.toUri());
- // Cleanup for the next iteration to not cache anything in FS
- testAccount.cleanup();
- FileSystem.closeAll();
- }
- }
- // If the default FS is not a WASB FS, then specifying a URI without
- // authority for the Azure file system should throw.
- testAccount = AzureBlobStorageTestAccount.createMock();
- Configuration conf = testAccount.getFileSystem().getConf();
- conf.set(FS_DEFAULT_NAME_KEY, "file:///");
- try {
- FileSystem.get(new URI("wasb:///random/path"), conf);
- fail("Should've thrown.");
- } catch (IllegalArgumentException e) {
- }
- }
-
- @Test
- public void testWasbAsDefaultFileSystemHasNoPort() throws Exception {
- try {
- testAccount = AzureBlobStorageTestAccount.createMock();
- Configuration conf = testAccount.getFileSystem().getConf();
- String authority = testAccount.getFileSystem().getUri().getAuthority();
- URI defaultUri = new URI("wasb", authority, null, null, null);
- conf.set(FS_DEFAULT_NAME_KEY, defaultUri.toString());
- conf.addResource("azure-test.xml");
-
- FileSystem fs = FileSystem.get(conf);
- assertTrue(fs instanceof NativeAzureFileSystem);
- assertEquals(-1, fs.getUri().getPort());
-
- AbstractFileSystem afs = FileContext.getFileContext(conf)
- .getDefaultFileSystem();
- assertTrue(afs instanceof Wasb);
- assertEquals(-1, afs.getUri().getPort());
- } finally {
- testAccount.cleanup();
- FileSystem.closeAll();
- }
- }
-
- /**
- * Tests the cases when the scheme specified is 'wasbs'.
- */
- @Test
- public void testAbstractFileSystemImplementationForWasbsScheme() throws Exception {
- try {
- testAccount = AzureBlobStorageTestAccount.createMock();
- Configuration conf = testAccount.getFileSystem().getConf();
- String authority = testAccount.getFileSystem().getUri().getAuthority();
- URI defaultUri = new URI("wasbs", authority, null, null, null);
- conf.set(FS_DEFAULT_NAME_KEY, defaultUri.toString());
- conf.set("fs.AbstractFileSystem.wasbs.impl", "org.apache.hadoop.fs.azure.Wasbs");
- conf.addResource("azure-test.xml");
-
- FileSystem fs = FileSystem.get(conf);
- assertTrue(fs instanceof NativeAzureFileSystem);
- assertEquals("wasbs", fs.getScheme());
-
- AbstractFileSystem afs = FileContext.getFileContext(conf)
- .getDefaultFileSystem();
- assertTrue(afs instanceof Wasbs);
- assertEquals(-1, afs.getUri().getPort());
- assertEquals("wasbs", afs.getUri().getScheme());
- } finally {
- testAccount.cleanup();
- FileSystem.closeAll();
- }
- }
-
- @Test
- public void testNoAbstractFileSystemImplementationSpecifiedForWasbsScheme() throws Exception {
- try {
- testAccount = AzureBlobStorageTestAccount.createMock();
- Configuration conf = testAccount.getFileSystem().getConf();
- String authority = testAccount.getFileSystem().getUri().getAuthority();
- URI defaultUri = new URI("wasbs", authority, null, null, null);
- conf.set(FS_DEFAULT_NAME_KEY, defaultUri.toString());
-
- FileSystem fs = FileSystem.get(conf);
- assertTrue(fs instanceof NativeAzureFileSystem);
- assertEquals("wasbs", fs.getScheme());
-
- // should throw if 'fs.AbstractFileSystem.wasbs.impl' is not specified
- try{
- FileContext.getFileContext(conf).getDefaultFileSystem();
- fail("Should've thrown.");
- }catch(UnsupportedFileSystemException e){
- }
-
- } finally {
- testAccount.cleanup();
- FileSystem.closeAll();
- }
- }
-
- @Test
- public void testCredentialProviderPathExclusions() throws Exception {
- String providerPath =
- "user:///,jceks://wasb/user/hrt_qa/sqoopdbpasswd.jceks," +
- "jceks://hdfs@nn1.example.com/my/path/test.jceks";
- Configuration config = new Configuration();
- config.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,
- providerPath);
- String newPath = "user:///,jceks://hdfs@nn1.example.com/my/path/test.jceks";
-
- excludeAndTestExpectations(config, newPath);
- }
-
- @Test
- public void testExcludeAllProviderTypesFromConfig() throws Exception {
- String providerPath =
- "jceks://wasb/tmp/test.jceks," +
- "jceks://wasb@/my/path/test.jceks";
- Configuration config = new Configuration();
- config.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,
- providerPath);
- String newPath = null;
-
- excludeAndTestExpectations(config, newPath);
- }
-
- void excludeAndTestExpectations(Configuration config, String newPath)
- throws Exception {
- Configuration conf = ProviderUtils.excludeIncompatibleCredentialProviders(
- config, NativeAzureFileSystem.class);
- String effectivePath = conf.get(
- CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, null);
- assertEquals(newPath, effectivePath);
- }
-
- @Test
- public void testUserAgentConfig() throws Exception {
- // Set the user agent
- try {
- testAccount = AzureBlobStorageTestAccount.createMock();
- Configuration conf = testAccount.getFileSystem().getConf();
- String authority = testAccount.getFileSystem().getUri().getAuthority();
- URI defaultUri = new URI("wasbs", authority, null, null, null);
- conf.set(FS_DEFAULT_NAME_KEY, defaultUri.toString());
- conf.set("fs.AbstractFileSystem.wasbs.impl", "org.apache.hadoop.fs.azure.Wasbs");
-
- conf.set(AzureNativeFileSystemStore.USER_AGENT_ID_KEY, "TestClient");
-
- FileSystem fs = FileSystem.get(conf);
- AbstractFileSystem afs = FileContext.getFileContext(conf).getDefaultFileSystem();
-
- assertTrue(afs instanceof Wasbs);
- assertEquals(-1, afs.getUri().getPort());
- assertEquals("wasbs", afs.getUri().getScheme());
-
- } finally {
- testAccount.cleanup();
- FileSystem.closeAll();
- }
-
- // Unset the user agent
- try {
- testAccount = AzureBlobStorageTestAccount.createMock();
- Configuration conf = testAccount.getFileSystem().getConf();
- String authority = testAccount.getFileSystem().getUri().getAuthority();
- URI defaultUri = new URI("wasbs", authority, null, null, null);
- conf.set(FS_DEFAULT_NAME_KEY, defaultUri.toString());
- conf.set("fs.AbstractFileSystem.wasbs.impl", "org.apache.hadoop.fs.azure.Wasbs");
-
- conf.unset(AzureNativeFileSystemStore.USER_AGENT_ID_KEY);
-
- FileSystem fs = FileSystem.get(conf);
- AbstractFileSystem afs = FileContext.getFileContext(conf).getDefaultFileSystem();
- assertTrue(afs instanceof Wasbs);
- assertEquals(-1, afs.getUri().getPort());
- assertEquals("wasbs", afs.getUri().getScheme());
-
- } finally {
- testAccount.cleanup();
- FileSystem.closeAll();
- }
- }
-}
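The key-provider and credential-provider tests above all assert the same precedence: a configured credential provider wins over a clear-text fs.azure.account.key. entry. A minimal sketch of that lookup from client code, assuming an illustrative account name and local JCEKS keystore location (both are examples, not values from this patch):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;

public class WasbKeyLookupSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Assumed keystore location; any credential provider URI would do.
    conf.set("hadoop.security.credential.provider.path",
        "jceks://file/tmp/wasb.jceks");
    // getPassword() consults credential providers before falling back to a
    // clear-text "fs.azure.account.key.<account>" entry, which is the
    // precedence the tests above verify.
    char[] secret = conf.getPassword("fs.azure.account.key.testacct");
    System.out.println(secret == null ? "no key found" : "key resolved");
  }
}

The same alias can be provisioned from the shell with, for example, hadoop credential create fs.azure.account.key.testacct -provider jceks://file/tmp/wasb.jceks.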
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractAppend.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractAppend.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractAppend.java
new file mode 100644
index 0000000..fd21bd2
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractAppend.java
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure.contract;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.AbstractContractAppendTest;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.skip;
+
+/**
+ * Append contract tests, with one unsupported case skipped.
+ */
+public class ITestAzureNativeContractAppend extends AbstractContractAppendTest {
+
+ @Override
+ protected AbstractFSContract createContract(Configuration conf) {
+ return new NativeAzureFileSystemContract(conf);
+ }
+
+ @Override
+ public void testRenameFileBeingAppended() throws Throwable {
+ skip("Skipping as renaming an opened file is not supported");
+ }
+}
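The overridden append case is reported as skipped rather than failed. As a sketch of the mechanism assumed here (not the verbatim Hadoop source), ContractTestUtils.skip can be thought of as logging plus a failed JUnit assumption:

// Sketch only: a failed Assume marks the case skipped instead of failed.
// LOG is assumed to be an slf4j logger on the utility class.
public static void skip(String reason) {
  LOG.info("Skipping test: {}", reason);
  org.junit.Assume.assumeTrue(reason, false);
}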
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractCreate.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractCreate.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractCreate.java
new file mode 100644
index 0000000..0ac046a
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractCreate.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure.contract;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.AbstractContractCreateTest;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+
+/**
+ * Contract test.
+ */
+public class ITestAzureNativeContractCreate extends AbstractContractCreateTest {
+
+ @Override
+ protected AbstractFSContract createContract(Configuration conf) {
+ return new NativeAzureFileSystemContract(conf);
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractDelete.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractDelete.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractDelete.java
new file mode 100644
index 0000000..4c6dd48
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractDelete.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure.contract;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.AbstractContractDeleteTest;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+
+/**
+ * Contract test.
+ */
+public class ITestAzureNativeContractDelete extends AbstractContractDeleteTest {
+ @Override
+ protected AbstractFSContract createContract(Configuration conf) {
+ return new NativeAzureFileSystemContract(conf);
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractDistCp.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractDistCp.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractDistCp.java
new file mode 100644
index 0000000..7769570
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractDistCp.java
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure.contract;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.azure.integration.AzureTestConstants;
+import org.apache.hadoop.tools.contract.AbstractContractDistCpTest;
+
+import static org.apache.hadoop.fs.azure.integration.AzureTestUtils.assumeScaleTestsEnabled;
+
+/**
+ * Contract test suite covering WASB integration with DistCp.
+ */
+public class ITestAzureNativeContractDistCp extends AbstractContractDistCpTest {
+
+ @Override
+ protected int getTestTimeoutMillis() {
+ return AzureTestConstants.SCALE_TEST_TIMEOUT_MILLIS;
+ }
+
+ @Override
+ protected NativeAzureFileSystemContract createContract(Configuration conf) {
+ return new NativeAzureFileSystemContract(conf);
+ }
+
+ @Override
+ public void setup() throws Exception {
+ super.setup();
+ assumeScaleTestsEnabled(getContract().getConf());
+ }
+}
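assumeScaleTestsEnabled gates the whole suite on an opt-in switch. A sketch of that guard, assuming the switch is a boolean property named something like fs.azure.scale.test.enabled (the real constant lives in AzureTestConstants and is not shown in this diff):

import org.apache.hadoop.conf.Configuration;
import org.junit.Assume;

// Sketch only; the property name below is an assumption, not the constant.
public static void assumeScaleTestsEnabled(Configuration conf) {
  boolean enabled = conf.getBoolean("fs.azure.scale.test.enabled", false);
  Assume.assumeTrue("Scale tests disabled for this run", enabled);
}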
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractGetFileStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractGetFileStatus.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractGetFileStatus.java
new file mode 100644
index 0000000..9c09c0d
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractGetFileStatus.java
@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure.contract;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.AbstractContractGetFileStatusTest;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+
+/**
+ * Contract test.
+ */
+public class ITestAzureNativeContractGetFileStatus
+ extends AbstractContractGetFileStatusTest {
+
+ @Override
+ protected AbstractFSContract createContract(Configuration conf) {
+ return new NativeAzureFileSystemContract(conf);
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractMkdir.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractMkdir.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractMkdir.java
new file mode 100644
index 0000000..71654b8
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractMkdir.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure.contract;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.AbstractContractMkdirTest;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+
+/**
+ * Contract test.
+ */
+public class ITestAzureNativeContractMkdir extends AbstractContractMkdirTest {
+ @Override
+ protected AbstractFSContract createContract(Configuration conf) {
+ return new NativeAzureFileSystemContract(conf);
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractOpen.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractOpen.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractOpen.java
new file mode 100644
index 0000000..0b174e6
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractOpen.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure.contract;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.AbstractContractOpenTest;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+
+/**
+ * Contract test.
+ */
+public class ITestAzureNativeContractOpen extends AbstractContractOpenTest {
+
+ @Override
+ protected AbstractFSContract createContract(Configuration conf) {
+ return new NativeAzureFileSystemContract(conf);
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractRename.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractRename.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractRename.java
new file mode 100644
index 0000000..474b874
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractRename.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure.contract;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.AbstractContractRenameTest;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+
+/**
+ * Contract test.
+ */
+public class ITestAzureNativeContractRename extends AbstractContractRenameTest {
+
+ @Override
+ protected AbstractFSContract createContract(Configuration conf) {
+ return new NativeAzureFileSystemContract(conf);
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractSeek.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractSeek.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractSeek.java
new file mode 100644
index 0000000..673d5f8
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractSeek.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure.contract;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.AbstractContractSeekTest;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+
+/**
+ * Contract test.
+ */
+public class ITestAzureNativeContractSeek extends AbstractContractSeekTest {
+
+ @Override
+ protected AbstractFSContract createContract(Configuration conf) {
+ return new NativeAzureFileSystemContract(conf);
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/NativeAzureFileSystemContract.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/NativeAzureFileSystemContract.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/NativeAzureFileSystemContract.java
index 28c13ea..a264aca 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/NativeAzureFileSystemContract.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/NativeAzureFileSystemContract.java
@@ -18,15 +18,21 @@
package org.apache.hadoop.fs.azure.contract;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.azure.integration.AzureTestUtils;
import org.apache.hadoop.fs.contract.AbstractBondedFSContract;
+/**
+ * Azure Contract. Test paths are created using any maven fork
+ * identifier, if defined. This guarantees paths unique to tests
+ * running in parallel.
+ */
public class NativeAzureFileSystemContract extends AbstractBondedFSContract {
public static final String CONTRACT_XML = "wasb.xml";
- protected NativeAzureFileSystemContract(Configuration conf) {
- super(conf);
- //insert the base features
+ public NativeAzureFileSystemContract(Configuration conf) {
+ super(conf); //insert the base features
addConfResource(CONTRACT_XML);
}
@@ -34,4 +40,9 @@ public class NativeAzureFileSystemContract extends AbstractBondedFSContract {
public String getScheme() {
return "wasb";
}
-}
\ No newline at end of file
+
+ @Override
+ public Path getTestPath() {
+ return AzureTestUtils.createTestPath(super.getTestPath());
+ }
+}
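The getTestPath() override is what keeps parallel Maven forks out of each other's directories. A sketch of what AzureTestUtils.createTestPath might look like, assuming forks are identified by a system property with a name such as test.unique.fork.id (hypothetical here):

import org.apache.hadoop.fs.Path;

// Sketch only: give each parallel fork its own subtree so concurrent suites
// never list or delete one another's test data.
public static Path createTestPath(Path defaultPath) {
  String forkId = System.getProperty("test.unique.fork.id"); // assumed name
  return (forkId == null || forkId.isEmpty())
      ? defaultPath
      : new Path("/" + forkId, "test");
}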
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractAppend.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractAppend.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractAppend.java
deleted file mode 100644
index 8a2341e..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractAppend.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure.contract;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractAppendTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.Test;
-import static org.apache.hadoop.fs.contract.ContractTestUtils.skip;
-
-public class TestAzureNativeContractAppend extends AbstractContractAppendTest {
- @Override
- protected AbstractFSContract createContract(Configuration conf) {
- return new NativeAzureFileSystemContract(conf);
- }
-
- @Override
- public void testRenameFileBeingAppended() throws Throwable {
- skip("Skipping as renaming an opened file is not supported");
- }
-}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractCreate.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractCreate.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractCreate.java
deleted file mode 100644
index 531552d..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractCreate.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure.contract;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractCreateTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-
-public class TestAzureNativeContractCreate extends AbstractContractCreateTest{
- @Override
- protected AbstractFSContract createContract(Configuration conf) {
- return new NativeAzureFileSystemContract(conf);
- }
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractDelete.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractDelete.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractDelete.java
deleted file mode 100644
index 5e5c13b..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractDelete.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure.contract;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractDeleteTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-
-public class TestAzureNativeContractDelete extends AbstractContractDeleteTest {
- @Override
- protected AbstractFSContract createContract(Configuration conf) {
- return new NativeAzureFileSystemContract(conf);
- }
-}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractDistCp.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractDistCp.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractDistCp.java
deleted file mode 100644
index a3750d4..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractDistCp.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure.contract;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.tools.contract.AbstractContractDistCpTest;
-
-/**
- * Contract test suite covering WASB integration with DistCp.
- */
-public class TestAzureNativeContractDistCp extends AbstractContractDistCpTest {
-
- @Override
- protected NativeAzureFileSystemContract createContract(Configuration conf) {
- return new NativeAzureFileSystemContract(conf);
- }
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractGetFileStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractGetFileStatus.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractGetFileStatus.java
deleted file mode 100644
index b0c59ee..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractGetFileStatus.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure.contract;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractGetFileStatusTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-
-public class TestAzureNativeContractGetFileStatus extends AbstractContractGetFileStatusTest {
- @Override
- protected AbstractFSContract createContract(Configuration conf) {
- return new NativeAzureFileSystemContract(conf);
- }
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractMkdir.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractMkdir.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractMkdir.java
deleted file mode 100644
index 36df041..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractMkdir.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure.contract;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractMkdirTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-
-public class TestAzureNativeContractMkdir extends AbstractContractMkdirTest {
- @Override
- protected AbstractFSContract createContract(Configuration conf) {
- return new NativeAzureFileSystemContract(conf);
- }
-}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractOpen.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractOpen.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractOpen.java
deleted file mode 100644
index d5147ac..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractOpen.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure.contract;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractOpenTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-
-public class TestAzureNativeContractOpen extends AbstractContractOpenTest {
- @Override
- protected AbstractFSContract createContract(Configuration conf) {
- return new NativeAzureFileSystemContract(conf);
- }
-}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractRename.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractRename.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractRename.java
deleted file mode 100644
index 4d8b2b5..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractRename.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure.contract;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractRenameTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-
-public class TestAzureNativeContractRename extends AbstractContractRenameTest {
- @Override
- protected AbstractFSContract createContract(Configuration conf) {
- return new NativeAzureFileSystemContract(conf);
- }
-}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractSeek.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractSeek.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractSeek.java
deleted file mode 100644
index 30046dc..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/TestAzureNativeContractSeek.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure.contract;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractSeekTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-
-public class TestAzureNativeContractSeek extends AbstractContractSeekTest{
- @Override
- protected AbstractFSContract createContract(Configuration conf) {
- return new NativeAzureFileSystemContract(conf);
- }
-}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/AbstractAzureScaleTest.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/AbstractAzureScaleTest.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/AbstractAzureScaleTest.java
new file mode 100644
index 0000000..062d073
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/AbstractAzureScaleTest.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure.integration;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.fs.azure.AbstractWasbTestBase;
+import org.apache.hadoop.fs.azure.AzureBlobStorageTestAccount;
+
+import static org.apache.hadoop.fs.azure.integration.AzureTestUtils.*;
+
+/**
+ * Scale tests are only executed if the scale profile
+ * is set; the setup method will check this and skip
+ * tests if not.
+ */
+public abstract class AbstractAzureScaleTest
+ extends AbstractWasbTestBase implements Sizes {
+
+ protected static final Logger LOG =
+ LoggerFactory.getLogger(AbstractAzureScaleTest.class);
+
+ @Override
+ protected int getTestTimeoutMillis() {
+ return AzureTestConstants.SCALE_TEST_TIMEOUT_MILLIS;
+ }
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ LOG.debug("Scale test operation count = {}", getOperationCount());
+ assumeScaleTestsEnabled(getConfiguration());
+ }
+
+ /**
+ * Create the test account.
+ * @return a test account
+ * @throws Exception on any failure to create the account.
+ */
+ protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+ return AzureBlobStorageTestAccount.create(createConfiguration());
+ }
+
+ protected long getOperationCount() {
+ return getConfiguration().getLong(KEY_OPERATION_COUNT,
+ DEFAULT_OPERATION_COUNT);
+ }
+}
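A minimal sketch of a suite built on this base class; the class name, the workload, and the path(String) helper are illustrative assumptions, not part of the patch:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.contract.ContractTestUtils;
import org.junit.Test;

public class ITestExampleAzureScale extends AbstractAzureScaleTest {

  @Test
  public void testManySmallFiles() throws Exception {
    // getOperationCount() reads KEY_OPERATION_COUNT, so the workload is
    // tunable from configuration rather than hard-coded.
    long count = getOperationCount();
    Path base = path("many-small-files"); // assumed test-path helper
    for (long i = 0; i < count; i++) {
      ContractTestUtils.touch(getFileSystem(), new Path(base, "file-" + i));
    }
  }
}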
[06/20] hadoop git commit: HADOOP-14553. Add (parallelized) integration tests to hadoop-azure Contributed by Steve Loughran
Posted by st...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java
index 177477c..726b504 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java
@@ -18,12 +18,6 @@
package org.apache.hadoop.fs.azure;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.FileNotFoundException;
@@ -47,16 +41,18 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.Test;
-import org.apache.hadoop.fs.azure.AzureException;
import org.apache.hadoop.fs.azure.NativeAzureFileSystem.FolderRenamePending;
import com.microsoft.azure.storage.AccessCondition;
import com.microsoft.azure.storage.StorageException;
import com.microsoft.azure.storage.blob.CloudBlob;
+import static org.apache.hadoop.test.GenericTestUtils.*;
+
/*
* Tests the Native Azure file system (WASB) against an actual blob store if
* provided in the environment.
@@ -71,15 +67,46 @@ public abstract class NativeAzureFileSystemBaseTest
private final long modifiedTimeErrorMargin = 5 * 1000; // Give it +/-5 seconds
public static final Log LOG = LogFactory.getLog(NativeAzureFileSystemBaseTest.class);
+ protected NativeAzureFileSystem fs;
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ fs = getFileSystem();
+ }
+
+ /**
+ * Assert that a path does not exist.
+ *
+ * @param message message to include in the assertion failure message
+ * @param path path in the filesystem
+ * @throws IOException IO problems
+ */
+ public void assertPathDoesNotExist(String message,
+ Path path) throws IOException {
+ ContractTestUtils.assertPathDoesNotExist(fs, message, path);
+ }
+
+ /**
+ * Assert that a path exists.
+ *
+ * @param message message to include in the assertion failure message
+ * @param path path in the filesystem
+ * @throws IOException IO problems
+ */
+ public void assertPathExists(String message,
+ Path path) throws IOException {
+ ContractTestUtils.assertPathExists(fs, message, path);
+ }
@Test
public void testCheckingNonExistentOneLetterFile() throws Exception {
- assertFalse(fs.exists(new Path("/a")));
+ assertPathDoesNotExist("one letter file", new Path("/a"));
}
@Test
public void testStoreRetrieveFile() throws Exception {
- Path testFile = new Path("unit-test-file");
+ Path testFile = methodPath();
writeString(testFile, "Testing");
assertTrue(fs.exists(testFile));
FileStatus status = fs.getFileStatus(testFile);
@@ -93,7 +120,7 @@ public abstract class NativeAzureFileSystemBaseTest
@Test
public void testStoreDeleteFolder() throws Exception {
- Path testFolder = new Path("storeDeleteFolder");
+ Path testFolder = methodPath();
assertFalse(fs.exists(testFolder));
assertTrue(fs.mkdirs(testFolder));
assertTrue(fs.exists(testFolder));
@@ -105,22 +132,22 @@ public abstract class NativeAzureFileSystemBaseTest
assertEquals(new FsPermission((short) 0755), status.getPermission());
Path innerFile = new Path(testFolder, "innerFile");
assertTrue(fs.createNewFile(innerFile));
- assertTrue(fs.exists(innerFile));
+ assertPathExists("inner file", innerFile);
assertTrue(fs.delete(testFolder, true));
- assertFalse(fs.exists(innerFile));
- assertFalse(fs.exists(testFolder));
+ assertPathDoesNotExist("inner file", innerFile);
+ assertPathDoesNotExist("testFolder", testFolder);
}
@Test
public void testFileOwnership() throws Exception {
- Path testFile = new Path("ownershipTestFile");
+ Path testFile = methodPath();
writeString(testFile, "Testing");
testOwnership(testFile);
}
@Test
public void testFolderOwnership() throws Exception {
- Path testFolder = new Path("ownershipTestFolder");
+ Path testFolder = methodPath();
fs.mkdirs(testFolder);
testOwnership(testFolder);
}
@@ -147,7 +174,7 @@ public abstract class NativeAzureFileSystemBaseTest
@Test
public void testFilePermissions() throws Exception {
- Path testFile = new Path("permissionTestFile");
+ Path testFile = methodPath();
FsPermission permission = FsPermission.createImmutable((short) 644);
createEmptyFile(testFile, permission);
FileStatus ret = fs.getFileStatus(testFile);
@@ -157,7 +184,7 @@ public abstract class NativeAzureFileSystemBaseTest
@Test
public void testFolderPermissions() throws Exception {
- Path testFolder = new Path("permissionTestFolder");
+ Path testFolder = methodPath();
FsPermission permission = FsPermission.createImmutable((short) 644);
fs.mkdirs(testFolder, permission);
FileStatus ret = fs.getFileStatus(testFolder);
@@ -176,9 +203,9 @@ public abstract class NativeAzureFileSystemBaseTest
createEmptyFile(testFile, permission);
FsPermission rootPerm = fs.getFileStatus(firstDir.getParent()).getPermission();
FsPermission inheritPerm = FsPermission.createImmutable((short)(rootPerm.toShort() | 0300));
- assertTrue(fs.exists(testFile));
- assertTrue(fs.exists(firstDir));
- assertTrue(fs.exists(middleDir));
+ assertPathExists("test file", testFile);
+ assertPathExists("firstDir", firstDir);
+ assertPathExists("middleDir", middleDir);
// verify that the indirectly created directory inherited its permissions from the root directory
FileStatus directoryStatus = fs.getFileStatus(middleDir);
assertTrue(directoryStatus.isDirectory());
@@ -188,7 +215,7 @@ public abstract class NativeAzureFileSystemBaseTest
assertFalse(fileStatus.isDirectory());
assertEqualsIgnoreStickyBit(umaskedPermission, fileStatus.getPermission());
assertTrue(fs.delete(firstDir, true));
- assertFalse(fs.exists(testFile));
+ assertPathDoesNotExist("deleted file", testFile);
// An alternative test scenario would've been to delete the file first,
// and then check for the existence of the upper folders still. But that
@@ -264,7 +291,7 @@ public abstract class NativeAzureFileSystemBaseTest
assertTrue(fs.delete(new Path("deep"), true));
}
- private static enum RenameFolderVariation {
+ private enum RenameFolderVariation {
CreateFolderAndInnerFile, CreateJustInnerFile, CreateJustFolder
}
@@ -303,10 +330,10 @@ public abstract class NativeAzureFileSystemBaseTest
localFs.delete(localFilePath, true);
try {
writeString(localFs, localFilePath, "Testing");
- Path dstPath = new Path("copiedFromLocal");
+ Path dstPath = methodPath();
assertTrue(FileUtil.copy(localFs, localFilePath, fs, dstPath, false,
fs.getConf()));
- assertTrue(fs.exists(dstPath));
+ assertPathExists("coied from local", dstPath);
assertEquals("Testing", readString(fs, dstPath));
fs.delete(dstPath, true);
} finally {
@@ -423,32 +450,32 @@ public abstract class NativeAzureFileSystemBaseTest
@Test
public void testReadingDirectoryAsFile() throws Exception {
- Path dir = new Path("/x");
+ Path dir = methodPath();
assertTrue(fs.mkdirs(dir));
try {
fs.open(dir).close();
assertTrue("Should've thrown", false);
} catch (FileNotFoundException ex) {
- assertEquals("/x is a directory not a file.", ex.getMessage());
+ assertExceptionContains("a directory not a file.", ex);
}
}
@Test
public void testCreatingFileOverDirectory() throws Exception {
- Path dir = new Path("/x");
+ Path dir = methodPath();
assertTrue(fs.mkdirs(dir));
try {
fs.create(dir).close();
assertTrue("Should've thrown", false);
} catch (IOException ex) {
- assertEquals("Cannot create file /x; already exists as a directory.",
- ex.getMessage());
+ assertExceptionContains("Cannot create file", ex);
+ assertExceptionContains("already exists as a directory", ex);
}
}
@Test
public void testInputStreamReadWithZeroSizeBuffer() throws Exception {
- Path newFile = new Path("zeroSizeRead");
+ Path newFile = methodPath();
OutputStream output = fs.create(newFile);
output.write(10);
output.close();
@@ -460,7 +487,7 @@ public abstract class NativeAzureFileSystemBaseTest
@Test
public void testInputStreamReadWithBufferReturnsMinusOneOnEof() throws Exception {
- Path newFile = new Path("eofRead");
+ Path newFile = methodPath();
OutputStream output = fs.create(newFile);
output.write(10);
output.close();
@@ -482,7 +509,7 @@ public abstract class NativeAzureFileSystemBaseTest
@Test
public void testInputStreamReadWithBufferReturnsMinusOneOnEofForLargeBuffer() throws Exception {
- Path newFile = new Path("eofRead2");
+ Path newFile = methodPath();
OutputStream output = fs.create(newFile);
byte[] outputBuff = new byte[97331];
for(int i = 0; i < outputBuff.length; ++i) {
@@ -508,7 +535,7 @@ public abstract class NativeAzureFileSystemBaseTest
@Test
public void testInputStreamReadIntReturnsMinusOneOnEof() throws Exception {
- Path newFile = new Path("eofRead3");
+ Path newFile = methodPath();
OutputStream output = fs.create(newFile);
output.write(10);
output.close();
@@ -525,7 +552,7 @@ public abstract class NativeAzureFileSystemBaseTest
@Test
public void testSetPermissionOnFile() throws Exception {
- Path newFile = new Path("testPermission");
+ Path newFile = methodPath();
OutputStream output = fs.create(newFile);
output.write(13);
output.close();
@@ -540,14 +567,14 @@ public abstract class NativeAzureFileSystemBaseTest
// Don't check the file length for page blobs. Only block blobs
// provide the actual length of bytes written.
- if (!(this instanceof TestNativeAzureFSPageBlobLive)) {
+ if (!(this instanceof ITestNativeAzureFSPageBlobLive)) {
assertEquals(1, newStatus.getLen());
}
}
@Test
public void testSetPermissionOnFolder() throws Exception {
- Path newFolder = new Path("testPermission");
+ Path newFolder = methodPath();
assertTrue(fs.mkdirs(newFolder));
FsPermission newPermission = new FsPermission((short) 0600);
fs.setPermission(newFolder, newPermission);
@@ -559,7 +586,7 @@ public abstract class NativeAzureFileSystemBaseTest
@Test
public void testSetOwnerOnFile() throws Exception {
- Path newFile = new Path("testOwner");
+ Path newFile = methodPath();
OutputStream output = fs.create(newFile);
output.write(13);
output.close();
@@ -571,7 +598,7 @@ public abstract class NativeAzureFileSystemBaseTest
// File length is only reported to be the size of bytes written to the file for block blobs.
// So only check it for block blobs, not page blobs.
- if (!(this instanceof TestNativeAzureFSPageBlobLive)) {
+ if (!(this instanceof ITestNativeAzureFSPageBlobLive)) {
assertEquals(1, newStatus.getLen());
}
fs.setOwner(newFile, null, "newGroup");
@@ -583,7 +610,7 @@ public abstract class NativeAzureFileSystemBaseTest
@Test
public void testSetOwnerOnFolder() throws Exception {
- Path newFolder = new Path("testOwner");
+ Path newFolder = methodPath();
assertTrue(fs.mkdirs(newFolder));
fs.setOwner(newFolder, "newUser", null);
FileStatus newStatus = fs.getFileStatus(newFolder);
@@ -594,21 +621,21 @@ public abstract class NativeAzureFileSystemBaseTest
@Test
public void testModifiedTimeForFile() throws Exception {
- Path testFile = new Path("testFile");
+ Path testFile = methodPath();
fs.create(testFile).close();
testModifiedTime(testFile);
}
@Test
public void testModifiedTimeForFolder() throws Exception {
- Path testFolder = new Path("testFolder");
+ Path testFolder = methodPath();
assertTrue(fs.mkdirs(testFolder));
testModifiedTime(testFolder);
}
@Test
public void testFolderLastModifiedTime() throws Exception {
- Path parentFolder = new Path("testFolder");
+ Path parentFolder = methodPath();
Path innerFile = new Path(parentFolder, "innerfile");
assertTrue(fs.mkdirs(parentFolder));
@@ -983,7 +1010,7 @@ public abstract class NativeAzureFileSystemBaseTest
// Make sure rename pending file is gone.
FileStatus[] listed = fs.listStatus(new Path("/"));
- assertEquals(1, listed.length);
+ assertEquals("Pending directory still found", 1, listed.length);
assertTrue(listed[0].isDirectory());
}
@@ -1681,7 +1708,7 @@ public abstract class NativeAzureFileSystemBaseTest
assertTrue("Unanticipated exception", false);
}
} else {
- assertTrue("Unknown thread name", false);
+ fail("Unknown thread name");
}
LOG.info(name + " is exiting.");
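
[The hunks above make two systematic changes: fixed path names ("/x", "ownershipTestFile", ...) become methodPath(), so each case works under a path derived from its own method name and parallel test processes sharing one container cannot collide; and bare assertTrue(fs.exists(...)) checks become assertPathExists()/assertPathDoesNotExist(), which fail with a message and diagnostics rather than a bare AssertionError. A minimal sketch of how such helpers can be built on JUnit's TestName rule and ContractTestUtils — the class and field names here are illustrative, not quoted from the patch:

    import java.io.IOException;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.contract.ContractTestUtils;
    import org.junit.Rule;
    import org.junit.rules.TestName;

    public abstract class MethodPathSketch {
      /** Set up by the concrete test's account-creation/setUp code. */
      protected FileSystem fs;

      /** Captures the name of the currently running test method. */
      @Rule
      public TestName methodName = new TestName();

      /** A path unique to the current test method, so parallel test
       *  processes sharing one container do not trample each other. */
      protected Path methodPath() throws IOException {
        return fs.makeQualified(new Path("/" + methodName.getMethodName()));
      }

      /** Existence checks with diagnostics, replacing assertTrue(fs.exists(p)). */
      protected void assertPathExists(String message, Path path)
          throws IOException {
        ContractTestUtils.assertPathExists(fs, message, path);
      }

      protected void assertPathDoesNotExist(String message, Path path)
          throws IOException {
        ContractTestUtils.assertPathDoesNotExist(fs, message, path);
      }
    }

Deriving the path from the method name also makes any blobs leaked into the container attributable to the test that created them.]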
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/RunningLiveWasbTests.txt
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/RunningLiveWasbTests.txt b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/RunningLiveWasbTests.txt
deleted file mode 100644
index 54ba4d8..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/RunningLiveWasbTests.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-========================================================================
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-=========================================================================
-
-In order to run Windows Azure Storage Blob (WASB) unit tests against a live
-Azure Storage account, you need to provide test account details in a configuration
-file called azure-test.xml. See hadoop-tools/hadoop-azure/README.txt for details
-on configuration, and how to run the tests.
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureConcurrentOutOfBandIo.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureConcurrentOutOfBandIo.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureConcurrentOutOfBandIo.java
deleted file mode 100644
index a10a366..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureConcurrentOutOfBandIo.java
+++ /dev/null
@@ -1,195 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
-import static org.junit.Assume.assumeNotNull;
-
-import java.io.*;
-import java.util.Arrays;
-
-import org.apache.hadoop.fs.azure.AzureException;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.fs.permission.PermissionStatus;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-public class TestAzureConcurrentOutOfBandIo {
-
- // Class constants.
- static final int DOWNLOAD_BLOCK_SIZE = 8 * 1024 * 1024;
- static final int UPLOAD_BLOCK_SIZE = 4 * 1024 * 1024;
- static final int BLOB_SIZE = 32 * 1024 * 1024;
-
- // Number of blocks to be written before flush.
- static final int NUMBER_OF_BLOCKS = 2;
-
- protected AzureBlobStorageTestAccount testAccount;
-
- // Overridden TestCase methods.
- @Before
- public void setUp() throws Exception {
- testAccount = AzureBlobStorageTestAccount.createOutOfBandStore(
- UPLOAD_BLOCK_SIZE, DOWNLOAD_BLOCK_SIZE);
- assumeNotNull(testAccount);
- }
-
- @After
- public void tearDown() throws Exception {
- if (testAccount != null) {
- testAccount.cleanup();
- testAccount = null;
- }
- }
-
- class DataBlockWriter implements Runnable {
-
- Thread runner;
- AzureBlobStorageTestAccount writerStorageAccount;
- String key;
- boolean done = false;
-
- /**
- * Constructor captures the test account.
- *
- * @param testAccount
- */
- public DataBlockWriter(AzureBlobStorageTestAccount testAccount, String key) {
- writerStorageAccount = testAccount;
- this.key = key;
- }
-
- /**
- * Start writing blocks to Azure storage.
- */
- public void startWriting() {
- runner = new Thread(this); // Create the block writer thread.
- runner.start(); // Start the block writer thread.
- }
-
- /**
- * Stop writing blocks to Azure storage.
- */
- public void stopWriting() {
- done = true;
- }
-
- /**
- * Implementation of the runnable interface. The run method is a tight loop
- * which repeatedly updates the blob with a 4 MB block.
- */
- public void run() {
- byte[] dataBlockWrite = new byte[UPLOAD_BLOCK_SIZE];
-
- OutputStream outputStream = null;
-
- try {
- for (int i = 0; !done; i++) {
- // Write two 4 MB blocks to the blob.
- //
- outputStream = writerStorageAccount.getStore().storefile(
- key,
- new PermissionStatus("", "", FsPermission.getDefault()),
- key);
-
- Arrays.fill(dataBlockWrite, (byte) (i % 256));
- for (int j = 0; j < NUMBER_OF_BLOCKS; j++) {
- outputStream.write(dataBlockWrite);
- }
-
- outputStream.flush();
- outputStream.close();
- }
- } catch (AzureException e) {
- System.out
- .println("DatablockWriter thread encountered a storage exception."
- + e.getMessage());
- } catch (IOException e) {
- System.out
- .println("DatablockWriter thread encountered an I/O exception."
- + e.getMessage());
- }
- }
- }
-
- @Test
- public void testReadOOBWrites() throws Exception {
-
- byte[] dataBlockWrite = new byte[UPLOAD_BLOCK_SIZE];
- byte[] dataBlockRead = new byte[UPLOAD_BLOCK_SIZE];
-
- // Write to blob to make sure it exists.
- //
- // Write five 4 MB blocks to the blob. To ensure there is data in the blob before
- // reading. This eliminates the race between the reader and writer threads.
- OutputStream outputStream = testAccount.getStore().storefile(
- "WASB_String.txt",
- new PermissionStatus("", "", FsPermission.getDefault()),
- "WASB_String.txt");
- Arrays.fill(dataBlockWrite, (byte) 255);
- for (int i = 0; i < NUMBER_OF_BLOCKS; i++) {
- outputStream.write(dataBlockWrite);
- }
-
- outputStream.flush();
- outputStream.close();
-
- // Start writing blocks to Azure store using the DataBlockWriter thread.
- DataBlockWriter writeBlockTask = new DataBlockWriter(testAccount,
- "WASB_String.txt");
- writeBlockTask.startWriting();
- int count = 0;
- InputStream inputStream = null;
-
- for (int i = 0; i < 5; i++) {
- try {
- inputStream = testAccount.getStore().retrieve("WASB_String.txt");
- count = 0;
- int c = 0;
-
- while (c >= 0) {
- c = inputStream.read(dataBlockRead, 0, UPLOAD_BLOCK_SIZE);
- if (c < 0) {
- break;
- }
-
- // Counting the number of bytes.
- count += c;
- }
- } catch (IOException e) {
- System.out.println(e.getCause().toString());
- e.printStackTrace();
- fail();
- }
-
- // Close the stream.
- if (null != inputStream){
- inputStream.close();
- }
- }
-
- // Stop writing blocks.
- writeBlockTask.stopWriting();
-
- // Validate that a block was read.
- assertEquals(NUMBER_OF_BLOCKS * UPLOAD_BLOCK_SIZE, count);
- }
-}
\ No newline at end of file
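
[The deleted testReadOOBWrites starts a writer thread that keeps replacing the blob out of band, then repeatedly re-opens and drains it, finally asserting that one complete read returned NUMBER_OF_BLOCKS * UPLOAD_BLOCK_SIZE bytes. The drain-and-count loop, extracted as a self-contained sketch (countBytes is a name introduced here for illustration):

    import java.io.IOException;
    import java.io.InputStream;

    final class ReadLoopSketch {
      /** Drain a stream with a fixed-size buffer and return the total
       *  byte count, mirroring the loop in testReadOOBWrites. */
      static long countBytes(InputStream in, int bufferSize)
          throws IOException {
        byte[] buffer = new byte[bufferSize];
        long total = 0;
        int read;
        while ((read = in.read(buffer, 0, bufferSize)) >= 0) {
          total += read;
        }
        return total;
      }
    }
]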
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureConcurrentOutOfBandIoWithSecureMode.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureConcurrentOutOfBandIoWithSecureMode.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureConcurrentOutOfBandIoWithSecureMode.java
deleted file mode 100644
index 687b785..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureConcurrentOutOfBandIoWithSecureMode.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.fs.permission.PermissionStatus;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.io.DataInputStream;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.Arrays;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
-import static org.junit.Assume.assumeNotNull;
-
-/**
- * Extends TestAzureConcurrentOutOfBandIo in order to run testReadOOBWrites with secure mode
- * (fs.azure.secure.mode) both enabled and disabled.
- */
-public class TestAzureConcurrentOutOfBandIoWithSecureMode extends TestAzureConcurrentOutOfBandIo {
-
- // Overridden TestCase methods.
- @Before
- @Override
- public void setUp() throws Exception {
- testAccount = AzureBlobStorageTestAccount.createOutOfBandStore(
- UPLOAD_BLOCK_SIZE, DOWNLOAD_BLOCK_SIZE, true);
- assumeNotNull(testAccount);
- }
-}
\ No newline at end of file
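
[The secure-mode variant above exists only to pass true as the third argument of createOutOfBandStore(). The same coverage could be phrased as a single parameterized fixture; a sketch under JUnit 4's Parameterized runner, with the block sizes inlined from the parent class's constants — an alternative shape, not what this patch does:

    package org.apache.hadoop.fs.azure;

    import static org.junit.Assume.assumeNotNull;

    import java.util.Arrays;

    import org.junit.Before;
    import org.junit.runner.RunWith;
    import org.junit.runners.Parameterized;

    @RunWith(Parameterized.class)
    public class OutOfBandSecureModeSketch {

      @Parameterized.Parameters(name = "secureMode={0}")
      public static Iterable<Object[]> data() {
        return Arrays.asList(new Object[][] {{false}, {true}});
      }

      @Parameterized.Parameter
      public boolean secureMode;

      private AzureBlobStorageTestAccount testAccount;

      @Before
      public void setUp() throws Exception {
        // Same factory call as the deleted tests; 4 MB upload and
        // 8 MB download block sizes taken from the parent class.
        testAccount = AzureBlobStorageTestAccount.createOutOfBandStore(
            4 * 1024 * 1024, 8 * 1024 * 1024, secureMode);
        assumeNotNull(testAccount);  // skip, rather than fail, when unconfigured
      }
    }
]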
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureFileSystemErrorConditions.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureFileSystemErrorConditions.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureFileSystemErrorConditions.java
deleted file mode 100644
index c985224..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureFileSystemErrorConditions.java
+++ /dev/null
@@ -1,244 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import static org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.NO_ACCESS_TO_CONTAINER_MSG;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assume.assumeNotNull;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.net.HttpURLConnection;
-import java.net.URI;
-import java.util.Arrays;
-import java.util.HashMap;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.TestHookOperationContext;
-import org.apache.hadoop.test.GenericTestUtils;
-
-import org.junit.Test;
-
-import com.microsoft.azure.storage.OperationContext;
-import com.microsoft.azure.storage.SendingRequestEvent;
-import com.microsoft.azure.storage.StorageEvent;
-
-public class TestAzureFileSystemErrorConditions {
- private static final int ALL_THREE_FILE_SIZE = 1024;
-
- @Test
- public void testNoInitialize() throws Exception {
- AzureNativeFileSystemStore store = new AzureNativeFileSystemStore();
- boolean passed = false;
- try {
- store.retrieveMetadata("foo");
- passed = true;
- } catch (AssertionError e) {
- }
- assertFalse(
- "Doing an operation on the store should throw if not initalized.",
- passed);
- }
-
- /**
- * Try accessing an unauthorized or non-existent (treated the same) container
- * from WASB.
- */
- @Test
- public void testAccessUnauthorizedPublicContainer() throws Exception {
- final String container = "nonExistentContainer";
- final String account = "hopefullyNonExistentAccount";
- Path noAccessPath = new Path(
- "wasb://" + container + "@" + account + "/someFile");
- NativeAzureFileSystem.suppressRetryPolicy();
- try {
- FileSystem.get(noAccessPath.toUri(), new Configuration())
- .open(noAccessPath);
- assertTrue("Should've thrown.", false);
- } catch (AzureException ex) {
- GenericTestUtils.assertExceptionContains(
- String.format(NO_ACCESS_TO_CONTAINER_MSG, account, container), ex);
- } finally {
- NativeAzureFileSystem.resumeRetryPolicy();
- }
- }
-
- @Test
- public void testAccessContainerWithWrongVersion() throws Exception {
- AzureNativeFileSystemStore store = new AzureNativeFileSystemStore();
- MockStorageInterface mockStorage = new MockStorageInterface();
- store.setAzureStorageInteractionLayer(mockStorage);
- FileSystem fs = new NativeAzureFileSystem(store);
- try {
- Configuration conf = new Configuration();
- AzureBlobStorageTestAccount.setMockAccountKey(conf);
- HashMap<String, String> metadata = new HashMap<String, String>();
- metadata.put(AzureNativeFileSystemStore.VERSION_METADATA_KEY,
- "2090-04-05"); // It's from the future!
- mockStorage.addPreExistingContainer(
- AzureBlobStorageTestAccount.getMockContainerUri(), metadata);
-
- boolean passed = false;
- try {
- fs.initialize(new URI(AzureBlobStorageTestAccount.MOCK_WASB_URI), conf);
- fs.listStatus(new Path("/"));
- passed = true;
- } catch (AzureException ex) {
- assertTrue("Unexpected exception message: " + ex,
- ex.getMessage().contains("unsupported version: 2090-04-05."));
- }
- assertFalse("Should've thrown an exception because of the wrong version.",
- passed);
- } finally {
- fs.close();
- }
- }
-
- private interface ConnectionRecognizer {
- boolean isTargetConnection(HttpURLConnection connection);
- }
-
- private class TransientErrorInjector extends StorageEvent<SendingRequestEvent> {
- final ConnectionRecognizer connectionRecognizer;
- private boolean injectedErrorOnce = false;
-
- public TransientErrorInjector(ConnectionRecognizer connectionRecognizer) {
- this.connectionRecognizer = connectionRecognizer;
- }
-
- @Override
- public void eventOccurred(SendingRequestEvent eventArg) {
- HttpURLConnection connection = (HttpURLConnection)eventArg.getConnectionObject();
- if (!connectionRecognizer.isTargetConnection(connection)) {
- return;
- }
- if (!injectedErrorOnce) {
- connection.setReadTimeout(1);
- connection.disconnect();
- injectedErrorOnce = true;
- }
- }
- }
-
- private void injectTransientError(NativeAzureFileSystem fs,
- final ConnectionRecognizer connectionRecognizer) {
- fs.getStore().addTestHookToOperationContext(new TestHookOperationContext() {
- @Override
- public OperationContext modifyOperationContext(OperationContext original) {
- original.getSendingRequestEventHandler().addListener(
- new TransientErrorInjector(connectionRecognizer));
- return original;
- }
- });
- }
-
- @Test
- public void testTransientErrorOnDelete() throws Exception {
- // Need to do this test against a live storage account
- AzureBlobStorageTestAccount testAccount =
- AzureBlobStorageTestAccount.create();
- assumeNotNull(testAccount);
- try {
- NativeAzureFileSystem fs = testAccount.getFileSystem();
- injectTransientError(fs, new ConnectionRecognizer() {
- @Override
- public boolean isTargetConnection(HttpURLConnection connection) {
- return connection.getRequestMethod().equals("DELETE");
- }
- });
- Path testFile = new Path("/a/b");
- assertTrue(fs.createNewFile(testFile));
- assertTrue(fs.rename(testFile, new Path("/x")));
- } finally {
- testAccount.cleanup();
- }
- }
-
- private void writeAllThreeFile(NativeAzureFileSystem fs, Path testFile)
- throws IOException {
- byte[] buffer = new byte[ALL_THREE_FILE_SIZE];
- Arrays.fill(buffer, (byte)3);
- OutputStream stream = fs.create(testFile);
- stream.write(buffer);
- stream.close();
- }
-
- private void readAllThreeFile(NativeAzureFileSystem fs, Path testFile)
- throws IOException {
- byte[] buffer = new byte[ALL_THREE_FILE_SIZE];
- InputStream inStream = fs.open(testFile);
- assertEquals(buffer.length,
- inStream.read(buffer, 0, buffer.length));
- inStream.close();
- for (int i = 0; i < buffer.length; i++) {
- assertEquals(3, buffer[i]);
- }
- }
-
- @Test
- public void testTransientErrorOnCommitBlockList() throws Exception {
- // Need to do this test against a live storage account
- AzureBlobStorageTestAccount testAccount =
- AzureBlobStorageTestAccount.create();
- assumeNotNull(testAccount);
- try {
- NativeAzureFileSystem fs = testAccount.getFileSystem();
- injectTransientError(fs, new ConnectionRecognizer() {
- @Override
- public boolean isTargetConnection(HttpURLConnection connection) {
- return connection.getRequestMethod().equals("PUT")
- && connection.getURL().getQuery() != null
- && connection.getURL().getQuery().contains("blocklist");
- }
- });
- Path testFile = new Path("/a/b");
- writeAllThreeFile(fs, testFile);
- readAllThreeFile(fs, testFile);
- } finally {
- testAccount.cleanup();
- }
- }
-
- @Test
- public void testTransientErrorOnRead() throws Exception {
- // Need to do this test against a live storage account
- AzureBlobStorageTestAccount testAccount =
- AzureBlobStorageTestAccount.create();
- assumeNotNull(testAccount);
- try {
- NativeAzureFileSystem fs = testAccount.getFileSystem();
- Path testFile = new Path("/a/b");
- writeAllThreeFile(fs, testFile);
- injectTransientError(fs, new ConnectionRecognizer() {
- @Override
- public boolean isTargetConnection(HttpURLConnection connection) {
- return connection.getRequestMethod().equals("GET");
- }
- });
- readAllThreeFile(fs, testFile);
- } finally {
- testAccount.cleanup();
- }
- }
-}
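
[Several deleted tests above use the assertTrue("Should've thrown.", false) idiom with the message check in a catch block. Since this patch already uses org.apache.hadoop.test.LambdaTestUtils elsewhere, the same check collapses into one intercept() call; a sketch of the idiom applied to testAccessUnauthorizedPublicContainer (the wrapper class and method name are illustrative, and the retry-policy suppression around the original call is omitted for brevity):

    package org.apache.hadoop.fs.azure;

    import static org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.NO_ACCESS_TO_CONTAINER_MSG;
    import static org.apache.hadoop.test.LambdaTestUtils.intercept;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    final class InterceptIdiomSketch {
      /** The try/catch, the "Should've thrown." assert and the message
       *  check collapse into a single intercept() call. */
      static void checkNoAccess(String account, String container)
          throws Exception {
        final Path noAccessPath = new Path(
            "wasb://" + container + "@" + account + "/someFile");
        intercept(AzureException.class,
            String.format(NO_ACCESS_TO_CONTAINER_MSG, account, container),
            () -> FileSystem.get(noAccessPath.toUri(), new Configuration())
                .open(noAccessPath));
      }
    }
]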
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobDataValidation.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobDataValidation.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobDataValidation.java
deleted file mode 100644
index ea17b62..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobDataValidation.java
+++ /dev/null
@@ -1,237 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import static org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.KEY_CHECK_BLOCK_MD5;
-import static org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.KEY_STORE_BLOB_MD5;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.fail;
-import static org.junit.Assume.assumeNotNull;
-
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.net.HttpURLConnection;
-import java.util.Arrays;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.TestHookOperationContext;
-import org.junit.After;
-import org.junit.Test;
-
-import com.microsoft.azure.storage.Constants;
-import com.microsoft.azure.storage.OperationContext;
-import com.microsoft.azure.storage.ResponseReceivedEvent;
-import com.microsoft.azure.storage.StorageErrorCodeStrings;
-import com.microsoft.azure.storage.StorageEvent;
-import com.microsoft.azure.storage.StorageException;
-import com.microsoft.azure.storage.blob.BlockEntry;
-import com.microsoft.azure.storage.blob.BlockSearchMode;
-import com.microsoft.azure.storage.blob.CloudBlockBlob;
-import com.microsoft.azure.storage.core.Base64;
-
-/**
- * Test that we do proper data integrity validation with MD5 checks as
- * configured.
- */
-public class TestBlobDataValidation {
- private AzureBlobStorageTestAccount testAccount;
-
- @After
- public void tearDown() throws Exception {
- if (testAccount != null) {
- testAccount.cleanup();
- testAccount = null;
- }
- }
-
- /**
- * Test that by default we don't store the blob-level MD5.
- */
- @Test
- public void testBlobMd5StoreOffByDefault() throws Exception {
- testAccount = AzureBlobStorageTestAccount.create();
- testStoreBlobMd5(false);
- }
-
- /**
- * Test that we get blob-level MD5 storage and validation if we specify that
- * in the configuration.
- */
- @Test
- public void testStoreBlobMd5() throws Exception {
- Configuration conf = new Configuration();
- conf.setBoolean(KEY_STORE_BLOB_MD5, true);
- testAccount = AzureBlobStorageTestAccount.create(conf);
- testStoreBlobMd5(true);
- }
-
- private void testStoreBlobMd5(boolean expectMd5Stored) throws Exception {
- assumeNotNull(testAccount);
- // Write a test file.
- String testFileKey = "testFile";
- Path testFilePath = new Path("/" + testFileKey);
- OutputStream outStream = testAccount.getFileSystem().create(testFilePath);
- outStream.write(new byte[] { 5, 15 });
- outStream.close();
-
- // Check that we stored/didn't store the MD5 field as configured.
- CloudBlockBlob blob = testAccount.getBlobReference(testFileKey);
- blob.downloadAttributes();
- String obtainedMd5 = blob.getProperties().getContentMD5();
- if (expectMd5Stored) {
- assertNotNull(obtainedMd5);
- } else {
- assertNull("Expected no MD5, found: " + obtainedMd5, obtainedMd5);
- }
-
- // Mess with the content so it doesn't match the MD5.
- String newBlockId = Base64.encode(new byte[] { 55, 44, 33, 22 });
- blob.uploadBlock(newBlockId,
- new ByteArrayInputStream(new byte[] { 6, 45 }), 2);
- blob.commitBlockList(Arrays.asList(new BlockEntry[] { new BlockEntry(
- newBlockId, BlockSearchMode.UNCOMMITTED) }));
-
- // Now read back the content. If we stored the MD5 for the blob content
- // we should get a data corruption error.
- InputStream inStream = testAccount.getFileSystem().open(testFilePath);
- try {
- byte[] inBuf = new byte[100];
- while (inStream.read(inBuf) > 0){
- //nothing;
- }
- inStream.close();
- if (expectMd5Stored) {
- fail("Should've thrown because of data corruption.");
- }
- } catch (IOException ex) {
- if (!expectMd5Stored) {
- throw ex;
- }
- StorageException cause = (StorageException)ex.getCause();
- assertNotNull(cause);
- assertEquals("Unexpected cause: " + cause,
- StorageErrorCodeStrings.INVALID_MD5, cause.getErrorCode());
- }
- }
-
- /**
- * Test that by default we check block-level MD5.
- */
- @Test
- public void testCheckBlockMd5() throws Exception {
- testAccount = AzureBlobStorageTestAccount.create();
- testCheckBlockMd5(true);
- }
-
- /**
- * Test that we don't check block-level MD5 if we specify that in the
- * configuration.
- */
- @Test
- public void testDontCheckBlockMd5() throws Exception {
- Configuration conf = new Configuration();
- conf.setBoolean(KEY_CHECK_BLOCK_MD5, false);
- testAccount = AzureBlobStorageTestAccount.create(conf);
- testCheckBlockMd5(false);
- }
-
- /**
- * Connection inspector to check that MD5 fields for content is set/not set as
- * expected.
- */
- private static class ContentMD5Checker extends
- StorageEvent<ResponseReceivedEvent> {
- private final boolean expectMd5;
-
- public ContentMD5Checker(boolean expectMd5) {
- this.expectMd5 = expectMd5;
- }
-
- @Override
- public void eventOccurred(ResponseReceivedEvent eventArg) {
- HttpURLConnection connection = (HttpURLConnection) eventArg
- .getConnectionObject();
- if (isGetRange(connection)) {
- checkObtainedMd5(connection
- .getHeaderField(Constants.HeaderConstants.CONTENT_MD5));
- } else if (isPutBlock(connection)) {
- checkObtainedMd5(connection
- .getRequestProperty(Constants.HeaderConstants.CONTENT_MD5));
- }
- }
-
- private void checkObtainedMd5(String obtainedMd5) {
- if (expectMd5) {
- assertNotNull(obtainedMd5);
- } else {
- assertNull("Expected no MD5, found: " + obtainedMd5, obtainedMd5);
- }
- }
-
- private static boolean isPutBlock(HttpURLConnection connection) {
- return connection.getRequestMethod().equals("PUT")
- && connection.getURL().getQuery() != null
- && connection.getURL().getQuery().contains("blockid");
- }
-
- private static boolean isGetRange(HttpURLConnection connection) {
- return connection.getRequestMethod().equals("GET")
- && connection
- .getHeaderField(Constants.HeaderConstants.STORAGE_RANGE_HEADER) != null;
- }
- }
-
- private void testCheckBlockMd5(final boolean expectMd5Checked)
- throws Exception {
- assumeNotNull(testAccount);
- Path testFilePath = new Path("/testFile");
-
- // Add a hook to check that for GET/PUT requests we set/don't set
- // the block-level MD5 field as configured. I tried to do clever
- // testing by also messing with the raw data to see if we actually
- // validate the data as expected, but the HttpURLConnection wasn't
- // pluggable enough for me to do that.
- testAccount.getFileSystem().getStore()
- .addTestHookToOperationContext(new TestHookOperationContext() {
- @Override
- public OperationContext modifyOperationContext(
- OperationContext original) {
- original.getResponseReceivedEventHandler().addListener(
- new ContentMD5Checker(expectMd5Checked));
- return original;
- }
- });
-
- OutputStream outStream = testAccount.getFileSystem().create(testFilePath);
- outStream.write(new byte[] { 5, 15 });
- outStream.close();
-
- InputStream inStream = testAccount.getFileSystem().open(testFilePath);
- byte[] inBuf = new byte[100];
- while (inStream.read(inBuf) > 0){
- //nothing;
- }
- inStream.close();
- }
-}
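
[For context on what the deleted MD5 tests assert: the Content-MD5 property on a blob (and the header on each block upload) carries a Base64-encoded MD5 digest of the payload, which the client compares against a digest of the bytes actually transferred. A self-contained sketch of that digest computation (the class name is illustrative):

    import java.security.MessageDigest;
    import java.security.NoSuchAlgorithmException;
    import java.util.Base64;

    final class ContentMd5Sketch {
      /** The Base64-encoded MD5 digest that a Content-MD5 value carries. */
      static String contentMd5(byte[] payload) throws NoSuchAlgorithmException {
        MessageDigest md5 = MessageDigest.getInstance("MD5");
        return Base64.getEncoder().encodeToString(md5.digest(payload));
      }
    }
]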
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobMetadata.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobMetadata.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobMetadata.java
index 6c49926..30c1028 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobMetadata.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobMetadata.java
@@ -18,11 +18,6 @@
package org.apache.hadoop.fs.azure;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-
import java.io.Closeable;
import java.io.IOException;
import java.net.URI;
@@ -42,7 +37,7 @@ import org.junit.Test;
/**
* Tests that we put the correct metadata on blobs created through WASB.
*/
-public class TestBlobMetadata {
+public class TestBlobMetadata extends AbstractWasbTestWithTimeout {
private AzureBlobStorageTestAccount testAccount;
private FileSystem fs;
private InMemoryBlockBlobStore backingStore;
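
[TestBlobMetadata now extends AbstractWasbTestWithTimeout, which is why its static org.junit.Assert imports can be dropped. A plausible shape for that base class, inferred from how it is used in this patch rather than quoted from it:

    import org.junit.Assert;
    import org.junit.Rule;
    import org.junit.rules.Timeout;

    /**
     * Sketch of the shared base class: extending Assert lets subclasses
     * call assertEquals() etc. without static imports, and the Rule puts
     * a hard ceiling on every test so a hung network call fails that test
     * instead of stalling the whole (parallelized) build.
     */
    public abstract class AbstractWasbTestWithTimeout extends Assert {
      @Rule
      public Timeout testTimeout = new Timeout(10 * 60 * 1000);  // value assumed
    }
]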
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobOperationDescriptor.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobOperationDescriptor.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobOperationDescriptor.java
index 07d4ebc..aca5f81 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobOperationDescriptor.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobOperationDescriptor.java
@@ -33,9 +33,6 @@ import org.junit.Test;
import java.net.HttpURLConnection;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertEquals;
-
/**
* Tests for <code>BlobOperationDescriptor</code>.
*/
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobTypeSpeedDifference.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobTypeSpeedDifference.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobTypeSpeedDifference.java
deleted file mode 100644
index afb16ef..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobTypeSpeedDifference.java
+++ /dev/null
@@ -1,160 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import java.io.*;
-import java.util.*;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.*;
-import org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation;
-
-import junit.framework.*;
-
-import org.junit.Test;
-
-
-/**
- * A simple benchmark to find out the difference in speed between block
- * and page blobs.
- */
-public class TestBlobTypeSpeedDifference extends TestCase {
- /**
- * Writes data to the given stream of the given size, flushing every
- * x bytes.
- */
- private static void writeTestFile(OutputStream writeStream,
- long size, long flushInterval) throws IOException {
- int bufferSize = (int) Math.min(1000, flushInterval);
- byte[] buffer = new byte[bufferSize];
- Arrays.fill(buffer, (byte) 7);
- int bytesWritten = 0;
- int bytesUnflushed = 0;
- while (bytesWritten < size) {
- int numberToWrite = (int) Math.min(bufferSize, size - bytesWritten);
- writeStream.write(buffer, 0, numberToWrite);
- bytesWritten += numberToWrite;
- bytesUnflushed += numberToWrite;
- if (bytesUnflushed >= flushInterval) {
- writeStream.flush();
- bytesUnflushed = 0;
- }
- }
- }
-
- private static class TestResult {
- final long timeTakenInMs;
- final long totalNumberOfRequests;
-
- TestResult(long timeTakenInMs, long totalNumberOfRequests) {
- this.timeTakenInMs = timeTakenInMs;
- this.totalNumberOfRequests = totalNumberOfRequests;
- }
- }
-
- /**
- * Writes data to the given file of the given size, flushing every
- * x bytes. Measure performance of that and return it.
- */
- private static TestResult writeTestFile(NativeAzureFileSystem fs, Path path,
- long size, long flushInterval) throws IOException {
- AzureFileSystemInstrumentation instrumentation =
- fs.getInstrumentation();
- long initialRequests = instrumentation.getCurrentWebResponses();
- Date start = new Date();
- OutputStream output = fs.create(path);
- writeTestFile(output, size, flushInterval);
- output.close();
- long finalRequests = instrumentation.getCurrentWebResponses();
- return new TestResult(new Date().getTime() - start.getTime(),
- finalRequests - initialRequests);
- }
-
- /**
- * Writes data to a block blob of the given size, flushing every
- * x bytes. Measure performance of that and return it.
- */
- private static TestResult writeBlockBlobTestFile(NativeAzureFileSystem fs,
- long size, long flushInterval) throws IOException {
- return writeTestFile(fs, new Path("/blockBlob"), size, flushInterval);
- }
-
- /**
- * Writes data to a page blob of the given size, flushing every
- * x bytes. Measure performance of that and return it.
- */
- private static TestResult writePageBlobTestFile(NativeAzureFileSystem fs,
- long size, long flushInterval) throws IOException {
- return writeTestFile(fs,
- AzureBlobStorageTestAccount.pageBlobPath("pageBlob"),
- size, flushInterval);
- }
-
- /**
- * Runs the benchmark over a small 10 KB file, flushing every 500 bytes.
- */
- @Test
- public void testTenKbFileFrequentFlush() throws Exception {
- AzureBlobStorageTestAccount testAccount =
- AzureBlobStorageTestAccount.create();
- if (testAccount == null) {
- return;
- }
- try {
- testForSizeAndFlushInterval(testAccount.getFileSystem(), 10 * 1000, 500);
- } finally {
- testAccount.cleanup();
- }
- }
-
- /**
- * Runs the benchmark for the given file size and flush frequency.
- */
- private static void testForSizeAndFlushInterval(NativeAzureFileSystem fs,
- final long size, final long flushInterval) throws IOException {
- for (int i = 0; i < 5; i++) {
- TestResult pageBlobResults = writePageBlobTestFile(fs, size, flushInterval);
- System.out.printf(
- "Page blob upload took %d ms. Total number of requests: %d.\n",
- pageBlobResults.timeTakenInMs, pageBlobResults.totalNumberOfRequests);
- TestResult blockBlobResults = writeBlockBlobTestFile(fs, size, flushInterval);
- System.out.printf(
- "Block blob upload took %d ms. Total number of requests: %d.\n",
- blockBlobResults.timeTakenInMs, blockBlobResults.totalNumberOfRequests);
- }
- }
-
- /**
- * Runs the benchmark for the given file size and flush frequency from the
- * command line.
- */
- public static void main(String argv[]) throws Exception {
- Configuration conf = new Configuration();
- long size = 10 * 1000 * 1000;
- long flushInterval = 2000;
- if (argv.length > 0) {
- size = Long.parseLong(argv[0]);
- }
- if (argv.length > 1) {
- flushInterval = Long.parseLong(argv[1]);
- }
- testForSizeAndFlushInterval((NativeAzureFileSystem)FileSystem.get(conf),
- size, flushInterval);
- }
-}
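
[The deleted benchmark measures elapsed time with java.util.Date arithmetic; the block blob input stream tests in this same patch already use ContractTestUtils.NanoTimer for that job. A minimal sketch of the NanoTimer idiom, with the workload passed in as a Runnable (the wrapper is illustrative):

    import org.apache.hadoop.fs.contract.ContractTestUtils.NanoTimer;

    final class TimingSketch {
      /** Time a workload and log the duration, NanoTimer-style. */
      static long timeMillis(String label, Runnable workload) {
        NanoTimer timer = new NanoTimer();
        workload.run();
        timer.end("%s", label);          // logs the elapsed duration
        return timer.elapsedTimeMs();
      }
    }
]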
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlockBlobInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlockBlobInputStream.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlockBlobInputStream.java
deleted file mode 100644
index 0ae4012..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlockBlobInputStream.java
+++ /dev/null
@@ -1,875 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import java.io.EOFException;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.util.EnumSet;
-import java.util.Random;
-import java.util.concurrent.Callable;
-
-import org.junit.FixMethodOrder;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-import org.junit.runners.MethodSorters;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FSExceptionMessages;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.contract.ContractTestUtils;
-import org.apache.hadoop.fs.contract.ContractTestUtils.NanoTimer;
-
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assume.assumeNotNull;
-
-import static org.apache.hadoop.test.LambdaTestUtils.*;
-
-/**
- * Test semantics and performance of the original block blob input stream
- * (KEY_INPUT_STREAM_VERSION=1) and the new
- * <code>BlockBlobInputStream</code> (KEY_INPUT_STREAM_VERSION=2).
- */
-@FixMethodOrder(MethodSorters.NAME_ASCENDING)
-
-public class TestBlockBlobInputStream extends AbstractWasbTestBase {
- private static final Logger LOG = LoggerFactory.getLogger(
- TestBlockBlobInputStream.class);
- private static final int KILOBYTE = 1024;
- private static final int MEGABYTE = KILOBYTE * KILOBYTE;
- private static final int TEST_FILE_SIZE = 6 * MEGABYTE;
- private static final Path TEST_FILE_PATH = new Path(
- "TestBlockBlobInputStream.txt");
-
- private AzureBlobStorageTestAccount accountUsingInputStreamV1;
- private AzureBlobStorageTestAccount accountUsingInputStreamV2;
- private long testFileLength;
-
- /**
- * Long test timeout.
- */
- @Rule
- public Timeout testTimeout = new Timeout(10 * 60 * 1000);
- private FileStatus testFileStatus;
- private Path hugefile;
-
- @Override
- public void setUp() throws Exception {
- super.setUp();
- Configuration conf = new Configuration();
- conf.setInt(AzureNativeFileSystemStore.KEY_INPUT_STREAM_VERSION, 1);
-
- accountUsingInputStreamV1 = AzureBlobStorageTestAccount.create(
- "testblockblobinputstream",
- EnumSet.of(AzureBlobStorageTestAccount.CreateOptions.CreateContainer),
- conf,
- true);
-
- accountUsingInputStreamV2 = AzureBlobStorageTestAccount.create(
- "testblockblobinputstream",
- EnumSet.noneOf(AzureBlobStorageTestAccount.CreateOptions.class),
- null,
- true);
-
- assumeNotNull(accountUsingInputStreamV1);
- assumeNotNull(accountUsingInputStreamV2);
- hugefile = fs.makeQualified(TEST_FILE_PATH);
- try {
- testFileStatus = fs.getFileStatus(TEST_FILE_PATH);
- testFileLength = testFileStatus.getLen();
- } catch (FileNotFoundException e) {
- // file doesn't exist
- testFileLength = 0;
- }
- }
-
- @Override
- protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
- Configuration conf = new Configuration();
- conf.setInt(AzureNativeFileSystemStore.KEY_INPUT_STREAM_VERSION, 1);
-
- accountUsingInputStreamV1 = AzureBlobStorageTestAccount.create(
- "testblockblobinputstream",
- EnumSet.of(AzureBlobStorageTestAccount.CreateOptions.CreateContainer),
- conf,
- true);
-
- accountUsingInputStreamV2 = AzureBlobStorageTestAccount.create(
- "testblockblobinputstream",
- EnumSet.noneOf(AzureBlobStorageTestAccount.CreateOptions.class),
- null,
- true);
-
- assumeNotNull(accountUsingInputStreamV1);
- assumeNotNull(accountUsingInputStreamV2);
- return accountUsingInputStreamV1;
- }
-
- /**
- * Create a test file by repeating the characters in the alphabet.
- * @throws IOException
- */
- private void createTestFileAndSetLength() throws IOException {
- FileSystem fs = accountUsingInputStreamV1.getFileSystem();
-
- // To reduce test run time, the test file can be reused.
- if (fs.exists(TEST_FILE_PATH)) {
- testFileStatus = fs.getFileStatus(TEST_FILE_PATH);
- testFileLength = testFileStatus.getLen();
- LOG.info("Reusing test file: {}", testFileStatus);
- return;
- }
-
- int sizeOfAlphabet = ('z' - 'a' + 1);
- byte[] buffer = new byte[26 * KILOBYTE];
- char character = 'a';
- for (int i = 0; i < buffer.length; i++) {
- buffer[i] = (byte) character;
- character = (character == 'z') ? 'a' : (char) ((int) character + 1);
- }
-
- LOG.info("Creating test file {} of size: {}", TEST_FILE_PATH,
- TEST_FILE_SIZE);
- ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer();
-
- try(FSDataOutputStream outputStream = fs.create(TEST_FILE_PATH)) {
- int bytesWritten = 0;
- while (bytesWritten < TEST_FILE_SIZE) {
- outputStream.write(buffer);
- bytesWritten += buffer.length;
- }
- LOG.info("Closing stream {}", outputStream);
- ContractTestUtils.NanoTimer closeTimer
- = new ContractTestUtils.NanoTimer();
- outputStream.close();
- closeTimer.end("time to close() output stream");
- }
- timer.end("time to write %d KB", TEST_FILE_SIZE / 1024);
- testFileLength = fs.getFileStatus(TEST_FILE_PATH).getLen();
- }
-
- void assumeHugeFileExists() throws IOException {
- ContractTestUtils.assertPathExists(fs, "huge file not created", hugefile);
- FileStatus status = fs.getFileStatus(hugefile);
- ContractTestUtils.assertIsFile(hugefile, status);
- assertTrue("File " + hugefile + " is empty", status.getLen() > 0);
- }
-
- /**
- * Calculate megabits per second from the specified values for bytes and
- * milliseconds.
- * @param bytes The number of bytes.
- * @param milliseconds The number of milliseconds.
- * @return The number of megabits per second.
- */
- private static double toMbps(long bytes, long milliseconds) {
- return bytes / 1000.0 * 8 / milliseconds;
- }
-
- @Test
- public void test_0100_CreateHugeFile() throws IOException {
- createTestFileAndSetLength();
- }
-
- @Test
- public void test_0200_BasicReadTest() throws Exception {
- assumeHugeFileExists();
-
- try (
- FSDataInputStream inputStreamV1
- = accountUsingInputStreamV1.getFileSystem().open(TEST_FILE_PATH);
-
- FSDataInputStream inputStreamV2
- = accountUsingInputStreamV2.getFileSystem().open(TEST_FILE_PATH);
- ) {
- byte[] bufferV1 = new byte[3 * MEGABYTE];
- byte[] bufferV2 = new byte[bufferV1.length];
-
- // v1 forward seek and read a kilobyte into first kilobyte of bufferV1
- inputStreamV1.seek(5 * MEGABYTE);
- int numBytesReadV1 = inputStreamV1.read(bufferV1, 0, KILOBYTE);
- assertEquals(KILOBYTE, numBytesReadV1);
-
- // v2 forward seek and read a kilobyte into first kilobyte of bufferV2
- inputStreamV2.seek(5 * MEGABYTE);
- int numBytesReadV2 = inputStreamV2.read(bufferV2, 0, KILOBYTE);
- assertEquals(KILOBYTE, numBytesReadV2);
-
- assertArrayEquals(bufferV1, bufferV2);
-
- int len = MEGABYTE;
- int offset = bufferV1.length - len;
-
- // v1 reverse seek and read a megabyte into last megabyte of bufferV1
- inputStreamV1.seek(3 * MEGABYTE);
- numBytesReadV1 = inputStreamV1.read(bufferV1, offset, len);
- assertEquals(len, numBytesReadV1);
-
- // v2 reverse seek and read a megabyte into last megabyte of bufferV2
- inputStreamV2.seek(3 * MEGABYTE);
- numBytesReadV2 = inputStreamV2.read(bufferV2, offset, len);
- assertEquals(len, numBytesReadV2);
-
- assertArrayEquals(bufferV1, bufferV2);
- }
- }
-
- @Test
- public void test_0201_RandomReadTest() throws Exception {
- assumeHugeFileExists();
-
- try (
- FSDataInputStream inputStreamV1
- = accountUsingInputStreamV1.getFileSystem().open(TEST_FILE_PATH);
-
- FSDataInputStream inputStreamV2
- = accountUsingInputStreamV2.getFileSystem().open(TEST_FILE_PATH);
- ) {
- final int bufferSize = 4 * KILOBYTE;
- byte[] bufferV1 = new byte[bufferSize];
- byte[] bufferV2 = new byte[bufferV1.length];
-
- verifyConsistentReads(inputStreamV1, inputStreamV2, bufferV1, bufferV2);
-
- inputStreamV1.seek(0);
- inputStreamV2.seek(0);
-
- verifyConsistentReads(inputStreamV1, inputStreamV2, bufferV1, bufferV2);
-
- verifyConsistentReads(inputStreamV1, inputStreamV2, bufferV1, bufferV2);
-
- int seekPosition = 2 * KILOBYTE;
- inputStreamV1.seek(seekPosition);
- inputStreamV2.seek(seekPosition);
-
- inputStreamV1.seek(0);
- inputStreamV2.seek(0);
-
- verifyConsistentReads(inputStreamV1, inputStreamV2, bufferV1, bufferV2);
-
- verifyConsistentReads(inputStreamV1, inputStreamV2, bufferV1, bufferV2);
-
- verifyConsistentReads(inputStreamV1, inputStreamV2, bufferV1, bufferV2);
-
- seekPosition = 5 * KILOBYTE;
- inputStreamV1.seek(seekPosition);
- inputStreamV2.seek(seekPosition);
-
- verifyConsistentReads(inputStreamV1, inputStreamV2, bufferV1, bufferV2);
-
- seekPosition = 10 * KILOBYTE;
- inputStreamV1.seek(seekPosition);
- inputStreamV2.seek(seekPosition);
-
- verifyConsistentReads(inputStreamV1, inputStreamV2, bufferV1, bufferV2);
-
- verifyConsistentReads(inputStreamV1, inputStreamV2, bufferV1, bufferV2);
-
- seekPosition = 4100 * KILOBYTE;
- inputStreamV1.seek(seekPosition);
- inputStreamV2.seek(seekPosition);
-
- verifyConsistentReads(inputStreamV1, inputStreamV2, bufferV1, bufferV2);
- }
- }
-
- private void verifyConsistentReads(FSDataInputStream inputStreamV1,
- FSDataInputStream inputStreamV2,
- byte[] bufferV1,
- byte[] bufferV2) throws IOException {
- int size = bufferV1.length;
- final int numBytesReadV1 = inputStreamV1.read(bufferV1, 0, size);
- assertEquals("Bytes read from V1 stream", size, numBytesReadV1);
-
- final int numBytesReadV2 = inputStreamV2.read(bufferV2, 0, size);
- assertEquals("Bytes read from V2 stream", size, numBytesReadV2);
-
- assertArrayEquals("Mismatch in read data", bufferV1, bufferV2);
- }
-
- /**
- * Validates the implementation of InputStream.markSupported.
- * @throws IOException
- */
- @Test
- public void test_0301_MarkSupportedV1() throws IOException {
- validateMarkSupported(accountUsingInputStreamV1.getFileSystem());
- }
-
- /**
- * Validates the implementation of InputStream.markSupported.
- * @throws IOException
- */
- @Test
- public void test_0302_MarkSupportedV2() throws IOException {
- validateMarkSupported(accountUsingInputStreamV1.getFileSystem());
- }
-
- private void validateMarkSupported(FileSystem fs) throws IOException {
- assumeHugeFileExists();
- try (FSDataInputStream inputStream = fs.open(TEST_FILE_PATH)) {
- assertTrue("mark is not supported", inputStream.markSupported());
- }
- }
-
- /**
- * Validates the implementation of InputStream.mark and reset
- * for version 1 of the block blob input stream.
- * @throws Exception
- */
- @Test
- public void test_0303_MarkAndResetV1() throws Exception {
- validateMarkAndReset(accountUsingInputStreamV1.getFileSystem());
- }
-
- /**
- * Validates the implementation of InputStream.mark and reset
- * for version 2 of the block blob input stream.
- * @throws Exception
- */
- @Test
- public void test_0304_MarkAndResetV2() throws Exception {
- validateMarkAndReset(accountUsingInputStreamV2.getFileSystem());
- }
-
- private void validateMarkAndReset(FileSystem fs) throws Exception {
- assumeHugeFileExists();
- try (FSDataInputStream inputStream = fs.open(TEST_FILE_PATH)) {
- inputStream.mark(KILOBYTE - 1);
-
- byte[] buffer = new byte[KILOBYTE];
- int bytesRead = inputStream.read(buffer);
- assertEquals(buffer.length, bytesRead);
-
- inputStream.reset();
- assertEquals("rest -> pos 0", 0, inputStream.getPos());
-
- inputStream.mark(8 * KILOBYTE - 1);
-
- buffer = new byte[8 * KILOBYTE];
- bytesRead = inputStream.read(buffer);
- assertEquals(buffer.length, bytesRead);
-
- intercept(IOException.class,
- "Resetting to invalid mark",
- new Callable<FSDataInputStream>() {
- @Override
- public FSDataInputStream call() throws Exception {
- inputStream.reset();
- return inputStream;
- }
- }
- );
- }
- }
-
- /**
- * Validates the implementation of Seekable.seekToNewSource, which should
- * return false for version 1 of the block blob input stream.
- * @throws IOException
- */
- @Test
- public void test_0305_SeekToNewSourceV1() throws IOException {
- validateSeekToNewSource(accountUsingInputStreamV1.getFileSystem());
- }
-
- /**
- * Validates the implementation of Seekable.seekToNewSource, which should
- * return false for version 2 of the block blob input stream.
- * @throws IOException
- */
- @Test
- public void test_0306_SeekToNewSourceV2() throws IOException {
- validateSeekToNewSource(accountUsingInputStreamV2.getFileSystem());
- }
-
- private void validateSeekToNewSource(FileSystem fs) throws IOException {
- assumeHugeFileExists();
- try (FSDataInputStream inputStream = fs.open(TEST_FILE_PATH)) {
- assertFalse(inputStream.seekToNewSource(0));
- }
- }
-
- /**
- * Validates the implementation of InputStream.skip and ensures there is no
- * network I/O for version 1 of the block blob input stream.
- * @throws Exception
- */
- @Test
- public void test_0307_SkipBoundsV1() throws Exception {
- validateSkipBounds(accountUsingInputStreamV1.getFileSystem());
- }
-
- /**
- * Validates the implementation of InputStream.skip and ensures there is no
- * network I/O for version 2 of the block blob input stream.
- * @throws Exception
- */
- @Test
- public void test_0308_SkipBoundsV2() throws Exception {
- validateSkipBounds(accountUsingInputStreamV2.getFileSystem());
- }
-
- private void validateSkipBounds(FileSystem fs) throws Exception {
- assumeHugeFileExists();
- try (FSDataInputStream inputStream = fs.open(TEST_FILE_PATH)) {
- NanoTimer timer = new NanoTimer();
-
- long skipped = inputStream.skip(-1);
- assertEquals(0, skipped);
-
- skipped = inputStream.skip(0);
- assertEquals(0, skipped);
-
- assertTrue(testFileLength > 0);
-
- skipped = inputStream.skip(testFileLength);
- assertEquals(testFileLength, skipped);
-
- intercept(EOFException.class,
- new Callable<Long>() {
- @Override
- public Long call() throws Exception {
- return inputStream.skip(1);
- }
- }
- );
- long elapsedTimeMs = timer.elapsedTimeMs();
- assertTrue(
- String.format(
- "There should not be any network I/O (elapsedTimeMs=%1$d).",
- elapsedTimeMs),
- elapsedTimeMs < 20);
- }
- }
-
- /**
- * Validates the implementation of Seekable.seek and ensures there is no
- * network I/O for forward seek, for version 1 of the block blob input stream.
- * @throws Exception
- */
- @Test
- public void test_0309_SeekBoundsV1() throws Exception {
- validateSeekBounds(accountUsingInputStreamV1.getFileSystem());
- }
-
- /**
- * Validates the implementation of Seekable.seek and ensures there is no
- * network I/O for forward seek, for version 2 of the block blob input stream.
- * @throws Exception
- */
- @Test
- public void test_0310_SeekBoundsV2() throws Exception {
- validateSeekBounds(accountUsingInputStreamV2.getFileSystem());
- }
-
- private void validateSeekBounds(FileSystem fs) throws Exception {
- assumeHugeFileExists();
- try (FSDataInputStream inputStream = fs.open(TEST_FILE_PATH)) {
- NanoTimer timer = new NanoTimer();
-
- inputStream.seek(0);
- assertEquals(0, inputStream.getPos());
-
- intercept(EOFException.class,
- FSExceptionMessages.NEGATIVE_SEEK,
- new Callable<FSDataInputStream>() {
- @Override
- public FSDataInputStream call() throws Exception {
- inputStream.seek(-1);
- return inputStream;
- }
- }
- );
-
- assertTrue("Test file length only " + testFileLength, testFileLength > 0);
- inputStream.seek(testFileLength);
- assertEquals(testFileLength, inputStream.getPos());
-
- intercept(EOFException.class,
- FSExceptionMessages.CANNOT_SEEK_PAST_EOF,
- new Callable<FSDataInputStream>() {
- @Override
- public FSDataInputStream call() throws Exception {
- inputStream.seek(testFileLength + 1);
- return inputStream;
- }
- }
- );
-
- long elapsedTimeMs = timer.elapsedTimeMs();
- assertTrue(
- String.format(
- "There should not be any network I/O (elapsedTimeMs=%1$d).",
- elapsedTimeMs),
- elapsedTimeMs < 20);
- }
- }
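The intercept() checks above use Java 7 anonymous Callables; assuming Java 8 is available, the same assertion collapses to a lambda, since the helper accepts any Callable. Inside the same try-with-resources block:

    // Equivalent to the anonymous-class form above, assuming Java 8.
    intercept(EOFException.class, FSExceptionMessages.NEGATIVE_SEEK,
        () -> {
          inputStream.seek(-1);
          return inputStream;
        });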
-
- /**
- * Validates the implementation of Seekable.seek, Seekable.getPos,
- * and InputStream.available.
- * @throws Exception
- */
- @Test
- public void test_0311_SeekAndAvailableAndPositionV1() throws Exception {
- validateSeekAndAvailableAndPosition(
- accountUsingInputStreamV1.getFileSystem());
- }
-
- /**
- * Validates the implementation of Seekable.seek, Seekable.getPos,
- * and InputStream.available.
- * @throws Exception
- */
- @Test
- public void test_0312_SeekAndAvailableAndPositionV2() throws Exception {
- validateSeekAndAvailableAndPosition(
- accountUsingInputStreamV2.getFileSystem());
- }
-
- private void validateSeekAndAvailableAndPosition(FileSystem fs)
- throws Exception {
- assumeHugeFileExists();
- try (FSDataInputStream inputStream = fs.open(TEST_FILE_PATH)) {
- byte[] expected1 = {(byte) 'a', (byte) 'b', (byte) 'c'};
- byte[] expected2 = {(byte) 'd', (byte) 'e', (byte) 'f'};
- byte[] expected3 = {(byte) 'b', (byte) 'c', (byte) 'd'};
- byte[] expected4 = {(byte) 'g', (byte) 'h', (byte) 'i'};
- byte[] buffer = new byte[3];
-
- int bytesRead = inputStream.read(buffer);
- assertEquals(buffer.length, bytesRead);
- assertArrayEquals(expected1, buffer);
- assertEquals(buffer.length, inputStream.getPos());
- assertEquals(testFileLength - inputStream.getPos(),
- inputStream.available());
-
- bytesRead = inputStream.read(buffer);
- assertEquals(buffer.length, bytesRead);
- assertArrayEquals(expected2, buffer);
- assertEquals(2 * buffer.length, inputStream.getPos());
- assertEquals(testFileLength - inputStream.getPos(),
- inputStream.available());
-
- // reverse seek
- int seekPos = 0;
- inputStream.seek(seekPos);
-
- bytesRead = inputStream.read(buffer);
- assertEquals(buffer.length, bytesRead);
- assertArrayEquals(expected1, buffer);
- assertEquals(buffer.length + seekPos, inputStream.getPos());
- assertEquals(testFileLength - inputStream.getPos(),
- inputStream.available());
-
- // reverse seek
- seekPos = 1;
- inputStream.seek(seekPos);
-
- bytesRead = inputStream.read(buffer);
- assertEquals(buffer.length, bytesRead);
- assertArrayEquals(expected3, buffer);
- assertEquals(buffer.length + seekPos, inputStream.getPos());
- assertEquals(testFileLength - inputStream.getPos(),
- inputStream.available());
-
- // forward seek
- seekPos = 6;
- inputStream.seek(seekPos);
-
- bytesRead = inputStream.read(buffer);
- assertEquals(buffer.length, bytesRead);
- assertArrayEquals(expected4, buffer);
- assertEquals(buffer.length + seekPos, inputStream.getPos());
- assertEquals(testFileLength - inputStream.getPos(),
- inputStream.available());
- }
- }
-
- /**
- * Validates the implementation of InputStream.skip, Seekable.getPos,
- * and InputStream.available.
- * @throws IOException
- */
- @Test
- public void test_0313_SkipAndAvailableAndPositionV1() throws IOException {
- validateSkipAndAvailableAndPosition(
- accountUsingInputStreamV1.getFileSystem());
- }
-
- /**
- * Validates the implementation of InputStream.skip, Seekable.getPos,
- * and InputStream.available.
- * @throws IOException
- */
- @Test
- public void test_0314_SkipAndAvailableAndPositionV2() throws IOException {
- validateSkipAndAvailableAndPosition(
- accountUsingInputStreamV2.getFileSystem());
- }
-
- private void validateSkipAndAvailableAndPosition(FileSystem fs)
- throws IOException {
- assumeHugeFileExists();
- try (FSDataInputStream inputStream = fs.open(TEST_FILE_PATH)) {
- byte[] expected1 = {(byte) 'a', (byte) 'b', (byte) 'c'};
- byte[] expected2 = {(byte) 'd', (byte) 'e', (byte) 'f'};
- byte[] expected3 = {(byte) 'b', (byte) 'c', (byte) 'd'};
- byte[] expected4 = {(byte) 'g', (byte) 'h', (byte) 'i'};
-
- assertEquals(testFileLength, inputStream.available());
- assertEquals(0, inputStream.getPos());
-
- int n = 3;
- long skipped = inputStream.skip(n);
-
- assertEquals(skipped, inputStream.getPos());
- assertEquals(testFileLength - inputStream.getPos(),
- inputStream.available());
- assertEquals(skipped, n);
-
- byte[] buffer = new byte[3];
- int bytesRead = inputStream.read(buffer);
- assertEquals(buffer.length, bytesRead);
- assertArrayEquals(expected2, buffer);
- assertEquals(buffer.length + skipped, inputStream.getPos());
- assertEquals(testFileLength - inputStream.getPos(),
- inputStream.available());
-
- // does skip still work after seek?
- int seekPos = 1;
- inputStream.seek(seekPos);
-
- bytesRead = inputStream.read(buffer);
- assertEquals(buffer.length, bytesRead);
- assertArrayEquals(expected3, buffer);
- assertEquals(buffer.length + seekPos, inputStream.getPos());
- assertEquals(testFileLength - inputStream.getPos(),
- inputStream.available());
-
- long currentPosition = inputStream.getPos();
- n = 2;
- skipped = inputStream.skip(n);
-
- assertEquals(currentPosition + skipped, inputStream.getPos());
- assertEquals(testFileLength - inputStream.getPos(),
- inputStream.available());
- assertEquals(skipped, n);
-
- bytesRead = inputStream.read(buffer);
- assertEquals(buffer.length, bytesRead);
- assertArrayEquals(expected4, buffer);
- assertEquals(buffer.length + skipped + currentPosition,
- inputStream.getPos());
- assertEquals(testFileLength - inputStream.getPos(),
- inputStream.available());
- }
- }
-
- /**
- * Ensures parity in the performance of sequential read for
- * version 1 and version 2 of the block blob input stream.
- * @throws IOException
- */
- @Test
- public void test_0315_SequentialReadPerformance() throws IOException {
- assumeHugeFileExists();
- final int maxAttempts = 10;
- final double maxAcceptableRatio = 1.01;
- double v1ElapsedMs = 0, v2ElapsedMs = 0;
- double ratio = Double.MAX_VALUE;
- for (int i = 0; i < maxAttempts && ratio >= maxAcceptableRatio; i++) {
- v1ElapsedMs = sequentialRead(1,
- accountUsingInputStreamV1.getFileSystem(), false);
- v2ElapsedMs = sequentialRead(2,
- accountUsingInputStreamV2.getFileSystem(), false);
- ratio = v2ElapsedMs / v1ElapsedMs;
- LOG.info(String.format(
- "v1ElapsedMs=%1$d, v2ElapsedMs=%2$d, ratio=%3$.2f",
- (long) v1ElapsedMs,
- (long) v2ElapsedMs,
- ratio));
- }
- assertTrue(String.format(
- "Performance of version 2 is not acceptable: v1ElapsedMs=%1$d,"
- + " v2ElapsedMs=%2$d, ratio=%3$.2f",
- (long) v1ElapsedMs,
- (long) v2ElapsedMs,
- ratio),
- ratio < maxAcceptableRatio);
- }
-
- /**
- * Ensures parity in the performance of sequential read after reverse seek for
- * version 2 of the block blob input stream.
- * @throws IOException
- */
- @Test
- public void test_0316_SequentialReadAfterReverseSeekPerformanceV2()
- throws IOException {
- assumeHugeFileExists();
- final int maxAttempts = 10;
- final double maxAcceptableRatio = 1.01;
- double beforeSeekElapsedMs = 0, afterSeekElapsedMs = 0;
- double ratio = Double.MAX_VALUE;
- for (int i = 0; i < maxAttempts && ratio >= maxAcceptableRatio; i++) {
- beforeSeekElapsedMs = sequentialRead(2,
- accountUsingInputStreamV2.getFileSystem(), false);
- afterSeekElapsedMs = sequentialRead(2,
- accountUsingInputStreamV2.getFileSystem(), true);
- ratio = afterSeekElapsedMs / beforeSeekElapsedMs;
- LOG.info(String.format(
- "beforeSeekElapsedMs=%1$d, afterSeekElapsedMs=%2$d, ratio=%3$.2f",
- (long) beforeSeekElapsedMs,
- (long) afterSeekElapsedMs,
- ratio));
- }
- assertTrue(String.format(
- "Performance of version 2 after reverse seek is not acceptable:"
- + " beforeSeekElapsedMs=%1$d, afterSeekElapsedMs=%2$d,"
- + " ratio=%3$.2f",
- (long) beforeSeekElapsedMs,
- (long) afterSeekElapsedMs,
- ratio),
- ratio < maxAcceptableRatio);
- }
-
- private long sequentialRead(int version,
- FileSystem fs,
- boolean afterReverseSeek) throws IOException {
- byte[] buffer = new byte[16 * KILOBYTE];
- long totalBytesRead = 0;
- long bytesRead = 0;
-
- try(FSDataInputStream inputStream = fs.open(TEST_FILE_PATH)) {
- if (afterReverseSeek) {
- // Read the first 4 MB sequentially so the seek(0) below is a reverse seek.
- do {
- bytesRead = inputStream.read(buffer);
- totalBytesRead += bytesRead;
- } while (bytesRead > 0 && totalBytesRead < 4 * MEGABYTE);
- totalBytesRead = 0;
- inputStream.seek(0);
- }
-
- NanoTimer timer = new NanoTimer();
- while ((bytesRead = inputStream.read(buffer)) > 0) {
- totalBytesRead += bytesRead;
- }
- long elapsedTimeMs = timer.elapsedTimeMs();
-
- LOG.info(String.format(
- "v%1$d: bytesRead=%2$d, elapsedMs=%3$d, Mbps=%4$.2f,"
- + " afterReverseSeek=%5$s",
- version,
- totalBytesRead,
- elapsedTimeMs,
- toMbps(totalBytesRead, elapsedTimeMs),
- afterReverseSeek));
-
- assertEquals(testFileLength, totalBytesRead);
- return elapsedTimeMs;
- }
- }
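The logging relies on a toMbps helper defined elsewhere in this class and not visible in this hunk. A plausible sketch of such a conversion, stated as an assumption rather than the actual helper:

    /**
     * Sketch: convert a byte count and elapsed milliseconds to megabits per
     * second: Mbps = (bytes * 8 bits / 1e6) / (ms / 1e3 s) = bytes * 8 / (ms * 1e3).
     */
    private static double toMbps(long bytes, long milliseconds) {
      if (milliseconds <= 0) {
        return 0.0; // guard against division by zero on very fast reads
      }
      return bytes * 8.0 / (milliseconds * 1000.0);
    }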
-
- @Test
- public void test_0317_RandomReadPerformance() throws IOException {
- assumeHugeFileExists();
- final int maxAttempts = 10;
- final double maxAcceptableRatio = 0.10;
- double v1ElapsedMs = 0, v2ElapsedMs = 0;
- double ratio = Double.MAX_VALUE;
- for (int i = 0; i < maxAttempts && ratio >= maxAcceptableRatio; i++) {
- v1ElapsedMs = randomRead(1,
- accountUsingInputStreamV1.getFileSystem());
- v2ElapsedMs = randomRead(2,
- accountUsingInputStreamV2.getFileSystem());
- ratio = v2ElapsedMs / v1ElapsedMs;
- LOG.info(String.format(
- "v1ElapsedMs=%1$d, v2ElapsedMs=%2$d, ratio=%3$.2f",
- (long) v1ElapsedMs,
- (long) v2ElapsedMs,
- ratio));
- }
- assertTrue(String.format(
- "Performance of version 2 is not acceptable: v1ElapsedMs=%1$d,"
- + " v2ElapsedMs=%2$d, ratio=%3$.2f",
- (long) v1ElapsedMs,
- (long) v2ElapsedMs,
- ratio),
- ratio < maxAcceptableRatio);
- }
-
- private long randomRead(int version, FileSystem fs) throws IOException {
- assumeHugeFileExists();
- final int minBytesToRead = 2 * MEGABYTE;
- Random random = new Random();
- byte[] buffer = new byte[8 * KILOBYTE];
- long totalBytesRead = 0;
- long bytesRead = 0;
- try(FSDataInputStream inputStream = fs.open(TEST_FILE_PATH)) {
- NanoTimer timer = new NanoTimer();
-
- do {
- bytesRead = inputStream.read(buffer);
- totalBytesRead += bytesRead;
- inputStream.seek(random.nextInt(
- (int) (testFileLength - buffer.length)));
- } while (bytesRead > 0 && totalBytesRead < minBytesToRead);
-
- long elapsedTimeMs = timer.elapsedTimeMs();
-
- LOG.info(String.format(
- "v%1$d: totalBytesRead=%2$d, elapsedTimeMs=%3$d, Mbps=%4$.2f",
- version,
- totalBytesRead,
- elapsedTimeMs,
- toMbps(totalBytesRead, elapsedTimeMs)));
-
- assertTrue(minBytesToRead <= totalBytesRead);
-
- return elapsedTimeMs;
- }
- }
-
- @Test
- public void test_999_DeleteHugeFiles() throws IOException {
- ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer();
- fs.delete(TEST_FILE_PATH, false);
- timer.end("time to delete %s", TEST_FILE_PATH);
- }
-
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestClientThrottlingAnalyzer.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestClientThrottlingAnalyzer.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestClientThrottlingAnalyzer.java
index 307e5af..c2496d7 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestClientThrottlingAnalyzer.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestClientThrottlingAnalyzer.java
@@ -21,13 +21,10 @@ package org.apache.hadoop.fs.azure;
import org.apache.hadoop.fs.contract.ContractTestUtils.NanoTimer;
import org.junit.Test;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertEquals;
-
/**
* Tests for <code>ClientThrottlingAnalyzer</code>.
*/
-public class TestClientThrottlingAnalyzer {
+public class TestClientThrottlingAnalyzer extends AbstractWasbTestWithTimeout {
private static final int ANALYSIS_PERIOD = 1000;
private static final int ANALYSIS_PERIOD_PLUS_10_PERCENT = ANALYSIS_PERIOD
+ ANALYSIS_PERIOD / 10;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsWithThreads.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsWithThreads.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsWithThreads.java
new file mode 100644
index 0000000..4389fda
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsWithThreads.java
@@ -0,0 +1,821 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.net.URI;
+import java.util.concurrent.RejectedExecutionException;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.azure.NativeAzureFileSystem.FolderRenamePending;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+import org.mockito.Mockito;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+
+/**
+ * Tests the Native Azure file system (WASB) using parallel threads for rename and delete operations.
+ */
+public class ITestFileSystemOperationsWithThreads extends AbstractWasbTestBase {
+
+ private final int renameThreads = 10;
+ private final int deleteThreads = 20;
+ private int iterations = 1;
+ private LogCapturer logs = null;
+
+ @Rule
+ public ExpectedException exception = ExpectedException.none();
+
+ @Before
+ public void setUp() throws Exception {
+ super.setUp();
+ Configuration conf = fs.getConf();
+
+ // By default enable parallel threads for rename and delete operations.
+ // Also enable flat listing of blobs for these operations.
+ conf.setInt(NativeAzureFileSystem.AZURE_RENAME_THREADS, renameThreads);
+ conf.setInt(NativeAzureFileSystem.AZURE_DELETE_THREADS, deleteThreads);
+ conf.setBoolean(AzureNativeFileSystemStore.KEY_ENABLE_FLAT_LISTING, true);
+
+ URI uri = fs.getUri();
+ fs.initialize(uri, conf);
+
+ // Capture logs
+ logs = LogCapturer.captureLogs(new Log4JLogger(org.apache.log4j.Logger
+ .getRootLogger()));
+ }
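These settings are ordinary client-side configuration; a minimal sketch of applying them outside a test, using the same constants (the wasb URI is illustrative):

    // Enable parallel rename/delete plus flat listing, then initialize a
    // FileSystem so the settings take effect.
    Configuration conf = new Configuration();
    conf.setInt(NativeAzureFileSystem.AZURE_RENAME_THREADS, 10);
    conf.setInt(NativeAzureFileSystem.AZURE_DELETE_THREADS, 20);
    conf.setBoolean(AzureNativeFileSystemStore.KEY_ENABLE_FLAT_LISTING, true);
    FileSystem wasb = FileSystem.get(
        URI.create("wasb://container@account.blob.core.windows.net"), conf);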
+
+ /*
+ * Helper method to create subdirectories and several types of files
+ * across multiple iterations.
+ */
+ private void createFolder(FileSystem fs, String root) throws Exception {
+ fs.mkdirs(new Path(root));
+ for (int i = 0; i < this.iterations; i++) {
+ fs.mkdirs(new Path(root + "/" + i));
+ fs.createNewFile(new Path(root + "/" + i + "/fileToRename"));
+ fs.createNewFile(new Path(root + "/" + i + "/file/to/rename"));
+ fs.createNewFile(new Path(root + "/" + i + "/file+to%rename"));
+ fs.createNewFile(new Path(root + "/fileToRename" + i));
+ }
+ }
+
+ /*
+ * Helper method to perform a rename operation and validate that no files
+ * remain in the source folder and that matching files exist in the new folder.
+ */
+ private void validateRenameFolder(FileSystem fs, String source, String dest) throws Exception {
+ // Create source folder with files.
+ createFolder(fs, source);
+ Path sourceFolder = new Path(source);
+ Path destFolder = new Path(dest);
+
+ // rename operation
+ assertTrue(fs.rename(sourceFolder, destFolder));
+ assertTrue(fs.exists(destFolder));
+
+ for (int i = 0; i < this.iterations; i++) {
+ // Check that the destination folder and files exist.
+ assertTrue(fs.exists(new Path(dest + "/" + i)));
+ assertTrue(fs.exists(new Path(dest + "/" + i + "/fileToRename")));
+ assertTrue(fs.exists(new Path(dest + "/" + i + "/file/to/rename")));
+ assertTrue(fs.exists(new Path(dest + "/" + i + "/file+to%rename")));
+ assertTrue(fs.exists(new Path(dest + "/fileToRename" + i)));
+
+ // Check that the source folder and files no longer exist.
+ assertFalse(fs.exists(new Path(source + "/" + i)));
+ assertFalse(fs.exists(new Path(source + "/" + i + "/fileToRename")));
+ assertFalse(fs.exists(new Path(source + "/" + i + "/file/to/rename")));
+ assertFalse(fs.exists(new Path(source + "/" + i + "/file+to%rename")));
+ assertFalse(fs.exists(new Path(source + "/fileToRename" + i)));
+ }
+ }
+
+ /*
+ * Test case for rename operation with multiple threads and flat listing enabled.
+ */
+ @Test
+ public void testRenameSmallFolderWithThreads() throws Exception {
+
+ validateRenameFolder(fs, "root", "rootnew");
+
+ // With a single iteration, we would have created 7 blobs.
+ int expectedThreadsCreated = Math.min(7, renameThreads);
+
+ // Validate from logs that threads are created.
+ String content = logs.getOutput();
+ assertInLog(content, "ms with threads: " + expectedThreadsCreated);
+
+ // Validate thread executions
+ for (int i = 0; i < expectedThreadsCreated; i++) {
+ assertInLog(content,
+ "AzureBlobRenameThread-" + Thread.currentThread().getName() + "-" + i);
+ }
+
+ // Also ensure that we haven't spawned extra threads.
+ if (expectedThreadsCreated < renameThreads) {
+ for (int i = expectedThreadsCreated; i < renameThreads; i++) {
+ assertNotInLog(content,
+ "AzureBlobRenameThread-" + Thread.currentThread().getName() + "-" + i);
+ }
+ }
+ }
+
+ /*
+ * Test case for rename operation with multiple threads and flat listing enabled.
+ */
+ @Test
+ public void testRenameLargeFolderWithThreads() throws Exception {
+
+ // Populate source folder with large number of files and directories.
+ this.iterations = 10;
+ validateRenameFolder(fs, "root", "rootnew");
+
+ // Validate from logs that threads are created.
+ String content = logs.getOutput();
+ assertInLog(content, "ms with threads: " + renameThreads);
+
+ // Validate thread executions
+ for (int i = 0; i < renameThreads; i++) {
+ assertInLog(content,
+ "AzureBlobRenameThread-" + Thread.currentThread().getName() + "-" + i);
+ }
+ }
+
+ /*
+ * Test case for rename operation with threads disabled and flat listing enabled.
+ */
+ @Test
+ public void testRenameLargeFolderDisableThreads() throws Exception {
+ Configuration conf = fs.getConf();
+
+ // Number of threads set to 0 or 1 disables threads.
+ conf.setInt(NativeAzureFileSystem.AZURE_RENAME_THREADS, 0);
+ URI uri = fs.getUri();
+ fs.initialize(uri, conf);
+
+ // Populate source folder with large number of files and directories.
+ this.iterations = 10;
+ validateRenameFolder(fs, "root", "rootnew");
+
+ // Validate from logs that threads are disabled.
+ String content = logs.getOutput();
+ assertInLog(content,
+ "Disabling threads for Rename operation as thread count 0");
+
+ // Validate no thread executions
+ for (int i = 0; i < renameThreads; i++) {
+ String term = "AzureBlobRenameThread-"
+ + Thread.currentThread().getName()
+ + "-" + i;
+ assertNotInLog(content, term);
+ }
+ }
+
+ /**
+ * Assert that a log contains the given term.
+ * @param content log output
+ * @param term search term
+ */
+ protected void assertInLog(String content, String term) {
+ assertTrue("Empty log", !content.isEmpty());
+ if (!content.contains(term)) {
+ String message = "No " + term + " found in logs";
+ LOG.error(message);
+ System.err.println(content);
+ fail(message);
+ }
+ }
+
+ /**
+ * Assert that a log does not contain the given term.
+ * @param content log output
+ * @param term search term
+ */
+ protected void assertNotInLog(String content, String term) {
+ assertTrue("Empty log", !content.isEmpty());
+ if (content.contains(term)) {
+ String message = term + " found in logs";
+ LOG.error(message);
+ System.err.println(content);
+ fail(message);
+ }
+ }
+
+ /*
+ * Test case for rename operation with threads and flat listing disabled.
+ */
+ @Test
+ public void testRenameSmallFolderDisableThreadsDisableFlatListing() throws Exception {
+ Configuration conf = fs.getConf();
+
+ // Number of threads set to 0 or 1 disables threads.
+ conf.setInt(NativeAzureFileSystem.AZURE_RENAME_THREADS, 1);
+ conf.setBoolean(AzureNativeFileSystemStore.KEY_ENABLE_FLAT_LISTING, false);
+ URI uri = fs.getUri();
+ fs.initialize(uri, conf);
+
+ validateRenameFolder(fs, "root", "rootnew");
+
+ // Validate from logs that threads are disabled.
+ String content = logs.getOutput();
+ assertInLog(content,
+ "Disabling threads for Rename operation as thread count 1");
+
+ // Validate no thread executions
+ for (int i = 0; i < renameThreads; i++) {
+ assertNotInLog(content,
+ "AzureBlobRenameThread-" + Thread.currentThread().getName() + "-" + i);
+ }
+ }
+
+ /*
+ * Helper method to perform a delete operation and validate that no files
+ * remain in the source folder afterwards.
+ */
+ private void validateDeleteFolder(FileSystem fs, String source) throws Exception {
+ // Create folder with files.
+ createFolder(fs, "root");
+ Path sourceFolder = new Path(source);
+
+ // Delete operation
+ assertTrue(fs.delete(sourceFolder, true));
+ assertFalse(fs.exists(sourceFolder));
+
+ for (int i = 0; i < this.iterations; i++) {
+ // Check that the source folder and files no longer exist.
+ assertFalse(fs.exists(new Path(source + "/" + i)));
+ assertFalse(fs.exists(new Path(source + "/" + i + "/fileToRename")));
+ assertFalse(fs.exists(new Path(source + "/" + i + "/file/to/rename")));
+ assertFalse(fs.exists(new Path(source + "/" + i + "/file+to%rename")));
+ assertFalse(fs.exists(new Path(source + "/fileToRename" + i)));
+ }
+ }
+
+ /*
+ * Test case for delete operation with multiple threads and flat listing enabled.
+ */
+ @Test
+ public void testDeleteSmallFolderWithThreads() throws Exception {
+
+ validateDeleteFolder(fs, "root");
+
+ // With a single iteration, we would have created 7 blobs.
+ int expectedThreadsCreated = Math.min(7, deleteThreads);
+
+ // Validate from logs that threads are enabled.
+ String content = logs.getOutput();
+ assertInLog(content, "ms with threads: " + expectedThreadsCreated);
+
+ // Validate thread executions
+ for (int i = 0; i < expectedThreadsCreated; i++) {
+ assertInLog(content,
+ "AzureBlobDeleteThread-" + Thread.currentThread().getName() + "-" + i);
+ }
+
+ // Also ensure that we haven't spawned extra threads.
+ if (expectedThreadsCreated < deleteThreads) {
+ for (int i = expectedThreadsCreated; i < deleteThreads; i++) {
+ assertNotInLog(content,
+ "AzureBlobDeleteThread-" + Thread.currentThread().getName() + "-" + i);
+ }
+ }
+ }
+
+ /*
+ * Test case for delete operation with multiple threads and flat listing enabled.
+ */
+ @Test
+ public void testDeleteLargeFolderWithThreads() throws Exception {
+ // Populate source folder with large number of files and directories.
+ this.iterations = 10;
+ validateDeleteFolder(fs, "root");
+
+ // Validate from logs that threads are enabled.
+ String content = logs.getOutput();
+ assertInLog(content, "ms with threads: " + deleteThreads);
+
+ // Validate thread executions
+ for (int i = 0; i < deleteThreads; i++) {
+ assertInLog(content,
+ "AzureBlobDeleteThread-" + Thread.currentThread().getName() + "-" + i);
+ }
+ }
+
+ /*
+ * Test case for delete operation with threads disabled and flat listing enabled.
+ */
+ @Test
+ public void testDeleteLargeFolderDisableThreads() throws Exception {
+ Configuration conf = fs.getConf();
+ conf.setInt(NativeAzureFileSystem.AZURE_DELETE_THREADS, 0);
+ URI uri = fs.getUri();
+ fs.initialize(uri, conf);
+
+ // Populate source folder with large number of files and directories.
+ this.iterations = 10;
+ validateDeleteFolder(fs, "root");
+
+ // Validate from logs that threads are disabled.
+ String content = logs.getOutput();
+ assertInLog(content,
+ "Disabling threads for Delete operation as thread count 0");
+
+ // Validate no thread executions
+ for (int i = 0; i < deleteThreads; i++) {
+ assertNotInLog(content,
+ "AzureBlobDeleteThread-" + Thread.currentThread().getName() + "-" + i);
+ }
+ }
+
+ /*
+ * Test case for delete operation with threads and flat listing disabled.
+ */
+ @Test
+ public void testDeleteSmallFolderDisableThreadsDisableFlatListing() throws Exception {
+ Configuration conf = fs.getConf();
+
+ // Number of threads set to 0 or 1 disables threads.
+ conf.setInt(NativeAzureFileSystem.AZURE_DELETE_THREADS, 1);
+ conf.setBoolean(AzureNativeFileSystemStore.KEY_ENABLE_FLAT_LISTING, false);
+ URI uri = fs.getUri();
+ fs.initialize(uri, conf);
+
+ validateDeleteFolder(fs, "root");
+
+ // Validate from logs that threads are disabled.
+ String content = logs.getOutput();
+ assertInLog(content,
+ "Disabling threads for Delete operation as thread count 1");
+
+ // Validate no thread executions
+ for (int i = 0; i < deleteThreads; i++) {
+ assertNotInLog(content,
+ "AzureBlobDeleteThread-" + Thread.currentThread().getName() + "-" + i);
+ }
+ }
+
+ /*
+ * Test case for delete operation when thread pool creation fails and the
+ * operation falls back to serial execution.
+ */
+ @Test
+ public void testDeleteThreadPoolExceptionFailure() throws Exception {
+
+ // Spy azure file system object and raise exception for new thread pool
+ NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs);
+
+ String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root")));
+
+ AzureFileSystemThreadPoolExecutor mockThreadPoolExecutor = Mockito.spy(
+ mockFs.getThreadPoolExecutor(deleteThreads, "AzureBlobDeleteThread", "Delete",
+ path, NativeAzureFileSystem.AZURE_DELETE_THREADS));
+ Mockito.when(mockThreadPoolExecutor.getThreadPool(7)).thenThrow(new Exception());
+
+ // With a single iteration, we would have created 7 blobs, resulting in 7 threads.
+ Mockito.when(mockFs.getThreadPoolExecutor(deleteThreads, "AzureBlobDeleteThread", "Delete",
+ path, NativeAzureFileSystem.AZURE_DELETE_THREADS)).thenReturn(mockThreadPoolExecutor);
+
+ validateDeleteFolder(mockFs, "root");
+
+ // Validate from logs that thread pool creation failed and the operation was serialized.
+ String content = logs.getOutput();
+ assertInLog(content, "Failed to create thread pool with threads");
+ assertInLog(content, "Serializing the Delete operation");
+ }
+
+ /*
+ * Test case for delete operation when the thread pool rejects every task and
+ * the operation is serialized.
+ */
+ @Test
+ public void testDeleteThreadPoolExecuteFailure() throws Exception {
+
+ // Mock thread pool executor to throw exception for all requests.
+ ThreadPoolExecutor mockThreadExecutor = Mockito.mock(ThreadPoolExecutor.class);
+ Mockito.doThrow(new RejectedExecutionException()).when(mockThreadExecutor).execute(Mockito.any(Runnable.class));
+
+ // Spy azure file system object and return mocked thread pool
+ NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs);
+
+ String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root")));
+
+ AzureFileSystemThreadPoolExecutor mockThreadPoolExecutor = Mockito.spy(
+ mockFs.getThreadPoolExecutor(deleteThreads, "AzureBlobDeleteThread", "Delete",
+ path, NativeAzureFileSystem.AZURE_DELETE_THREADS));
+ Mockito.when(mockThreadPoolExecutor.getThreadPool(7)).thenReturn(mockThreadExecutor);
+
+ // With a single iteration, we would have created 7 blobs, resulting in 7 threads.
+ Mockito.when(mockFs.getThreadPoolExecutor(deleteThreads, "AzureBlobDeleteThread", "Delete",
+ path, NativeAzureFileSystem.AZURE_DELETE_THREADS)).thenReturn(mockThreadPoolExecutor);
+
+ validateDeleteFolder(mockFs, "root");
+
+ // Validate from logs that execution was rejected and the operation was serialized.
+ String content = logs.getOutput();
+ assertInLog(content,
+ "Rejected execution of thread for Delete operation on blob");
+ assertInLog(content, "Serializing the Delete operation");
+ }
+
+ /*
+ * Test case for delete operation when all but the first task submission is
+ * rejected, leaving the remaining threads unused.
+ */
+ @Test
+ public void testDeleteThreadPoolExecuteSingleThreadFailure() throws Exception {
+
+ // Spy azure file system object and return mocked thread pool
+ NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs);
+
+ // Spy a thread pool executor and link it to azure file system object.
+ String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root")));
+ AzureFileSystemThreadPoolExecutor mockThreadPoolExecutor = Mockito.spy(
+ mockFs.getThreadPoolExecutor(deleteThreads, "AzureBlobDeleteThread", "Delete",
+ path, NativeAzureFileSystem.AZURE_DELETE_THREADS));
+
+ // With a single iteration, we would have created 7 blobs, resulting in 7 threads.
+ Mockito.when(mockFs.getThreadPoolExecutor(deleteThreads, "AzureBlobDeleteThread", "Delete",
+ path, NativeAzureFileSystem.AZURE_DELETE_THREADS)).thenReturn(mockThreadPoolExecutor);
+
+ // Create a thread executor and link it to mocked thread pool executor object.
+ ThreadPoolExecutor mockThreadExecutor = Mockito.spy(mockThreadPoolExecutor.getThreadPool(7));
+ Mockito.when(mockThreadPoolExecutor.getThreadPool(7)).thenReturn(mockThreadExecutor);
+
+ // Mock thread executor to throw exception for all requests.
+ Mockito.doCallRealMethod().doThrow(new RejectedExecutionException()).when(mockThreadExecutor).execute(Mockito.any(Runnable.class));
+
+ validateDeleteFolder(mockFs, "root");
+
+ // Validate from logs that threads are enabled and that some threads went unused.
+ String content = logs.getOutput();
+ assertInLog(content,
+ "Using thread pool for Delete operation with threads 7");
+ assertInLog(content,
+ "6 threads not used for Delete operation on blob");
+ }
+
+ /*
+ * Test case for delete operation when waiting for thread pool termination is
+ * interrupted, causing the operation to fail.
+ */
+ @Test
+ public void testDeleteThreadPoolTerminationFailure() throws Exception {
+
+ // Spy azure file system object and return mocked thread pool
+ NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs);
+
+ // Spy a thread pool executor and link it to azure file system object.
+ String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root")));
+ AzureFileSystemThreadPoolExecutor mockThreadPoolExecutor = Mockito.spy(
+ ((NativeAzureFileSystem) fs).getThreadPoolExecutor(deleteThreads, "AzureBlobDeleteThread", "Delete",
+ path, NativeAzureFileSystem.AZURE_DELETE_THREADS));
+
+ // Create a thread executor and link it to mocked thread pool executor object.
+ // Mock thread executor to throw exception for terminating threads.
+ ThreadPoolExecutor mockThreadExecutor = Mockito.mock(ThreadPoolExecutor.class);
+ Mockito.doNothing().when(mockThreadExecutor).execute(Mockito.any(Runnable.class));
+ Mockito.when(mockThreadExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS)).thenThrow(new InterruptedException());
+
+ Mockito.when(mockThreadPoolExecutor.getThreadPool(7)).thenReturn(mockThreadExecutor);
+
+ // With a single iteration, we would have created 7 blobs, resulting in 7 threads.
+ Mockito.when(mockFs.getThreadPoolExecutor(deleteThreads, "AzureBlobDeleteThread", "Delete",
+ path, NativeAzureFileSystem.AZURE_DELETE_THREADS)).thenReturn(mockThreadPoolExecutor);
+
+ createFolder(mockFs, "root");
+ Path sourceFolder = new Path("root");
+ boolean exception = false;
+ try {
+ mockFs.delete(sourceFolder, true);
+ } catch (IOException e){
+ exception = true;
+ }
+
+ assertTrue(exception);
+ assertTrue(mockFs.exists(sourceFolder));
+
+ // Validate from logs that threads are enabled and the delete operation failed.
+ String content = logs.getOutput();
+ assertInLog(content,
+ "Using thread pool for Delete operation with threads");
+ assertInLog(content, "Threads got interrupted Delete blob operation");
+ assertInLog(content,
+ "Delete failed as operation on subfolders and files failed.");
+ }
+
+ /*
+ * Validate that when a directory is deleted recursively, the operation succeeds
+ * even if a child directory delete fails because the directory does not exist.
+ * This can happen if a child directory is deleted by an external agent while
+ * the parent is in progress of being deleted recursively.
+ */
+ @Test
+ public void testRecursiveDirectoryDeleteWhenChildDirectoryDeleted()
+ throws Exception {
+ testRecursiveDirectoryDelete(true);
+ }
+
+ /*
+ * Validate that when a directory is deleted recursively, the operation succeeds
+ * even if a file delete fails because it does not exist.
+ * This can happen if a file is deleted by an external agent while
+ * the parent directory is in the process of being deleted.
+ */
+ @Test
+ public void testRecursiveDirectoryDeleteWhenDeletingChildFileReturnsFalse()
+ throws Exception {
+ testRecursiveDirectoryDelete(false);
+ }
+
+ private void testRecursiveDirectoryDelete(boolean useDir) throws Exception {
+ String childPathToBeDeletedByExternalAgent = (useDir)
+ ? "root/0"
+ : "root/0/fileToRename";
+ // Spy azure file system object and return false for deleting one file
+ NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs);
+ String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path(
+ childPathToBeDeletedByExternalAgent)));
+
+ Answer<Boolean> answer = new Answer<Boolean>() {
+ public Boolean answer(InvocationOnMock invocation) throws Throwable {
+ String path = (String) invocation.getArguments()[0];
+ boolean isDir = (boolean) invocation.getArguments()[1];
+ boolean realResult = fs.deleteFile(path, isDir);
+ assertTrue(realResult);
+ boolean fakeResult = false;
+ return fakeResult;
+ }
+ };
+
+ Mockito.when(mockFs.deleteFile(path, useDir)).thenAnswer(answer);
+
+ createFolder(mockFs, "root");
+ Path sourceFolder = new Path("root");
+
+ assertTrue(mockFs.delete(sourceFolder, true));
+ assertFalse(mockFs.exists(sourceFolder));
+
+ // Validate from logs that threads are enabled, that the child path was
+ // deleted by an external agent, and that the parent delete operation
+ // still succeeds.
+ String content = logs.getOutput();
+ assertInLog(content,
+ "Using thread pool for Delete operation with threads");
+ assertInLog(content, String.format("Attempt to delete non-existent %s %s",
+ useDir ? "directory" : "file", path));
+ }
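The Answer above runs the real delete through the un-spied fs and then reports false. With a spy, the same run-the-real-call-then-override-the-result pattern can also be written via InvocationOnMock.callRealMethod(); a sketch, assuming the Mockito version in use accepts a lambda for Answer:

    Mockito.when(mockFs.deleteFile(path, useDir)).thenAnswer(invocation -> {
      boolean realResult = (Boolean) invocation.callRealMethod(); // real delete
      assertTrue(realResult);
      return false; // report failure, as if an external agent removed the path
    });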
+
+ /*
+ * Test case for delete operation when deleting one file throws an exception,
+ * terminating the overall operation.
+ */
+ @Test
+ public void testDeleteSingleDeleteException() throws Exception {
+
+ // Spy azure file system object and raise exception for deleting one file
+ NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs);
+ String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root/0")));
+ Mockito.doThrow(new IOException()).when(mockFs).deleteFile(path, true);
+
+ createFolder(mockFs, "root");
+ Path sourceFolder = new Path("root");
+
+ boolean exception = false;
+ try {
+ mockFs.delete(sourceFolder, true);
+ } catch (IOException e){
+ exception = true;
+ }
+
+ assertTrue(exception);
+ assertTrue(mockFs.exists(sourceFolder));
+
+ // Validate from logs that threads are enabled and the delete operation failed.
+ String content = logs.getOutput();
+ assertInLog(content,
+ "Using thread pool for Delete operation with threads");
+ assertInLog(content,
+ "Encountered Exception for Delete operation for file " + path);
+ assertInLog(content,
+ "Terminating execution of Delete operation now as some other thread already got exception or operation failed");
+ }
+
+ /*
+ * Test case for rename operation when thread pool creation fails and the
+ * operation falls back to serial execution.
+ */
+ @Test
+ public void testRenameThreadPoolExceptionFailure() throws Exception {
+
+ // Spy azure file system object and raise exception for new thread pool
+ NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs);
+
+ String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root")));
+ AzureFileSystemThreadPoolExecutor mockThreadPoolExecutor = Mockito.spy(
+ ((NativeAzureFileSystem) fs).getThreadPoolExecutor(renameThreads, "AzureBlobRenameThread", "Rename",
+ path, NativeAzureFileSystem.AZURE_RENAME_THREADS));
+ Mockito.when(mockThreadPoolExecutor.getThreadPool(7)).thenThrow(new Exception());
+
+ // With a single iteration, we would have created 7 blobs, resulting in 7 threads.
+ Mockito.doReturn(mockThreadPoolExecutor).when(mockFs).getThreadPoolExecutor(renameThreads, "AzureBlobRenameThread", "Rename",
+ path, NativeAzureFileSystem.AZURE_RENAME_THREADS);
+
+ validateRenameFolder(mockFs, "root", "rootnew");
+
+ // Validate from logs that thread pool creation failed and the operation was serialized.
+ String content = logs.getOutput();
+ assertInLog(content, "Failed to create thread pool with threads");
+ assertInLog(content, "Serializing the Rename operation");
+ }
+
+ /*
+ * Test case for rename operation when the thread pool rejects every task and
+ * the operation is serialized.
+ */
+ @Test
+ public void testRenameThreadPoolExecuteFailure() throws Exception {
+
+ // Mock thread pool executor to throw exception for all requests.
+ ThreadPoolExecutor mockThreadExecutor = Mockito.mock(ThreadPoolExecutor.class);
+ Mockito.doThrow(new RejectedExecutionException()).when(mockThreadExecutor).execute(Mockito.any(Runnable.class));
+
+ // Spy azure file system object and return mocked thread pool
+ NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs);
+
+ String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root")));
+ AzureFileSystemThreadPoolExecutor mockThreadPoolExecutor = Mockito.spy(
+ mockFs.getThreadPoolExecutor(renameThreads, "AzureBlobRenameThread", "Rename",
+ path, NativeAzureFileSystem.AZURE_RENAME_THREADS));
+ Mockito.when(mockThreadPoolExecutor.getThreadPool(7)).thenReturn(mockThreadExecutor);
+
+ // With a single iteration, we would have created 7 blobs, resulting in 7 threads.
+ Mockito.when(mockFs.getThreadPoolExecutor(renameThreads, "AzureBlobRenameThread", "Rename",
+ path, NativeAzureFileSystem.AZURE_RENAME_THREADS)).thenReturn(mockThreadPoolExecutor);
+
+ validateRenameFolder(mockFs, "root", "rootnew");
+
+ // Validate from logs that execution was rejected and the operation was serialized.
+ String content = logs.getOutput();
+ assertInLog(content,
+ "Rejected execution of thread for Rename operation on blob");
+ assertInLog(content, "Serializing the Rename operation");
+ }
+
+ /*
+ * Test case for rename operation when all but the first task submission is
+ * rejected, leaving the remaining threads unused.
+ */
+ @Test
+ public void testRenameThreadPoolExecuteSingleThreadFailure() throws Exception {
+
+ // Spy azure file system object and return mocked thread pool
+ NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs);
+
+ // Spy a thread pool executor and link it to azure file system object.
+ String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root")));
+ AzureFileSystemThreadPoolExecutor mockThreadPoolExecutor = Mockito.spy(
+ mockFs.getThreadPoolExecutor(renameThreads, "AzureBlobRenameThread", "Rename",
+ path, NativeAzureFileSystem.AZURE_RENAME_THREADS));
+
+ // With a single iteration, we would have created 7 blobs, resulting in 7 threads.
+ Mockito.when(mockFs.getThreadPoolExecutor(renameThreads, "AzureBlobRenameThread", "Rename",
+ path, NativeAzureFileSystem.AZURE_RENAME_THREADS)).thenReturn(mockThreadPoolExecutor);
+
+ // Create a thread executor and link it to mocked thread pool executor object.
+ ThreadPoolExecutor mockThreadExecutor = Mockito.spy(mockThreadPoolExecutor.getThreadPool(7));
+ Mockito.when(mockThreadPoolExecutor.getThreadPool(7)).thenReturn(mockThreadExecutor);
+
+ // Mock thread executor to throw exception for all requests.
+ Mockito.doCallRealMethod().doThrow(new RejectedExecutionException()).when(mockThreadExecutor).execute(Mockito.any(Runnable.class));
+
+ validateRenameFolder(mockFs, "root", "rootnew");
+
+ // Validate from logs that threads are enabled and that some threads went unused.
+ String content = logs.getOutput();
+ assertInLog(content,
+ "Using thread pool for Rename operation with threads 7");
+ assertInLog(content,
+ "6 threads not used for Rename operation on blob");
+ }
+
+ /*
+ * Test case for rename operation when waiting for thread pool termination is
+ * interrupted, causing the operation to fail.
+ */
+ @Test
+ public void testRenameThreadPoolTerminationFailure() throws Exception {
+
+ // Spy azure file system object and return mocked thread pool
+ NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs);
+
+ // Spy a thread pool executor and link it to azure file system object.
+ String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root")));
+ AzureFileSystemThreadPoolExecutor mockThreadPoolExecutor = Mockito.spy(
+ mockFs.getThreadPoolExecutor(renameThreads, "AzureBlobRenameThread", "Rename",
+ path, NativeAzureFileSystem.AZURE_RENAME_THREADS));
+
+ // With a single iteration, we would have created 7 blobs, resulting in 7 threads.
+ Mockito.when(mockFs.getThreadPoolExecutor(renameThreads, "AzureBlobRenameThread", "Rename",
+ path, NativeAzureFileSystem.AZURE_RENAME_THREADS)).thenReturn(mockThreadPoolExecutor);
+
+ // Mock thread executor to throw exception for all requests.
+ ThreadPoolExecutor mockThreadExecutor = Mockito.mock(ThreadPoolExecutor.class);
+ Mockito.doNothing().when(mockThreadExecutor).execute(Mockito.any(Runnable.class));
+ Mockito.when(mockThreadExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS)).thenThrow(new InterruptedException());
+ Mockito.when(mockThreadPoolExecutor.getThreadPool(7)).thenReturn(mockThreadExecutor);
+
+ createFolder(mockFs, "root");
+ Path sourceFolder = new Path("root");
+ Path destFolder = new Path("rootnew");
+ boolean exception = false;
+ try {
+ mockFs.rename(sourceFolder, destFolder);
+ } catch (IOException e){
+ exception = true;
+ }
+
+ assertTrue(exception);
+ assertTrue(mockFs.exists(sourceFolder));
+
+ // Validate from logs that threads are enabled and the rename operation failed.
+ String content = logs.getOutput();
+ assertInLog(content,
+ "Using thread pool for Rename operation with threads");
+ assertInLog(content, "Threads got interrupted Rename blob operation");
+ assertInLog(content,
+ "Rename failed as operation on subfolders and files failed.");
+ }
+
+ /*
+ * Test case for rename operation when renaming one file throws an exception,
+ * terminating the overall operation.
+ */
+ @Test
+ public void testRenameSingleRenameException() throws Exception {
+
+ // Source and destination folders for the rename operation.
+ Path sourceFolder = new Path("root");
+ Path destFolder = new Path("rootnew");
+
+ // Spy azure file system object and populate rename pending spy object.
+ NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs);
+
+ // Populate the data now so that the rename-pending spy object will see it.
+ createFolder(mockFs, "root");
+
+ String srcKey = mockFs.pathToKey(mockFs.makeAbsolute(sourceFolder));
+ String dstKey = mockFs.pathToKey(mockFs.makeAbsolute(destFolder));
+
+ FolderRenamePending mockRenameFs = Mockito.spy(mockFs.prepareAtomicFolderRename(srcKey, dstKey));
+ Mockito.when(mockFs.prepareAtomicFolderRename(srcKey, dstKey)).thenReturn(mockRenameFs);
+ String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root/0")));
+ Mockito.doThrow(new IOException()).when(mockRenameFs).renameFile(Mockito.any(FileMetadata.class));
+
+ boolean exception = false;
+ try {
+ mockFs.rename(sourceFolder, destFolder);
+ } catch (IOException e){
+ exception = true;
+ }
+
+ assertTrue(exception);
+ assertTrue(mockFs.exists(sourceFolder));
+
+ // Validate from logs that threads are enabled and the rename operation failed.
+ String content = logs.getOutput();
+ assertInLog(content,
+ "Using thread pool for Rename operation with threads");
+ assertInLog(content,
+ "Encountered Exception for Rename operation for file " + path);
+ assertInLog(content,
+ "Terminating execution of Rename operation now as some other thread already got exception or operation failed");
+ }
+
+ @Override
+ protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+ return AzureBlobStorageTestAccount.create();
+ }
+
+}
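A note on the two spy-stubbing styles mixed through the class above: when(spy.call()).thenReturn(...) invokes the real method while the stub is being set up, whereas doReturn(...).when(spy).call() does not. A self-contained illustration:

    import static org.mockito.Mockito.doReturn;
    import static org.mockito.Mockito.spy;
    import static org.mockito.Mockito.when;

    import java.util.ArrayList;
    import java.util.List;

    public class SpyStubbingDemo {
      public static void main(String[] args) {
        List<String> safe = spy(new ArrayList<String>());
        // doReturn never touches the real get(0), so stubbing an empty list is fine.
        doReturn("stubbed").when(safe).get(0);
        System.out.println(safe.get(0)); // prints "stubbed"

        List<String> risky = spy(new ArrayList<String>());
        try {
          // when(...) calls the real get(0) first, which throws on an empty list.
          when(risky.get(0)).thenReturn("stubbed");
        } catch (IndexOutOfBoundsException e) {
          System.out.println("real method ran during stubbing: " + e);
        }
      }
    }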
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFSAuthWithBlobSpecificKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFSAuthWithBlobSpecificKeys.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFSAuthWithBlobSpecificKeys.java
new file mode 100644
index 0000000..d7e4831
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFSAuthWithBlobSpecificKeys.java
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import org.apache.hadoop.conf.Configuration;
+
+import static org.apache.hadoop.fs.azure.SecureStorageInterfaceImpl.KEY_USE_CONTAINER_SASKEY_FOR_ALL_ACCESS;
+
+/**
+ * Test class to hold all WASB authorization tests that use blob-specific keys
+ * to access storage.
+ */
+public class ITestNativeAzureFSAuthWithBlobSpecificKeys
+ extends ITestNativeAzureFileSystemAuthorizationWithOwner {
+
+ @Override
+ public Configuration createConfiguration() {
+ Configuration conf = super.createConfiguration();
+ conf.set(KEY_USE_CONTAINER_SASKEY_FOR_ALL_ACCESS, "false");
+ return conf;
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFSAuthorizationCaching.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFSAuthorizationCaching.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFSAuthorizationCaching.java
new file mode 100644
index 0000000..c73b1cc
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFSAuthorizationCaching.java
@@ -0,0 +1,53 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Test;
+
+import static org.apache.hadoop.fs.azure.CachingAuthorizer.KEY_AUTH_SERVICE_CACHING_ENABLE;
+
+/**
+ * Test class to hold all WASB authorization caching related tests.
+ */
+public class ITestNativeAzureFSAuthorizationCaching
+ extends ITestNativeAzureFileSystemAuthorizationWithOwner {
+
+ private static final int DUMMY_TTL_VALUE = 5000;
+
+ @Override
+ public Configuration createConfiguration() {
+ Configuration conf = super.createConfiguration();
+ conf.set(KEY_AUTH_SERVICE_CACHING_ENABLE, "true");
+ return conf;
+ }
+
+ /**
+ * Test to verify cache behavior: a second put() overwrites an existing value.
+ */
+ @Test
+ public void testCachePut() throws Throwable {
+ CachingAuthorizer<String, Integer> cache = new CachingAuthorizer<>(DUMMY_TTL_VALUE, "TEST");
+ cache.init(createConfiguration());
+ cache.put("TEST", 1);
+ cache.put("TEST", 3);
+ int result = cache.get("TEST");
+ assertEquals("Cache returned unexpected result", 3, result);
+ }
+}
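The test pins down only the overwrite semantics; the constructor's first argument is evidently a TTL, whose units and eviction behaviour are outside this diff. As a generic illustration of TTL-based caching (not the CachingAuthorizer internals):

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    /** Sketch: a minimal TTL cache; entries older than ttlMillis read as absent. */
    class TtlCache<K, V> {
      private static final class Entry<V> {
        private final V value;
        private final long expiresAt;
        Entry(V value, long expiresAt) {
          this.value = value;
          this.expiresAt = expiresAt;
        }
      }

      private final Map<K, Entry<V>> map = new ConcurrentHashMap<>();
      private final long ttlMillis;

      TtlCache(long ttlMillis) {
        this.ttlMillis = ttlMillis;
      }

      void put(K key, V value) {
        // A later put overwrites an earlier one, as the test above asserts.
        map.put(key, new Entry<>(value, System.currentTimeMillis() + ttlMillis));
      }

      V get(K key) {
        Entry<V> e = map.get(key);
        if (e == null || System.currentTimeMillis() > e.expiresAt) {
          map.remove(key); // drop the expired entry, if any
          return null;
        }
        return e.value;
      }
    }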
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFSPageBlobLive.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFSPageBlobLive.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFSPageBlobLive.java
new file mode 100644
index 0000000..a4d8729
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFSPageBlobLive.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.azure;
+
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * Run the base Azure file system tests strictly on page blobs to make sure fundamental
+ * operations on page blob files and folders work as expected.
+ * These operations include create, delete, rename, list, and so on.
+ */
+public class ITestNativeAzureFSPageBlobLive extends
+ NativeAzureFileSystemBaseTest {
+
+ @Override
+ protected AzureBlobStorageTestAccount createTestAccount()
+ throws Exception {
+ Configuration conf = new Configuration();
+
+ // Configure the page blob directories key so every file created is a page blob.
+ conf.set(AzureNativeFileSystemStore.KEY_PAGE_BLOB_DIRECTORIES, "/");
+
+ // Configure the atomic rename directories key so every folder will have
+ // atomic rename applied.
+ conf.set(AzureNativeFileSystemStore.KEY_ATOMIC_RENAME_DIRECTORIES, "/");
+ return AzureBlobStorageTestAccount.create(conf);
+ }
+}
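Outside a test that deliberately routes everything through page blobs and atomic rename, these keys take comma-separated directory lists scoped to particular folders; a short sketch with illustrative paths:

    Configuration conf = new Configuration();
    // Only files under /pageblobs become page blobs; only /hbase gets the
    // atomic-rename (rename-pending journal) behaviour. Paths are illustrative.
    conf.set(AzureNativeFileSystemStore.KEY_PAGE_BLOB_DIRECTORIES, "/pageblobs");
    conf.set(AzureNativeFileSystemStore.KEY_ATOMIC_RENAME_DIRECTORIES, "/hbase");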
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemAppend.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemAppend.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemAppend.java
new file mode 100644
index 0000000..29611bf
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemAppend.java
@@ -0,0 +1,350 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import java.io.IOException;
+import java.net.URI;
+import java.util.Arrays;
+
+import org.apache.commons.lang.RandomStringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.test.GenericTestUtils;
+
+import org.junit.Test;
+
+/**
+ * Test append operations.
+ */
+public class ITestNativeAzureFileSystemAppend extends AbstractWasbTestBase {
+
+ private Path testPath;
+
+ @Override
+ public Configuration createConfiguration() {
+ Configuration conf = super.createConfiguration();
+ conf.setBoolean(NativeAzureFileSystem.APPEND_SUPPORT_ENABLE_PROPERTY_NAME,
+ true);
+ return conf;
+ }
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ testPath = methodPath();
+ }
+
+ @Override
+ protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+ return AzureBlobStorageTestAccount.create(createConfiguration());
+ }
+
+ /*
+ * Helper method that creates test data of size provided by the
+ * "size" parameter.
+ */
+ private static byte[] getTestData(int size) {
+ // Alphabetic characters encode to one byte each, so this yields exactly
+ // "size" bytes.
+ return RandomStringUtils.randomAlphabetic(size).getBytes();
+ }
+
+ // Helper method to create file and write fileSize bytes of data on it.
+ private byte[] createBaseFileWithData(int fileSize, Path testPath) throws Throwable {
+
+ try(FSDataOutputStream createStream = fs.create(testPath)) {
+ byte[] fileData = null;
+
+ if (fileSize != 0) {
+ fileData = getTestData(fileSize);
+ createStream.write(fileData);
+ }
+ return fileData;
+ }
+ }
+
+ /*
+ * Helper method to verify that the next "dataLength" bytes read from the
+ * stream match the expected test data.
+ */
+ private boolean verifyFileData(int dataLength, byte[] testData, int testDataIndex,
+ FSDataInputStream srcStream) {
+
+ try {
+
+ byte[] fileBuffer = new byte[dataLength];
+ byte[] testDataBuffer = new byte[dataLength];
+
+ int fileBytesRead = srcStream.read(fileBuffer);
+
+ if (fileBytesRead < dataLength) {
+ return false;
+ }
+
+ System.arraycopy(testData, testDataIndex, testDataBuffer, 0, dataLength);
+
+ if (!Arrays.equals(fileBuffer, testDataBuffer)) {
+ return false;
+ }
+
+ return true;
+
+ } catch (Exception ex) {
+ return false;
+ }
+
+ }
+
+ /*
+ * Helper method to verify Append on a testFile.
+ */
+ private boolean verifyAppend(byte[] testData, Path testFile) {
+
+ try(FSDataInputStream srcStream = fs.open(testFile)) {
+
+ int baseBufferSize = 2048;
+ int testDataSize = testData.length;
+ int testDataIndex = 0;
+
+ while (testDataSize > baseBufferSize) {
+
+ if (!verifyFileData(baseBufferSize, testData, testDataIndex, srcStream)) {
+ return false;
+ }
+ testDataIndex += baseBufferSize;
+ testDataSize -= baseBufferSize;
+ }
+
+ if (!verifyFileData(testDataSize, testData, testDataIndex, srcStream)) {
+ return false;
+ }
+
+ return true;
+ } catch(Exception ex) {
+ return false;
+ }
+ }
+
+ /*
+ * Test case to verify that appending a small amount of data works.
+ * This tests append end-to-end.
+ */
+ @Test
+ public void testSingleAppend() throws Throwable {
+
+ FSDataOutputStream appendStream = null;
+ try {
+ int baseDataSize = 50;
+ byte[] baseDataBuffer = createBaseFileWithData(baseDataSize, testPath);
+
+ int appendDataSize = 20;
+ byte[] appendDataBuffer = getTestData(appendDataSize);
+ appendStream = fs.append(testPath, 10);
+ appendStream.write(appendDataBuffer);
+ appendStream.close();
+ byte[] testData = new byte[baseDataSize + appendDataSize];
+ System.arraycopy(baseDataBuffer, 0, testData, 0, baseDataSize);
+ System.arraycopy(appendDataBuffer, 0, testData, baseDataSize, appendDataSize);
+
+ assertTrue(verifyAppend(testData, testPath));
+ } finally {
+ if (appendStream != null) {
+ appendStream.close();
+ }
+ }
+ }
+
+ /*
+ * Test case to verify append to an empty file.
+ */
+ @Test
+ public void testSingleAppendOnEmptyFile() throws Throwable {
+
+ FSDataOutputStream appendStream = null;
+
+ try {
+ createBaseFileWithData(0, testPath);
+
+ int appendDataSize = 20;
+ byte[] appendDataBuffer = getTestData(appendDataSize);
+ appendStream = fs.append(testPath, 10);
+ appendStream.write(appendDataBuffer);
+ appendStream.close();
+
+ assertTrue(verifyAppend(appendDataBuffer, testPath));
+ } finally {
+ if (appendStream != null) {
+ appendStream.close();
+ }
+ }
+ }
+
+ /*
+ * Test to verify that only one append stream can be open on a file at a time.
+ */
+ @Test
+ public void testSingleAppenderScenario() throws Throwable {
+
+ FSDataOutputStream appendStream1 = null;
+ FSDataOutputStream appendStream2 = null;
+ IOException ioe = null;
+ try {
+ createBaseFileWithData(0, testPath);
+ appendStream1 = fs.append(testPath, 10);
+ boolean encounteredException = false;
+ try {
+ appendStream2 = fs.append(testPath, 10);
+ } catch(IOException ex) {
+ encounteredException = true;
+ ioe = ex;
+ }
+
+ appendStream1.close();
+
+ assertTrue(encounteredException);
+ GenericTestUtils.assertExceptionContains("Unable to set Append lease on the Blob", ioe);
+ } finally {
+ if (appendStream1 != null) {
+ appendStream1.close();
+ }
+
+ if (appendStream2 != null) {
+ appendStream2.close();
+ }
+ }
+ }
+
+ /*
+ * Test to verify multiple appends to a blob.
+ */
+ @Test
+ public void testMultipleAppends() throws Throwable {
+
+ int baseDataSize = 50;
+ byte[] baseDataBuffer = createBaseFileWithData(baseDataSize, testPath);
+
+ int appendDataSize = 100;
+ int targetAppendCount = 50;
+ byte[] testData = new byte[baseDataSize + (appendDataSize*targetAppendCount)];
+ int testDataIndex = 0;
+ System.arraycopy(baseDataBuffer, 0, testData, testDataIndex, baseDataSize);
+ testDataIndex += baseDataSize;
+
+ int appendCount = 0;
+
+ FSDataOutputStream appendStream = null;
+
+ try {
+ while (appendCount < targetAppendCount) {
+
+ byte[] appendDataBuffer = getTestData(appendDataSize);
+ appendStream = fs.append(testPath, 30);
+ appendStream.write(appendDataBuffer);
+ appendStream.close();
+
+ System.arraycopy(appendDataBuffer, 0, testData, testDataIndex, appendDataSize);
+ testDataIndex += appendDataSize;
+ appendCount++;
+ }
+
+ assertTrue(verifyAppend(testData, testPath));
+
+ } finally {
+ if (appendStream != null) {
+ appendStream.close();
+ }
+ }
+ }
+
+ /*
+ * Test to verify multiple appends on the same stream.
+ */
+ @Test
+ public void testMultipleAppendsOnSameStream() throws Throwable {
+
+ int baseDataSize = 50;
+ byte[] baseDataBuffer = createBaseFileWithData(baseDataSize, testPath);
+ int appendDataSize = 100;
+ int targetAppendCount = 50;
+ byte[] testData = new byte[baseDataSize + (appendDataSize*targetAppendCount)];
+ int testDataIndex = 0;
+ System.arraycopy(baseDataBuffer, 0, testData, testDataIndex, baseDataSize);
+ testDataIndex += baseDataSize;
+ int appendCount = 0;
+
+ FSDataOutputStream appendStream = null;
+
+ try {
+
+ while (appendCount < targetAppendCount) {
+
+ appendStream = fs.append(testPath, 50);
+
+ int singleAppendChunkSize = 20;
+ int appendRunSize = 0;
+ while (appendRunSize < appendDataSize) {
+
+ byte[] appendDataBuffer = getTestData(singleAppendChunkSize);
+ appendStream.write(appendDataBuffer);
+ System.arraycopy(appendDataBuffer, 0, testData,
+ testDataIndex + appendRunSize, singleAppendChunkSize);
+
+ appendRunSize += singleAppendChunkSize;
+ }
+
+ appendStream.close();
+ testDataIndex += appendDataSize;
+ appendCount++;
+ }
+
+ assertTrue(verifyAppend(testData, testPath));
+ } finally {
+ if (appendStream != null) {
+ appendStream.close();
+ }
+ }
+ }
+
+ @Test(expected=UnsupportedOperationException.class)
+ /*
+ * Test to verify the behavior when the append support configuration flag is set to false.
+ */
+ public void testFalseConfigurationFlagBehavior() throws Throwable {
+
+ fs = testAccount.getFileSystem();
+ Configuration conf = fs.getConf();
+ conf.setBoolean(NativeAzureFileSystem.APPEND_SUPPORT_ENABLE_PROPERTY_NAME, false);
+ URI uri = fs.getUri();
+ fs.initialize(uri, conf);
+
+ FSDataOutputStream appendStream = null;
+
+ try {
+ createBaseFileWithData(0, testPath);
+ appendStream = fs.append(testPath, 10);
+ } finally {
+ if (appendStream != null) {
+ appendStream.close();
+ }
+ }
+ }
+
+}
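Usage note (illustrative, not part of the patch): a minimal sketch of the client-side append path these tests exercise. The configuration key string below is assumed to be what APPEND_SUPPORT_ENABLE_PROPERTY_NAME resolves to; the wasb:// URI and paths are placeholders.

    import java.net.URI;
    import java.nio.charset.StandardCharsets;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class WasbAppendSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Assumed key string for APPEND_SUPPORT_ENABLE_PROPERTY_NAME.
        conf.setBoolean("fs.azure.enable.append.support", true);
        FileSystem fs = FileSystem.get(
            new URI("wasb://container@account.blob.core.windows.net/"), conf);
        Path log = new Path("/logs/app.log");
        if (!fs.exists(log)) {
          fs.create(log).close();   // append requires an existing file
        }
        try (FSDataOutputStream out = fs.append(log, 4096)) {
          out.write("one more line\n".getBytes(StandardCharsets.UTF_8));
        }
        fs.close();
      }
    }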
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemAtomicRenameDirList.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemAtomicRenameDirList.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemAtomicRenameDirList.java
new file mode 100644
index 0000000..869a31c
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemAtomicRenameDirList.java
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import java.io.IOException;
+import java.net.URI;
+
+import org.apache.hadoop.conf.Configuration;
+
+import org.junit.Test;
+
+/**
+ * Test atomic renaming.
+ */
+public class ITestNativeAzureFileSystemAtomicRenameDirList
+ extends AbstractWasbTestBase {
+
+ // HBase-site config controlling HBase root dir
+ private static final String HBASE_ROOT_DIR_CONF_STRING = "hbase.rootdir";
+ private static final String HBASE_ROOT_DIR_VALUE_ON_DIFFERENT_FS =
+ "wasb://somedifferentfilesystem.blob.core.windows.net/hbase";
+
+ @Override
+ protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+ return AzureBlobStorageTestAccount.create();
+ }
+
+ @Test
+ public void testAtomicRenameKeyDoesntNPEOnInitializingWithNonDefaultURI()
+ throws IOException {
+ NativeAzureFileSystem azureFs = fs;
+ AzureNativeFileSystemStore azureStore = azureFs.getStore();
+ Configuration conf = fs.getConf();
+ conf.set(HBASE_ROOT_DIR_CONF_STRING, HBASE_ROOT_DIR_VALUE_ON_DIFFERENT_FS);
+ URI uri = fs.getUri();
+ fs.initialize(uri, conf);
+ azureStore.isAtomicRenameKey("anyrandomkey");
+ }
+}
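Usage note (illustrative, not part of the patch): atomic rename coverage is driven by a comma-separated directory list, as the hbase.rootdir scenario above implies. A sketch, assuming AzureNativeFileSystemStore.KEY_ATOMIC_RENAME_DIRECTORIES resolves to "fs.azure.atomic.rename.dir"; the URI and directory names are placeholders.

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    public class AtomicRenameConfigSketch {
      public static FileSystem open() throws Exception {
        Configuration conf = new Configuration();
        // Directories listed here get redo-log protected, atomic rename.
        // Key string assumed; referencing the constant is safer.
        conf.set("fs.azure.atomic.rename.dir", "/hbase,/data");
        return FileSystem.get(
            new URI("wasb://container@account.blob.core.windows.net/"), conf);
      }
    }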
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemAuthorizationWithOwner.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemAuthorizationWithOwner.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemAuthorizationWithOwner.java
new file mode 100644
index 0000000..3ec42f0
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemAuthorizationWithOwner.java
@@ -0,0 +1,122 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import java.security.PrivilegedExceptionAction;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.security.UserGroupInformation;
+
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Test class that runs wasb authorization tests with owner check enabled.
+ */
+public class ITestNativeAzureFileSystemAuthorizationWithOwner
+ extends TestNativeAzureFileSystemAuthorization {
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ authorizer.init(fs.getConf(), true);
+ }
+
+ /**
+ * Test case when owner matches current user.
+ */
+ @Test
+ public void testOwnerPermissionPositive() throws Throwable {
+
+ Path parentDir = new Path("/testOwnerPermissionPositive");
+ Path testPath = new Path(parentDir, "test.data");
+
+ authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true);
+ authorizer.addAuthRule(testPath.toString(), WasbAuthorizationOperations.READ.toString(), true);
+ authorizer.addAuthRule(parentDir.toString(), WasbAuthorizationOperations.WRITE.toString(), true);
+ // additional rule used for assertPathExists
+ authorizer.addAuthRule(parentDir.toString(), WasbAuthorizationOperations.READ.toString(), true);
+ fs.updateWasbAuthorizer(authorizer);
+
+ try {
+ // creates parentDir with owner as current user
+ fs.mkdirs(parentDir);
+ ContractTestUtils.assertPathExists(fs, "parentDir does not exist", parentDir);
+
+ fs.create(testPath);
+ fs.getFileStatus(testPath);
+ ContractTestUtils.assertPathExists(fs, "testPath does not exist", testPath);
+
+ } finally {
+ allowRecursiveDelete(fs, parentDir.toString());
+ fs.delete(parentDir, true);
+ }
+ }
+
+ /**
+ * Negative test case where the owner does not match the current user.
+ */
+ @Test
+ public void testOwnerPermissionNegative() throws Throwable {
+ expectedEx.expect(WasbAuthorizationException.class);
+
+ Path parentDir = new Path("/testOwnerPermissionNegative");
+ Path childDir = new Path(parentDir, "childDir");
+
+ setExpectedFailureMessage("mkdirs", childDir);
+
+ authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true);
+ authorizer.addAuthRule(parentDir.toString(), WasbAuthorizationOperations.WRITE.toString(), true);
+
+ fs.updateWasbAuthorizer(authorizer);
+
+ try{
+ fs.mkdirs(parentDir);
+ UserGroupInformation ugiSuperUser = UserGroupInformation.createUserForTesting(
+ "testuser", new String[] {});
+
+ ugiSuperUser.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override
+ public Void run() throws Exception {
+ fs.mkdirs(childDir);
+ return null;
+ }
+ });
+
+ } finally {
+ allowRecursiveDelete(fs, parentDir.toString());
+ fs.delete(parentDir, true);
+ }
+ }
+
+ /**
+ * Test to verify that retrieving owner information does not
+ * throw when file/folder does not exist.
+ */
+ @Test
+ public void testRetrievingOwnerDoesNotFailWhenFileDoesNotExist() throws Throwable {
+
+ Path testdirectory = new Path("/testDirectory123454565");
+
+ String owner = fs.getOwnerForPath(testdirectory);
+ assertEquals("", owner);
+ }
+}
+
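For orientation (illustrative, not part of the patch): the rule-building pattern the authorization tests above rely on, sketched as a fragment. `authorizer` is assumed to be the mock authorizer supplied by the parent class TestNativeAzureFileSystemAuthorization; the "/data" rule is a placeholder.

    // Per-test authorization setup; fragment reusing the test's fields.
    authorizer.init(fs.getConf(), true);   // true: also enforce owner checks
    authorizer.addAuthRule("/",
        WasbAuthorizationOperations.WRITE.toString(), true);
    authorizer.addAuthRule("/data",
        WasbAuthorizationOperations.READ.toString(), true);
    fs.updateWasbAuthorizer(authorizer);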
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemClientLogging.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemClientLogging.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemClientLogging.java
new file mode 100644
index 0000000..f73a763
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemClientLogging.java
@@ -0,0 +1,136 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import java.net.URI;
+import java.util.StringTokenizer;
+
+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+import org.apache.log4j.Logger;
+import org.junit.Test;
+
+/**
+ * Test to validate Azure storage client-side logging. These tests work only
+ * against live Azure storage, because the emulator does not support
+ * client-side logging.
+ *
+ * <I>Important: </I> Do not attempt to move off commons-logging.
+ * The tests will fail.
+ */
+public class ITestNativeAzureFileSystemClientLogging
+ extends AbstractWasbTestBase {
+
+ // Core-site config controlling Azure Storage Client logging
+ private static final String KEY_LOGGING_CONF_STRING = "fs.azure.storage.client.logging";
+
+ // Temporary directory created using WASB.
+ private static final String TEMP_DIR = "tempDir";
+
+ /*
+ * Helper method to verify that client logging is working. It primarily
+ * checks that the captured logs contain a line corresponding to the
+ * entity created during the test run.
+ */
+ private boolean verifyStorageClientLogs(String capturedLogs, String entity)
+ throws Exception {
+
+ URI uri = testAccount.getRealAccount().getBlobEndpoint();
+ String container = testAccount.getRealContainer().getName();
+ String validateString = uri + Path.SEPARATOR + container + Path.SEPARATOR
+ + entity;
+ boolean entityFound = false;
+
+ StringTokenizer tokenizer = new StringTokenizer(capturedLogs, "\n");
+
+ while (tokenizer.hasMoreTokens()) {
+ String token = tokenizer.nextToken();
+ if (token.contains(validateString)) {
+ entityFound = true;
+ break;
+ }
+ }
+ return entityFound;
+ }
+
+ /*
+ * Helper method that updates the core-site config to enable/disable logging.
+ */
+ private void updateFileSystemConfiguration(Boolean loggingFlag)
+ throws Exception {
+
+ Configuration conf = fs.getConf();
+ conf.set(KEY_LOGGING_CONF_STRING, loggingFlag.toString());
+ URI uri = fs.getUri();
+ fs.initialize(uri, conf);
+ }
+
+ // Using WASB code to communicate with Azure Storage.
+ private void performWASBOperations() throws Exception {
+
+ Path tempDir = new Path(Path.SEPARATOR + TEMP_DIR);
+ fs.mkdirs(tempDir);
+ fs.delete(tempDir, true);
+ }
+
+ @Test
+ public void testLoggingEnabled() throws Exception {
+
+ LogCapturer logs = LogCapturer.captureLogs(new Log4JLogger(Logger
+ .getRootLogger()));
+
+ // Update configuration based on the Test.
+ updateFileSystemConfiguration(true);
+
+ performWASBOperations();
+
+ String output = getLogOutput(logs);
+ assertTrue("Log entry " + TEMP_DIR + " not found in " + output,
+ verifyStorageClientLogs(output, TEMP_DIR));
+ }
+
+ protected String getLogOutput(LogCapturer logs) {
+ String output = logs.getOutput();
+ assertTrue("No log created/captured", !output.isEmpty());
+ return output;
+ }
+
+ @Test
+ public void testLoggingDisabled() throws Exception {
+
+ LogCapturer logs = LogCapturer.captureLogs(new Log4JLogger(Logger
+ .getRootLogger()));
+
+ // Update configuration based on the Test.
+ updateFileSystemConfiguration(false);
+
+ performWASBOperations();
+ String output = getLogOutput(logs);
+
+ assertFalse("Log entry " + TEMP_DIR + " found in " + output,
+ verifyStorageClientLogs(output, TEMP_DIR));
+ }
+
+ @Override
+ protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+ return AzureBlobStorageTestAccount.create();
+ }
+}
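Usage note (illustrative, not part of the patch): the re-initialization trick in updateFileSystemConfiguration() is the general way to flip the client-logging flag at runtime; the key string is the one defined above. A minimal sketch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    final class ClientLoggingSketch {
      // Re-initialize the filesystem so the flag takes effect, mirroring
      // updateFileSystemConfiguration() in the test above.
      static void setClientLogging(FileSystem fs, boolean enabled)
          throws Exception {
        Configuration conf = fs.getConf();
        conf.set("fs.azure.storage.client.logging", Boolean.toString(enabled));
        fs.initialize(fs.getUri(), conf);
      }
    }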
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemConcurrencyLive.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemConcurrencyLive.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemConcurrencyLive.java
new file mode 100644
index 0000000..87cac15
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemConcurrencyLive.java
@@ -0,0 +1,185 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+
+/**
+ * Test class to hold all live Azure storage concurrency tests.
+ */
+public class ITestNativeAzureFileSystemConcurrencyLive
+ extends AbstractWasbTestBase {
+
+ private static final int THREAD_COUNT = 102;
+ private static final int TEST_EXECUTION_TIMEOUT = 5000;
+
+ @Override
+ protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+ return AzureBlobStorageTestAccount.create();
+ }
+
+ /**
+ * Validate contract for FileSystem.create when overwrite is true and there
+ * are concurrent callers of FileSystem.delete. An existing file should be
+ * overwritten, even if the original destination exists but is deleted by an
+ * external agent during the create operation.
+ */
+ @Test(timeout = TEST_EXECUTION_TIMEOUT)
+ public void testConcurrentCreateDeleteFile() throws Exception {
+ Path testFile = methodPath();
+
+ List<CreateFileTask> tasks = new ArrayList<>(THREAD_COUNT);
+
+ for (int i = 0; i < THREAD_COUNT; i++) {
+ tasks.add(new CreateFileTask(fs, testFile));
+ }
+
+ ExecutorService es = null;
+
+ try {
+ es = Executors.newFixedThreadPool(THREAD_COUNT);
+
+ List<Future<Void>> futures = es.invokeAll(tasks);
+
+ for (Future<Void> future : futures) {
+ Assert.assertTrue(future.isDone());
+
+ // we are using Callable<V>, so if an exception
+ // occurred during the operation, it will be thrown
+ // when we call get
+ Assert.assertEquals(null, future.get());
+ }
+ } finally {
+ if (es != null) {
+ es.shutdownNow();
+ }
+ }
+ }
+
+ /**
+ * Validate contract for FileSystem.delete when invoked concurrently.
+ * One of the threads should successfully delete the file and return true;
+ * all other threads should return false.
+ */
+ @Test(timeout = TEST_EXECUTION_TIMEOUT)
+ public void testConcurrentDeleteFile() throws Exception {
+ Path testFile = new Path("test.dat");
+ fs.create(testFile).close();
+
+ List<DeleteFileTask> tasks = new ArrayList<>(THREAD_COUNT);
+
+ for (int i = 0; i < THREAD_COUNT; i++) {
+ tasks.add(new DeleteFileTask(fs, testFile));
+ }
+
+ ExecutorService es = null;
+ try {
+ es = Executors.newFixedThreadPool(THREAD_COUNT);
+
+ List<Future<Boolean>> futures = es.invokeAll(tasks);
+
+ int successCount = 0;
+ for (Future<Boolean> future : futures) {
+ Assert.assertTrue(future.isDone());
+
+ // we are using Callable<V>, so if an exception
+ // occurred during the operation, it will be thrown
+ // when we call get
+ Boolean success = future.get();
+ if (success) {
+ successCount++;
+ }
+ }
+
+ Assert.assertEquals(
+ "Exactly one delete operation should return true.",
+ 1,
+ successCount);
+ } finally {
+ if (es != null) {
+ es.shutdownNow();
+ }
+ }
+ }
+
+ abstract class FileSystemTask<V> implements Callable<V> {
+ private final FileSystem fileSystem;
+ private final Path path;
+
+ protected FileSystem getFileSystem() {
+ return this.fileSystem;
+ }
+
+ protected Path getFilePath() {
+ return this.path;
+ }
+
+ FileSystemTask(FileSystem fs, Path p) {
+ this.fileSystem = fs;
+ this.path = p;
+ }
+
+ public abstract V call() throws Exception;
+ }
+
+ class DeleteFileTask extends FileSystemTask<Boolean> {
+
+ DeleteFileTask(FileSystem fs, Path p) {
+ super(fs, p);
+ }
+
+ @Override
+ public Boolean call() throws Exception {
+ return this.getFileSystem().delete(this.getFilePath(), false);
+ }
+ }
+
+ class CreateFileTask extends FileSystemTask<Void> {
+ CreateFileTask(FileSystem fs, Path p) {
+ super(fs, p);
+ }
+
+ @Override
+ public Void call() throws Exception {
+ FileSystem fs = getFileSystem();
+ Path p = getFilePath();
+
+ // Create an empty file and close the stream.
+ FSDataOutputStream stream = fs.create(p, true);
+ stream.close();
+
+ // Delete the file. We don't care if delete returns true or false.
+ // We just want to ensure the file does not exist.
+ this.getFileSystem().delete(this.getFilePath(), false);
+
+ return null;
+ }
+ }
+}
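A compressed view (illustrative, not part of the patch) of the Callable/Future pattern both tests above share; `tasks` and THREAD_COUNT stand in for the fields built in the tests.

    ExecutorService es = Executors.newFixedThreadPool(THREAD_COUNT);
    try {
      // invokeAll() blocks until every task has completed.
      List<Future<Boolean>> futures = es.invokeAll(tasks);
      int successCount = 0;
      for (Future<Boolean> future : futures) {
        // get() surfaces anything call() threw, wrapped in ExecutionException.
        if (future.get()) {
          successCount++;
        }
      }
      // Delete contract: exactly one concurrent caller may see true.
      Assert.assertEquals(1, successCount);
    } finally {
      es.shutdownNow();
    }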
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemContractEmulator.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemContractEmulator.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemContractEmulator.java
new file mode 100644
index 0000000..4836fc4
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemContractEmulator.java
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import static org.junit.Assume.assumeNotNull;
+
+import org.apache.hadoop.fs.FileSystemContractBaseTest;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.azure.integration.AzureTestUtils;
+
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.rules.TestName;
+
+/**
+ * Run the {@code FileSystemContractBaseTest} tests against the Azure storage emulator.
+ */
+public class ITestNativeAzureFileSystemContractEmulator extends
+ FileSystemContractBaseTest {
+ private AzureBlobStorageTestAccount testAccount;
+ private Path basePath;
+
+ @Rule
+ public TestName methodName = new TestName();
+
+ private void nameThread() {
+ Thread.currentThread().setName("JUnit-" + methodName.getMethodName());
+ }
+
+ @Before
+ public void setUp() throws Exception {
+ nameThread();
+ testAccount = AzureBlobStorageTestAccount.createForEmulator();
+ if (testAccount != null) {
+ fs = testAccount.getFileSystem();
+ }
+ assumeNotNull(fs);
+ basePath = fs.makeQualified(
+ AzureTestUtils.createTestPath(
+ new Path("ITestNativeAzureFileSystemContractEmulator")));
+ }
+
+ @Override
+ public void tearDown() throws Exception {
+ super.tearDown();
+ testAccount = AzureTestUtils.cleanup(testAccount);
+ fs = null;
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemContractLive.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemContractLive.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemContractLive.java
new file mode 100644
index 0000000..d3d1bd8
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemContractLive.java
@@ -0,0 +1,108 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import static org.junit.Assume.assumeNotNull;
+
+import org.apache.hadoop.fs.FileSystemContractBaseTest;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.azure.integration.AzureTestConstants;
+import org.apache.hadoop.fs.azure.integration.AzureTestUtils;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TestName;
+
+/**
+ * Run the {@link FileSystemContractBaseTest} test suite against azure storage.
+ */
+public class ITestNativeAzureFileSystemContractLive extends
+ FileSystemContractBaseTest {
+ private AzureBlobStorageTestAccount testAccount;
+ private Path basePath;
+
+ @Rule
+ public TestName methodName = new TestName();
+
+ private void nameThread() {
+ Thread.currentThread().setName("JUnit-" + methodName.getMethodName());
+ }
+
+ @Before
+ public void setUp() throws Exception {
+ nameThread();
+ testAccount = AzureBlobStorageTestAccount.create();
+ if (testAccount != null) {
+ fs = testAccount.getFileSystem();
+ }
+ assumeNotNull(fs);
+ basePath = fs.makeQualified(
+ AzureTestUtils.createTestPath(
+ new Path("NativeAzureFileSystemContractLive")));
+ }
+
+ @Override
+ public void tearDown() throws Exception {
+ super.tearDown();
+ testAccount = AzureTestUtils.cleanup(testAccount);
+ fs = null;
+ }
+
+ @Override
+ public Path getTestBaseDir() {
+ return basePath;
+ }
+
+ protected int getGlobalTimeout() {
+ return AzureTestConstants.AZURE_TEST_TIMEOUT;
+ }
+
+ /**
+ * The following tests are failing on Azure and the Azure
+ * file system code needs to be modified to make them pass.
+ * A separate work item has been opened for this.
+ */
+ @Ignore
+ @Test
+ public void testMoveFileUnderParent() throws Throwable {
+ }
+
+ @Ignore
+ @Test
+ public void testRenameFileToSelf() throws Throwable {
+ }
+
+ @Ignore
+ @Test
+ public void testRenameChildDirForbidden() throws Exception {
+ }
+
+ @Ignore
+ @Test
+ public void testMoveDirUnderParent() throws Throwable {
+ }
+
+ @Ignore
+ @Test
+ public void testRenameDirToSelf() throws Throwable {
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemContractPageBlobLive.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemContractPageBlobLive.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemContractPageBlobLive.java
new file mode 100644
index 0000000..03e90aa
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemContractPageBlobLive.java
@@ -0,0 +1,114 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystemContractBaseTest;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.azure.integration.AzureTestConstants;
+import org.apache.hadoop.fs.azure.integration.AzureTestUtils;
+
+import static org.junit.Assume.assumeNotNull;
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TestName;
+
+/**
+ * Run the {@link FileSystemContractBaseTest} test suite against azure
+ * storage, after switching the FS using page blobs everywhere.
+ */
+public class ITestNativeAzureFileSystemContractPageBlobLive extends
+ FileSystemContractBaseTest {
+ private AzureBlobStorageTestAccount testAccount;
+ private Path basePath;
+ @Rule
+ public TestName methodName = new TestName();
+
+ private void nameThread() {
+ Thread.currentThread().setName("JUnit-" + methodName.getMethodName());
+ }
+
+ private AzureBlobStorageTestAccount createTestAccount()
+ throws Exception {
+ Configuration conf = new Configuration();
+
+ // Configure the page blob directories key so every file created is a page blob.
+ conf.set(AzureNativeFileSystemStore.KEY_PAGE_BLOB_DIRECTORIES, "/");
+
+ // Configure the atomic rename directories key so every folder will have
+ // atomic rename applied.
+ conf.set(AzureNativeFileSystemStore.KEY_ATOMIC_RENAME_DIRECTORIES, "/");
+ return AzureBlobStorageTestAccount.create(conf);
+ }
+
+ @Before
+ public void setUp() throws Exception {
+ testAccount = createTestAccount();
+ assumeNotNull(testAccount);
+ fs = testAccount.getFileSystem();
+ basePath = AzureTestUtils.pathForTests(fs, "filesystemcontractpageblob");
+ }
+
+ @Override
+ public void tearDown() throws Exception {
+ testAccount = AzureTestUtils.cleanup(testAccount);
+ fs = null;
+ }
+
+ protected int getGlobalTimeout() {
+ return AzureTestConstants.AZURE_TEST_TIMEOUT;
+ }
+
+ @Override
+ public Path getTestBaseDir() {
+ return basePath;
+ }
+
+ /**
+ * The following tests are failing on Azure and the Azure
+ * file system code needs to be modified to make them pass.
+ * A separate work item has been opened for this.
+ */
+ @Ignore
+ @Test
+ public void testMoveFileUnderParent() throws Throwable {
+ }
+
+ @Ignore
+ @Test
+ public void testRenameFileToSelf() throws Throwable {
+ }
+
+ @Ignore
+ @Test
+ public void testRenameChildDirForbidden() throws Exception {
+ }
+
+ @Ignore
+ @Test
+ public void testMoveDirUnderParent() throws Throwable {
+ }
+
+ @Ignore
+ @Test
+ public void testRenameDirToSelf() throws Throwable {
+ }
+}
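Configuration note (illustrative, not part of the patch): the setup above routes the whole filesystem ("/") to page blobs so every contract test runs against them; a targeted configuration would normally name specific directories with the same store constants. This sketch assumes it compiles in org.apache.hadoop.fs.azure so the constants are visible; the directory name is a placeholder.

    import org.apache.hadoop.conf.Configuration;

    public class PageBlobConfigSketch {
      public static Configuration pageBlobConf() {
        Configuration conf = new Configuration();
        // Only files under /pageblobs are created as page blobs...
        conf.set(AzureNativeFileSystemStore.KEY_PAGE_BLOB_DIRECTORIES,
            "/pageblobs");
        // ...and renames under it are made atomic via the redo log.
        conf.set(AzureNativeFileSystemStore.KEY_ATOMIC_RENAME_DIRECTORIES,
            "/pageblobs");
        return conf;
      }
    }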
[15/20] hadoop git commit: HADOOP-14553. Add (parallelized) integration tests to hadoop-azure. Contributed by Steve Loughran.
Posted by st...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestContainerChecks.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestContainerChecks.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestContainerChecks.java
deleted file mode 100644
index f6ab94d..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestContainerChecks.java
+++ /dev/null
@@ -1,185 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assume.assumeNotNull;
-
-import java.io.FileNotFoundException;
-import java.util.EnumSet;
-
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.azure.AzureBlobStorageTestAccount.CreateOptions;
-import org.junit.After;
-import org.junit.Assume;
-import org.junit.Before;
-import org.junit.Test;
-
-import com.microsoft.azure.storage.blob.BlobOutputStream;
-import com.microsoft.azure.storage.blob.CloudBlobContainer;
-import com.microsoft.azure.storage.blob.CloudBlockBlob;
-
-/**
- * Tests that WASB creates containers only if needed.
- */
-public class TestContainerChecks {
- private AzureBlobStorageTestAccount testAccount;
- private boolean runningInSASMode = false;
- @After
- public void tearDown() throws Exception {
- if (testAccount != null) {
- testAccount.cleanup();
- testAccount = null;
- }
- }
-
- @Before
- public void setMode() {
- runningInSASMode = AzureBlobStorageTestAccount.createTestConfiguration().
- getBoolean(AzureNativeFileSystemStore.KEY_USE_SECURE_MODE, false);
- }
-
- @Test
- public void testContainerExistAfterDoesNotExist() throws Exception {
- testAccount = AzureBlobStorageTestAccount.create("",
- EnumSet.noneOf(CreateOptions.class));
- assumeNotNull(testAccount);
- CloudBlobContainer container = testAccount.getRealContainer();
- FileSystem fs = testAccount.getFileSystem();
-
- // Starting off with the container not there
- assertFalse(container.exists());
-
- // A list shouldn't create the container and will set file system store
- // state to DoesNotExist
- try {
- fs.listStatus(new Path("/"));
- assertTrue("Should've thrown.", false);
- } catch (FileNotFoundException ex) {
- assertTrue("Unexpected exception: " + ex,
- ex.getMessage().contains("does not exist."));
- }
- assertFalse(container.exists());
-
- // Create a container outside of the WASB FileSystem
- container.create();
- // Add a file to the container outside of the WASB FileSystem
- CloudBlockBlob blob = testAccount.getBlobReference("foo");
- BlobOutputStream outputStream = blob.openOutputStream();
- outputStream.write(new byte[10]);
- outputStream.close();
-
- // Make sure the file is visible
- assertTrue(fs.exists(new Path("/foo")));
- assertTrue(container.exists());
- }
-
- @Test
- public void testContainerCreateAfterDoesNotExist() throws Exception {
- testAccount = AzureBlobStorageTestAccount.create("",
- EnumSet.noneOf(CreateOptions.class));
- assumeNotNull(testAccount);
- CloudBlobContainer container = testAccount.getRealContainer();
- FileSystem fs = testAccount.getFileSystem();
-
- // Starting off with the container not there
- assertFalse(container.exists());
-
- // A list shouldn't create the container and will set file system store
- // state to DoesNotExist
- try {
- assertNull(fs.listStatus(new Path("/")));
- assertTrue("Should've thrown.", false);
- } catch (FileNotFoundException ex) {
- assertTrue("Unexpected exception: " + ex,
- ex.getMessage().contains("does not exist."));
- }
- assertFalse(container.exists());
-
- // Create a container outside of the WASB FileSystem
- container.create();
-
- // Write should succeed
- assertTrue(fs.createNewFile(new Path("/foo")));
- assertTrue(container.exists());
- }
-
- @Test
- public void testContainerCreateOnWrite() throws Exception {
- testAccount = AzureBlobStorageTestAccount.create("",
- EnumSet.noneOf(CreateOptions.class));
- assumeNotNull(testAccount);
- CloudBlobContainer container = testAccount.getRealContainer();
- FileSystem fs = testAccount.getFileSystem();
-
- // Starting off with the container not there
- assertFalse(container.exists());
-
- // A list shouldn't create the container.
- try {
- fs.listStatus(new Path("/"));
- assertTrue("Should've thrown.", false);
- } catch (FileNotFoundException ex) {
- assertTrue("Unexpected exception: " + ex,
- ex.getMessage().contains("does not exist."));
- }
- assertFalse(container.exists());
-
- // Neither should a read.
- try {
- fs.open(new Path("/foo"));
- assertFalse("Should've thrown.", true);
- } catch (FileNotFoundException ex) {
- }
- assertFalse(container.exists());
-
- // Neither should a rename
- assertFalse(fs.rename(new Path("/foo"), new Path("/bar")));
- assertFalse(container.exists());
-
- // But a write should.
- assertTrue(fs.createNewFile(new Path("/foo")));
- assertTrue(container.exists());
- }
-
- @Test
- public void testContainerChecksWithSas() throws Exception {
-
- Assume.assumeFalse(runningInSASMode);
- testAccount = AzureBlobStorageTestAccount.create("",
- EnumSet.of(CreateOptions.UseSas));
- assumeNotNull(testAccount);
- CloudBlobContainer container = testAccount.getRealContainer();
- FileSystem fs = testAccount.getFileSystem();
-
- // The container shouldn't be there
- assertFalse(container.exists());
-
- // A write should just fail
- try {
- fs.createNewFile(new Path("/foo"));
- assertFalse("Should've thrown.", true);
- } catch (AzureException ex) {
- }
- assertFalse(container.exists());
- }
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationExceptionHandling.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationExceptionHandling.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationExceptionHandling.java
deleted file mode 100644
index 9ac25dd..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationExceptionHandling.java
+++ /dev/null
@@ -1,269 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import java.io.FileNotFoundException;
-
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Test;
-
-
-public class TestFileSystemOperationExceptionHandling
- extends AbstractWasbTestBase {
-
- private FSDataInputStream inputStream = null;
-
- private static Path testPath = new Path("testfile.dat");
-
- private static Path testFolderPath = new Path("testfolder");
-
- /*
- * Helper method that creates an InputStream to validate exceptions
- * for various scenarios.
- */
- private void setupInputStreamToTest(AzureBlobStorageTestAccount testAccount)
- throws Exception {
-
- FileSystem fs = testAccount.getFileSystem();
-
- // Step 1: Create a file and write dummy data.
- Path testFilePath1 = new Path("test1.dat");
- Path testFilePath2 = new Path("test2.dat");
- FSDataOutputStream outputStream = fs.create(testFilePath1);
- String testString = "This is a test string";
- outputStream.write(testString.getBytes());
- outputStream.close();
-
- // Step 2: Open a read stream on the file.
- inputStream = fs.open(testFilePath1);
-
- // Step 3: Rename the file
- fs.rename(testFilePath1, testFilePath2);
- }
-
- /*
- * Tests a basic single threaded read scenario for Page blobs.
- */
- @Test(expected=FileNotFoundException.class)
- public void testSingleThreadedPageBlobReadScenario() throws Throwable {
- AzureBlobStorageTestAccount testAccount = ExceptionHandlingTestHelper.getPageBlobTestStorageAccount();
- setupInputStreamToTest(testAccount);
- byte[] readBuffer = new byte[512];
- inputStream.read(readBuffer);
- }
-
- /*
- * Tests a basic single threaded seek scenario for Page blobs.
- */
- @Test(expected=FileNotFoundException.class)
- public void testSingleThreadedPageBlobSeekScenario() throws Throwable {
- AzureBlobStorageTestAccount testAccount = ExceptionHandlingTestHelper.getPageBlobTestStorageAccount();
- setupInputStreamToTest(testAccount);
- inputStream.seek(5);
- }
-
- /*
- * Test a basic single thread seek scenario for Block blobs.
- */
- @Test(expected=FileNotFoundException.class)
- public void testSingleThreadBlockBlobSeekScenario() throws Throwable {
-
- AzureBlobStorageTestAccount testAccount = createTestAccount();
- setupInputStreamToTest(testAccount);
- inputStream.seek(5);
- inputStream.read();
- }
-
- /*
- * Tests a basic single threaded read scenario for Block blobs.
- */
- @Test(expected=FileNotFoundException.class)
- public void testSingledThreadBlockBlobReadScenario() throws Throwable{
- AzureBlobStorageTestAccount testAccount = createTestAccount();
- setupInputStreamToTest(testAccount);
- byte[] readBuffer = new byte[512];
- inputStream.read(readBuffer);
- }
-
- @Test(expected=FileNotFoundException.class)
- /*
- * Tests basic single threaded setPermission scenario
- */
- public void testSingleThreadedBlockBlobSetPermissionScenario() throws Throwable {
-
- ExceptionHandlingTestHelper.createEmptyFile(createTestAccount(), testPath);
- fs.delete(testPath, true);
- fs.setPermission(testPath, new FsPermission(FsAction.EXECUTE, FsAction.READ, FsAction.READ));
- }
-
- @Test(expected=FileNotFoundException.class)
- /*
- * Tests basic single threaded setPermission scenario
- */
- public void testSingleThreadedPageBlobSetPermissionScenario() throws Throwable {
- ExceptionHandlingTestHelper.createEmptyFile(ExceptionHandlingTestHelper.getPageBlobTestStorageAccount(),
- testPath);
- fs.delete(testPath, true);
- fs.setOwner(testPath, "testowner", "testgroup");
- }
-
- @Test(expected=FileNotFoundException.class)
- /*
- * Tests basic single threaded setOwner scenario
- */
- public void testSingleThreadedBlockBlobSetOwnerScenario() throws Throwable {
-
- ExceptionHandlingTestHelper.createEmptyFile(createTestAccount(), testPath);
- fs.delete(testPath, true);
- fs.setOwner(testPath, "testowner", "testgroup");
- }
-
- @Test(expected=FileNotFoundException.class)
- /*
- * Tests basic single threaded setPermission scenario
- */
- public void testSingleThreadedPageBlobSetOwnerScenario() throws Throwable {
- ExceptionHandlingTestHelper.createEmptyFile(ExceptionHandlingTestHelper.getPageBlobTestStorageAccount(),
- testPath);
- fs.delete(testPath, true);
- fs.setPermission(testPath, new FsPermission(FsAction.EXECUTE, FsAction.READ, FsAction.READ));
- }
-
- @Test(expected=FileNotFoundException.class)
- /*
- * Test basic single threaded listStatus scenario
- */
- public void testSingleThreadedBlockBlobListStatusScenario() throws Throwable {
- ExceptionHandlingTestHelper.createTestFolder(createTestAccount(), testFolderPath);
- fs.delete(testFolderPath, true);
- fs.listStatus(testFolderPath);
- }
-
- @Test(expected=FileNotFoundException.class)
- /*
- * Test basic single threaded listStatus scenario
- */
- public void testSingleThreadedPageBlobListStatusScenario() throws Throwable {
- ExceptionHandlingTestHelper.createTestFolder(ExceptionHandlingTestHelper.getPageBlobTestStorageAccount(),
- testFolderPath);
- fs.delete(testFolderPath, true);
- fs.listStatus(testFolderPath);
- }
-
- @Test
- /*
- * Test basic single threaded rename scenario
- */
- public void testSingleThreadedBlockBlobRenameScenario() throws Throwable {
-
- ExceptionHandlingTestHelper.createEmptyFile(createTestAccount(),
- testPath);
- Path dstPath = new Path("dstFile.dat");
- fs.delete(testPath, true);
- boolean renameResult = fs.rename(testPath, dstPath);
- Assert.assertFalse(renameResult);
- }
-
- @Test
- /*
- * Test basic single threaded rename scenario
- */
- public void testSingleThreadedPageBlobRenameScenario() throws Throwable {
-
- ExceptionHandlingTestHelper.createEmptyFile(ExceptionHandlingTestHelper.getPageBlobTestStorageAccount(),
- testPath);
- Path dstPath = new Path("dstFile.dat");
- fs.delete(testPath, true);
- boolean renameResult = fs.rename(testPath, dstPath);
- Assert.assertFalse(renameResult);
- }
-
- @Test
- /*
- * Test basic single threaded delete scenario
- */
- public void testSingleThreadedBlockBlobDeleteScenario() throws Throwable {
-
- ExceptionHandlingTestHelper.createEmptyFile(createTestAccount(),
- testPath);
- fs.delete(testPath, true);
- boolean deleteResult = fs.delete(testPath, true);
- Assert.assertFalse(deleteResult);
- }
-
- @Test
- /*
- * Test basic single threaded delete scenario
- */
- public void testSingleThreadedPageBlobDeleteScenario() throws Throwable {
-
- ExceptionHandlingTestHelper.createEmptyFile(ExceptionHandlingTestHelper.getPageBlobTestStorageAccount(),
- testPath);
- fs.delete(testPath, true);
- boolean deleteResult = fs.delete(testPath, true);
- Assert.assertFalse(deleteResult);
- }
-
- @Test(expected=FileNotFoundException.class)
- /*
- * Test basic single threaded open scenario
- */
- public void testSingleThreadedBlockBlobOpenScenario() throws Throwable {
-
- ExceptionHandlingTestHelper.createEmptyFile(createTestAccount(),
- testPath);
- fs.delete(testPath, true);
- inputStream = fs.open(testPath);
- }
-
- @Test(expected=FileNotFoundException.class)
- /*
- * Test basic single threaded open scenario
- */
- public void testSingleThreadedPageBlobOpenScenario() throws Throwable {
-
- ExceptionHandlingTestHelper.createEmptyFile(ExceptionHandlingTestHelper.getPageBlobTestStorageAccount(),
- testPath);
- fs.delete(testPath, true);
- inputStream = fs.open(testPath);
- }
-
- @After
- public void tearDown() throws Exception {
- if (inputStream != null) {
- inputStream.close();
- }
-
- if (fs != null && fs.exists(testPath)) {
- fs.delete(testPath, true);
- }
- }
-
- @Override
- protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
- return AzureBlobStorageTestAccount.create();
- }
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationExceptionMessage.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationExceptionMessage.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationExceptionMessage.java
deleted file mode 100644
index e619817..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationExceptionMessage.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-import java.net.URI;
-import java.util.UUID;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.test.GenericTestUtils;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-import static org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.NO_ACCESS_TO_CONTAINER_MSG;
-
-
-public class TestFileSystemOperationExceptionMessage extends
- NativeAzureFileSystemBaseTest {
-
- @Test
- public void testAnonymouseCredentialExceptionMessage() throws Throwable {
-
- Configuration conf = AzureBlobStorageTestAccount.createTestConfiguration();
- String testStorageAccount = conf.get("fs.azure.test.account.name");
- conf = new Configuration();
- conf.set("fs.AbstractFileSystem.wasb.impl", "org.apache.hadoop.fs.azure.Wasb");
- conf.set("fs.azure.skip.metrics", "true");
-
- String testContainer = UUID.randomUUID().toString();
- String wasbUri = String.format("wasb://%s@%s",
- testContainer, testStorageAccount);
-
- fs = new NativeAzureFileSystem();
- try {
- fs.initialize(new URI(wasbUri), conf);
- } catch (Exception ex) {
-
- Throwable innerException = ex.getCause();
- while (innerException != null
- && !(innerException instanceof AzureException)) {
- innerException = innerException.getCause();
- }
-
- if (innerException != null) {
- String exceptionMessage = innerException.getMessage();
- if (exceptionMessage == null
- || exceptionMessage.length() == 0) {
- Assert.fail();
- } else {
- GenericTestUtils.assertExceptionContains(String.format(
- NO_ACCESS_TO_CONTAINER_MSG, testStorageAccount, testContainer),
- ex);
- }
- } else {
- Assert.fail();
- }
- }
- }
-
- @Override
- protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
- return AzureBlobStorageTestAccount.create();
- }
-}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationsExceptionHandlingMultiThreaded.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationsExceptionHandlingMultiThreaded.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationsExceptionHandlingMultiThreaded.java
deleted file mode 100644
index 1cd18ee..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationsExceptionHandlingMultiThreaded.java
+++ /dev/null
@@ -1,330 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import java.io.FileNotFoundException;
-
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.junit.After;
-import org.junit.Test;
-
-public class TestFileSystemOperationsExceptionHandlingMultiThreaded
- extends AbstractWasbTestBase {
-
- FSDataInputStream inputStream = null;
-
- private static Path testPath = new Path("testfile.dat");
- private static Path testFolderPath = new Path("testfolder");
-
-
- /*
- * Helper method that creates an input stream to test various scenarios.
- */
- private void getInputStreamToTest(FileSystem fs, Path testPath) throws Throwable {
-
- FSDataOutputStream outputStream = fs.create(testPath);
- String testString = "This is a test string";
- outputStream.write(testString.getBytes());
- outputStream.close();
-
- inputStream = fs.open(testPath);
- }
-
- /*
- * Test to validate that the correct exception is thrown for a
- * multithreaded read scenario on block blobs.
- */
- @Test(expected=FileNotFoundException.class)
- public void testMultiThreadedBlockBlobReadScenario() throws Throwable {
-
- AzureBlobStorageTestAccount testAccount = createTestAccount();
- fs = testAccount.getFileSystem();
- Path testFilePath1 = new Path("test1.dat");
-
- getInputStreamToTest(fs, testFilePath1);
- Thread renameThread = new Thread(new RenameThread(fs, testFilePath1));
- renameThread.start();
-
- renameThread.join();
-
- byte[] readBuffer = new byte[512];
- inputStream.read(readBuffer);
- }
-
- /*
- * Test to validate that the correct exception is thrown for the
- * multi-threaded seek scenario for block blobs.
- */
-
- @Test(expected=FileNotFoundException.class)
- public void testMultiThreadBlockBlobSeekScenario() throws Throwable {
-
- AzureBlobStorageTestAccount testAccount = createTestAccount();
- fs = testAccount.getFileSystem();
- Path testFilePath1 = new Path("test1.dat");
-
- getInputStreamToTest(fs, testFilePath1);
- Thread renameThread = new Thread(new RenameThread(fs, testFilePath1));
- renameThread.start();
-
- renameThread.join();
-
- inputStream.seek(5);
- inputStream.read();
- }
-
- @Test(expected=FileNotFoundException.class)
- /*
- * Tests the basic multi-threaded setPermission scenario for page blobs.
- */
- public void testMultiThreadedPageBlobSetPermissionScenario() throws Throwable {
- ExceptionHandlingTestHelper.createEmptyFile(ExceptionHandlingTestHelper.getPageBlobTestStorageAccount(),
- testPath);
- Thread t = new Thread(new DeleteThread(fs, testPath));
- t.start();
- while (t.isAlive()) {
- fs.setPermission(testPath, new FsPermission(FsAction.EXECUTE, FsAction.READ, FsAction.READ));
- }
- fs.setPermission(testPath, new FsPermission(FsAction.EXECUTE, FsAction.READ, FsAction.READ));
- }
-
- @Test(expected=FileNotFoundException.class)
- /*
- * Tests the basic multi-threaded setPermission scenario for block blobs.
- */
- public void testMultiThreadedBlockBlobSetPermissionScenario() throws Throwable {
- ExceptionHandlingTestHelper.createEmptyFile(createTestAccount(),
- testPath);
- Thread t = new Thread(new DeleteThread(fs, testPath));
- t.start();
- while (t.isAlive()) {
- fs.setPermission(testPath, new FsPermission(FsAction.EXECUTE, FsAction.READ, FsAction.READ));
- }
- fs.setPermission(testPath, new FsPermission(FsAction.EXECUTE, FsAction.READ, FsAction.READ));
- }
-
- @Test(expected=FileNotFoundException.class)
- /*
- * Tests the basic multi-threaded open scenario for page blobs.
- */
- public void testMultiThreadedPageBlobOpenScenario() throws Throwable {
-
- ExceptionHandlingTestHelper.createEmptyFile(ExceptionHandlingTestHelper.getPageBlobTestStorageAccount(),
- testPath);
- Thread t = new Thread(new DeleteThread(fs, testPath));
- t.start();
- while (t.isAlive()) {
- inputStream = fs.open(testPath);
- inputStream.close();
- }
-
- inputStream = fs.open(testPath);
- inputStream.close();
- }
-
- @Test(expected=FileNotFoundException.class)
- /*
- * Tests the basic multi-threaded open scenario for block blobs.
- */
- public void testMultiThreadedBlockBlobOpenScenario() throws Throwable {
-
- ExceptionHandlingTestHelper.createEmptyFile(createTestAccount(),
- testPath);
- Thread t = new Thread(new DeleteThread(fs, testPath));
- t.start();
-
- while (t.isAlive()) {
- inputStream = fs.open(testPath);
- inputStream.close();
- }
- inputStream = fs.open(testPath);
- inputStream.close();
- }
-
- @Test(expected=FileNotFoundException.class)
- /*
- * Tests the basic multi-threaded setOwner scenario for block blobs.
- */
- public void testMultiThreadedBlockBlobSetOwnerScenario() throws Throwable {
-
- ExceptionHandlingTestHelper.createEmptyFile(createTestAccount(), testPath);
- Thread t = new Thread(new DeleteThread(fs, testPath));
- t.start();
- while (t.isAlive()) {
- fs.setOwner(testPath, "testowner", "testgroup");
- }
- fs.setOwner(testPath, "testowner", "testgroup");
- }
-
- @Test(expected=FileNotFoundException.class)
- /*
- * Tests the basic multi-threaded setOwner scenario for page blobs.
- */
- public void testMultiThreadedPageBlobSetOwnerScenario() throws Throwable {
- ExceptionHandlingTestHelper.createEmptyFile(ExceptionHandlingTestHelper.getPageBlobTestStorageAccount(),
- testPath);
- Thread t = new Thread(new DeleteThread(fs, testPath));
- t.start();
- while (t.isAlive()) {
- fs.setOwner(testPath, "testowner", "testgroup");
- }
- fs.setOwner(testPath, "testowner", "testgroup");
- }
-
- @Test(expected=FileNotFoundException.class)
- /*
- * Tests the basic multi-threaded listStatus scenario for block blobs.
- */
- public void testMultiThreadedBlockBlobListStatusScenario() throws Throwable {
-
- ExceptionHandlingTestHelper.createTestFolder(createTestAccount(), testFolderPath);
- Thread t = new Thread(new DeleteThread(fs, testFolderPath));
- t.start();
- while (t.isAlive()) {
- fs.listStatus(testFolderPath);
- }
- fs.listStatus(testFolderPath);
- }
-
- @Test(expected=FileNotFoundException.class)
- /*
- * Tests the basic multi-threaded listStatus scenario for page blobs.
- */
- public void testMultiThreadedPageBlobListStatusScenario() throws Throwable {
-
- ExceptionHandlingTestHelper.createTestFolder(ExceptionHandlingTestHelper.getPageBlobTestStorageAccount(),
- testFolderPath);
- Thread t = new Thread(new DeleteThread(fs, testFolderPath));
- t.start();
- while (t.isAlive()) {
- fs.listStatus(testFolderPath);
- }
- fs.listStatus(testFolderPath);
- }
-
- /*
- * Test to validate that the correct exception is thrown for the
- * multi-threaded read scenario for page blobs.
- */
-
- @Test(expected=FileNotFoundException.class)
- public void testMultiThreadedPageBlobReadScenario() throws Throwable {
-
- AzureBlobStorageTestAccount testAccount = ExceptionHandlingTestHelper.getPageBlobTestStorageAccount();
- fs = testAccount.getFileSystem();
- Path testFilePath1 = new Path("test1.dat");
-
- getInputStreamToTest(fs, testFilePath1);
- Thread renameThread = new Thread(new RenameThread(fs, testFilePath1));
- renameThread.start();
-
- renameThread.join();
- byte[] readBuffer = new byte[512];
- inputStream.read(readBuffer);
- }
-
- /*
- * Test to validate that the correct exception is thrown for the
- * multi-threaded seek scenario for page blobs.
- */
-
- @Test(expected=FileNotFoundException.class)
- public void testMultiThreadedPageBlobSeekScenario() throws Throwable {
-
- AzureBlobStorageTestAccount testAccount = ExceptionHandlingTestHelper.getPageBlobTestStorageAccount();
- fs = testAccount.getFileSystem();
- Path testFilePath1 = new Path("test1.dat");
-
- getInputStreamToTest(fs, testFilePath1);
- Thread renameThread = new Thread(new RenameThread(fs, testFilePath1));
- renameThread.start();
-
- renameThread.join();
- inputStream.seek(5);
- }
-
- @Override
- protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
- return AzureBlobStorageTestAccount.create();
- }
-
- @After
- public void tearDown() throws Exception {
-
- if (inputStream != null) {
- inputStream.close();
- }
-
- if (fs != null && fs.exists(testPath)) {
- fs.delete(testPath, true);
- }
- }
-}
-
-/*
- * Helper thread that just renames the test file.
- */
-class RenameThread implements Runnable {
-
- private FileSystem fs;
- private Path testPath;
- private Path renamePath = new Path("test2.dat");
-
- public RenameThread(FileSystem fs, Path testPath) {
- this.fs = fs;
- this.testPath = testPath;
- }
-
- @Override
- public void run(){
- try {
- fs.rename(testPath, renamePath);
- }catch (Exception e) {
- // Swallowing the exception as the
- // correctness of the test is controlled
- // by the other thread
- }
- }
-}
-
-class DeleteThread implements Runnable {
- private FileSystem fs;
- private Path testPath;
-
- public DeleteThread(FileSystem fs, Path testPath) {
- this.fs = fs;
- this.testPath = testPath;
- }
-
- @Override
- public void run() {
- try {
- fs.delete(testPath, true);
- } catch (Exception e) {
- // Swallowing the exception as the
- // correctness of the test is controlled
- // by the other thread
- }
- }
-}
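The tests above share one pattern: a helper thread deletes or renames the path while the main thread keeps operating on it, and the test passes once the race surfaces as a FileNotFoundException. A minimal sketch of the delete variant (ConcurrentDeleteProbe is an illustrative name, not from this patch):

  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  final class ConcurrentDeleteProbe {

    // Keep re-opening the path while a second thread deletes it; open()
    // is expected to throw FileNotFoundException once the delete lands.
    static void probe(final FileSystem fs, final Path path) throws Exception {
      Thread deleter = new Thread(new Runnable() {
        @Override
        public void run() {
          try {
            fs.delete(path, true);
          } catch (Exception e) {
            // Swallowed: correctness is asserted by the reading thread.
          }
        }
      });
      deleter.start();
      while (deleter.isAlive()) {
        fs.open(path).close();
      }
      // One more attempt in case the loop never observed the deletion.
      fs.open(path).close();
    }
  }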
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationsWithThreads.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationsWithThreads.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationsWithThreads.java
deleted file mode 100644
index fd3690c..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationsWithThreads.java
+++ /dev/null
@@ -1,821 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.io.IOException;
-import java.net.URI;
-import java.util.concurrent.RejectedExecutionException;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.commons.logging.impl.Log4JLogger;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.azure.NativeAzureFileSystem.FolderRenamePending;
-import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.mockito.Mockito;
-import org.mockito.invocation.InvocationOnMock;
-import org.mockito.stubbing.Answer;
-
-/**
- * Tests the Native Azure file system (WASB) using parallel threads for rename and delete operations.
- */
-public class TestFileSystemOperationsWithThreads extends AbstractWasbTestBase {
-
- private final int renameThreads = 10;
- private final int deleteThreads = 20;
- private int iterations = 1;
- private LogCapturer logs = null;
-
- @Rule
- public ExpectedException exception = ExpectedException.none();
-
- @Before
- public void setUp() throws Exception {
- super.setUp();
- Configuration conf = fs.getConf();
-
- // By default enable parallel threads for rename and delete operations.
- // Also enable flat listing of blobs for these operations.
- conf.setInt(NativeAzureFileSystem.AZURE_RENAME_THREADS, renameThreads);
- conf.setInt(NativeAzureFileSystem.AZURE_DELETE_THREADS, deleteThreads);
- conf.setBoolean(AzureNativeFileSystemStore.KEY_ENABLE_FLAT_LISTING, true);
-
- URI uri = fs.getUri();
- fs.initialize(uri, conf);
-
- // Capture logs
- logs = LogCapturer.captureLogs(new Log4JLogger(org.apache.log4j.Logger
- .getRootLogger()));
- }
-
- /*
- * Helper method to create a subdirectory and several types of files
- * for multiple iterations.
- */
- private void createFolder(FileSystem fs, String root) throws Exception {
- fs.mkdirs(new Path(root));
- for (int i = 0; i < this.iterations; i++) {
- fs.mkdirs(new Path(root + "/" + i));
- fs.createNewFile(new Path(root + "/" + i + "/fileToRename"));
- fs.createNewFile(new Path(root + "/" + i + "/file/to/rename"));
- fs.createNewFile(new Path(root + "/" + i + "/file+to%rename"));
- fs.createNewFile(new Path(root + "/fileToRename" + i));
- }
- }
-
- /*
- * Helper method to perform a rename and validate that no files remain
- * in the source folder and that matching files exist in the destination folder.
- */
- private void validateRenameFolder(FileSystem fs, String source, String dest) throws Exception {
- // Create source folder with files.
- createFolder(fs, source);
- Path sourceFolder = new Path(source);
- Path destFolder = new Path(dest);
-
- // rename operation
- assertTrue(fs.rename(sourceFolder, destFolder));
- assertTrue(fs.exists(destFolder));
-
- for (int i = 0; i < this.iterations; i++) {
- // Check that the destination folder and files exist.
- assertTrue(fs.exists(new Path(dest + "/" + i)));
- assertTrue(fs.exists(new Path(dest + "/" + i + "/fileToRename")));
- assertTrue(fs.exists(new Path(dest + "/" + i + "/file/to/rename")));
- assertTrue(fs.exists(new Path(dest + "/" + i + "/file+to%rename")));
- assertTrue(fs.exists(new Path(dest + "/fileToRename" + i)));
-
- // Check that the source folder and files no longer exist.
- assertFalse(fs.exists(new Path(source + "/" + i)));
- assertFalse(fs.exists(new Path(source + "/" + i + "/fileToRename")));
- assertFalse(fs.exists(new Path(source + "/" + i + "/file/to/rename")));
- assertFalse(fs.exists(new Path(source + "/" + i + "/file+to%rename")));
- assertFalse(fs.exists(new Path(source + "/fileToRename" + i)));
- }
- }
-
- /*
- * Test case for renaming a small folder with multiple threads and flat listing enabled.
- */
- @Test
- public void testRenameSmallFolderWithThreads() throws Exception {
-
- validateRenameFolder(fs, "root", "rootnew");
-
- // With a single iteration, we would have created 7 blobs.
- int expectedThreadsCreated = Math.min(7, renameThreads);
-
- // Validate from logs that threads are created.
- String content = logs.getOutput();
- assertInLog(content, "ms with threads: " + expectedThreadsCreated);
-
- // Validate thread executions
- for (int i = 0; i < expectedThreadsCreated; i++) {
- assertInLog(content,
- "AzureBlobRenameThread-" + Thread.currentThread().getName() + "-" + i);
- }
-
- // Also ensure that we haven't spawned extra threads.
- if (expectedThreadsCreated < renameThreads) {
- for (int i = expectedThreadsCreated; i < renameThreads; i++) {
- assertNotInLog(content,
- "AzureBlobRenameThread-" + Thread.currentThread().getName() + "-" + i);
- }
- }
- }
-
- /*
- * Test case for renaming a large folder with multiple threads and flat listing enabled.
- */
- @Test
- public void testRenameLargeFolderWithThreads() throws Exception {
-
- // Populate source folder with large number of files and directories.
- this.iterations = 10;
- validateRenameFolder(fs, "root", "rootnew");
-
- // Validate from logs that threads are created.
- String content = logs.getOutput();
- assertInLog(content, "ms with threads: " + renameThreads);
-
- // Validate thread executions
- for (int i = 0; i < renameThreads; i++) {
- assertInLog(content,
- "AzureBlobRenameThread-" + Thread.currentThread().getName() + "-" + i);
- }
- }
-
- /*
- * Test case for rename operation with threads disabled and flat listing enabled.
- */
- @Test
- public void testRenameLargeFolderDisableThreads() throws Exception {
- Configuration conf = fs.getConf();
-
- // Number of threads set to 0 or 1 disables threads.
- conf.setInt(NativeAzureFileSystem.AZURE_RENAME_THREADS, 0);
- URI uri = fs.getUri();
- fs.initialize(uri, conf);
-
- // Populate source folder with large number of files and directories.
- this.iterations = 10;
- validateRenameFolder(fs, "root", "rootnew");
-
- // Validate from logs that threads are disabled.
- String content = logs.getOutput();
- assertInLog(content,
- "Disabling threads for Rename operation as thread count 0");
-
- // Validate no thread executions
- for (int i = 0; i < renameThreads; i++) {
- String term = "AzureBlobRenameThread-"
- + Thread.currentThread().getName()
- + "-" + i;
- assertNotInLog(content, term);
- }
- }
-
- /**
- * Assert that a log contains the given term.
- * @param content log output
- * @param term search term
- */
- protected void assertInLog(String content, String term) {
- assertTrue("Empty log", !content.isEmpty());
- if (!content.contains(term)) {
- String message = "No " + term + " found in logs";
- LOG.error(message);
- System.err.println(content);
- fail(message);
- }
- }
-
- /**
- * Assert that a log does not contain the given term.
- * @param content log output
- * @param term search term
- */
- protected void assertNotInLog(String content, String term) {
- assertTrue("Empty log", !content.isEmpty());
- if (content.contains(term)) {
- String message = term + " found in logs";
- LOG.error(message);
- System.err.println(content);
- fail(message);
- }
- }
-
- /*
- * Test case for rename operation with threads and flat listing disabled.
- */
- @Test
- public void testRenameSmallFolderDisableThreadsDisableFlatListing() throws Exception {
- Configuration conf = fs.getConf();
-
- // Number of threads set to 0 or 1 disables threads.
- conf.setInt(NativeAzureFileSystem.AZURE_RENAME_THREADS, 1);
- conf.setBoolean(AzureNativeFileSystemStore.KEY_ENABLE_FLAT_LISTING, false);
- URI uri = fs.getUri();
- fs.initialize(uri, conf);
-
- validateRenameFolder(fs, "root", "rootnew");
-
- // Validate from logs that threads are disabled.
- String content = logs.getOutput();
- assertInLog(content,
- "Disabling threads for Rename operation as thread count 1");
-
- // Validate no thread executions
- for (int i = 0; i < renameThreads; i++) {
- assertNotInLog(content,
- "AzureBlobRenameThread-" + Thread.currentThread().getName() + "-" + i);
- }
- }
-
- /*
- * Helper method to perform a delete and validate that no files remain
- * in the source folder after the operation.
- */
- private void validateDeleteFolder(FileSystem fs, String source) throws Exception {
- // Create folder with files.
- createFolder(fs, "root");
- Path sourceFolder = new Path(source);
-
- // Delete operation
- assertTrue(fs.delete(sourceFolder, true));
- assertFalse(fs.exists(sourceFolder));
-
- for (int i = 0; i < this.iterations; i++) {
- // Check that the source folder and files no longer exist.
- assertFalse(fs.exists(new Path(source + "/" + i)));
- assertFalse(fs.exists(new Path(source + "/" + i + "/fileToRename")));
- assertFalse(fs.exists(new Path(source + "/" + i + "/file/to/rename")));
- assertFalse(fs.exists(new Path(source + "/" + i + "/file+to%rename")));
- assertFalse(fs.exists(new Path(source + "/fileToRename" + i)));
- }
- }
-
- /*
- * Test case for deleting a small folder with multiple threads and flat listing enabled.
- */
- @Test
- public void testDeleteSmallFolderWithThreads() throws Exception {
-
- validateDeleteFolder(fs, "root");
-
- // With a single iteration, we would have created 7 blobs.
- int expectedThreadsCreated = Math.min(7, deleteThreads);
-
- // Validate from logs that threads are enabled.
- String content = logs.getOutput();
- assertInLog(content, "ms with threads: " + expectedThreadsCreated);
-
- // Validate thread executions
- for (int i = 0; i < expectedThreadsCreated; i++) {
- assertInLog(content,
- "AzureBlobDeleteThread-" + Thread.currentThread().getName() + "-" + i);
- }
-
- // Also ensure that we haven't spawned extra threads.
- if (expectedThreadsCreated < deleteThreads) {
- for (int i = expectedThreadsCreated; i < deleteThreads; i++) {
- assertNotInLog(content,
- "AzureBlobDeleteThread-" + Thread.currentThread().getName() + "-" + i);
- }
- }
- }
-
- /*
- * Test case for deleting a large folder with multiple threads and flat listing enabled.
- */
- @Test
- public void testDeleteLargeFolderWithThreads() throws Exception {
- // Populate source folder with large number of files and directories.
- this.iterations = 10;
- validateDeleteFolder(fs, "root");
-
- // Validate from logs that threads are enabled.
- String content = logs.getOutput();
- assertInLog(content, "ms with threads: " + deleteThreads);
-
- // Validate thread executions
- for (int i = 0; i < deleteThreads; i++) {
- assertInLog(content,
- "AzureBlobDeleteThread-" + Thread.currentThread().getName() + "-" + i);
- }
- }
-
- /*
- * Test case for delete operation with threads disabled and flat listing enabled.
- */
- @Test
- public void testDeleteLargeFolderDisableThreads() throws Exception {
- Configuration conf = fs.getConf();
- conf.setInt(NativeAzureFileSystem.AZURE_DELETE_THREADS, 0);
- URI uri = fs.getUri();
- fs.initialize(uri, conf);
-
- // Populate source folder with large number of files and directories.
- this.iterations = 10;
- validateDeleteFolder(fs, "root");
-
- // Validate from logs that threads are disabled.
- String content = logs.getOutput();
- assertInLog(content,
- "Disabling threads for Delete operation as thread count 0");
-
- // Validate no thread executions
- for (int i = 0; i < deleteThreads; i++) {
- assertNotInLog(content,
- "AzureBlobDeleteThread-" + Thread.currentThread().getName() + "-" + i);
- }
- }
-
- /*
- * Test case for delete operation with threads and flat listing disabled.
- */
- @Test
- public void testDeleteSmallFolderDisableThreadsDisableFlatListing() throws Exception {
- Configuration conf = fs.getConf();
-
- // Number of threads set to 0 or 1 disables threads.
- conf.setInt(NativeAzureFileSystem.AZURE_DELETE_THREADS, 1);
- conf.setBoolean(AzureNativeFileSystemStore.KEY_ENABLE_FLAT_LISTING, false);
- URI uri = fs.getUri();
- fs.initialize(uri, conf);
-
- validateDeleteFolder(fs, "root");
-
- // Validate from logs that threads are disabled.
- String content = logs.getOutput();
- assertInLog(content,
- "Disabling threads for Delete operation as thread count 1");
-
- // Validate no thread executions
- for (int i = 0; i < deleteThreads; i++) {
- assertNotInLog(content,
- "AzureBlobDeleteThread-" + Thread.currentThread().getName() + "-" + i);
- }
- }
-
- /*
- * Test case for the delete operation when thread pool creation fails and
- * the operation falls back to serial execution.
- */
- @Test
- public void testDeleteThreadPoolExceptionFailure() throws Exception {
-
- // Spy azure file system object and raise exception for new thread pool
- NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs);
-
- String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root")));
-
- AzureFileSystemThreadPoolExecutor mockThreadPoolExecutor = Mockito.spy(
- mockFs.getThreadPoolExecutor(deleteThreads, "AzureBlobDeleteThread", "Delete",
- path, NativeAzureFileSystem.AZURE_DELETE_THREADS));
- Mockito.when(mockThreadPoolExecutor.getThreadPool(7)).thenThrow(new Exception());
-
- // With a single iteration, we would have created 7 blobs, resulting in 7 threads.
- Mockito.when(mockFs.getThreadPoolExecutor(deleteThreads, "AzureBlobDeleteThread", "Delete",
- path, NativeAzureFileSystem.AZURE_DELETE_THREADS)).thenReturn(mockThreadPoolExecutor);
-
- validateDeleteFolder(mockFs, "root");
-
- // Validate from logs that threads are disabled.
- String content = logs.getOutput();
- assertInLog(content, "Failed to create thread pool with threads");
- assertInLog(content, "Serializing the Delete operation");
- }
-
- /*
- * Test case for the delete operation when the thread pool rejects every
- * task and the operation falls back to serial execution.
- */
- @Test
- public void testDeleteThreadPoolExecuteFailure() throws Exception {
-
- // Mock thread pool executor to throw exception for all requests.
- ThreadPoolExecutor mockThreadExecutor = Mockito.mock(ThreadPoolExecutor.class);
- Mockito.doThrow(new RejectedExecutionException()).when(mockThreadExecutor).execute(Mockito.any(Runnable.class));
-
- // Spy azure file system object and return mocked thread pool
- NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs);
-
- String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root")));
-
- AzureFileSystemThreadPoolExecutor mockThreadPoolExecutor = Mockito.spy(
- mockFs.getThreadPoolExecutor(deleteThreads, "AzureBlobDeleteThread", "Delete",
- path, NativeAzureFileSystem.AZURE_DELETE_THREADS));
- Mockito.when(mockThreadPoolExecutor.getThreadPool(7)).thenReturn(mockThreadExecutor);
-
- // With a single iteration, we would have created 7 blobs, resulting in 7 threads.
- Mockito.when(mockFs.getThreadPoolExecutor(deleteThreads, "AzureBlobDeleteThread", "Delete",
- path, NativeAzureFileSystem.AZURE_DELETE_THREADS)).thenReturn(mockThreadPoolExecutor);
-
- validateDeleteFolder(mockFs, "root");
-
- // Validate from logs that threads are disabled.
- String content = logs.getOutput();
- assertInLog(content,
- "Rejected execution of thread for Delete operation on blob");
- assertInLog(content, "Serializing the Delete operation");
- }
-
- /*
- * Test case for the delete operation when all but the first task submission
- * is rejected, leaving most threads unused.
- */
- @Test
- public void testDeleteThreadPoolExecuteSingleThreadFailure() throws Exception {
-
- // Spy azure file system object and return mocked thread pool
- NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs);
-
- // Spy a thread pool executor and link it to azure file system object.
- String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root")));
- AzureFileSystemThreadPoolExecutor mockThreadPoolExecutor = Mockito.spy(
- mockFs.getThreadPoolExecutor(deleteThreads, "AzureBlobDeleteThread", "Delete",
- path, NativeAzureFileSystem.AZURE_DELETE_THREADS));
-
- // With a single iteration, we would have created 7 blobs, resulting in 7 threads.
- Mockito.when(mockFs.getThreadPoolExecutor(deleteThreads, "AzureBlobDeleteThread", "Delete",
- path, NativeAzureFileSystem.AZURE_DELETE_THREADS)).thenReturn(mockThreadPoolExecutor);
-
- // Create a thread executor and link it to mocked thread pool executor object.
- ThreadPoolExecutor mockThreadExecutor = Mockito.spy(mockThreadPoolExecutor.getThreadPool(7));
- Mockito.when(mockThreadPoolExecutor.getThreadPool(7)).thenReturn(mockThreadExecutor);
-
- // Mock thread executor to throw exception for all requests.
- Mockito.doCallRealMethod().doThrow(new RejectedExecutionException()).when(mockThreadExecutor).execute(Mockito.any(Runnable.class));
-
- validateDeleteFolder(mockFs, "root");
-
- // Validate from logs that threads are enabled and that some threads were left unused.
- String content = logs.getOutput();
- assertInLog(content,
- "Using thread pool for Delete operation with threads 7");
- assertInLog(content,
- "6 threads not used for Delete operation on blob");
- }
-
- /*
- * Test case for the delete operation when waiting for thread pool
- * termination is interrupted, causing the operation to fail.
- */
- @Test
- public void testDeleteThreadPoolTerminationFailure() throws Exception {
-
- // Spy azure file system object and return mocked thread pool
- NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs);
-
- // Spy a thread pool executor and link it to azure file system object.
- String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root")));
- AzureFileSystemThreadPoolExecutor mockThreadPoolExecutor = Mockito.spy(
- ((NativeAzureFileSystem) fs).getThreadPoolExecutor(deleteThreads, "AzureBlobDeleteThread", "Delete",
- path, NativeAzureFileSystem.AZURE_DELETE_THREADS));
-
- // Create a thread executor and link it to mocked thread pool executor object.
- // Mock thread executor to throw exception for terminating threads.
- ThreadPoolExecutor mockThreadExecutor = Mockito.mock(ThreadPoolExecutor.class);
- Mockito.doNothing().when(mockThreadExecutor).execute(Mockito.any(Runnable.class));
- Mockito.when(mockThreadExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS)).thenThrow(new InterruptedException());
-
- Mockito.when(mockThreadPoolExecutor.getThreadPool(7)).thenReturn(mockThreadExecutor);
-
- // With a single iteration, we would have created 7 blobs, resulting in 7 threads.
- Mockito.when(mockFs.getThreadPoolExecutor(deleteThreads, "AzureBlobDeleteThread", "Delete",
- path, NativeAzureFileSystem.AZURE_DELETE_THREADS)).thenReturn(mockThreadPoolExecutor);
-
- createFolder(mockFs, "root");
- Path sourceFolder = new Path("root");
- boolean exception = false;
- try {
- mockFs.delete(sourceFolder, true);
- } catch (IOException e){
- exception = true;
- }
-
- assertTrue(exception);
- assertTrue(mockFs.exists(sourceFolder));
-
- // Validate from logs that threads are enabled and that the delete operation failed.
- String content = logs.getOutput();
- assertInLog(content,
- "Using thread pool for Delete operation with threads");
- assertInLog(content, "Threads got interrupted Delete blob operation");
- assertInLog(content,
- "Delete failed as operation on subfolders and files failed.");
- }
-
- /*
- * Validate that when a directory is deleted recursively, the operation succeeds
- * even if a child directory delete fails because the directory does not exist.
- * This can happen if a child directory is deleted by an external agent while
- * the parent is in the process of being deleted recursively.
- */
- @Test
- public void testRecursiveDirectoryDeleteWhenChildDirectoryDeleted()
- throws Exception {
- testRecursiveDirectoryDelete(true);
- }
-
- /*
- * Validate that when a directory is deleted recursively, the operation succeeds
- * even if a file delete fails because it does not exist.
- * This can happen if a file is deleted by an external agent while
- * the parent directory is in the process of being deleted.
- */
- @Test
- public void testRecursiveDirectoryDeleteWhenDeletingChildFileReturnsFalse()
- throws Exception {
- testRecursiveDirectoryDelete(false);
- }
-
- private void testRecursiveDirectoryDelete(boolean useDir) throws Exception {
- String childPathToBeDeletedByExternalAgent = (useDir)
- ? "root/0"
- : "root/0/fileToRename";
- // Spy azure file system object and return false for deleting one file
- NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs);
- String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path(
- childPathToBeDeletedByExternalAgent)));
-
- Answer<Boolean> answer = new Answer<Boolean>() {
- public Boolean answer(InvocationOnMock invocation) throws Throwable {
- String path = (String) invocation.getArguments()[0];
- boolean isDir = (boolean) invocation.getArguments()[1];
- boolean realResult = fs.deleteFile(path, isDir);
- assertTrue(realResult);
- boolean fakeResult = false;
- return fakeResult;
- }
- };
-
- Mockito.when(mockFs.deleteFile(path, useDir)).thenAnswer(answer);
-
- createFolder(mockFs, "root");
- Path sourceFolder = new Path("root");
-
- assertTrue(mockFs.delete(sourceFolder, true));
- assertFalse(mockFs.exists(sourceFolder));
-
- // Validate from logs that threads are enabled, that a child directory was
- // deleted by an external caller, and the parent delete operation still
- // succeeds.
- String content = logs.getOutput();
- assertInLog(content,
- "Using thread pool for Delete operation with threads");
- assertInLog(content, String.format("Attempt to delete non-existent %s %s",
- useDir ? "directory" : "file", path));
- }
-
- /*
- * Test case for the delete operation when deleting one file throws an
- * IOException, causing the whole operation to fail.
- */
- @Test
- public void testDeleteSingleDeleteException() throws Exception {
-
- // Spy azure file system object and raise exception for deleting one file
- NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs);
- String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root/0")));
- Mockito.doThrow(new IOException()).when(mockFs).deleteFile(path, true);
-
- createFolder(mockFs, "root");
- Path sourceFolder = new Path("root");
-
- boolean exception = false;
- try {
- mockFs.delete(sourceFolder, true);
- } catch (IOException e){
- exception = true;
- }
-
- assertTrue(exception);
- assertTrue(mockFs.exists(sourceFolder));
-
- // Validate from logs that threads are enabled and that the delete operation failed.
- String content = logs.getOutput();
- assertInLog(content,
- "Using thread pool for Delete operation with threads");
- assertInLog(content,
- "Encountered Exception for Delete operation for file " + path);
- assertInLog(content,
- "Terminating execution of Delete operation now as some other thread already got exception or operation failed");
- }
-
- /*
- * Test case for the rename operation when thread pool creation fails and
- * the operation falls back to serial execution.
- */
- @Test
- public void testRenameThreadPoolExceptionFailure() throws Exception {
-
- // Spy azure file system object and raise exception for new thread pool
- NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs);
-
- String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root")));
- AzureFileSystemThreadPoolExecutor mockThreadPoolExecutor = Mockito.spy(
- ((NativeAzureFileSystem) fs).getThreadPoolExecutor(renameThreads, "AzureBlobRenameThread", "Rename",
- path, NativeAzureFileSystem.AZURE_RENAME_THREADS));
- Mockito.when(mockThreadPoolExecutor.getThreadPool(7)).thenThrow(new Exception());
-
- // With a single iteration, we would have created 7 blobs, resulting in 7 threads.
- Mockito.doReturn(mockThreadPoolExecutor).when(mockFs).getThreadPoolExecutor(renameThreads, "AzureBlobRenameThread", "Rename",
- path, NativeAzureFileSystem.AZURE_RENAME_THREADS);
-
- validateRenameFolder(mockFs, "root", "rootnew");
-
- // Validate from logs that threads are disabled.
- String content = logs.getOutput();
- assertInLog(content, "Failed to create thread pool with threads");
- assertInLog(content, "Serializing the Rename operation");
- }
-
- /*
- * Test case for the rename operation when the thread pool rejects every
- * task and the operation falls back to serial execution.
- */
- @Test
- public void testRenameThreadPoolExecuteFailure() throws Exception {
-
- // Mock thread pool executor to throw exception for all requests.
- ThreadPoolExecutor mockThreadExecutor = Mockito.mock(ThreadPoolExecutor.class);
- Mockito.doThrow(new RejectedExecutionException()).when(mockThreadExecutor).execute(Mockito.any(Runnable.class));
-
- // Spy azure file system object and return mocked thread pool
- NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs);
-
- String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root")));
- AzureFileSystemThreadPoolExecutor mockThreadPoolExecutor = Mockito.spy(
- mockFs.getThreadPoolExecutor(renameThreads, "AzureBlobRenameThread", "Rename",
- path, NativeAzureFileSystem.AZURE_RENAME_THREADS));
- Mockito.when(mockThreadPoolExecutor.getThreadPool(7)).thenReturn(mockThreadExecutor);
-
- // With a single iteration, we would have created 7 blobs, resulting in 7 threads.
- Mockito.when(mockFs.getThreadPoolExecutor(renameThreads, "AzureBlobRenameThread", "Rename",
- path, NativeAzureFileSystem.AZURE_RENAME_THREADS)).thenReturn(mockThreadPoolExecutor);
-
- validateRenameFolder(mockFs, "root", "rootnew");
-
- // Validate from logs that threads are disabled.
- String content = logs.getOutput();
- assertInLog(content,
- "Rejected execution of thread for Rename operation on blob");
- assertInLog(content, "Serializing the Rename operation");
- }
-
- /*
- * Test case for the rename operation when all but the first task submission
- * is rejected, leaving most threads unused.
- */
- @Test
- public void testRenameThreadPoolExecuteSingleThreadFailure() throws Exception {
-
- // Spy azure file system object and return mocked thread pool
- NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs);
-
- // Spy a thread pool executor and link it to azure file system object.
- String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root")));
- AzureFileSystemThreadPoolExecutor mockThreadPoolExecutor = Mockito.spy(
- mockFs.getThreadPoolExecutor(renameThreads, "AzureBlobRenameThread", "Rename",
- path, NativeAzureFileSystem.AZURE_RENAME_THREADS));
-
- // With a single iteration, we would have created 7 blobs, resulting in 7 threads.
- Mockito.when(mockFs.getThreadPoolExecutor(renameThreads, "AzureBlobRenameThread", "Rename",
- path, NativeAzureFileSystem.AZURE_RENAME_THREADS)).thenReturn(mockThreadPoolExecutor);
-
- // Create a thread executor and link it to mocked thread pool executor object.
- ThreadPoolExecutor mockThreadExecutor = Mockito.spy(mockThreadPoolExecutor.getThreadPool(7));
- Mockito.when(mockThreadPoolExecutor.getThreadPool(7)).thenReturn(mockThreadExecutor);
-
- // Mock thread executor to throw exception for all requests.
- Mockito.doCallRealMethod().doThrow(new RejectedExecutionException()).when(mockThreadExecutor).execute(Mockito.any(Runnable.class));
-
- validateRenameFolder(mockFs, "root", "rootnew");
-
- // Validate from logs that threads are enabled and that some threads were left unused.
- String content = logs.getOutput();
- assertInLog(content,
- "Using thread pool for Rename operation with threads 7");
- assertInLog(content,
- "6 threads not used for Rename operation on blob");
- }
-
- /*
- * Test case for the rename operation when waiting for thread pool
- * termination is interrupted, causing the operation to fail.
- */
- @Test
- public void testRenameThreadPoolTerminationFailure() throws Exception {
-
- // Spy azure file system object and return mocked thread pool
- NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs);
-
- // Spy a thread pool executor and link it to azure file system object.
- String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root")));
- AzureFileSystemThreadPoolExecutor mockThreadPoolExecutor = Mockito.spy(
- mockFs.getThreadPoolExecutor(renameThreads, "AzureBlobRenameThread", "Rename",
- path, NativeAzureFileSystem.AZURE_RENAME_THREADS));
-
- // With a single iteration, we would have created 7 blobs, resulting in 7 threads.
- Mockito.when(mockFs.getThreadPoolExecutor(renameThreads, "AzureBlobRenameThread", "Rename",
- path, NativeAzureFileSystem.AZURE_RENAME_THREADS)).thenReturn(mockThreadPoolExecutor);
-
- // Mock thread executor to throw exception for all requests.
- ThreadPoolExecutor mockThreadExecutor = Mockito.mock(ThreadPoolExecutor.class);
- Mockito.doNothing().when(mockThreadExecutor).execute(Mockito.any(Runnable.class));
- Mockito.when(mockThreadExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS)).thenThrow(new InterruptedException());
- Mockito.when(mockThreadPoolExecutor.getThreadPool(7)).thenReturn(mockThreadExecutor);
-
-
- createFolder(mockFs, "root");
- Path sourceFolder = new Path("root");
- Path destFolder = new Path("rootnew");
- boolean exception = false;
- try {
- mockFs.rename(sourceFolder, destFolder);
- } catch (IOException e){
- exception = true;
- }
-
- assertTrue(exception);
- assertTrue(mockFs.exists(sourceFolder));
-
- // Validate from logs that threads are enabled and that the rename operation failed.
- String content = logs.getOutput();
- assertInLog(content,
- "Using thread pool for Rename operation with threads");
- assertInLog(content, "Threads got interrupted Rename blob operation");
- assertInLog(content,
- "Rename failed as operation on subfolders and files failed.");
- }
-
- /*
- * Test case for the rename operation when renaming one file throws an
- * IOException, causing the whole operation to fail.
- */
- @Test
- public void testRenameSingleRenameException() throws Exception {
-
- // Spy the Azure file system object and raise an exception when renaming one file.
- Path sourceFolder = new Path("root");
- Path destFolder = new Path("rootnew");
-
- // Spy azure file system object and populate rename pending spy object.
- NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs);
-
- // Populate data now only such that rename pending spy object would see this data.
- createFolder(mockFs, "root");
-
- String srcKey = mockFs.pathToKey(mockFs.makeAbsolute(sourceFolder));
- String dstKey = mockFs.pathToKey(mockFs.makeAbsolute(destFolder));
-
- FolderRenamePending mockRenameFs = Mockito.spy(mockFs.prepareAtomicFolderRename(srcKey, dstKey));
- Mockito.when(mockFs.prepareAtomicFolderRename(srcKey, dstKey)).thenReturn(mockRenameFs);
- String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root/0")));
- Mockito.doThrow(new IOException()).when(mockRenameFs).renameFile(Mockito.any(FileMetadata.class));
-
- boolean exception = false;
- try {
- mockFs.rename(sourceFolder, destFolder);
- } catch (IOException e){
- exception = true;
- }
-
- assertTrue(exception);
- assertTrue(mockFs.exists(sourceFolder));
-
- // Validate from logs that threads are enabled and that the rename operation failed.
- String content = logs.getOutput();
- assertInLog(content,
- "Using thread pool for Rename operation with threads");
- assertInLog(content,
- "Encountered Exception for Rename operation for file " + path);
- assertInLog(content,
- "Terminating execution of Rename operation now as some other thread already got exception or operation failed");
- }
-
- @Override
- protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
- return AzureBlobStorageTestAccount.create();
- }
-
-}
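The fault-injection tests in this class all follow the same recipe: spy the file system, intercept getThreadPoolExecutor() to return a spied AzureFileSystemThreadPoolExecutor, and make the underlying pool misbehave. A stripped-down sketch of the rejecting-pool step with Mockito (RejectingPoolSketch is illustrative only, not part of this patch):

  import java.util.concurrent.RejectedExecutionException;
  import java.util.concurrent.ThreadPoolExecutor;

  import org.mockito.Mockito;

  final class RejectingPoolSketch {

    // A ThreadPoolExecutor mock whose execute() always rejects, forcing
    // the code under test onto its serial fallback path.
    static ThreadPoolExecutor rejectingPool() {
      ThreadPoolExecutor pool = Mockito.mock(ThreadPoolExecutor.class);
      Mockito.doThrow(new RejectedExecutionException())
          .when(pool).execute(Mockito.any(Runnable.class));
      return pool;
    }
  }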
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFSAuthWithBlobSpecificKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFSAuthWithBlobSpecificKeys.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFSAuthWithBlobSpecificKeys.java
deleted file mode 100644
index 6149154..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFSAuthWithBlobSpecificKeys.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import org.apache.hadoop.conf.Configuration;
-
-import static org.apache.hadoop.fs.azure.SecureStorageInterfaceImpl.KEY_USE_CONTAINER_SASKEY_FOR_ALL_ACCESS;
-
-/**
- * Test class to hold all WASB authorization tests that use blob-specific keys
- * to access storage.
- */
-public class TestNativeAzureFSAuthWithBlobSpecificKeys
- extends TestNativeAzureFileSystemAuthorizationWithOwner {
-
- @Override
- public Configuration getConfiguration() {
- Configuration conf = super.getConfiguration();
- conf.set(KEY_USE_CONTAINER_SASKEY_FOR_ALL_ACCESS, "false");
- return conf;
- }
-
- @Override
- protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
- Configuration conf = getConfiguration();
- return AzureBlobStorageTestAccount.create(conf);
- }
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFSAuthorizationCaching.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFSAuthorizationCaching.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFSAuthorizationCaching.java
deleted file mode 100644
index 84558f8..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFSAuthorizationCaching.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.ContractTestUtils;
-import org.junit.Test;
-
-import static org.apache.hadoop.fs.azure.CachingAuthorizer.KEY_AUTH_SERVICE_CACHING_ENABLE;
-
-/**
- * Test class to hold all WASB authorization caching related tests.
- */
-public class TestNativeAzureFSAuthorizationCaching
- extends TestNativeAzureFileSystemAuthorizationWithOwner {
-
- private static final int DUMMY_TTL_VALUE = 5000;
-
- @Override
- public Configuration getConfiguration() {
- Configuration conf = super.getConfiguration();
- conf.set(KEY_AUTH_SERVICE_CACHING_ENABLE, "true");
- return conf;
- }
-
- @Override
- protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
- Configuration conf = getConfiguration();
- return AzureBlobStorageTestAccount.create(conf);
- }
-
- /**
- * Test to verify cache behavior: assert that put() overwrites an existing value.
- */
- @Test
- public void testCachePut() throws Throwable {
- CachingAuthorizer<String, Integer> cache = new CachingAuthorizer<>(DUMMY_TTL_VALUE, "TEST");
- cache.init(getConfiguration());
- cache.put("TEST", 1);
- cache.put("TEST", 3);
- int result = cache.get("TEST");
- ContractTestUtils.assertTrue("Cache returned unexpected result", result == 3);
- }
-}
\ No newline at end of file
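For readers unfamiliar with the caching semantics tested above: put() must overwrite an existing entry, and entries expire after the configured TTL. A hedged sketch of those semantics with a tiny stand-in cache (CachingAuthorizer's real implementation may differ):

  import java.util.HashMap;
  import java.util.Map;

  final class TtlCacheSketch<K, V> {
    private static final class Entry<V> {
      final V value;
      final long expiresAtMillis;
      Entry(V value, long expiresAtMillis) {
        this.value = value;
        this.expiresAtMillis = expiresAtMillis;
      }
    }

    private final Map<K, Entry<V>> map = new HashMap<>();
    private final long ttlMillis;

    TtlCacheSketch(long ttlMillis) {
      this.ttlMillis = ttlMillis;
    }

    // Unconditional put: a later put for the same key replaces the
    // earlier value, which is what testCachePut asserts.
    synchronized void put(K key, V value) {
      map.put(key, new Entry<V>(value, System.currentTimeMillis() + ttlMillis));
    }

    // Expired or absent entries read as null.
    synchronized V get(K key) {
      Entry<V> e = map.get(key);
      if (e == null || System.currentTimeMillis() > e.expiresAtMillis) {
        map.remove(key);
        return null;
      }
      return e.value;
    }
  }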
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFSPageBlobLive.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFSPageBlobLive.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFSPageBlobLive.java
deleted file mode 100644
index 208cff3..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFSPageBlobLive.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.azure;
-
-import org.apache.hadoop.conf.Configuration;
-
-/**
- * Run the base Azure file system tests strictly on page blobs to make sure fundamental
- * operations on page blob files and folders work as expected.
- * These operations include create, delete, rename, list, and so on.
- */
-public class TestNativeAzureFSPageBlobLive extends
- NativeAzureFileSystemBaseTest {
-
- @Override
- protected AzureBlobStorageTestAccount createTestAccount()
- throws Exception {
- Configuration conf = new Configuration();
-
- // Configure the page blob directories key so every file created is a page blob.
- conf.set(AzureNativeFileSystemStore.KEY_PAGE_BLOB_DIRECTORIES, "/");
-
- // Configure the atomic rename directories key so every folder will have
- // atomic rename applied.
- conf.set(AzureNativeFileSystemStore.KEY_ATOMIC_RENAME_DIRECTORIES, "/");
- return AzureBlobStorageTestAccount.create(conf);
- }
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAppend.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAppend.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAppend.java
deleted file mode 100644
index a2b35cb..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAppend.java
+++ /dev/null
@@ -1,362 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import java.io.IOException;
-import java.net.URI;
-import java.util.Arrays;
-
-import org.apache.commons.lang.RandomStringUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-public class TestNativeAzureFileSystemAppend extends AbstractWasbTestBase {
-
- private static final String TEST_FILE = "test.dat";
- private static final Path TEST_PATH = new Path(TEST_FILE);
-
- private AzureBlobStorageTestAccount testAccount = null;
-
- @Before
- public void setUp() throws Exception {
- super.setUp();
- testAccount = createTestAccount();
- fs = testAccount.getFileSystem();
- Configuration conf = fs.getConf();
- conf.setBoolean(NativeAzureFileSystem.APPEND_SUPPORT_ENABLE_PROPERTY_NAME, true);
- URI uri = fs.getUri();
- fs.initialize(uri, conf);
- }
-
- /*
- * Helper method that creates test data of the size given by the
- * "size" parameter.
- */
- private static byte[] getTestData(int size) {
- byte[] testData = new byte[size];
- System.arraycopy(RandomStringUtils.randomAlphabetic(size).getBytes(), 0, testData, 0, size);
- return testData;
- }
-
- // Helper method to create a file and write fileSize bytes of data to it.
- private byte[] createBaseFileWithData(int fileSize, Path testPath) throws Throwable {
-
- FSDataOutputStream createStream = null;
- try {
- createStream = fs.create(testPath);
- byte[] fileData = null;
-
- if (fileSize != 0) {
- fileData = getTestData(fileSize);
- createStream.write(fileData);
- }
- return fileData;
- } finally {
- if (createStream != null) {
- createStream.close();
- }
- }
- }
-
- /*
- * Helper method to verify that the next "dataLength" bytes of the file
- * match the expected test data.
- */
- private boolean verifyFileData(int dataLength, byte[] testData, int testDataIndex,
- FSDataInputStream srcStream) {
-
- try {
-
- byte[] fileBuffer = new byte[dataLength];
- byte[] testDataBuffer = new byte[dataLength];
-
- int fileBytesRead = srcStream.read(fileBuffer);
-
- if (fileBytesRead < dataLength) {
- return false;
- }
-
- System.arraycopy(testData, testDataIndex, testDataBuffer, 0, dataLength);
-
- if (!Arrays.equals(fileBuffer, testDataBuffer)) {
- return false;
- }
-
- return true;
-
- } catch (Exception ex) {
- return false;
- }
-
- }
-
- /*
- * Helper method to verify an append to the given test file.
- */
- private boolean verifyAppend(byte[] testData, Path testFile) {
-
- FSDataInputStream srcStream = null;
- try {
-
- srcStream = fs.open(testFile);
- int baseBufferSize = 2048;
- int testDataSize = testData.length;
- int testDataIndex = 0;
-
- while (testDataSize > baseBufferSize) {
-
- if (!verifyFileData(baseBufferSize, testData, testDataIndex, srcStream)) {
- return false;
- }
- testDataIndex += baseBufferSize;
- testDataSize -= baseBufferSize;
- }
-
- if (!verifyFileData(testDataSize, testData, testDataIndex, srcStream)) {
- return false;
- }
-
- return true;
- } catch(Exception ex) {
- return false;
- } finally {
- if (srcStream != null) {
- try {
- srcStream.close();
- } catch(IOException ioe) {
- // Swallowing
- }
- }
- }
- }
-
- /*
- * Test case to verify that appending a small amount of data works.
- * This tests append end-to-end.
- */
- @Test
- public void testSingleAppend() throws Throwable{
-
- FSDataOutputStream appendStream = null;
- try {
- int baseDataSize = 50;
- byte[] baseDataBuffer = createBaseFileWithData(baseDataSize, TEST_PATH);
-
- int appendDataSize = 20;
- byte[] appendDataBuffer = getTestData(appendDataSize);
- appendStream = fs.append(TEST_PATH, 10);
- appendStream.write(appendDataBuffer);
- appendStream.close();
- byte[] testData = new byte[baseDataSize + appendDataSize];
- System.arraycopy(baseDataBuffer, 0, testData, 0, baseDataSize);
- System.arraycopy(appendDataBuffer, 0, testData, baseDataSize, appendDataSize);
-
- Assert.assertTrue(verifyAppend(testData, TEST_PATH));
- } finally {
- if (appendStream != null) {
- appendStream.close();
- }
- }
- }
-
- /*
- * Test case to verify append to an empty file.
- */
- @Test
- public void testSingleAppendOnEmptyFile() throws Throwable {
-
- FSDataOutputStream appendStream = null;
-
- try {
- createBaseFileWithData(0, TEST_PATH);
-
- int appendDataSize = 20;
- byte[] appendDataBuffer = getTestData(appendDataSize);
- appendStream = fs.append(TEST_PATH, 10);
- appendStream.write(appendDataBuffer);
- appendStream.close();
-
- Assert.assertTrue(verifyAppend(appendDataBuffer, TEST_PATH));
- } finally {
- if (appendStream != null) {
- appendStream.close();
- }
- }
- }
-
- /*
- * Test to verify that only one append stream can be open on a file at a time.
- */
- @Test
- public void testSingleAppenderScenario() throws Throwable {
-
- FSDataOutputStream appendStream1 = null;
- FSDataOutputStream appendStream2 = null;
- IOException ioe = null;
- try {
- createBaseFileWithData(0, TEST_PATH);
- appendStream1 = fs.append(TEST_PATH, 10);
- boolean encounteredException = false;
- try {
- appendStream2 = fs.append(TEST_PATH, 10);
- } catch(IOException ex) {
- encounteredException = true;
- ioe = ex;
- }
-
- appendStream1.close();
-
- Assert.assertTrue(encounteredException);
- GenericTestUtils.assertExceptionContains("Unable to set Append lease on the Blob", ioe);
- } finally {
- if (appendStream1 != null) {
- appendStream1.close();
- }
-
- if (appendStream2 != null) {
- appendStream2.close();
- }
- }
- }
-
- /*
- * Test to verify multiple appends to a blob.
- */
- @Test
- public void testMultipleAppends() throws Throwable {
-
- int baseDataSize = 50;
- byte[] baseDataBuffer = createBaseFileWithData(baseDataSize, TEST_PATH);
-
- int appendDataSize = 100;
- int targetAppendCount = 50;
- byte[] testData = new byte[baseDataSize + (appendDataSize*targetAppendCount)];
- int testDataIndex = 0;
- System.arraycopy(baseDataBuffer, 0, testData, testDataIndex, baseDataSize);
- testDataIndex += baseDataSize;
-
- int appendCount = 0;
-
- FSDataOutputStream appendStream = null;
-
- try {
- while (appendCount < targetAppendCount) {
-
- byte[] appendDataBuffer = getTestData(appendDataSize);
- appendStream = fs.append(TEST_PATH, 30);
- appendStream.write(appendDataBuffer);
- appendStream.close();
-
- System.arraycopy(appendDataBuffer, 0, testData, testDataIndex, appendDataSize);
- testDataIndex += appendDataSize;
- appendCount++;
- }
-
- Assert.assertTrue(verifyAppend(testData, TEST_PATH));
-
- } finally {
- if (appendStream != null) {
- appendStream.close();
- }
- }
- }
-
- /*
- * Test to verify multiple appends on the same stream.
- */
- @Test
- public void testMultipleAppendsOnSameStream() throws Throwable {
-
- int baseDataSize = 50;
- byte[] baseDataBuffer = createBaseFileWithData(baseDataSize, TEST_PATH);
- int appendDataSize = 100;
- int targetAppendCount = 50;
- byte[] testData = new byte[baseDataSize + (appendDataSize*targetAppendCount)];
- int testDataIndex = 0;
- System.arraycopy(baseDataBuffer, 0, testData, testDataIndex, baseDataSize);
- testDataIndex += baseDataSize;
- int appendCount = 0;
-
- FSDataOutputStream appendStream = null;
-
- try {
-
- while (appendCount < targetAppendCount) {
-
- appendStream = fs.append(TEST_PATH, 50);
-
- int singleAppendChunkSize = 20;
- int appendRunSize = 0;
- while (appendRunSize < appendDataSize) {
-
- byte[] appendDataBuffer = getTestData(singleAppendChunkSize);
- appendStream.write(appendDataBuffer);
- System.arraycopy(appendDataBuffer, 0, testData,
- testDataIndex + appendRunSize, singleAppendChunkSize);
-
- appendRunSize += singleAppendChunkSize;
- }
-
- appendStream.close();
- testDataIndex += appendDataSize;
- appendCount++;
- }
-
- Assert.assertTrue(verifyAppend(testData, TEST_PATH));
- } finally {
- if (appendStream != null) {
- appendStream.close();
- }
- }
- }
-
- @Test(expected=UnsupportedOperationException.class)
- /*
- * Test to verify the behavior when Append Support configuration flag is set to false
- */
- public void testFalseConfigurationFlagBehavior() throws Throwable {
-
- fs = testAccount.getFileSystem();
- Configuration conf = fs.getConf();
- conf.setBoolean(NativeAzureFileSystem.APPEND_SUPPORT_ENABLE_PROPERTY_NAME, false);
- URI uri = fs.getUri();
- fs.initialize(uri, conf);
-
- FSDataOutputStream appendStream = null;
-
- try {
- createBaseFileWithData(0, TEST_PATH);
- appendStream = fs.append(TEST_PATH, 10);
- } finally {
- if (appendStream != null) {
- appendStream.close();
- }
- }
- }
-
- @Override
- protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
- return AzureBlobStorageTestAccount.create();
- }
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAtomicRenameDirList.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAtomicRenameDirList.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAtomicRenameDirList.java
deleted file mode 100644
index 602c1f7..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAtomicRenameDirList.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import java.io.IOException;
-import java.net.URI;
-
-import org.apache.hadoop.conf.Configuration;
-import org.junit.Test;
-
-public class TestNativeAzureFileSystemAtomicRenameDirList
- extends AbstractWasbTestBase {
- private AzureBlobStorageTestAccount testAccount;
-
- // HBase-site config controlling HBase root dir
- private static final String HBASE_ROOT_DIR_CONF_STRING = "hbase.rootdir";
- private static final String HBASE_ROOT_DIR_VALUE_ON_DIFFERENT_FS = "wasb://somedifferentfilesystem.blob.core.windows.net/hbase";
- @Override
- protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
- testAccount = AzureBlobStorageTestAccount.create();
- return testAccount;
- }
-
- @Test
- public void testAzureNativeStoreIsAtomicRenameKeyDoesNotThrowNPEOnInitializingWithNonDefaultURI () throws IOException {
- NativeAzureFileSystem azureFs = (NativeAzureFileSystem)fs;
- AzureNativeFileSystemStore azureStore = azureFs.getStore();
- Configuration conf = fs.getConf();
- conf.set(HBASE_ROOT_DIR_CONF_STRING, HBASE_ROOT_DIR_VALUE_ON_DIFFERENT_FS);
- URI uri = fs.getUri();
- fs.initialize(uri, conf);
- azureStore.isAtomicRenameKey("anyrandomkey");
- }
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsWithThreads.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsWithThreads.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsWithThreads.java
new file mode 100644
index 0000000..4389fda
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsWithThreads.java
@@ -0,0 +1,821 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.net.URI;
+import java.util.concurrent.RejectedExecutionException;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.azure.NativeAzureFileSystem.FolderRenamePending;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+import org.mockito.Mockito;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+
+/**
+ * Tests the Native Azure file system (WASB) using parallel threads for rename and delete operations.
+ */
+public class ITestFileSystemOperationsWithThreads extends AbstractWasbTestBase {
+
+ private final int renameThreads = 10;
+ private final int deleteThreads = 20;
+ private int iterations = 1;
+ private LogCapturer logs = null;
+
+ @Rule
+ public ExpectedException exception = ExpectedException.none();
+
+ @Before
+ public void setUp() throws Exception {
+ super.setUp();
+ Configuration conf = fs.getConf();
+
+ // By default enable parallel threads for rename and delete operations.
+ // Also enable flat listing of blobs for these operations.
+ conf.setInt(NativeAzureFileSystem.AZURE_RENAME_THREADS, renameThreads);
+ conf.setInt(NativeAzureFileSystem.AZURE_DELETE_THREADS, deleteThreads);
+ conf.setBoolean(AzureNativeFileSystemStore.KEY_ENABLE_FLAT_LISTING, true);
+
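+    // Re-initialize the file system so that it picks up the updated
+    // thread-count and flat-listing settings.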
+ URI uri = fs.getUri();
+ fs.initialize(uri, conf);
+
+ // Capture logs
+ logs = LogCapturer.captureLogs(new Log4JLogger(org.apache.log4j.Logger
+ .getRootLogger()));
+ }
+
+ /*
+ * Helper method to create sub directory and different types of files
+ * for multiple iterations.
+ */
+ private void createFolder(FileSystem fs, String root) throws Exception {
+ fs.mkdirs(new Path(root));
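+    // Each iteration creates 7 blobs under the root: one explicit directory,
+    // the two implicit parent directories of "file/to/rename", and four
+    // files. The small-folder tests below rely on this count of 7.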
+ for (int i = 0; i < this.iterations; i++) {
+ fs.mkdirs(new Path(root + "/" + i));
+ fs.createNewFile(new Path(root + "/" + i + "/fileToRename"));
+ fs.createNewFile(new Path(root + "/" + i + "/file/to/rename"));
+ fs.createNewFile(new Path(root + "/" + i + "/file+to%rename"));
+ fs.createNewFile(new Path(root + "/fileToRename" + i));
+ }
+ }
+
+ /*
+   * Helper method to perform a rename and validate that the files no longer
+   * exist under the source folder and that matching files exist under the
+   * destination folder.
+ */
+ private void validateRenameFolder(FileSystem fs, String source, String dest) throws Exception {
+ // Create source folder with files.
+ createFolder(fs, source);
+ Path sourceFolder = new Path(source);
+ Path destFolder = new Path(dest);
+
+ // rename operation
+ assertTrue(fs.rename(sourceFolder, destFolder));
+ assertTrue(fs.exists(destFolder));
+
+ for (int i = 0; i < this.iterations; i++) {
+      // Check that the destination folder and files exist.
+ assertTrue(fs.exists(new Path(dest + "/" + i)));
+ assertTrue(fs.exists(new Path(dest + "/" + i + "/fileToRename")));
+ assertTrue(fs.exists(new Path(dest + "/" + i + "/file/to/rename")));
+ assertTrue(fs.exists(new Path(dest + "/" + i + "/file+to%rename")));
+ assertTrue(fs.exists(new Path(dest + "/fileToRename" + i)));
+
+      // Check that the source folder and files no longer exist.
+ assertFalse(fs.exists(new Path(source + "/" + i)));
+ assertFalse(fs.exists(new Path(source + "/" + i + "/fileToRename")));
+ assertFalse(fs.exists(new Path(source + "/" + i + "/file/to/rename")));
+ assertFalse(fs.exists(new Path(source + "/" + i + "/file+to%rename")));
+ assertFalse(fs.exists(new Path(source + "/fileToRename" + i)));
+ }
+ }
+
+ /*
+ * Test case for rename operation with multiple threads and flat listing enabled.
+ */
+ @Test
+ public void testRenameSmallFolderWithThreads() throws Exception {
+
+ validateRenameFolder(fs, "root", "rootnew");
+
+    // With a single iteration, we would have created 7 blobs.
+ int expectedThreadsCreated = Math.min(7, renameThreads);
+
+ // Validate from logs that threads are created.
+ String content = logs.getOutput();
+ assertInLog(content, "ms with threads: " + expectedThreadsCreated);
+
+ // Validate thread executions
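+    // Worker threads are named "<prefix>-<parent thread name>-<index>";
+    // finding each name in the log confirms the thread actually ran.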
+ for (int i = 0; i < expectedThreadsCreated; i++) {
+ assertInLog(content,
+ "AzureBlobRenameThread-" + Thread.currentThread().getName() + "-" + i);
+ }
+
+ // Also ensure that we haven't spawned extra threads.
+ if (expectedThreadsCreated < renameThreads) {
+ for (int i = expectedThreadsCreated; i < renameThreads; i++) {
+ assertNotInLog(content,
+ "AzureBlobRenameThread-" + Thread.currentThread().getName() + "-" + i);
+ }
+ }
+ }
+
+ /*
+ * Test case for rename operation with multiple threads and flat listing enabled.
+ */
+ @Test
+ public void testRenameLargeFolderWithThreads() throws Exception {
+
+ // Populate source folder with large number of files and directories.
+ this.iterations = 10;
+ validateRenameFolder(fs, "root", "rootnew");
+
+ // Validate from logs that threads are created.
+ String content = logs.getOutput();
+ assertInLog(content, "ms with threads: " + renameThreads);
+
+ // Validate thread executions
+ for (int i = 0; i < renameThreads; i++) {
+ assertInLog(content,
+ "AzureBlobRenameThread-" + Thread.currentThread().getName() + "-" + i);
+ }
+ }
+
+ /*
+ * Test case for rename operation with threads disabled and flat listing enabled.
+ */
+ @Test
+ public void testRenameLargeFolderDisableThreads() throws Exception {
+ Configuration conf = fs.getConf();
+
+ // Number of threads set to 0 or 1 disables threads.
+ conf.setInt(NativeAzureFileSystem.AZURE_RENAME_THREADS, 0);
+ URI uri = fs.getUri();
+ fs.initialize(uri, conf);
+
+ // Populate source folder with large number of files and directories.
+ this.iterations = 10;
+ validateRenameFolder(fs, "root", "rootnew");
+
+ // Validate from logs that threads are disabled.
+ String content = logs.getOutput();
+ assertInLog(content,
+ "Disabling threads for Rename operation as thread count 0");
+
+ // Validate no thread executions
+ for (int i = 0; i < renameThreads; i++) {
+ String term = "AzureBlobRenameThread-"
+ + Thread.currentThread().getName()
+ + "-" + i;
+ assertNotInLog(content, term);
+ }
+ }
+
+ /**
+ * Assert that a log contains the given term.
+ * @param content log output
+ * @param term search term
+ */
+ protected void assertInLog(String content, String term) {
+ assertTrue("Empty log", !content.isEmpty());
+ if (!content.contains(term)) {
+ String message = "No " + term + " found in logs";
+ LOG.error(message);
+ System.err.println(content);
+ fail(message);
+ }
+ }
+
+ /**
+ * Assert that a log does not contain the given term.
+ * @param content log output
+ * @param term search term
+ */
+ protected void assertNotInLog(String content, String term) {
+ assertTrue("Empty log", !content.isEmpty());
+ if (content.contains(term)) {
+ String message = term + " found in logs";
+ LOG.error(message);
+ System.err.println(content);
+ fail(message);
+ }
+ }
+
+ /*
+ * Test case for rename operation with threads and flat listing disabled.
+ */
+ @Test
+ public void testRenameSmallFolderDisableThreadsDisableFlatListing() throws Exception {
+    Configuration conf = fs.getConf();
+
+ // Number of threads set to 0 or 1 disables threads.
+ conf.setInt(NativeAzureFileSystem.AZURE_RENAME_THREADS, 1);
+ conf.setBoolean(AzureNativeFileSystemStore.KEY_ENABLE_FLAT_LISTING, false);
+ URI uri = fs.getUri();
+ fs.initialize(uri, conf);
+
+ validateRenameFolder(fs, "root", "rootnew");
+
+ // Validate from logs that threads are disabled.
+ String content = logs.getOutput();
+ assertInLog(content,
+ "Disabling threads for Rename operation as thread count 1");
+
+ // Validate no thread executions
+ for (int i = 0; i < renameThreads; i++) {
+ assertNotInLog(content,
+ "AzureBlobRenameThread-" + Thread.currentThread().getName() + "-" + i);
+ }
+ }
+
+ /*
+   * Helper method to perform a delete and validate that no files remain
+   * under the source folder afterwards.
+ */
+ private void validateDeleteFolder(FileSystem fs, String source) throws Exception {
+ // Create folder with files.
+    createFolder(fs, source);
+ Path sourceFolder = new Path(source);
+
+ // Delete operation
+ assertTrue(fs.delete(sourceFolder, true));
+ assertFalse(fs.exists(sourceFolder));
+
+ for (int i = 0; i < this.iterations; i++) {
+      // Check that the source folder and files no longer exist.
+ assertFalse(fs.exists(new Path(source + "/" + i)));
+ assertFalse(fs.exists(new Path(source + "/" + i + "/fileToRename")));
+ assertFalse(fs.exists(new Path(source + "/" + i + "/file/to/rename")));
+ assertFalse(fs.exists(new Path(source + "/" + i + "/file+to%rename")));
+ assertFalse(fs.exists(new Path(source + "/fileToRename" + i)));
+ }
+ }
+
+ /*
+ * Test case for delete operation with multiple threads and flat listing enabled.
+ */
+ @Test
+ public void testDeleteSmallFolderWithThreads() throws Exception {
+
+ validateDeleteFolder(fs, "root");
+
+    // With a single iteration, we would have created 7 blobs.
+ int expectedThreadsCreated = Math.min(7, deleteThreads);
+
+ // Validate from logs that threads are enabled.
+ String content = logs.getOutput();
+ assertInLog(content, "ms with threads: " + expectedThreadsCreated);
+
+ // Validate thread executions
+ for (int i = 0; i < expectedThreadsCreated; i++) {
+ assertInLog(content,
+ "AzureBlobDeleteThread-" + Thread.currentThread().getName() + "-" + i);
+ }
+
+ // Also ensure that we haven't spawned extra threads.
+ if (expectedThreadsCreated < deleteThreads) {
+ for (int i = expectedThreadsCreated; i < deleteThreads; i++) {
+ assertNotInLog(content,
+ "AzureBlobDeleteThread-" + Thread.currentThread().getName() + "-" + i);
+ }
+ }
+ }
+
+ /*
+ * Test case for delete operation with multiple threads and flat listing enabled.
+ */
+ @Test
+ public void testDeleteLargeFolderWithThreads() throws Exception {
+ // Populate source folder with large number of files and directories.
+ this.iterations = 10;
+ validateDeleteFolder(fs, "root");
+
+ // Validate from logs that threads are enabled.
+ String content = logs.getOutput();
+ assertInLog(content, "ms with threads: " + deleteThreads);
+
+ // Validate thread executions
+ for (int i = 0; i < deleteThreads; i++) {
+ assertInLog(content,
+ "AzureBlobDeleteThread-" + Thread.currentThread().getName() + "-" + i);
+ }
+ }
+
+ /*
+ * Test case for delete operation with threads disabled and flat listing enabled.
+ */
+ @Test
+ public void testDeleteLargeFolderDisableThreads() throws Exception {
+ Configuration conf = fs.getConf();
+ conf.setInt(NativeAzureFileSystem.AZURE_DELETE_THREADS, 0);
+ URI uri = fs.getUri();
+ fs.initialize(uri, conf);
+
+ // Populate source folder with large number of files and directories.
+ this.iterations = 10;
+ validateDeleteFolder(fs, "root");
+
+ // Validate from logs that threads are disabled.
+ String content = logs.getOutput();
+ assertInLog(content,
+ "Disabling threads for Delete operation as thread count 0");
+
+ // Validate no thread executions
+ for (int i = 0; i < deleteThreads; i++) {
+ assertNotInLog(content,
+ "AzureBlobDeleteThread-" + Thread.currentThread().getName() + "-" + i);
+ }
+ }
+
+ /*
+   * Test case for delete operation with threads and flat listing disabled.
+ */
+ @Test
+ public void testDeleteSmallFolderDisableThreadsDisableFlatListing() throws Exception {
+ Configuration conf = fs.getConf();
+
+ // Number of threads set to 0 or 1 disables threads.
+ conf.setInt(NativeAzureFileSystem.AZURE_DELETE_THREADS, 1);
+ conf.setBoolean(AzureNativeFileSystemStore.KEY_ENABLE_FLAT_LISTING, false);
+ URI uri = fs.getUri();
+ fs.initialize(uri, conf);
+
+ validateDeleteFolder(fs, "root");
+
+ // Validate from logs that threads are disabled.
+ String content = logs.getOutput();
+ assertInLog(content,
+ "Disabling threads for Delete operation as thread count 1");
+
+ // Validate no thread executions
+ for (int i = 0; i < deleteThreads; i++) {
+ assertNotInLog(content,
+ "AzureBlobDeleteThread-" + Thread.currentThread().getName() + "-" + i);
+ }
+ }
+
+ /*
+   * Test case for the delete operation when thread pool creation fails:
+   * the operation should fall back to serial execution.
+ */
+ @Test
+ public void testDeleteThreadPoolExceptionFailure() throws Exception {
+
+ // Spy azure file system object and raise exception for new thread pool
+ NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs);
+
+ String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root")));
+
+ AzureFileSystemThreadPoolExecutor mockThreadPoolExecutor = Mockito.spy(
+ mockFs.getThreadPoolExecutor(deleteThreads, "AzureBlobDeleteThread", "Delete",
+ path, NativeAzureFileSystem.AZURE_DELETE_THREADS));
+ Mockito.when(mockThreadPoolExecutor.getThreadPool(7)).thenThrow(new Exception());
+
+    // With a single iteration, we would have created 7 blobs, resulting in 7 threads.
+ Mockito.when(mockFs.getThreadPoolExecutor(deleteThreads, "AzureBlobDeleteThread", "Delete",
+ path, NativeAzureFileSystem.AZURE_DELETE_THREADS)).thenReturn(mockThreadPoolExecutor);
+
+ validateDeleteFolder(mockFs, "root");
+
+    // Validate from logs that thread pool creation failed and the operation was serialized.
+ String content = logs.getOutput();
+ assertInLog(content, "Failed to create thread pool with threads");
+ assertInLog(content, "Serializing the Delete operation");
+ }
+
+ /*
+   * Test case for the delete operation when the thread pool rejects all
+   * submissions: the operation should fall back to serial execution.
+ */
+ @Test
+ public void testDeleteThreadPoolExecuteFailure() throws Exception {
+
+ // Mock thread pool executor to throw exception for all requests.
+ ThreadPoolExecutor mockThreadExecutor = Mockito.mock(ThreadPoolExecutor.class);
+ Mockito.doThrow(new RejectedExecutionException()).when(mockThreadExecutor).execute(Mockito.any(Runnable.class));
+
+ // Spy azure file system object and return mocked thread pool
+ NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs);
+
+ String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root")));
+
+ AzureFileSystemThreadPoolExecutor mockThreadPoolExecutor = Mockito.spy(
+ mockFs.getThreadPoolExecutor(deleteThreads, "AzureBlobDeleteThread", "Delete",
+ path, NativeAzureFileSystem.AZURE_DELETE_THREADS));
+ Mockito.when(mockThreadPoolExecutor.getThreadPool(7)).thenReturn(mockThreadExecutor);
+
+    // With a single iteration, we would have created 7 blobs, resulting in 7 threads.
+ Mockito.when(mockFs.getThreadPoolExecutor(deleteThreads, "AzureBlobDeleteThread", "Delete",
+ path, NativeAzureFileSystem.AZURE_DELETE_THREADS)).thenReturn(mockThreadPoolExecutor);
+
+ validateDeleteFolder(mockFs, "root");
+
+    // Validate from logs that execution was rejected and the operation was serialized.
+ String content = logs.getOutput();
+ assertInLog(content,
+ "Rejected execution of thread for Delete operation on blob");
+ assertInLog(content, "Serializing the Delete operation");
+ }
+
+ /*
+   * Test case for the delete operation when all but the first thread
+   * submission is rejected: the work should complete on fewer threads.
+ */
+ @Test
+ public void testDeleteThreadPoolExecuteSingleThreadFailure() throws Exception {
+
+ // Spy azure file system object and return mocked thread pool
+ NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs);
+
+ // Spy a thread pool executor and link it to azure file system object.
+ String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root")));
+ AzureFileSystemThreadPoolExecutor mockThreadPoolExecutor = Mockito.spy(
+ mockFs.getThreadPoolExecutor(deleteThreads, "AzureBlobDeleteThread", "Delete",
+ path, NativeAzureFileSystem.AZURE_DELETE_THREADS));
+
+    // With a single iteration, we would have created 7 blobs, resulting in 7 threads.
+ Mockito.when(mockFs.getThreadPoolExecutor(deleteThreads, "AzureBlobDeleteThread", "Delete",
+ path, NativeAzureFileSystem.AZURE_DELETE_THREADS)).thenReturn(mockThreadPoolExecutor);
+
+ // Create a thread executor and link it to mocked thread pool executor object.
+ ThreadPoolExecutor mockThreadExecutor = Mockito.spy(mockThreadPoolExecutor.getThreadPool(7));
+ Mockito.when(mockThreadPoolExecutor.getThreadPool(7)).thenReturn(mockThreadExecutor);
+
+    // Let the first execute() call run for real, then reject all subsequent submissions.
+ Mockito.doCallRealMethod().doThrow(new RejectedExecutionException()).when(mockThreadExecutor).execute(Mockito.any(Runnable.class));
+
+ validateDeleteFolder(mockFs, "root");
+
+    // Validate from logs that threads are enabled and that some threads went unused.
+ String content = logs.getOutput();
+ assertInLog(content,
+ "Using thread pool for Delete operation with threads 7");
+ assertInLog(content,
+ "6 threads not used for Delete operation on blob");
+ }
+
+ /*
+   * Test case for the delete operation when awaiting thread pool
+   * termination is interrupted: the delete should fail.
+ */
+ @Test
+ public void testDeleteThreadPoolTerminationFailure() throws Exception {
+
+ // Spy azure file system object and return mocked thread pool
+ NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs);
+
+ // Spy a thread pool executor and link it to azure file system object.
+ String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root")));
+ AzureFileSystemThreadPoolExecutor mockThreadPoolExecutor = Mockito.spy(
+ ((NativeAzureFileSystem) fs).getThreadPoolExecutor(deleteThreads, "AzureBlobDeleteThread", "Delete",
+ path, NativeAzureFileSystem.AZURE_DELETE_THREADS));
+
+ // Create a thread executor and link it to mocked thread pool executor object.
+    // Mock the thread executor to throw InterruptedException while awaiting termination.
+ ThreadPoolExecutor mockThreadExecutor = Mockito.mock(ThreadPoolExecutor.class);
+ Mockito.doNothing().when(mockThreadExecutor).execute(Mockito.any(Runnable.class));
+ Mockito.when(mockThreadExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS)).thenThrow(new InterruptedException());
+
+ Mockito.when(mockThreadPoolExecutor.getThreadPool(7)).thenReturn(mockThreadExecutor);
+
+    // With a single iteration, we would have created 7 blobs, resulting in 7 threads.
+ Mockito.when(mockFs.getThreadPoolExecutor(deleteThreads, "AzureBlobDeleteThread", "Delete",
+ path, NativeAzureFileSystem.AZURE_DELETE_THREADS)).thenReturn(mockThreadPoolExecutor);
+
+ createFolder(mockFs, "root");
+ Path sourceFolder = new Path("root");
+ boolean exception = false;
+ try {
+ mockFs.delete(sourceFolder, true);
+ } catch (IOException e){
+ exception = true;
+ }
+
+ assertTrue(exception);
+ assertTrue(mockFs.exists(sourceFolder));
+
+    // Validate from logs that threads are enabled and the delete operation failed.
+ String content = logs.getOutput();
+ assertInLog(content,
+ "Using thread pool for Delete operation with threads");
+ assertInLog(content, "Threads got interrupted Delete blob operation");
+ assertInLog(content,
+ "Delete failed as operation on subfolders and files failed.");
+ }
+
+ /*
+ * Validate that when a directory is deleted recursively, the operation succeeds
+ * even if a child directory delete fails because the directory does not exist.
+ * This can happen if a child directory is deleted by an external agent while
+   * the parent is in the process of being deleted recursively.
+ */
+ @Test
+ public void testRecursiveDirectoryDeleteWhenChildDirectoryDeleted()
+ throws Exception {
+    testRecursiveDirectoryDelete(true);
+ }
+
+ /*
+ * Validate that when a directory is deleted recursively, the operation succeeds
+ * even if a file delete fails because it does not exist.
+ * This can happen if a file is deleted by an external agent while
+   * the parent directory is in the process of being deleted.
+ */
+ @Test
+ public void testRecursiveDirectoryDeleteWhenDeletingChildFileReturnsFalse()
+ throws Exception {
+    testRecursiveDirectoryDelete(false);
+ }
+
+  private void testRecursiveDirectoryDelete(boolean useDir) throws Exception {
+ String childPathToBeDeletedByExternalAgent = (useDir)
+ ? "root/0"
+ : "root/0/fileToRename";
+    // Spy the file system and, for one child, perform the real delete but
+    // report false, simulating deletion by an external agent.
+ NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs);
+ String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path(
+ childPathToBeDeletedByExternalAgent)));
+
+ Answer<Boolean> answer = new Answer<Boolean>() {
+ public Boolean answer(InvocationOnMock invocation) throws Throwable {
+ String path = (String) invocation.getArguments()[0];
+ boolean isDir = (boolean) invocation.getArguments()[1];
+ boolean realResult = fs.deleteFile(path, isDir);
+ assertTrue(realResult);
+ boolean fakeResult = false;
+ return fakeResult;
+ }
+ };
+
+ Mockito.when(mockFs.deleteFile(path, useDir)).thenAnswer(answer);
+
+ createFolder(mockFs, "root");
+ Path sourceFolder = new Path("root");
+
+ assertTrue(mockFs.delete(sourceFolder, true));
+ assertFalse(mockFs.exists(sourceFolder));
+
+ // Validate from logs that threads are enabled, that a child directory was
+ // deleted by an external caller, and the parent delete operation still
+ // succeeds.
+ String content = logs.getOutput();
+ assertInLog(content,
+ "Using thread pool for Delete operation with threads");
+ assertInLog(content, String.format("Attempt to delete non-existent %s %s",
+ useDir ? "directory" : "file", path));
+ }
+
+ /*
+   * Test case for the delete operation when deleting one file throws an
+   * exception: the whole operation should fail.
+ */
+ @Test
+ public void testDeleteSingleDeleteException() throws Exception {
+
+ // Spy azure file system object and raise exception for deleting one file
+ NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs);
+ String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root/0")));
+ Mockito.doThrow(new IOException()).when(mockFs).deleteFile(path, true);
+
+ createFolder(mockFs, "root");
+ Path sourceFolder = new Path("root");
+
+ boolean exception = false;
+ try {
+ mockFs.delete(sourceFolder, true);
+ } catch (IOException e){
+ exception = true;
+ }
+
+ assertTrue(exception);
+ assertTrue(mockFs.exists(sourceFolder));
+
+ // Validate from logs that threads are enabled and delete operation failed.
+ String content = logs.getOutput();
+ assertInLog(content,
+ "Using thread pool for Delete operation with threads");
+ assertInLog(content,
+ "Encountered Exception for Delete operation for file " + path);
+ assertInLog(content,
+ "Terminating execution of Delete operation now as some other thread already got exception or operation failed");
+ }
+
+ /*
+   * Test case for the rename operation when thread pool creation fails:
+   * the operation should fall back to serial execution.
+ */
+ @Test
+ public void testRenameThreadPoolExceptionFailure() throws Exception {
+
+ // Spy azure file system object and raise exception for new thread pool
+ NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs);
+
+ String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root")));
+ AzureFileSystemThreadPoolExecutor mockThreadPoolExecutor = Mockito.spy(
+ ((NativeAzureFileSystem) fs).getThreadPoolExecutor(renameThreads, "AzureBlobRenameThread", "Rename",
+ path, NativeAzureFileSystem.AZURE_RENAME_THREADS));
+ Mockito.when(mockThreadPoolExecutor.getThreadPool(7)).thenThrow(new Exception());
+
+    // With a single iteration, we would have created 7 blobs, resulting in 7 threads.
+ Mockito.doReturn(mockThreadPoolExecutor).when(mockFs).getThreadPoolExecutor(renameThreads, "AzureBlobRenameThread", "Rename",
+ path, NativeAzureFileSystem.AZURE_RENAME_THREADS);
+
+ validateRenameFolder(mockFs, "root", "rootnew");
+
+    // Validate from logs that thread pool creation failed and the operation was serialized.
+ String content = logs.getOutput();
+ assertInLog(content, "Failed to create thread pool with threads");
+ assertInLog(content, "Serializing the Rename operation");
+ }
+
+ /*
+   * Test case for the rename operation when the thread pool rejects all
+   * submissions: the operation should fall back to serial execution.
+ */
+ @Test
+ public void testRenameThreadPoolExecuteFailure() throws Exception {
+
+ // Mock thread pool executor to throw exception for all requests.
+ ThreadPoolExecutor mockThreadExecutor = Mockito.mock(ThreadPoolExecutor.class);
+ Mockito.doThrow(new RejectedExecutionException()).when(mockThreadExecutor).execute(Mockito.any(Runnable.class));
+
+ // Spy azure file system object and return mocked thread pool
+ NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs);
+
+ String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root")));
+ AzureFileSystemThreadPoolExecutor mockThreadPoolExecutor = Mockito.spy(
+ mockFs.getThreadPoolExecutor(renameThreads, "AzureBlobRenameThread", "Rename",
+ path, NativeAzureFileSystem.AZURE_RENAME_THREADS));
+ Mockito.when(mockThreadPoolExecutor.getThreadPool(7)).thenReturn(mockThreadExecutor);
+
+    // With a single iteration, we would have created 7 blobs, resulting in 7 threads.
+ Mockito.when(mockFs.getThreadPoolExecutor(renameThreads, "AzureBlobRenameThread", "Rename",
+ path, NativeAzureFileSystem.AZURE_RENAME_THREADS)).thenReturn(mockThreadPoolExecutor);
+
+ validateRenameFolder(mockFs, "root", "rootnew");
+
+    // Validate from logs that execution was rejected and the operation was serialized.
+ String content = logs.getOutput();
+ assertInLog(content,
+ "Rejected execution of thread for Rename operation on blob");
+ assertInLog(content, "Serializing the Rename operation");
+ }
+
+ /*
+   * Test case for the rename operation when all but the first thread
+   * submission is rejected: the work should complete on fewer threads.
+ */
+ @Test
+ public void testRenameThreadPoolExecuteSingleThreadFailure() throws Exception {
+
+ // Spy azure file system object and return mocked thread pool
+ NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs);
+
+ // Spy a thread pool executor and link it to azure file system object.
+ String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root")));
+ AzureFileSystemThreadPoolExecutor mockThreadPoolExecutor = Mockito.spy(
+ mockFs.getThreadPoolExecutor(renameThreads, "AzureBlobRenameThread", "Rename",
+ path, NativeAzureFileSystem.AZURE_RENAME_THREADS));
+
+    // With a single iteration, we would have created 7 blobs, resulting in 7 threads.
+ Mockito.when(mockFs.getThreadPoolExecutor(renameThreads, "AzureBlobRenameThread", "Rename",
+ path, NativeAzureFileSystem.AZURE_RENAME_THREADS)).thenReturn(mockThreadPoolExecutor);
+
+ // Create a thread executor and link it to mocked thread pool executor object.
+ ThreadPoolExecutor mockThreadExecutor = Mockito.spy(mockThreadPoolExecutor.getThreadPool(7));
+ Mockito.when(mockThreadPoolExecutor.getThreadPool(7)).thenReturn(mockThreadExecutor);
+
+    // Let the first execute() call run for real, then reject all subsequent submissions.
+ Mockito.doCallRealMethod().doThrow(new RejectedExecutionException()).when(mockThreadExecutor).execute(Mockito.any(Runnable.class));
+
+ validateRenameFolder(mockFs, "root", "rootnew");
+
+    // Validate from logs that threads are enabled and that some threads went unused.
+ String content = logs.getOutput();
+ assertInLog(content,
+ "Using thread pool for Rename operation with threads 7");
+ assertInLog(content,
+ "6 threads not used for Rename operation on blob");
+ }
+
+ /*
+   * Test case for the rename operation when awaiting thread pool
+   * termination is interrupted: the rename should fail.
+ */
+ @Test
+ public void testRenameThreadPoolTerminationFailure() throws Exception {
+
+ // Spy azure file system object and return mocked thread pool
+ NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs);
+
+ // Spy a thread pool executor and link it to azure file system object.
+ String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root")));
+ AzureFileSystemThreadPoolExecutor mockThreadPoolExecutor = Mockito.spy(
+ mockFs.getThreadPoolExecutor(renameThreads, "AzureBlobRenameThread", "Rename",
+ path, NativeAzureFileSystem.AZURE_RENAME_THREADS));
+
+    // With a single iteration, we would have created 7 blobs, resulting in 7 threads.
+ Mockito.when(mockFs.getThreadPoolExecutor(renameThreads, "AzureBlobRenameThread", "Rename",
+ path, NativeAzureFileSystem.AZURE_RENAME_THREADS)).thenReturn(mockThreadPoolExecutor);
+
+    // Mock the thread executor to accept tasks but throw InterruptedException while awaiting termination.
+ ThreadPoolExecutor mockThreadExecutor = Mockito.mock(ThreadPoolExecutor.class);
+ Mockito.doNothing().when(mockThreadExecutor).execute(Mockito.any(Runnable.class));
+ Mockito.when(mockThreadExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS)).thenThrow(new InterruptedException());
+ Mockito.when(mockThreadPoolExecutor.getThreadPool(7)).thenReturn(mockThreadExecutor);
+
+
+ createFolder(mockFs, "root");
+ Path sourceFolder = new Path("root");
+ Path destFolder = new Path("rootnew");
+ boolean exception = false;
+ try {
+ mockFs.rename(sourceFolder, destFolder);
+ } catch (IOException e){
+ exception = true;
+ }
+
+ assertTrue(exception);
+ assertTrue(mockFs.exists(sourceFolder));
+
+    // Validate from logs that threads are enabled and the rename operation failed.
+ String content = logs.getOutput();
+ assertInLog(content,
+ "Using thread pool for Rename operation with threads");
+ assertInLog(content, "Threads got interrupted Rename blob operation");
+ assertInLog(content,
+ "Rename failed as operation on subfolders and files failed.");
+ }
+
+ /*
+   * Test case for the rename operation when renaming one file throws an
+   * exception: the whole operation should fail.
+ */
+ @Test
+ public void testRenameSingleRenameException() throws Exception {
+
+    // Source and destination folders for the rename.
+ Path sourceFolder = new Path("root");
+ Path destFolder = new Path("rootnew");
+
+ // Spy azure file system object and populate rename pending spy object.
+ NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs);
+
+    // Populate the data now so that the rename-pending spy object sees it.
+ createFolder(mockFs, "root");
+
+ String srcKey = mockFs.pathToKey(mockFs.makeAbsolute(sourceFolder));
+ String dstKey = mockFs.pathToKey(mockFs.makeAbsolute(destFolder));
+
+ FolderRenamePending mockRenameFs = Mockito.spy(mockFs.prepareAtomicFolderRename(srcKey, dstKey));
+ Mockito.when(mockFs.prepareAtomicFolderRename(srcKey, dstKey)).thenReturn(mockRenameFs);
+ String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root/0")));
+ Mockito.doThrow(new IOException()).when(mockRenameFs).renameFile(Mockito.any(FileMetadata.class));
+
+ boolean exception = false;
+ try {
+ mockFs.rename(sourceFolder, destFolder);
+ } catch (IOException e){
+ exception = true;
+ }
+
+ assertTrue(exception);
+ assertTrue(mockFs.exists(sourceFolder));
+
+    // Validate from logs that threads are enabled and the rename operation failed.
+ String content = logs.getOutput();
+ assertInLog(content,
+ "Using thread pool for Rename operation with threads");
+ assertInLog(content,
+ "Encountered Exception for Rename operation for file " + path);
+ assertInLog(content,
+ "Terminating execution of Rename operation now as some other thread already got exception or operation failed");
+ }
+
+ @Override
+ protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+ return AzureBlobStorageTestAccount.create();
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFSAuthWithBlobSpecificKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFSAuthWithBlobSpecificKeys.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFSAuthWithBlobSpecificKeys.java
new file mode 100644
index 0000000..d7e4831
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFSAuthWithBlobSpecificKeys.java
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import org.apache.hadoop.conf.Configuration;
+
+import static org.apache.hadoop.fs.azure.SecureStorageInterfaceImpl.KEY_USE_CONTAINER_SASKEY_FOR_ALL_ACCESS;
+
+/**
+ * Test class to hold all WASB authorization tests that use blob-specific keys
+ * to access storage.
+ */
+public class ITestNativeAzureFSAuthWithBlobSpecificKeys
+ extends ITestNativeAzureFileSystemAuthorizationWithOwner {
+
+
+ @Override
+ public Configuration createConfiguration() {
+ Configuration conf = super.createConfiguration();
+ conf.set(KEY_USE_CONTAINER_SASKEY_FOR_ALL_ACCESS, "false");
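+    // Disabling container-SAS-key reuse forces a blob-specific key for
+    // each access, which is what this test class exercises.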
+ return conf;
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFSAuthorizationCaching.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFSAuthorizationCaching.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFSAuthorizationCaching.java
new file mode 100644
index 0000000..c73b1cc
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFSAuthorizationCaching.java
@@ -0,0 +1,53 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Test;
+
+import static org.apache.hadoop.fs.azure.CachingAuthorizer.KEY_AUTH_SERVICE_CACHING_ENABLE;
+
+/**
+ * Test class to hold all WASB authorization caching related tests.
+ */
+public class ITestNativeAzureFSAuthorizationCaching
+ extends ITestNativeAzureFileSystemAuthorizationWithOwner {
+
+ private static final int DUMMY_TTL_VALUE = 5000;
+
+ @Override
+ public Configuration createConfiguration() {
+ Configuration conf = super.createConfiguration();
+ conf.set(KEY_AUTH_SERVICE_CACHING_ENABLE, "true");
+ return conf;
+ }
+
+ /**
+ * Test to verify cache behavior -- assert that PUT overwrites value if present
+ */
+ @Test
+ public void testCachePut() throws Throwable {
+ CachingAuthorizer<String, Integer> cache = new CachingAuthorizer<>(DUMMY_TTL_VALUE, "TEST");
+ cache.init(createConfiguration());
+ cache.put("TEST", 1);
+ cache.put("TEST", 3);
+ int result = cache.get("TEST");
+ assertEquals("Cache returned unexpected result", 3, result);
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFSPageBlobLive.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFSPageBlobLive.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFSPageBlobLive.java
new file mode 100644
index 0000000..a4d8729
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFSPageBlobLive.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.azure;
+
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * Run the base Azure file system tests strictly on page blobs to make sure fundamental
+ * operations on page blob files and folders work as expected.
+ * These operations include create, delete, rename, list, and so on.
+ */
+public class ITestNativeAzureFSPageBlobLive extends
+ NativeAzureFileSystemBaseTest {
+
+ @Override
+ protected AzureBlobStorageTestAccount createTestAccount()
+ throws Exception {
+ Configuration conf = new Configuration();
+
+ // Configure the page blob directories key so every file created is a page blob.
+ conf.set(AzureNativeFileSystemStore.KEY_PAGE_BLOB_DIRECTORIES, "/");
+
+ // Configure the atomic rename directories key so every folder will have
+ // atomic rename applied.
+ conf.set(AzureNativeFileSystemStore.KEY_ATOMIC_RENAME_DIRECTORIES, "/");
+ return AzureBlobStorageTestAccount.create(conf);
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemAppend.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemAppend.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemAppend.java
new file mode 100644
index 0000000..29611bf
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemAppend.java
@@ -0,0 +1,350 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import java.io.IOException;
+import java.net.URI;
+import java.util.Arrays;
+
+import org.apache.commons.lang.RandomStringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.test.GenericTestUtils;
+
+import org.junit.Test;
+
+/**
+ * Test append operations.
+ */
+public class ITestNativeAzureFileSystemAppend extends AbstractWasbTestBase {
+
+ private Path testPath;
+
+ @Override
+ public Configuration createConfiguration() {
+ Configuration conf = super.createConfiguration();
+ conf.setBoolean(NativeAzureFileSystem.APPEND_SUPPORT_ENABLE_PROPERTY_NAME,
+ true);
+ return conf;
+ }
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ testPath = methodPath();
+ }
+
+
+ @Override
+ protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+ return AzureBlobStorageTestAccount.create(createConfiguration());
+ }
+
+ /*
+ * Helper method that creates test data of size provided by the
+ * "size" parameter.
+ */
+ private static byte[] getTestData(int size) {
+ byte[] testData = new byte[size];
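+    // randomAlphabetic() yields only ASCII letters, so encoding it
+    // produces exactly "size" bytes.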
+ System.arraycopy(RandomStringUtils.randomAlphabetic(size).getBytes(), 0, testData, 0, size);
+ return testData;
+ }
+
+  // Helper method to create a file and write fileSize bytes of data to it.
+ private byte[] createBaseFileWithData(int fileSize, Path testPath) throws Throwable {
+
+ try(FSDataOutputStream createStream = fs.create(testPath)) {
+ byte[] fileData = null;
+
+ if (fileSize != 0) {
+ fileData = getTestData(fileSize);
+ createStream.write(fileData);
+ }
+ return fileData;
+ }
+ }
+
+ /*
+   * Helper method to verify that the next "dataLength" bytes read from the
+   * stream match the expected test data.
+ */
+ private boolean verifyFileData(int dataLength, byte[] testData, int testDataIndex,
+ FSDataInputStream srcStream) {
+
+ try {
+
+ byte[] fileBuffer = new byte[dataLength];
+ byte[] testDataBuffer = new byte[dataLength];
+
+ int fileBytesRead = srcStream.read(fileBuffer);
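+      // Note: read() may return fewer bytes than requested even before the
+      // end of the stream; a short read is treated as a verification
+      // failure here rather than retried.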
+
+ if (fileBytesRead < dataLength) {
+ return false;
+ }
+
+ System.arraycopy(testData, testDataIndex, testDataBuffer, 0, dataLength);
+
+ if (!Arrays.equals(fileBuffer, testDataBuffer)) {
+ return false;
+ }
+
+ return true;
+
+ } catch (Exception ex) {
+ return false;
+ }
+
+ }
+
+ /*
+ * Helper method to verify Append on a testFile.
+ */
+ private boolean verifyAppend(byte[] testData, Path testFile) {
+
+ try(FSDataInputStream srcStream = fs.open(testFile)) {
+
+ int baseBufferSize = 2048;
+ int testDataSize = testData.length;
+ int testDataIndex = 0;
+
+ while (testDataSize > baseBufferSize) {
+
+ if (!verifyFileData(baseBufferSize, testData, testDataIndex, srcStream)) {
+ return false;
+ }
+ testDataIndex += baseBufferSize;
+ testDataSize -= baseBufferSize;
+ }
+
+ if (!verifyFileData(testDataSize, testData, testDataIndex, srcStream)) {
+ return false;
+ }
+
+ return true;
+ } catch(Exception ex) {
+ return false;
+ }
+ }
+
+ /*
+   * Test case to verify that an append of a small amount of data works.
+   * This tests append end-to-end.
+ */
+ @Test
+ public void testSingleAppend() throws Throwable{
+
+ FSDataOutputStream appendStream = null;
+ try {
+ int baseDataSize = 50;
+ byte[] baseDataBuffer = createBaseFileWithData(baseDataSize, testPath);
+
+ int appendDataSize = 20;
+ byte[] appendDataBuffer = getTestData(appendDataSize);
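+      // The second argument to append() is the requested buffer size.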
+ appendStream = fs.append(testPath, 10);
+ appendStream.write(appendDataBuffer);
+ appendStream.close();
+ byte[] testData = new byte[baseDataSize + appendDataSize];
+ System.arraycopy(baseDataBuffer, 0, testData, 0, baseDataSize);
+ System.arraycopy(appendDataBuffer, 0, testData, baseDataSize, appendDataSize);
+
+ assertTrue(verifyAppend(testData, testPath));
+ } finally {
+ if (appendStream != null) {
+ appendStream.close();
+ }
+ }
+ }
+
+ /*
+ * Test case to verify append to an empty file.
+ */
+ @Test
+ public void testSingleAppendOnEmptyFile() throws Throwable {
+
+ FSDataOutputStream appendStream = null;
+
+ try {
+ createBaseFileWithData(0, testPath);
+
+ int appendDataSize = 20;
+ byte[] appendDataBuffer = getTestData(appendDataSize);
+ appendStream = fs.append(testPath, 10);
+ appendStream.write(appendDataBuffer);
+ appendStream.close();
+
+ assertTrue(verifyAppend(appendDataBuffer, testPath));
+ } finally {
+ if (appendStream != null) {
+ appendStream.close();
+ }
+ }
+ }
+
+ /*
+ * Test to verify that we can open only one Append stream on a File.
+ */
+ @Test
+ public void testSingleAppenderScenario() throws Throwable {
+
+ FSDataOutputStream appendStream1 = null;
+ FSDataOutputStream appendStream2 = null;
+ IOException ioe = null;
+ try {
+ createBaseFileWithData(0, testPath);
+ appendStream1 = fs.append(testPath, 10);
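+      // The open stream holds an append lease on the blob, so the second
+      // append below should fail to acquire it.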
+ boolean encounteredException = false;
+ try {
+ appendStream2 = fs.append(testPath, 10);
+ } catch(IOException ex) {
+ encounteredException = true;
+ ioe = ex;
+ }
+
+ appendStream1.close();
+
+ assertTrue(encounteredException);
+ GenericTestUtils.assertExceptionContains("Unable to set Append lease on the Blob", ioe);
+ } finally {
+ if (appendStream1 != null) {
+ appendStream1.close();
+ }
+
+ if (appendStream2 != null) {
+ appendStream2.close();
+ }
+ }
+ }
+
+ /*
+ * Tests to verify multiple appends on a Blob.
+ */
+ @Test
+ public void testMultipleAppends() throws Throwable {
+
+ int baseDataSize = 50;
+ byte[] baseDataBuffer = createBaseFileWithData(baseDataSize, testPath);
+
+ int appendDataSize = 100;
+ int targetAppendCount = 50;
+ byte[] testData = new byte[baseDataSize + (appendDataSize*targetAppendCount)];
+ int testDataIndex = 0;
+ System.arraycopy(baseDataBuffer, 0, testData, testDataIndex, baseDataSize);
+ testDataIndex += baseDataSize;
+
+ int appendCount = 0;
+
+ FSDataOutputStream appendStream = null;
+
+ try {
+ while (appendCount < targetAppendCount) {
+
+ byte[] appendDataBuffer = getTestData(appendDataSize);
+ appendStream = fs.append(testPath, 30);
+ appendStream.write(appendDataBuffer);
+ appendStream.close();
+
+ System.arraycopy(appendDataBuffer, 0, testData, testDataIndex, appendDataSize);
+ testDataIndex += appendDataSize;
+ appendCount++;
+ }
+
+ assertTrue(verifyAppend(testData, testPath));
+
+ } finally {
+ if (appendStream != null) {
+ appendStream.close();
+ }
+ }
+ }
+
+ /*
+   * Test to verify that we can do multiple appends on the same stream.
+ */
+ @Test
+ public void testMultipleAppendsOnSameStream() throws Throwable {
+
+ int baseDataSize = 50;
+ byte[] baseDataBuffer = createBaseFileWithData(baseDataSize, testPath);
+ int appendDataSize = 100;
+ int targetAppendCount = 50;
+ byte[] testData = new byte[baseDataSize + (appendDataSize*targetAppendCount)];
+ int testDataIndex = 0;
+ System.arraycopy(baseDataBuffer, 0, testData, testDataIndex, baseDataSize);
+ testDataIndex += baseDataSize;
+ int appendCount = 0;
+
+ FSDataOutputStream appendStream = null;
+
+ try {
+
+ while (appendCount < targetAppendCount) {
+
+ appendStream = fs.append(testPath, 50);
+
+ int singleAppendChunkSize = 20;
+ int appendRunSize = 0;
+ while (appendRunSize < appendDataSize) {
+
+ byte[] appendDataBuffer = getTestData(singleAppendChunkSize);
+ appendStream.write(appendDataBuffer);
+ System.arraycopy(appendDataBuffer, 0, testData,
+ testDataIndex + appendRunSize, singleAppendChunkSize);
+
+ appendRunSize += singleAppendChunkSize;
+ }
+
+ appendStream.close();
+ testDataIndex += appendDataSize;
+ appendCount++;
+ }
+
+ assertTrue(verifyAppend(testData, testPath));
+ } finally {
+ if (appendStream != null) {
+ appendStream.close();
+ }
+ }
+ }
+
+ @Test(expected=UnsupportedOperationException.class)
+ /*
+   * Test to verify the behavior when the append-support configuration flag is set to false.
+ */
+ public void testFalseConfigurationFlagBehavior() throws Throwable {
+
+ fs = testAccount.getFileSystem();
+ Configuration conf = fs.getConf();
+ conf.setBoolean(NativeAzureFileSystem.APPEND_SUPPORT_ENABLE_PROPERTY_NAME, false);
+ URI uri = fs.getUri();
+ fs.initialize(uri, conf);
+
+ FSDataOutputStream appendStream = null;
+
+ try {
+ createBaseFileWithData(0, testPath);
+ appendStream = fs.append(testPath, 10);
+ } finally {
+ if (appendStream != null) {
+ appendStream.close();
+ }
+ }
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemAtomicRenameDirList.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemAtomicRenameDirList.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemAtomicRenameDirList.java
new file mode 100644
index 0000000..869a31c
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemAtomicRenameDirList.java
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import java.io.IOException;
+import java.net.URI;
+
+import org.apache.hadoop.conf.Configuration;
+
+import org.junit.Test;
+
+/**
+ * Test atomic renaming.
+ */
+public class ITestNativeAzureFileSystemAtomicRenameDirList
+ extends AbstractWasbTestBase {
+
+ // HBase-site config controlling HBase root dir
+ private static final String HBASE_ROOT_DIR_CONF_STRING = "hbase.rootdir";
+ private static final String HBASE_ROOT_DIR_VALUE_ON_DIFFERENT_FS =
+ "wasb://somedifferentfilesystem.blob.core.windows.net/hbase";
+
+ @Override
+ protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+ return AzureBlobStorageTestAccount.create();
+ }
+
+ @Test
+ public void testAtomicRenameKeyDoesntNPEOnInitializingWithNonDefaultURI()
+ throws IOException {
+ NativeAzureFileSystem azureFs = fs;
+ AzureNativeFileSystemStore azureStore = azureFs.getStore();
+ Configuration conf = fs.getConf();
+ conf.set(HBASE_ROOT_DIR_CONF_STRING, HBASE_ROOT_DIR_VALUE_ON_DIFFERENT_FS);
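+    // Re-initializing with hbase.rootdir pointing at a different file
+    // system must not NPE in the atomic-rename key lookup below.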
+ URI uri = fs.getUri();
+ fs.initialize(uri, conf);
+ azureStore.isAtomicRenameKey("anyrandomkey");
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemAuthorizationWithOwner.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemAuthorizationWithOwner.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemAuthorizationWithOwner.java
new file mode 100644
index 0000000..3ec42f0
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemAuthorizationWithOwner.java
@@ -0,0 +1,122 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.security.UserGroupInformation;
+import java.security.PrivilegedExceptionAction;
+
+import org.apache.hadoop.fs.Path;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Test class that runs wasb authorization tests with owner check enabled.
+ */
+public class ITestNativeAzureFileSystemAuthorizationWithOwner
+ extends TestNativeAzureFileSystemAuthorization {
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ authorizer.init(fs.getConf(), true);
+ }
+
+ /**
+ * Test case when owner matches current user.
+ */
+ @Test
+ public void testOwnerPermissionPositive() throws Throwable {
+
+ Path parentDir = new Path("/testOwnerPermissionPositive");
+ Path testPath = new Path(parentDir, "test.data");
+
+ authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true);
+ authorizer.addAuthRule(testPath.toString(), WasbAuthorizationOperations.READ.toString(), true);
+ authorizer.addAuthRule(parentDir.toString(), WasbAuthorizationOperations.WRITE.toString(), true);
+ // additional rule used for assertPathExists
+ authorizer.addAuthRule(parentDir.toString(), WasbAuthorizationOperations.READ.toString(), true);
+ fs.updateWasbAuthorizer(authorizer);
+
+ try {
+ // creates parentDir with owner as current user
+ fs.mkdirs(parentDir);
+ ContractTestUtils.assertPathExists(fs, "parentDir does not exist", parentDir);
+
+ fs.create(testPath);
+ fs.getFileStatus(testPath);
+ ContractTestUtils.assertPathExists(fs, "testPath does not exist", testPath);
+
+ } finally {
+ allowRecursiveDelete(fs, parentDir.toString());
+ fs.delete(parentDir, true);
+ }
+ }
+
+ /**
+ * Negative test case: the owner does not match the current user.
+ */
+ @Test
+ public void testOwnerPermissionNegative() throws Throwable {
+ expectedEx.expect(WasbAuthorizationException.class);
+
+ Path parentDir = new Path("/testOwnerPermissionNegative");
+ Path childDir = new Path(parentDir, "childDir");
+
+ setExpectedFailureMessage("mkdirs", childDir);
+
+ authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true);
+ authorizer.addAuthRule(parentDir.toString(), WasbAuthorizationOperations.WRITE.toString(), true);
+
+ fs.updateWasbAuthorizer(authorizer);
+
+ try{
+ fs.mkdirs(parentDir);
+ UserGroupInformation ugiSuperUser = UserGroupInformation.createUserForTesting(
+ "testuser", new String[] {});
+
+ ugiSuperUser.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override
+ public Void run() throws Exception {
+ fs.mkdirs(childDir);
+ return null;
+ }
+ });
+
+ } finally {
+ allowRecursiveDelete(fs, parentDir.toString());
+ fs.delete(parentDir, true);
+ }
+ }
+
+ /**
+ * Test to verify that retrieving owner information does not
+ * throw when file/folder does not exist.
+ */
+ @Test
+ public void testRetrievingOwnerDoesNotFailWhenFileDoesNotExist() throws Throwable {
+
+ Path testdirectory = new Path("/testDirectory123454565");
+
+ String owner = fs.getOwnerForPath(testdirectory);
+ assertEquals("", owner);
+ }
+}
+
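The doAs pattern used above is the standard way to issue filesystem calls as a
different test user so that the owner checks actually fire. A condensed
sketch, assuming an initialized fs; the path is a placeholder:

    UserGroupInformation testUser =
        UserGroupInformation.createUserForTesting("testuser", new String[] {});
    testUser.doAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        // runs as "testuser"; WASB owner checks apply to this call
        fs.mkdirs(new Path("/dir-owned-by-someone-else/child"));
        return null;
      }
    });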
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemClientLogging.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemClientLogging.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemClientLogging.java
new file mode 100644
index 0000000..f73a763
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemClientLogging.java
@@ -0,0 +1,136 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import java.net.URI;
+import java.util.StringTokenizer;
+
+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+import org.apache.log4j.Logger;
+import org.junit.Test;
+
+/**
+ * Test to validate Azure storage client-side logging. These tests work
+ * only against live Azure storage, because the emulator does not support
+ * client-side logging.
+ *
+ * <I>Important: </I> Do not attempt to move off commons-logging.
+ * The tests will fail.
+ */
+public class ITestNativeAzureFileSystemClientLogging
+ extends AbstractWasbTestBase {
+
+ // Core-site config controlling Azure Storage Client logging
+ private static final String KEY_LOGGING_CONF_STRING = "fs.azure.storage.client.logging";
+
+ // Temporary directory created using WASB.
+ private static final String TEMP_DIR = "tempDir";
+
+ /*
+ * Helper method to verify that client logging is working: it checks
+ * that the captured logs contain a line corresponding to the entity
+ * created during the test run.
+ */
+ private boolean verifyStorageClientLogs(String capturedLogs, String entity)
+ throws Exception {
+
+ URI uri = testAccount.getRealAccount().getBlobEndpoint();
+ String container = testAccount.getRealContainer().getName();
+ String validateString = uri + Path.SEPARATOR + container + Path.SEPARATOR
+ + entity;
+ boolean entityFound = false;
+
+ StringTokenizer tokenizer = new StringTokenizer(capturedLogs, "\n");
+
+ while (tokenizer.hasMoreTokens()) {
+ String token = tokenizer.nextToken();
+ if (token.contains(validateString)) {
+ entityFound = true;
+ break;
+ }
+ }
+ return entityFound;
+ }
+
+ /*
+ * Helper method that updates the core-site config to enable/disable logging.
+ */
+ private void updateFileSystemConfiguration(Boolean loggingFlag)
+ throws Exception {
+
+ Configuration conf = fs.getConf();
+ conf.set(KEY_LOGGING_CONF_STRING, loggingFlag.toString());
+ URI uri = fs.getUri();
+ fs.initialize(uri, conf);
+ }
+
+ // Using WASB code to communicate with Azure Storage.
+ private void performWASBOperations() throws Exception {
+
+ Path tempDir = new Path(Path.SEPARATOR + TEMP_DIR);
+ fs.mkdirs(tempDir);
+ fs.delete(tempDir, true);
+ }
+
+ @Test
+ public void testLoggingEnabled() throws Exception {
+
+ LogCapturer logs = LogCapturer.captureLogs(new Log4JLogger(Logger
+ .getRootLogger()));
+
+ // Update configuration based on the Test.
+ updateFileSystemConfiguration(true);
+
+ performWASBOperations();
+
+ String output = getLogOutput(logs);
+ assertTrue("Log entry " + TEMP_DIR + " not found in " + output,
+ verifyStorageClientLogs(output, TEMP_DIR));
+ }
+
+ protected String getLogOutput(LogCapturer logs) {
+ String output = logs.getOutput();
+ assertTrue("No log created/captured", !output.isEmpty());
+ return output;
+ }
+
+ @Test
+ public void testLoggingDisabled() throws Exception {
+
+ LogCapturer logs = LogCapturer.captureLogs(new Log4JLogger(Logger
+ .getRootLogger()));
+
+ // Update configuration based on the Test.
+ updateFileSystemConfiguration(false);
+
+ performWASBOperations();
+ String output = getLogOutput(logs);
+
+ assertFalse("Log entry " + TEMP_DIR + " found in " + output,
+ verifyStorageClientLogs(output, TEMP_DIR));
+ }
+
+ @Override
+ protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+ return AzureBlobStorageTestAccount.create();
+ }
+}
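The enable/disable switch these tests flip is just a configuration key plus a
reinitialization; a condensed sketch of the sequence
updateFileSystemConfiguration() performs:

    Configuration conf = fs.getConf();
    // the same key as KEY_LOGGING_CONF_STRING above
    conf.set("fs.azure.storage.client.logging", "true");
    // reinitialize so the storage client picks up the new flag
    fs.initialize(fs.getUri(), conf);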
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemConcurrencyLive.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemConcurrencyLive.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemConcurrencyLive.java
new file mode 100644
index 0000000..87cac15
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemConcurrencyLive.java
@@ -0,0 +1,185 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+
+/**
+ * Test class to hold all Live Azure storage concurrency tests.
+ */
+public class ITestNativeAzureFileSystemConcurrencyLive
+ extends AbstractWasbTestBase {
+
+ private static final int THREAD_COUNT = 102;
+ private static final int TEST_EXECUTION_TIMEOUT = 5000;
+
+ @Override
+ protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+ return AzureBlobStorageTestAccount.create();
+ }
+
+ /**
+ * Validate contract for FileSystem.create when overwrite is true and there
+ * are concurrent callers of FileSystem.delete. An existing file should be
+ * overwritten, even if the original destination exists but is deleted by an
+ * external agent during the create operation.
+ */
+ @Test(timeout = TEST_EXECUTION_TIMEOUT)
+ public void testConcurrentCreateDeleteFile() throws Exception {
+ Path testFile = methodPath();
+
+ List<CreateFileTask> tasks = new ArrayList<>(THREAD_COUNT);
+
+ for (int i = 0; i < THREAD_COUNT; i++) {
+ tasks.add(new CreateFileTask(fs, testFile));
+ }
+
+ ExecutorService es = null;
+
+ try {
+ es = Executors.newFixedThreadPool(THREAD_COUNT);
+
+ List<Future<Void>> futures = es.invokeAll(tasks);
+
+ for (Future<Void> future : futures) {
+ Assert.assertTrue(future.isDone());
+
+ // we are using Callable<V>, so if an exception
+ // occurred during the operation, it will be thrown
+ // when we call get
+ Assert.assertEquals(null, future.get());
+ }
+ } finally {
+ if (es != null) {
+ es.shutdownNow();
+ }
+ }
+ }
+
+ /**
+ * Validate contract for FileSystem.delete when invoked concurrently.
+ * One of the threads should successfully delete the file and return true;
+ * all other threads should return false.
+ */
+ @Test(timeout = TEST_EXECUTION_TIMEOUT)
+ public void testConcurrentDeleteFile() throws Exception {
+ Path testFile = new Path("test.dat");
+ fs.create(testFile).close();
+
+ List<DeleteFileTask> tasks = new ArrayList<>(THREAD_COUNT);
+
+ for (int i = 0; i < THREAD_COUNT; i++) {
+ tasks.add(new DeleteFileTask(fs, testFile));
+ }
+
+ ExecutorService es = null;
+ try {
+ es = Executors.newFixedThreadPool(THREAD_COUNT);
+
+ List<Future<Boolean>> futures = es.invokeAll(tasks);
+
+ int successCount = 0;
+ for (Future<Boolean> future : futures) {
+ Assert.assertTrue(future.isDone());
+
+ // we are using Callable<V>, so if an exception
+ // occurred during the operation, it will be thrown
+ // when we call get
+ Boolean success = future.get();
+ if (success) {
+ successCount++;
+ }
+ }
+
+ Assert.assertEquals(
+ "Exactly one delete operation should return true.",
+ 1,
+ successCount);
+ } finally {
+ if (es != null) {
+ es.shutdownNow();
+ }
+ }
+ }
+
+ abstract class FileSystemTask<V> implements Callable<V> {
+ private final FileSystem fileSystem;
+ private final Path path;
+
+ protected FileSystem getFileSystem() {
+ return this.fileSystem;
+ }
+
+ protected Path getFilePath() {
+ return this.path;
+ }
+
+ FileSystemTask(FileSystem fs, Path p) {
+ this.fileSystem = fs;
+ this.path = p;
+ }
+
+ public abstract V call() throws Exception;
+ }
+
+ class DeleteFileTask extends FileSystemTask<Boolean> {
+
+ DeleteFileTask(FileSystem fs, Path p) {
+ super(fs, p);
+ }
+
+ @Override
+ public Boolean call() throws Exception {
+ return this.getFileSystem().delete(this.getFilePath(), false);
+ }
+ }
+
+ class CreateFileTask extends FileSystemTask<Void> {
+ CreateFileTask(FileSystem fs, Path p) {
+ super(fs, p);
+ }
+
+ @Override
+ public Void call() throws Exception {
+ FileSystem fs = getFileSystem();
+ Path p = getFilePath();
+
+ // Create an empty file and close the stream.
+ FSDataOutputStream stream = fs.create(p, true);
+ stream.close();
+
+ // Delete the file. We don't care if delete returns true or false.
+ // We just want to ensure the file does not exist.
+ this.getFileSystem().delete(this.getFilePath(), false);
+
+ return null;
+ }
+ }
+}
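The Callable/invokeAll pattern above generalizes to any racing operation. A
self-contained sketch (the helper name is ours, not from the patch; imports
are the same as the test above plus java.util.ArrayList) that races two
deletes of one path and counts the winners; against WASB, exactly one should
return true:

    /** Race two deletes of the same path; count how many returned true. */
    static int racingDeletes(final FileSystem fs, final Path target)
        throws Exception {
      ExecutorService es = Executors.newFixedThreadPool(2);
      try {
        List<Callable<Boolean>> tasks = new ArrayList<>();
        for (int i = 0; i < 2; i++) {
          tasks.add(new Callable<Boolean>() {
            @Override
            public Boolean call() throws Exception {
              return fs.delete(target, false);
            }
          });
        }
        int deleted = 0;
        for (Future<Boolean> f : es.invokeAll(tasks)) {
          if (f.get()) {   // get() rethrows any exception from the task
            deleted++;
          }
        }
        return deleted;    // expected: exactly 1
      } finally {
        es.shutdownNow();
      }
    }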
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemContractEmulator.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemContractEmulator.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemContractEmulator.java
new file mode 100644
index 0000000..4836fc4
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemContractEmulator.java
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import static org.junit.Assume.assumeNotNull;
+
+import org.apache.hadoop.fs.FileSystemContractBaseTest;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.azure.integration.AzureTestUtils;
+
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.rules.TestName;
+
+/**
+ * Run the {@code FileSystemContractBaseTest} tests against the emulator.
+ */
+public class ITestNativeAzureFileSystemContractEmulator extends
+ FileSystemContractBaseTest {
+ private AzureBlobStorageTestAccount testAccount;
+ private Path basePath;
+
+ @Rule
+ public TestName methodName = new TestName();
+
+ private void nameThread() {
+ Thread.currentThread().setName("JUnit-" + methodName.getMethodName());
+ }
+
+ @Before
+ public void setUp() throws Exception {
+ nameThread();
+ testAccount = AzureBlobStorageTestAccount.createForEmulator();
+ if (testAccount != null) {
+ fs = testAccount.getFileSystem();
+ }
+ assumeNotNull(fs);
+ basePath = fs.makeQualified(
+ AzureTestUtils.createTestPath(
+ new Path("ITestNativeAzureFileSystemContractEmulator")));
+ }
+
+ @Override
+ public void tearDown() throws Exception {
+ super.tearDown();
+ testAccount = AzureTestUtils.cleanup(testAccount);
+ fs = null;
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemContractLive.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemContractLive.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemContractLive.java
new file mode 100644
index 0000000..d3d1bd8
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemContractLive.java
@@ -0,0 +1,108 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import static org.junit.Assume.assumeNotNull;
+
+import org.apache.hadoop.fs.FileSystemContractBaseTest;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.azure.integration.AzureTestConstants;
+import org.apache.hadoop.fs.azure.integration.AzureTestUtils;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TestName;
+
+/**
+ * Run the {@link FileSystemContractBaseTest} test suite against Azure storage.
+ */
+public class ITestNativeAzureFileSystemContractLive extends
+ FileSystemContractBaseTest {
+ private AzureBlobStorageTestAccount testAccount;
+ private Path basePath;
+
+ @Rule
+ public TestName methodName = new TestName();
+
+ private void nameThread() {
+ Thread.currentThread().setName("JUnit-" + methodName.getMethodName());
+ }
+
+ @Before
+ public void setUp() throws Exception {
+ nameThread();
+ testAccount = AzureBlobStorageTestAccount.create();
+ if (testAccount != null) {
+ fs = testAccount.getFileSystem();
+ }
+ assumeNotNull(fs);
+ basePath = fs.makeQualified(
+ AzureTestUtils.createTestPath(
+ new Path("NativeAzureFileSystemContractLive")));
+ }
+
+ @Override
+ public void tearDown() throws Exception {
+ super.tearDown();
+ testAccount = AzureTestUtils.cleanup(testAccount);
+ fs = null;
+ }
+
+ @Override
+ public Path getTestBaseDir() {
+ return basePath;
+ }
+
+ protected int getGlobalTimeout() {
+ return AzureTestConstants.AZURE_TEST_TIMEOUT;
+ }
+
+ /**
+ * The following tests are failing on Azure and the Azure
+ * file system code needs to be modified to make them pass.
+ * A separate work item has been opened for this.
+ */
+ @Ignore
+ @Test
+ public void testMoveFileUnderParent() throws Throwable {
+ }
+
+ @Ignore
+ @Test
+ public void testRenameFileToSelf() throws Throwable {
+ }
+
+ @Ignore
+ @Test
+ public void testRenameChildDirForbidden() throws Exception {
+ }
+
+ @Ignore
+ @Test
+ public void testMoveDirUnderParent() throws Throwable {
+ }
+
+ @Ignore
+ @Test
+ public void testRenameDirToSelf() throws Throwable {
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemContractPageBlobLive.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemContractPageBlobLive.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemContractPageBlobLive.java
new file mode 100644
index 0000000..03e90aa
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemContractPageBlobLive.java
@@ -0,0 +1,114 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystemContractBaseTest;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.azure.integration.AzureTestConstants;
+import org.apache.hadoop.fs.azure.integration.AzureTestUtils;
+
+import static org.junit.Assume.assumeNotNull;
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TestName;
+
+/**
+ * Run the {@link FileSystemContractBaseTest} test suite against Azure
+ * storage, after switching the FS to use page blobs everywhere.
+ */
+public class ITestNativeAzureFileSystemContractPageBlobLive extends
+ FileSystemContractBaseTest {
+ private AzureBlobStorageTestAccount testAccount;
+ private Path basePath;
+ @Rule
+ public TestName methodName = new TestName();
+
+ private void nameThread() {
+ Thread.currentThread().setName("JUnit-" + methodName.getMethodName());
+ }
+
+ private AzureBlobStorageTestAccount createTestAccount()
+ throws Exception {
+ Configuration conf = new Configuration();
+
+ // Configure the page blob directories key so every file created is a page blob.
+ conf.set(AzureNativeFileSystemStore.KEY_PAGE_BLOB_DIRECTORIES, "/");
+
+ // Configure the atomic rename directories key so every folder will have
+ // atomic rename applied.
+ conf.set(AzureNativeFileSystemStore.KEY_ATOMIC_RENAME_DIRECTORIES, "/");
+ return AzureBlobStorageTestAccount.create(conf);
+ }
+
+ @Before
+ public void setUp() throws Exception {
+ testAccount = createTestAccount();
+ assumeNotNull(testAccount);
+ fs = testAccount.getFileSystem();
+ basePath = AzureTestUtils.pathForTests(fs, "filesystemcontractpageblob");
+ }
+
+ @Override
+ public void tearDown() throws Exception {
+ testAccount = AzureTestUtils.cleanup(testAccount);
+ fs = null;
+ }
+
+ protected int getGlobalTimeout() {
+ return AzureTestConstants.AZURE_TEST_TIMEOUT;
+ }
+
+ @Override
+ public Path getTestBaseDir() {
+ return basePath;
+ }
+
+ /**
+ * The following tests are failing on Azure and the Azure
+ * file system code needs to be modified to make them pass.
+ * A separate work item has been opened for this.
+ */
+ @Ignore
+ @Test
+ public void testMoveFileUnderParent() throws Throwable {
+ }
+
+ @Ignore
+ @Test
+ public void testRenameFileToSelf() throws Throwable {
+ }
+
+ @Ignore
+ @Test
+ public void testRenameChildDirForbidden() throws Exception {
+ }
+
+ @Ignore
+ @Test
+ public void testMoveDirUnderParent() throws Throwable {
+ }
+
+ @Ignore
+ @Test
+ public void testRenameDirToSelf() throws Throwable {
+ }
+}
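createTestAccount() above maps every path to page blobs by setting the
directories key to "/". In ordinary deployments only selected directories are
mapped; a sketch with placeholder directory names (the key takes a
comma-separated list, mirroring DEFAULT_ATOMIC_RENAME_DIRECTORIES in
AzureTestConstants below):

    Configuration conf = new Configuration();
    // only files under these paths are created as page blobs;
    // everything else remains block blobs
    conf.set(AzureNativeFileSystemStore.KEY_PAGE_BLOB_DIRECTORIES,
        "/hbase/WALs,/pageBlobs");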
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/AzureTestConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/AzureTestConstants.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/AzureTestConstants.java
new file mode 100644
index 0000000..0b72f06
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/AzureTestConstants.java
@@ -0,0 +1,180 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure.integration;
+
+import org.apache.hadoop.fs.Path;
+
+/**
+ * Constants for the Azure tests.
+ */
+public interface AzureTestConstants {
+
+ /**
+ * Prefix for any cross-filesystem scale test options.
+ */
+ String SCALE_TEST = "scale.test.";
+
+ /**
+ * Prefix for wasb-specific scale tests.
+ */
+ String AZURE_SCALE_TEST = "fs.azure.scale.test.";
+
+ /**
+ * Prefix for FS wasb tests.
+ */
+ String TEST_FS_WASB = "test.fs.azure.";
+
+ /**
+ * Name of the test filesystem.
+ */
+ String TEST_FS_WASB_NAME = TEST_FS_WASB + "name";
+
+ /**
+ * Tell tests that they are being executed in parallel: {@value}.
+ */
+ String KEY_PARALLEL_TEST_EXECUTION = "test.parallel.execution";
+
+ /**
+ * A property set to true in maven if scale tests are enabled: {@value}.
+ */
+ String KEY_SCALE_TESTS_ENABLED = AZURE_SCALE_TEST + "enabled";
+
+ /**
+ * The number of operations to perform: {@value}.
+ */
+ String KEY_OPERATION_COUNT = SCALE_TEST + "operation.count";
+
+ /**
+ * The number of directory operations to perform: {@value}.
+ */
+ String KEY_DIRECTORY_COUNT = SCALE_TEST + "directory.count";
+
+ /**
+ * The readahead buffer size: {@value}.
+ */
+ String KEY_READ_BUFFER_SIZE = AZURE_SCALE_TEST + "read.buffer.size";
+
+ int DEFAULT_READ_BUFFER_SIZE = 16384;
+
+ /**
+ * Key for a multi MB test file: {@value}.
+ */
+ String KEY_CSVTEST_FILE = AZURE_SCALE_TEST + "csvfile";
+
+ /**
+ * Default path for the multi MB test file: {@value}.
+ */
+ String DEFAULT_CSVTEST_FILE = "wasb://datasets@azuremlsampleexperiments.blob.core.windows.net/network_intrusion_detection.csv";
+
+ /**
+ * Name of the property to define the timeout for scale tests: {@value}.
+ * Measured in seconds.
+ */
+ String KEY_TEST_TIMEOUT = AZURE_SCALE_TEST + "timeout";
+
+ /**
+ * Name of the property to define the file size for the huge file
+ * tests: {@value}.
+ * Measured in KB; a suffix like "M", or "G" will change the unit.
+ */
+ String KEY_HUGE_FILESIZE = AZURE_SCALE_TEST + "huge.filesize";
+
+ /**
+ * Name of the property to define the partition size for the huge file
+ * tests: {@value}.
+ * Measured in KB; a suffix like "M", or "G" will change the unit.
+ */
+ String KEY_HUGE_PARTITION_SIZE = AZURE_SCALE_TEST + "huge.partitionsize";
+
+ /**
+ * The default huge file size is small; full 5GB+ scale tests are
+ * something to run in long test runs on EC2 VMs. {@value}.
+ */
+ String DEFAULT_HUGE_FILESIZE = "10M";
+
+ /**
+ * The default number of operations to perform: {@value}.
+ */
+ long DEFAULT_OPERATION_COUNT = 2005;
+
+ /**
+ * Default number of directories to create when performing
+ * directory performance/scale tests.
+ */
+ int DEFAULT_DIRECTORY_COUNT = 2;
+
+ /**
+ * Default policy on scale tests: {@value}.
+ */
+ boolean DEFAULT_SCALE_TESTS_ENABLED = false;
+
+ /**
+ * Fork ID passed down from maven if the test is running in parallel.
+ */
+ String TEST_UNIQUE_FORK_ID = "test.unique.fork.id";
+
+ /**
+ * Timeout in milliseconds for standard tests: {@value}.
+ */
+ int AZURE_TEST_TIMEOUT = 10 * 60 * 1000;
+
+ /**
+ * Timeout in seconds for scale tests: {@value}.
+ */
+ int SCALE_TEST_TIMEOUT_SECONDS = 30 * 60;
+
+ int SCALE_TEST_TIMEOUT_MILLIS = SCALE_TEST_TIMEOUT_SECONDS * 1000;
+
+ String ACCOUNT_KEY_PROPERTY_NAME
+ = "fs.azure.account.key.";
+ String SAS_PROPERTY_NAME = "fs.azure.sas.";
+ String TEST_CONFIGURATION_FILE_NAME = "azure-test.xml";
+ String TEST_ACCOUNT_NAME_PROPERTY_NAME
+ = "fs.azure.test.account.name";
+ String MOCK_ACCOUNT_NAME
+ = "mockAccount.blob.core.windows.net";
+ String MOCK_CONTAINER_NAME = "mockContainer";
+ String WASB_AUTHORITY_DELIMITER = "@";
+ String WASB_SCHEME = "wasb";
+ String PATH_DELIMITER = "/";
+ String AZURE_ROOT_CONTAINER = "$root";
+ String MOCK_WASB_URI = "wasb://" + MOCK_CONTAINER_NAME
+ + WASB_AUTHORITY_DELIMITER + MOCK_ACCOUNT_NAME + "/";
+ String USE_EMULATOR_PROPERTY_NAME
+ = "fs.azure.test.emulator";
+
+ String KEY_DISABLE_THROTTLING
+ = "fs.azure.disable.bandwidth.throttling";
+ String KEY_READ_TOLERATE_CONCURRENT_APPEND
+ = "fs.azure.io.read.tolerate.concurrent.append";
+ /**
+ * Path for page blobs: {@value}.
+ */
+ String DEFAULT_PAGE_BLOB_DIRECTORY = "pageBlobs";
+
+ String DEFAULT_ATOMIC_RENAME_DIRECTORIES
+ = "/atomicRenameDir1,/atomicRenameDir2";
+
+ /**
+ * Base directory for page blobs.
+ */
+ Path PAGE_BLOB_DIR = new Path("/" + DEFAULT_PAGE_BLOB_DIRECTORY);
+}
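These keys are resolved through the getTestProperty helpers in AzureTestUtils
(next file), so a maven -D system property overrides the value in
azure-test.xml, which overrides the default. A sketch of typical consumption,
mirroring the huge-files test later in this patch:

    Configuration conf = new Configuration();
    long filesize = AzureTestUtils.getTestPropertyBytes(conf,
        AzureTestConstants.KEY_HUGE_FILESIZE,      // fs.azure.scale.test.huge.filesize
        AzureTestConstants.DEFAULT_HUGE_FILESIZE); // "10M"
    int timeout = AzureTestUtils.getTestPropertyInt(conf,
        AzureTestConstants.KEY_TEST_TIMEOUT,       // fs.azure.scale.test.timeout, in seconds
        AzureTestConstants.SCALE_TEST_TIMEOUT_SECONDS);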
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/AzureTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/AzureTestUtils.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/AzureTestUtils.java
new file mode 100644
index 0000000..2fbbcd1
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/AzureTestUtils.java
@@ -0,0 +1,479 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure.integration;
+
+import java.io.IOException;
+import java.net.URI;
+import java.util.List;
+
+import org.junit.Assert;
+import org.junit.Assume;
+import org.junit.internal.AssumptionViolatedException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.azure.AzureBlobStorageTestAccount;
+import org.apache.hadoop.fs.azure.NativeAzureFileSystem;
+
+import static org.apache.hadoop.fs.azure.integration.AzureTestConstants.*;
+import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
+import static org.apache.hadoop.test.MetricsAsserts.getLongGauge;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
+
+/**
+ * Utilities for the Azure tests. Based on {@code S3ATestUtils}, so
+ * (initially) it has some unused methods.
+ */
+public final class AzureTestUtils extends Assert {
+ private static final Logger LOG = LoggerFactory.getLogger(
+ AzureTestUtils.class);
+
+ /**
+ * Value to set a system property to (in maven) to declare that
+ * a property has been unset.
+ */
+ public static final String UNSET_PROPERTY = "unset";
+
+ /**
+ * Create the test filesystem.
+ *
+ * If the test.fs.azure.name property is not set, this will
+ * raise a JUnit assumption exception
+ *
+ * @param conf configuration
+ * @return the FS
+ * @throws IOException IO Problems
+ * @throws AssumptionViolatedException if the FS is not named
+ */
+ public static NativeAzureFileSystem createTestFileSystem(Configuration conf)
+ throws IOException {
+
+ String fsname = conf.getTrimmed(TEST_FS_WASB_NAME, "");
+
+ boolean liveTest = !StringUtils.isEmpty(fsname);
+ URI testURI = null;
+ if (liveTest) {
+ testURI = URI.create(fsname);
+ liveTest = testURI.getScheme().equals(WASB_SCHEME);
+ }
+ if (!liveTest) {
+ // Skip the test
+ throw new AssumptionViolatedException(
+ "No test filesystem in " + TEST_FS_WASB_NAME);
+ }
+ NativeAzureFileSystem fs1 = new NativeAzureFileSystem();
+ fs1.initialize(testURI, conf);
+ return fs1;
+ }
+
+ /**
+ * Create a file context for tests.
+ *
+ * If the test.fs.azure.name property is not set, this will
+ * raise a JUnit assumption exception.
+ *
+ * @param conf configuration
+ * @return the FS
+ * @throws IOException IO Problems
+ * @throws AssumptionViolatedException if the FS is not named
+ */
+ public static FileContext createTestFileContext(Configuration conf)
+ throws IOException {
+ String fsname = conf.getTrimmed(TEST_FS_WASB_NAME, "");
+
+ boolean liveTest = !StringUtils.isEmpty(fsname);
+ URI testURI = null;
+ if (liveTest) {
+ testURI = URI.create(fsname);
+ liveTest = testURI.getScheme().equals(WASB_SCHEME);
+ }
+ if (!liveTest) {
+ // This doesn't work with our JUnit 3 style test cases, so instead we'll
+ // make this whole class not run by default
+ throw new AssumptionViolatedException("No test filesystem in "
+ + TEST_FS_WASB_NAME);
+ }
+ FileContext fc = FileContext.getFileContext(testURI, conf);
+ return fc;
+ }
+
+ /**
+ * Get a long test property.
+ * <ol>
+ * <li>Look up configuration value (which can pick up core-default.xml),
+ * using {@code defVal} as the default value (if conf != null).
+ * </li>
+ * <li>Fetch the system property.</li>
+ * <li>If the system property is not empty or "(unset)":
+ * it overrides the conf value.
+ * </li>
+ * </ol>
+ * This puts the build properties in charge of everything. It's not a
+ * perfect design; having maven set properties based on a file, as ant let
+ * you do, is better for customization.
+ *
+ * As to why there's a special (unset) value, see
+ * <a href="http://stackoverflow.com/questions/7773134/null-versus-empty-arguments-in-maven">
+ * Stack Overflow</a>.
+ * @param conf config: may be null
+ * @param key key to look up
+ * @param defVal default value
+ * @return the evaluated test property.
+ */
+ public static long getTestPropertyLong(Configuration conf,
+ String key, long defVal) {
+ return Long.valueOf(
+ getTestProperty(conf, key, Long.toString(defVal)));
+ }
+
+ /**
+ * Get a test property value in bytes, using k, m, g, t, p, e suffixes.
+ * {@link org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix#string2long(String)}
+ * <ol>
+ * <li>Look up configuration value (which can pick up core-default.xml),
+ * using {@code defVal} as the default value (if conf != null).
+ * </li>
+ * <li>Fetch the system property.</li>
+ * <li>If the system property is not empty or "(unset)":
+ * it overrides the conf value.
+ * </li>
+ * </ol>
+ * This puts the build properties in charge of everything. It's not a
+ * perfect design; having maven set properties based on a file, as ant let
+ * you do, is better for customization.
+ *
+ * As to why there's a special (unset) value, see
+ * <a href="http://stackoverflow.com/questions/7773134/null-versus-empty-arguments-in-maven">
+ * Stack Overflow</a>.
+ * @param conf config: may be null
+ * @param key key to look up
+ * @param defVal default value
+ * @return the evaluated test property.
+ */
+ public static long getTestPropertyBytes(Configuration conf,
+ String key, String defVal) {
+ return org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix
+ .string2long(getTestProperty(conf, key, defVal));
+ }
+
+ /**
+ * Get an integer test property; algorithm described in
+ * {@link #getTestPropertyLong(Configuration, String, long)}.
+ * @param key key to look up
+ * @param defVal default value
+ * @return the evaluated test property.
+ */
+ public static int getTestPropertyInt(Configuration conf,
+ String key, int defVal) {
+ return (int) getTestPropertyLong(conf, key, defVal);
+ }
+
+ /**
+ * Get a boolean test property; algorithm described in
+ * {@link #getTestPropertyLong(Configuration, String, long)}.
+ * @param key key to look up
+ * @param defVal default value
+ * @return the evaluated test property.
+ */
+ public static boolean getTestPropertyBool(Configuration conf,
+ String key,
+ boolean defVal) {
+ return Boolean.valueOf(
+ getTestProperty(conf, key, Boolean.toString(defVal)));
+ }
+
+ /**
+ * Get a string test property.
+ * <ol>
+ * <li>Look up configuration value (which can pick up core-default.xml),
+ * using {@code defVal} as the default value (if conf != null).
+ * </li>
+ * <li>Fetch the system property.</li>
+ * <li>If the system property is not empty or "(unset)":
+ * it overrides the conf value.
+ * </li>
+ * </ol>
+ * This puts the build properties in charge of everything. It's not a
+ * perfect design; having maven set properties based on a file, as ant let
+ * you do, is better for customization.
+ *
+ * As to why there's a special (unset) value, see
+ * <a href="http://stackoverflow.com/questions/7773134/null-versus-empty-arguments-in-maven">
+ * Stack Overflow</a>
+ * @param conf config: may be null
+ * @param key key to look up
+ * @param defVal default value
+ * @return the evaluated test property.
+ */
+ public static String getTestProperty(Configuration conf,
+ String key,
+ String defVal) {
+ String confVal = conf != null
+ ? conf.getTrimmed(key, defVal)
+ : defVal;
+ String propval = System.getProperty(key);
+ return StringUtils.isNotEmpty(propval) && !UNSET_PROPERTY.equals(propval)
+ ? propval : confVal;
+ }
+
+ /**
+ * Verify the class of an exception. If it is not as expected, rethrow it.
+ * Comparison is on the exact class, not subclass-of inference as
+ * offered by {@code instanceof}.
+ * @param clazz the expected exception class
+ * @param ex the exception caught
+ * @return the exception, if it is of the expected class
+ * @throws Exception the exception passed in.
+ */
+ public static Exception verifyExceptionClass(Class<?> clazz,
+ Exception ex)
+ throws Exception {
+ if (!(ex.getClass().equals(clazz))) {
+ throw ex;
+ }
+ return ex;
+ }
+
+ /**
+ * Turn off FS Caching: use if a filesystem with different options from
+ * the default is required.
+ * @param conf configuration to patch
+ */
+ public static void disableFilesystemCaching(Configuration conf) {
+ conf.setBoolean("fs.wasb.impl.disable.cache", true);
+ }
+
+ /**
+ * Create a test path, using the value of
+ * {@link AzureTestConstants#TEST_UNIQUE_FORK_ID} if it is set.
+ * @param defVal default value
+ * @return a path
+ */
+ public static Path createTestPath(Path defVal) {
+ String testUniqueForkId = System.getProperty(
+ AzureTestConstants.TEST_UNIQUE_FORK_ID);
+ return testUniqueForkId == null
+ ? defVal
+ : new Path("/" + testUniqueForkId, "test");
+ }
+
+ /**
+ * Create a test page blob path using the value of
+ * {@link AzureTestConstants#TEST_UNIQUE_FORK_ID} if it is set.
+ * @param fs filesystem to qualify the path against
+ * @param filename filename at the end of the path
+ * @return an absolute path
+ */
+ public static Path blobPathForTests(FileSystem fs, String filename) {
+ String testUniqueForkId = System.getProperty(
+ AzureTestConstants.TEST_UNIQUE_FORK_ID);
+ return fs.makeQualified(new Path(PAGE_BLOB_DIR,
+ testUniqueForkId == null
+ ? filename
+ : (testUniqueForkId + "/" + filename)));
+ }
+
+ /**
+ * Create a test path using the value of
+ * {@link AzureTestConstants#TEST_UNIQUE_FORK_ID} if it is set.
+ * @param fs filesystem to qualify the path against
+ * @param filename filename at the end of the path
+ * @return an absolute path
+ */
+ public static Path pathForTests(FileSystem fs, String filename) {
+ String testUniqueForkId = System.getProperty(
+ AzureTestConstants.TEST_UNIQUE_FORK_ID);
+ return fs.makeQualified(new Path(
+ testUniqueForkId == null
+ ? ("/test/" + filename)
+ : (testUniqueForkId + "/" + filename)));
+ }
+
+ /**
+ * Get a unique fork ID.
+ * Returns a default value for non-parallel tests.
+ * @return a string unique for all test VMs running in this maven build.
+ */
+ public static String getForkID() {
+ return System.getProperty(
+ AzureTestConstants.TEST_UNIQUE_FORK_ID, "fork-1");
+ }
+
+ /**
+ * Flag to indicate that this test is being executed in parallel.
+ * This is used by some of the scale tests to validate test time expectations.
+ * @return true if the build indicates this test is being run in parallel.
+ */
+ public static boolean isParallelExecution() {
+ return Boolean.getBoolean(KEY_PARALLEL_TEST_EXECUTION);
+ }
+
+ /**
+ * Asserts that {@code obj} is an instance of {@code expectedClass} using a
+ * descriptive assertion message.
+ * @param expectedClass class
+ * @param obj object to check
+ */
+ public static void assertInstanceOf(Class<?> expectedClass, Object obj) {
+ Assert.assertTrue(String.format("Expected instance of class %s, but is %s.",
+ expectedClass, obj.getClass()),
+ expectedClass.isAssignableFrom(obj.getClass()));
+ }
+
+ /**
+ * Builds a comma-separated list of class names.
+ * @param classes list of classes
+ * @return comma-separated list of class names
+ */
+ public static <T extends Class<?>> String buildClassListString(
+ List<T> classes) {
+ StringBuilder sb = new StringBuilder();
+ for (int i = 0; i < classes.size(); ++i) {
+ if (i > 0) {
+ sb.append(',');
+ }
+ sb.append(classes.get(i).getName());
+ }
+ return sb.toString();
+ }
+
+ /**
+ * This class should not be instantiated.
+ */
+ private AzureTestUtils() {
+ }
+
+ /**
+ * Assert that a configuration option matches the expected value.
+ * @param conf configuration
+ * @param key option key
+ * @param expected expected value
+ */
+ public static void assertOptionEquals(Configuration conf,
+ String key,
+ String expected) {
+ assertEquals("Value of " + key, expected, conf.get(key));
+ }
+
+ /**
+ * Assume that a condition is met. If not: log at WARN and
+ * then throw an {@link AssumptionViolatedException}.
+ * @param message message in an assumption
+ * @param condition condition to probe
+ */
+ public static void assume(String message, boolean condition) {
+ if (!condition) {
+ LOG.warn(message);
+ }
+ Assume.assumeTrue(message, condition);
+ }
+
+ /**
+ * Gets the current value of the given gauge.
+ * @param fs filesystem
+ * @param gaugeName gauge name
+ * @return the gauge value
+ */
+ public static long getLongGaugeValue(NativeAzureFileSystem fs,
+ String gaugeName) {
+ return getLongGauge(gaugeName, getMetrics(fs.getInstrumentation()));
+ }
+
+ /**
+ * Gets the current value of the given counter.
+ * @param fs filesystem
+ * @param counterName counter name
+ * @return the counter value
+ */
+ public static long getLongCounterValue(NativeAzureFileSystem fs,
+ String counterName) {
+ return getLongCounter(counterName, getMetrics(fs.getInstrumentation()));
+ }
+
+
+ /**
+ * Delete a path, catching any exception and downgrading to a log message.
+ * @param fs filesystem
+ * @param path path to delete
+ * @param recursive recursive delete?
+ * @throws IOException IO failure.
+ */
+ public static void deleteQuietly(FileSystem fs,
+ Path path,
+ boolean recursive) throws IOException {
+ if (fs != null && path != null) {
+ try {
+ fs.delete(path, recursive);
+ } catch (IOException e) {
+ LOG.warn("When deleting {}", path, e);
+ }
+ }
+ }
+
+
+ /**
+ * Clean up the test account if it is non-null; returns null so the
+ * caller can reset the corresponding field.
+ * @param testAccount test account to clean up
+ * @return null
+ * @throws Exception cleanup problems
+ */
+ public static AzureBlobStorageTestAccount cleanup(
+ AzureBlobStorageTestAccount testAccount) throws Exception {
+ if (testAccount != null) {
+ testAccount.cleanup();
+ testAccount = null;
+ }
+ return null;
+ }
+
+
+ /**
+ * Clean up the test account; any thrown exceptions are caught and
+ * logged.
+ * @param testAccount test account
+ * @return null, so that any fields can be reset.
+ */
+ public static AzureBlobStorageTestAccount cleanupTestAccount(
+ AzureBlobStorageTestAccount testAccount) {
+ if (testAccount != null) {
+ try {
+ testAccount.cleanup();
+ } catch (Exception e) {
+ LOG.error("While cleaning up test account: ", e);
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Assume that the scale tests are enabled by the relevant system property.
+ */
+ public static void assumeScaleTestsEnabled(Configuration conf) {
+ boolean enabled = getTestPropertyBool(
+ conf,
+ KEY_SCALE_TESTS_ENABLED,
+ DEFAULT_SCALE_TESTS_ENABLED);
+ assume("Scale test disabled: to enable set property "
+ + KEY_SCALE_TESTS_ENABLED,
+ enabled);
+ }
+}
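A worked example of the resolution order getTestProperty() implements: a
system property, when present and not the literal "unset", beats the
configuration value, which beats the supplied default:

    Configuration conf = new Configuration();
    conf.setLong("scale.test.operation.count", 100);         // KEY_OPERATION_COUNT
    System.setProperty("scale.test.operation.count", "500");
    long count = AzureTestUtils.getTestPropertyLong(conf,
        "scale.test.operation.count", 2005);
    // count == 500: the system property wins over conf (100) and the
    // default (2005); without the system property, count would be 100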
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/CleanupTestContainers.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/CleanupTestContainers.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/CleanupTestContainers.java
new file mode 100644
index 0000000..059a8c4
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/CleanupTestContainers.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure.integration;
+
+import java.util.EnumSet;
+
+import com.microsoft.azure.storage.CloudStorageAccount;
+import com.microsoft.azure.storage.blob.CloudBlobClient;
+import com.microsoft.azure.storage.blob.CloudBlobContainer;
+import org.junit.Test;
+
+import org.apache.hadoop.fs.azure.AbstractWasbTestBase;
+import org.apache.hadoop.fs.azure.AzureBlobStorageTestAccount;
+
+/**
+ * This looks like a test, but it is really a command to invoke to
+ * clean up containers created in other test runs.
+ */
+public class CleanupTestContainers extends AbstractWasbTestBase {
+
+ private static final String CONTAINER_PREFIX = "wasbtests-";
+
+ @Override
+ protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+ return AzureBlobStorageTestAccount.create(
+ "CleanupTestContainers",
+ EnumSet.noneOf(AzureBlobStorageTestAccount.CreateOptions.class),
+ createConfiguration(),
+ true);
+ }
+
+ @Test
+ public void testEnumContainers() throws Throwable {
+ describe("Enumerating all the WASB test containers");
+
+ int count = 0;
+ CloudStorageAccount storageAccount = getTestAccount().getRealAccount();
+ CloudBlobClient blobClient = storageAccount.createCloudBlobClient();
+ Iterable<CloudBlobContainer> containers
+ = blobClient.listContainers(CONTAINER_PREFIX);
+ for (CloudBlobContainer container : containers) {
+ count++;
+ LOG.info("Container {} URI {}",
+ container.getName(),
+ container.getUri());
+ }
+ LOG.info("Found {} test containers", count);
+ }
+
+ @Test
+ public void testDeleteContainers() throws Throwable {
+ describe("Delete all the WASB test containers");
+ int count = 0;
+ CloudStorageAccount storageAccount = getTestAccount().getRealAccount();
+ CloudBlobClient blobClient = storageAccount.createCloudBlobClient();
+ Iterable<CloudBlobContainer> containers
+ = blobClient.listContainers(CONTAINER_PREFIX);
+ for (CloudBlobContainer container : containers) {
+ LOG.info("Container {} URI {}",
+ container.getName(),
+ container.getUri());
+ if (container.deleteIfExists()) {
+ count++;
+ }
+ }
+ LOG.info("Deleted {} test containers", count);
+ }
+}
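Since it is an ordinary JUnit class, the cleanup "command" can be run on its
own, for example via maven's surefire test filter
(-Dtest=CleanupTestContainers) against the account configured in
azure-test.xml; only containers whose names start with the wasbtests- prefix
are listed or deleted.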
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/ITestAzureHugeFiles.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/ITestAzureHugeFiles.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/ITestAzureHugeFiles.java
new file mode 100644
index 0000000..850aca1
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/ITestAzureHugeFiles.java
@@ -0,0 +1,456 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure.integration;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.Iterator;
+
+import org.junit.Assert;
+import org.junit.Assume;
+import org.junit.FixMethodOrder;
+import org.junit.Test;
+import org.junit.runners.MethodSorters;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StorageStatistics;
+import org.apache.hadoop.fs.azure.AzureBlobStorageTestAccount;
+import org.apache.hadoop.fs.azure.NativeAzureFileSystem;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.io.IOUtils;
+
+import static org.apache.hadoop.fs.azure.integration.AzureTestUtils.*;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.*;
+
+
+/**
+ * Scale test which creates a huge file.
+ *
+ * <b>Important:</b> the order in which these tests execute is fixed to
+ * alphabetical order. Test cases are numbered {@code test_123_} to impose
+ * an ordering based on the numbers.
+ *
+ * Having this ordering allows the tests to assume that the huge file
+ * exists. Even so: they should all have a {@link #assumeHugeFileExists()}
+ * check at the start, in case an individual test is executed.
+ *
+ * <b>Ignore checkstyle complaints about naming: we need a scheme with visible
+ * ordering.</b>
+ */
+
+@FixMethodOrder(MethodSorters.NAME_ASCENDING)
+public class ITestAzureHugeFiles extends AbstractAzureScaleTest {
+
+ private static final Logger LOG = LoggerFactory.getLogger(
+ ITestAzureHugeFiles.class);
+
+ private Path scaleTestDir;
+ private Path hugefile;
+ private Path hugefileRenamed;
+ private AzureBlobStorageTestAccount testAccountForCleanup;
+
+ private static final int UPLOAD_BLOCKSIZE = 64 * S_1K;
+ private static final byte[] SOURCE_DATA;
+
+ static {
+ SOURCE_DATA = dataset(UPLOAD_BLOCKSIZE, 0, S_256);
+ }
+
+ private Path testPath;
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ testPath = path("ITestAzureHugeFiles");
+ scaleTestDir = new Path(testPath, "scale");
+ hugefile = new Path(scaleTestDir, "hugefile");
+ hugefileRenamed = new Path(scaleTestDir, "hugefileRenamed");
+ }
+
+ /**
+ * Only clean up the test account (and delete the container) if the account
+ * is set in the field {@code testAccountForCleanup}.
+ * @throws Exception on any teardown failure
+ */
+ @Override
+ public void tearDown() throws Exception {
+ testAccount = null;
+ super.tearDown();
+ if (testAccountForCleanup != null) {
+ // testAccount was nulled above; clean up the saved reference instead
+ cleanupTestAccount(testAccountForCleanup);
+ }
+ }
+
+ @Override
+ protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+ return AzureBlobStorageTestAccount.create(
+ "testazurehugefiles",
+ EnumSet.of(AzureBlobStorageTestAccount.CreateOptions.CreateContainer),
+ createConfiguration(),
+ true);
+ }
+
+ /**
+ * Stop the test-case teardown from deleting the test path.
+ * @throws IOException never
+ */
+ protected void deleteTestDirInTeardown() throws IOException {
+ // this is a no-op, so the test file is preserved.
+ // the last test in the suite does the teardown
+ }
+
+ protected void deleteHugeFile() throws IOException {
+ describe("Deleting %s", hugefile);
+ ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer();
+ getFileSystem().delete(hugefile, false);
+ timer.end("time to delete %s", hugefile);
+ }
+
+ /**
+ * Log how long an IOP took, by dividing the total time by the
+ * count of operations, printing in a human-readable form.
+ * @param operation operation being measured
+ * @param timer timing data
+ * @param count IOP count.
+ */
+ protected void logTimePerIOP(String operation,
+ ContractTestUtils.NanoTimer timer,
+ long count) {
+ LOG.info("Time per {}: {} nS",
+ operation, toHuman(timer.duration() / count));
+ }
+
+ /**
+ * Assume that the huge file exists, skip if not/empty.
+ * @return the file status
+ * @throws IOException IO failure
+ */
+ FileStatus assumeHugeFileExists() throws IOException {
+ assertPathExists(getFileSystem(), "huge file not created", hugefile);
+ try {
+ FileStatus status = getFileSystem().getFileStatus(hugefile);
+ Assume.assumeTrue("Not a file: " + status, status.isFile());
+ Assume.assumeTrue("File " + hugefile + " is empty", status.getLen() > 0);
+ return status;
+ } catch (FileNotFoundException e) {
+ skip("huge file not created: " + hugefile);
+ }
+ return null;
+ }
+
+ /**
+ * If/when {@link NativeAzureFileSystem#getStorageStatistics()} returns
+ * statistics, this will be interesting.
+ */
+ private void logFSState() {
+ StorageStatistics statistics = getFileSystem().getStorageStatistics();
+ Iterator<StorageStatistics.LongStatistic> longStatistics
+ = statistics.getLongStatistics();
+ while (longStatistics.hasNext()) {
+ StorageStatistics.LongStatistic next = longStatistics.next();
+ LOG.info("{} = {}", next.getName(), next.getValue());
+ }
+ }
+
+ @Test
+ public void test_010_CreateHugeFile() throws IOException {
+ long filesize = getTestPropertyBytes(getConfiguration(),
+ KEY_HUGE_FILESIZE,
+ DEFAULT_HUGE_FILESIZE);
+ long filesizeMB = filesize / S_1M;
+
+ // clean up from any previous attempts
+ deleteHugeFile();
+
+ describe("Creating file %s of size %d MB", hugefile, filesizeMB);
+
+ // now do a check of available upload time, with a pessimistic bandwidth
+ // (that of remote upload tests). If the test times out, the outcome is
+ // lost and, because the follow-on tests keep running, they will overlap
+ // with the still-ongoing upload, causing much confusion.
+/*
+ int timeout = getTestTimeoutSeconds();
+ // assume 1 MB/s upload bandwidth
+ int bandwidth = S_1M;
+ long uploadTime = filesize / bandwidth;
+ assertTrue(String.format("Timeout set in %s seconds is too low;" +
+ " estimating upload time of %d seconds at 1 MB/s." +
+ " Rerun tests with -D%s=%d",
+ timeout, uploadTime, KEY_TEST_TIMEOUT, uploadTime * 2),
+ uploadTime < timeout);
+*/
+ assertEquals("File size set in " + KEY_HUGE_FILESIZE + " = " + filesize
+ + " is not a multiple of " + UPLOAD_BLOCKSIZE,
+ 0, filesize % UPLOAD_BLOCKSIZE);
+
+ byte[] data = SOURCE_DATA;
+
+ long blocks = filesize / UPLOAD_BLOCKSIZE;
+ long blocksPerMB = S_1M / UPLOAD_BLOCKSIZE;
+
+ // perform the upload.
+ // there's lots of logging here, so that a tail -f on the output log
+ // can give a view of what is happening.
+ NativeAzureFileSystem fs = getFileSystem();
+
+ ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer();
+ long blocksPer10MB = blocksPerMB * 10;
+ fs.mkdirs(hugefile.getParent());
+ try (FSDataOutputStream out = fs.create(hugefile,
+ true,
+ UPLOAD_BLOCKSIZE,
+ null)) {
+ for (long block = 1; block <= blocks; block++) {
+ out.write(data);
+ long written = block * UPLOAD_BLOCKSIZE;
+ // every 10 MB and on file upload @ 100%, print some stats
+ if (block % blocksPer10MB == 0 || written == filesize) {
+ long percentage = written * 100 / filesize;
+ double elapsedTime = timer.elapsedTime() / NANOSEC;
+ double writtenMB = 1.0 * written / S_1M;
+ LOG.info(String.format("[%02d%%] Buffered %.2f MB out of %d MB;"
+ + " elapsedTime=%.2fs; write to buffer bandwidth=%.2f MB/s",
+ percentage,
+ writtenMB,
+ filesizeMB,
+ elapsedTime,
+ writtenMB / elapsedTime));
+ }
+ }
+ // now close the file
+ LOG.info("Closing stream {}", out);
+ ContractTestUtils.NanoTimer closeTimer
+ = new ContractTestUtils.NanoTimer();
+ out.close();
+ closeTimer.end("time to close() output stream");
+ }
+
+ timer.end("time to write %d MB in blocks of %d",
+ filesizeMB, UPLOAD_BLOCKSIZE);
+ logFSState();
+ bandwidth(timer, filesize);
+ ContractTestUtils.assertPathExists(fs, "Huge file", hugefile);
+ FileStatus status = fs.getFileStatus(hugefile);
+ ContractTestUtils.assertIsFile(hugefile, status);
+ assertEquals("File size in " + status, filesize, status.getLen());
+ }
+
+ @Test
+ public void test_040_PositionedReadHugeFile() throws Throwable {
+ assumeHugeFileExists();
+ describe("Positioned reads of file %s", hugefile);
+ NativeAzureFileSystem fs = getFileSystem();
+ FileStatus status = fs.getFileStatus(hugefile);
+ long filesize = status.getLen();
+ int ops = 0;
+ final int bufferSize = 8192;
+ byte[] buffer = new byte[bufferSize];
+ long eof = filesize - 1;
+
+ ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer();
+ ContractTestUtils.NanoTimer readAtByte0, readAtByte0Again, readAtEOF;
+ try (FSDataInputStream in = openDataFile()) {
+ readAtByte0 = new ContractTestUtils.NanoTimer();
+ in.readFully(0, buffer);
+ readAtByte0.end("time to read data at start of file");
+ ops++;
+
+ readAtEOF = new ContractTestUtils.NanoTimer();
+ in.readFully(eof - bufferSize, buffer);
+ readAtEOF.end("time to read data at end of file");
+ ops++;
+
+ readAtByte0Again = new ContractTestUtils.NanoTimer();
+ in.readFully(0, buffer);
+ readAtByte0Again.end("time to read data at start of file again");
+ ops++;
+ LOG.info("Final stream state: {}", in);
+ }
+ long mb = Math.max(filesize / S_1M, 1);
+
+ logFSState();
+ timer.end("time to performed positioned reads of %d MB ", mb);
+ LOG.info("Time per positioned read = {} nS",
+ toHuman(timer.nanosPerOperation(ops)));
+ }
+
+ protected FSDataInputStream openDataFile() throws IOException {
+ NanoTimer openTimer = new NanoTimer();
+ FSDataInputStream inputStream = getFileSystem().open(hugefile,
+ UPLOAD_BLOCKSIZE);
+ openTimer.end("open data file");
+ return inputStream;
+ }
+
+
+ /**
+ * Work out the bandwidth in bytes/second.
+ * @param timer timer measuring the duration
+ * @param bytes bytes
+ * @return the number of bytes/second of the recorded operation
+ */
+ public static double bandwidthInBytes(NanoTimer timer, long bytes) {
+ return bytes * NANOSEC / timer.duration();
+ }
+
+ @Test
+ public void test_050_readHugeFile() throws Throwable {
+ assumeHugeFileExists();
+ describe("Reading %s", hugefile);
+ NativeAzureFileSystem fs = getFileSystem();
+ FileStatus status = fs.getFileStatus(hugefile);
+ long filesize = status.getLen();
+ long blocks = filesize / UPLOAD_BLOCKSIZE;
+ byte[] data = new byte[UPLOAD_BLOCKSIZE];
+
+ ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer();
+ try (FSDataInputStream in = openDataFile()) {
+ for (long block = 0; block < blocks; block++) {
+ in.readFully(data);
+ }
+ LOG.info("Final stream state: {}", in);
+ }
+
+ long mb = Math.max(filesize / S_1M, 1);
+ timer.end("time to read file of %d MB ", mb);
+ LOG.info("Time per MB to read = {} nS",
+ toHuman(timer.nanosPerOperation(mb)));
+ bandwidth(timer, filesize);
+ logFSState();
+ }
+
+ @Test
+ public void test_060_openAndReadWholeFileBlocks() throws Throwable {
+ FileStatus status = assumeHugeFileExists();
+ int blockSize = S_1M;
+ describe("Open the test file and read it in blocks of size %d",
+ blockSize);
+ long len = status.getLen();
+ FSDataInputStream in = openDataFile();
+ NanoTimer timer2 = null;
+ long blockCount = 0;
+ long totalToRead = 0;
+ int resetCount = 0;
+ try {
+ byte[] block = new byte[blockSize];
+ timer2 = new NanoTimer();
+ long count = 0;
+ // implicitly rounding down here
+ blockCount = len / blockSize;
+ totalToRead = blockCount * blockSize;
+ long minimumBandwidth = S_128K;
+ int maxResetCount = 4;
+ resetCount = 0;
+ for (long i = 0; i < blockCount; i++) {
+ int offset = 0;
+ int remaining = blockSize;
+ long blockId = i + 1;
+ NanoTimer blockTimer = new NanoTimer();
+ int reads = 0;
+ while (remaining > 0) {
+ NanoTimer readTimer = new NanoTimer();
+ int bytesRead = in.read(block, offset, remaining);
+ reads++;
+ if (bytesRead < 0) {
+ // end of file reached before the block was filled
+ break;
+ }
+ remaining -= bytesRead;
+ offset += bytesRead;
+ count += bytesRead;
+ readTimer.end();
+ if (bytesRead != 0) {
+ LOG.debug("Bytes in read #{}: {} , block bytes: {},"
+ + " remaining in block: {}"
+ + " duration={} nS; ns/byte: {}, bandwidth={} MB/s",
+ reads, bytesRead, blockSize - remaining, remaining,
+ readTimer.duration(),
+ readTimer.nanosPerOperation(bytesRead),
+ readTimer.bandwidthDescription(bytesRead));
+ } else {
+ LOG.warn("0 bytes returned by read() operation #{}", reads);
+ }
+ }
+ blockTimer.end("Reading block %d in %d reads", blockId, reads);
+ String bw = blockTimer.bandwidthDescription(blockSize);
+ LOG.info("Bandwidth of block {}: {} MB/s: ", blockId, bw);
+ if (bandwidthInBytes(blockTimer, blockSize) < minimumBandwidth) {
+ LOG.warn("Bandwidth {} too low on block {}: resetting connection",
+ bw, blockId);
+ Assert.assertTrue("Bandwidth of " + bw + " too low after "
+ + resetCount + " attempts", resetCount <= maxResetCount);
+ resetCount++;
+ // reset the connection
+ }
+ }
+ } finally {
+ IOUtils.closeStream(in);
+ }
+ timer2.end("Time to read %d bytes in %d blocks", totalToRead, blockCount);
+ LOG.info("Overall Bandwidth {} MB/s; reset connections {}",
+ timer2.bandwidth(totalToRead), resetCount);
+ }
+
+ @Test
+ public void test_100_renameHugeFile() throws Throwable {
+ assumeHugeFileExists();
+ describe("renaming %s to %s", hugefile, hugefileRenamed);
+ NativeAzureFileSystem fs = getFileSystem();
+ FileStatus status = fs.getFileStatus(hugefile);
+ long filesize = status.getLen();
+ fs.delete(hugefileRenamed, false);
+ ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer();
+ fs.rename(hugefile, hugefileRenamed);
+ long mb = Math.max(filesize / S_1M, 1);
+ timer.end("time to rename file of %d MB", mb);
+ LOG.info("Time per MB to rename = {} nS",
+ toHuman(timer.nanosPerOperation(mb)));
+ bandwidth(timer, filesize);
+ logFSState();
+ FileStatus destFileStatus = fs.getFileStatus(hugefileRenamed);
+ assertEquals(filesize, destFileStatus.getLen());
+
+ // rename back
+ ContractTestUtils.NanoTimer timer2 = new ContractTestUtils.NanoTimer();
+ fs.rename(hugefileRenamed, hugefile);
+ timer2.end("Renaming back");
+ LOG.info("Time per MB to rename = {} nS",
+ toHuman(timer2.nanosPerOperation(mb)));
+ bandwidth(timer2, filesize);
+ }
+
+ @Test
+ public void test_999_deleteHugeFiles() throws IOException {
+ // mark the test account for cleanup after this test
+ testAccountForCleanup = testAccount;
+ deleteHugeFile();
+ ContractTestUtils.NanoTimer timer2 = new ContractTestUtils.NanoTimer();
+ NativeAzureFileSystem fs = getFileSystem();
+ fs.delete(hugefileRenamed, false);
+ timer2.end("time to delete %s", hugefileRenamed);
+ rm(fs, testPath, true, false);
+ assertPathDoesNotExist(fs, "deleted huge file", testPath);
+ }
+
+}
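
The numbered method names above (test_010_ through test_999_) rely on the
JUnit runner executing tests in ascending name order: test_010_CreateHugeFile
creates the file the later tests read, and test_999_deleteHugeFiles does the
final cleanup. A minimal sketch of how that ordering is usually enforced in
JUnit 4, assuming the base class (not shown in this hunk) carries the
annotation; the class and method names here are illustrative only:

    import org.junit.FixMethodOrder;
    import org.junit.Test;
    import org.junit.runners.MethodSorters;

    @FixMethodOrder(MethodSorters.NAME_ASCENDING)
    public class OrderedSuiteExample {
      @Test public void test_010_create() { /* runs first */ }
      @Test public void test_999_cleanup() { /* runs last */ }
    }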
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/Sizes.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/Sizes.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/Sizes.java
new file mode 100644
index 0000000..92b10cf
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/Sizes.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure.integration;
+
+/**
+ * Sizes of data.
+ * Checkstyle doesn't like the naming scheme or the fact that it's an interface.
+ */
+public interface Sizes {
+
+ int S_256 = 256;
+ int S_512 = 512;
+ int S_1K = 1024;
+ int S_4K = 4 * S_1K;
+ int S_8K = 8 * S_1K;
+ int S_16K = 16 * S_1K;
+ int S_32K = 32 * S_1K;
+ int S_64K = 64 * S_1K;
+ int S_128K = 128 * S_1K;
+ int S_256K = 256 * S_1K;
+ int S_1M = S_1K * S_1K;
+ int S_2M = 2 * S_1M;
+ int S_5M = 5 * S_1M;
+ int S_10M = 10 * S_1M;
+ double NANOSEC = 1.0e9;
+
+}
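
These constants are designed to compose; a small hypothetical illustration
(the class name and helper method are illustrative only, the constants are
the real ones from the interface above):

    import static org.apache.hadoop.fs.azure.integration.Sizes.*;

    public class SizesDemo {
      // the 64 KB upload block size used by ITestAzureHugeFiles above
      static final int UPLOAD_BLOCKSIZE = 64 * S_1K;

      // convert a byte count and nanosecond duration into MB/s
      static double bandwidthMBs(long bytes, long durationNanos) {
        return (bytes * NANOSEC / durationNanos) / S_1M;
      }
    }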
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/ITestAzureFileSystemInstrumentation.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/ITestAzureFileSystemInstrumentation.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/ITestAzureFileSystemInstrumentation.java
new file mode 100644
index 0000000..60e24ee
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/ITestAzureFileSystemInstrumentation.java
@@ -0,0 +1,586 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure.metrics;
+
+import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_CLIENT_ERRORS;
+import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_DIRECTORIES_CREATED;
+import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_DOWNLOAD_LATENCY;
+import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_DOWNLOAD_RATE;
+import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_FILES_CREATED;
+import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_FILES_DELETED;
+import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_SERVER_ERRORS;
+import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_UPLOAD_LATENCY;
+import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_UPLOAD_RATE;
+import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_WEB_RESPONSES;
+import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.mockito.Matchers.argThat;
+import static org.mockito.Mockito.verify;
+
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.Arrays;
+import java.util.Date;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.azure.AbstractWasbTestBase;
+import org.apache.hadoop.fs.azure.AzureBlobStorageTestAccount;
+import org.apache.hadoop.fs.azure.AzureException;
+import org.apache.hadoop.fs.azure.AzureNativeFileSystemStore;
+import org.apache.hadoop.fs.azure.NativeAzureFileSystem;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.metrics2.MetricsTag;
+import org.hamcrest.BaseMatcher;
+import org.hamcrest.Description;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Instrumentation test: performs filesystem operations and verifies that
+ * the metrics they update stay consistent.
+ */
+public class ITestAzureFileSystemInstrumentation extends AbstractWasbTestBase {
+
+ protected static final Logger LOG =
+ LoggerFactory.getLogger(ITestAzureFileSystemInstrumentation.class);
+
+ @Override
+ protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+ return AzureBlobStorageTestAccount.create();
+ }
+
+ @Test
+ public void testMetricTags() throws Exception {
+ String accountName =
+ getTestAccount().getRealAccount().getBlobEndpoint()
+ .getAuthority();
+ String containerName =
+ getTestAccount().getRealContainer().getName();
+ MetricsRecordBuilder myMetrics = getMyMetrics();
+ verify(myMetrics).add(argThat(
+ new TagMatcher("accountName", accountName)
+ ));
+ verify(myMetrics).add(argThat(
+ new TagMatcher("containerName", containerName)
+ ));
+ verify(myMetrics).add(argThat(
+ new TagMatcher("Context", "azureFileSystem")
+ ));
+ verify(myMetrics).add(argThat(
+ new TagExistsMatcher("wasbFileSystemId")
+ ));
+ }
+
+
+ @Test
+ public void testMetricsOnMkdirList() throws Exception {
+ long base = getBaseWebResponses();
+
+ // Create a directory
+ assertTrue(fs.mkdirs(new Path("a")));
+ // At the time of writing
+ // getAncestor uses 2 calls for each folder level /user/<name>/a
+ // plus 1 call made by checkContainer
+ // mkdir checks the hierarchy with 2 calls per level
+ // mkdirs calls storeEmptyDir to create the empty folder, which makes 5 calls
+ // For a total of 7 + 6 + 5 = 18 web responses
+ base = assertWebResponsesInRange(base, 1, 18);
+ assertEquals(1,
+ AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_DIRECTORIES_CREATED));
+
+ // List the root contents
+ assertEquals(1, getFileSystem().listStatus(new Path("/")).length);
+ base = assertWebResponsesEquals(base, 1);
+
+ assertNoErrors();
+ }
+
+ private BandwidthGaugeUpdater getBandwidthGaugeUpdater() {
+ NativeAzureFileSystem azureFs = (NativeAzureFileSystem) getFileSystem();
+ AzureNativeFileSystemStore azureStore = azureFs.getStore();
+ return azureStore.getBandwidthGaugeUpdater();
+ }
+
+ private static byte[] nonZeroByteArray(int size) {
+ byte[] data = new byte[size];
+ Arrays.fill(data, (byte)5);
+ return data;
+ }
+
+ @Test
+ public void testMetricsOnFileCreateRead() throws Exception {
+ long base = getBaseWebResponses();
+
+ assertEquals(0, AzureMetricsTestUtil.getCurrentBytesWritten(getInstrumentation()));
+
+ Path filePath = new Path("/metricsTest_webResponses");
+ final int FILE_SIZE = 1000;
+
+ // Suppress auto-update of bandwidth metrics so we get
+ // to update them exactly when we want to.
+ getBandwidthGaugeUpdater().suppressAutoUpdate();
+
+ // Create a file
+ Date start = new Date();
+ OutputStream outputStream = getFileSystem().create(filePath);
+ outputStream.write(nonZeroByteArray(FILE_SIZE));
+ outputStream.close();
+ long uploadDurationMs = new Date().getTime() - start.getTime();
+
+ // The exact number of requests/responses that happen to create a file
+ // can vary - at the time of writing this code it takes 10
+ // requests/responses for the 1000 byte file (33 for 100 MB),
+ // plus the initial container-check request, but that
+ // can very easily change in the future. Just assert that we do roughly
+ // more than 2 but less than 15.
+ logOpResponseCount("Creating a 1K file", base);
+ base = assertWebResponsesInRange(base, 2, 15);
+ getBandwidthGaugeUpdater().triggerUpdate(true);
+ long bytesWritten = AzureMetricsTestUtil.getCurrentBytesWritten(getInstrumentation());
+ assertTrue("The bytes written in the last second " + bytesWritten +
+ " is pretty far from the expected range of around " + FILE_SIZE +
+ " bytes plus a little overhead.",
+ bytesWritten > (FILE_SIZE / 2) && bytesWritten < (FILE_SIZE * 2));
+ long totalBytesWritten = AzureMetricsTestUtil.getCurrentTotalBytesWritten(getInstrumentation());
+ assertTrue("The total bytes written " + totalBytesWritten +
+ " is pretty far from the expected range of around " + FILE_SIZE +
+ " bytes plus a little overhead.",
+ totalBytesWritten >= FILE_SIZE && totalBytesWritten < (FILE_SIZE * 2));
+ long uploadRate = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(), WASB_UPLOAD_RATE);
+ LOG.info("Upload rate: " + uploadRate + " bytes/second.");
+ long expectedRate = (FILE_SIZE * 1000L) / uploadDurationMs;
+ assertTrue("The upload rate " + uploadRate +
+ " is below the expected range of around " + expectedRate +
+ " bytes/second that the unit test observed. This should never be" +
+ " the case since the test underestimates the rate by looking at " +
+ " end-to-end time instead of just block upload time.",
+ uploadRate >= expectedRate);
+ long uploadLatency = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),
+ WASB_UPLOAD_LATENCY);
+ LOG.info("Upload latency: {}", uploadLatency);
+ long expectedLatency = uploadDurationMs; // We're uploading less than a block.
+ assertTrue("The upload latency " + uploadLatency +
+ " should be greater than zero now that I've just uploaded a file.",
+ uploadLatency > 0);
+ assertTrue("The upload latency " + uploadLatency +
+ " is more than the expected range of around " + expectedLatency +
+ " milliseconds that the unit test observed. This should never be" +
+ " the case since the test overestimates the latency by looking at " +
+ " end-to-end time instead of just block upload time.",
+ uploadLatency <= expectedLatency);
+
+ // Read the file
+ start = new Date();
+ InputStream inputStream = getFileSystem().open(filePath);
+ int count = 0;
+ while (inputStream.read() >= 0) {
+ count++;
+ }
+ inputStream.close();
+ long downloadDurationMs = new Date().getTime() - start.getTime();
+ assertEquals(FILE_SIZE, count);
+
+ // Again, exact number varies. At the time of writing this code
+ // it takes 4 request/responses, so just assert a rough range between
+ // 1 and 10.
+ logOpResponseCount("Reading a 1K file", base);
+ base = assertWebResponsesInRange(base, 1, 10);
+ getBandwidthGaugeUpdater().triggerUpdate(false);
+ long totalBytesRead = AzureMetricsTestUtil.getCurrentTotalBytesRead(getInstrumentation());
+ assertEquals(FILE_SIZE, totalBytesRead);
+ long bytesRead = AzureMetricsTestUtil.getCurrentBytesRead(getInstrumentation());
+ assertTrue("The bytes read in the last second " + bytesRead +
+ " is pretty far from the expected range of around " + FILE_SIZE +
+ " bytes plus a little overhead.",
+ bytesRead > (FILE_SIZE / 2) && bytesRead < (FILE_SIZE * 2));
+ long downloadRate = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(), WASB_DOWNLOAD_RATE);
+ LOG.info("Download rate: " + downloadRate + " bytes/second.");
+ expectedRate = (FILE_SIZE * 1000L) / downloadDurationMs;
+ assertTrue("The download rate " + downloadRate +
+ " is below the expected range of around " + expectedRate +
+ " bytes/second that the unit test observed. This should never be" +
+ " the case since the test underestimates the rate by looking at " +
+ " end-to-end time instead of just block download time.",
+ downloadRate >= expectedRate);
+ long downloadLatency = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),
+ WASB_DOWNLOAD_LATENCY);
+ LOG.info("Download latency: " + downloadLatency);
+ expectedLatency = downloadDurationMs; // We're downloading less than a block.
+ assertTrue("The download latency " + downloadLatency +
+ " should be greater than zero now that I've just downloaded a file.",
+ downloadLatency > 0);
+ assertTrue("The download latency " + downloadLatency +
+ " is more than the expected range of around " + expectedLatency +
+ " milliseconds that the unit test observed. This should never be" +
+ " the case since the test overestimates the latency by looking at " +
+ " end-to-end time instead of just block download time.",
+ downloadLatency <= expectedLatency);
+
+ assertNoErrors();
+ }
+
+ @Test
+ public void testMetricsOnBigFileCreateRead() throws Exception {
+ long base = getBaseWebResponses();
+
+ assertEquals(0, AzureMetricsTestUtil.getCurrentBytesWritten(getInstrumentation()));
+
+ Path filePath = new Path("/metricsTest_webResponses");
+ final int FILE_SIZE = 100 * 1024 * 1024;
+
+ // Suppress auto-update of bandwidth metrics so we get
+ // to update them exactly when we want to.
+ getBandwidthGaugeUpdater().suppressAutoUpdate();
+
+ // Create a file
+ OutputStream outputStream = getFileSystem().create(filePath);
+ outputStream.write(new byte[FILE_SIZE]);
+ outputStream.close();
+
+ // The exact number of requests/responses that happen to create a file
+ // can vary - at the time of writing this code it takes 34
+ // requests/responses for the 100 MB file,
+ // plus the initial container check request, but that
+ // can very easily change in the future. Just assert that we do roughly
+ // more than 20 but less than 50.
+ logOpResponseCount("Creating a 100 MB file", base);
+ base = assertWebResponsesInRange(base, 20, 50);
+ getBandwidthGaugeUpdater().triggerUpdate(true);
+ long totalBytesWritten = AzureMetricsTestUtil.getCurrentTotalBytesWritten(getInstrumentation());
+ assertTrue("The total bytes written " + totalBytesWritten +
+ " is pretty far from the expected range of around " + FILE_SIZE +
+ " bytes plus a little overhead.",
+ totalBytesWritten >= FILE_SIZE && totalBytesWritten < (FILE_SIZE * 2));
+ long uploadRate = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(), WASB_UPLOAD_RATE);
+ LOG.info("Upload rate: " + uploadRate + " bytes/second.");
+ long uploadLatency = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),
+ WASB_UPLOAD_LATENCY);
+ LOG.info("Upload latency: " + uploadLatency);
+ assertTrue("The upload latency " + uploadLatency +
+ " should be greater than zero now that I've just uploaded a file.",
+ uploadLatency > 0);
+
+ // Read the file
+ InputStream inputStream = getFileSystem().open(filePath);
+ int count = 0;
+ while (inputStream.read() >= 0) {
+ count++;
+ }
+ inputStream.close();
+ assertEquals(FILE_SIZE, count);
+
+ // Again, exact number varies. At the time of writing this code
+ // it takes 27 request/responses, so just assert a rough range between
+ // 20 and 40.
+ logOpResponseCount("Reading a 100 MB file", base);
+ base = assertWebResponsesInRange(base, 20, 40);
+ getBandwidthGaugeUpdater().triggerUpdate(false);
+ long totalBytesRead = AzureMetricsTestUtil.getCurrentTotalBytesRead(getInstrumentation());
+ assertEquals(FILE_SIZE, totalBytesRead);
+ long downloadRate = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(), WASB_DOWNLOAD_RATE);
+ LOG.info("Download rate: " + downloadRate + " bytes/second.");
+ long downloadLatency = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),
+ WASB_DOWNLOAD_LATENCY);
+ LOG.info("Download latency: " + downloadLatency);
+ assertTrue("The download latency " + downloadLatency +
+ " should be greater than zero now that I've just downloaded a file.",
+ downloadLatency > 0);
+ }
+
+ @Test
+ public void testMetricsOnFileRename() throws Exception {
+ long base = getBaseWebResponses();
+
+ Path originalPath = new Path("/metricsTest_RenameStart");
+ Path destinationPath = new Path("/metricsTest_RenameFinal");
+
+ // Create an empty file
+ assertEquals(0, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_FILES_CREATED));
+ assertTrue(getFileSystem().createNewFile(originalPath));
+ logOpResponseCount("Creating an empty file", base);
+ base = assertWebResponsesInRange(base, 2, 20);
+ assertEquals(1, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_FILES_CREATED));
+
+ // Rename the file
+ assertTrue(
+ ((FileSystem) getFileSystem()).rename(originalPath, destinationPath));
+ // Varies: at the time of writing this code it takes 7 requests/responses.
+ logOpResponseCount("Renaming a file", base);
+ base = assertWebResponsesInRange(base, 2, 15);
+
+ assertNoErrors();
+ }
+
+ @Test
+ public void testMetricsOnFileExistsDelete() throws Exception {
+ long base = getBaseWebResponses();
+
+ Path filePath = new Path("/metricsTest_delete");
+
+ // Check existence
+ assertFalse(getFileSystem().exists(filePath));
+ // At the time of writing this code it takes 2 requests/responses to
+ // check existence, which seems excessive, plus initial request for
+ // container check.
+ logOpResponseCount("Checking file existence for non-existent file", base);
+ base = assertWebResponsesInRange(base, 1, 3);
+
+ // Create an empty file
+ assertTrue(getFileSystem().createNewFile(filePath));
+ base = getCurrentWebResponses();
+
+ // Check existence again
+ assertTrue(getFileSystem().exists(filePath));
+ logOpResponseCount("Checking file existence for existent file", base);
+ base = assertWebResponsesInRange(base, 1, 2);
+
+ // Delete the file
+ assertEquals(0, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_FILES_DELETED));
+ assertTrue(getFileSystem().delete(filePath, false));
+ // At the time of writing this code it takes 4 requests/responses to
+ // delete, which seems excessive. Check for range 1-4 for now.
+ logOpResponseCount("Deleting a file", base);
+ base = assertWebResponsesInRange(base, 1, 4);
+ assertEquals(1, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_FILES_DELETED));
+
+ assertNoErrors();
+ }
+
+ @Test
+ public void testMetricsOnDirRename() throws Exception {
+ long base = getBaseWebResponses();
+
+ Path originalDirName = new Path("/metricsTestDirectory_RenameStart");
+ Path innerFileName = new Path(originalDirName, "innerFile");
+ Path destDirName = new Path("/metricsTestDirectory_RenameFinal");
+
+ // Create an empty directory
+ assertTrue(getFileSystem().mkdirs(originalDirName));
+ base = getCurrentWebResponses();
+
+ // Create an inner file
+ assertTrue(getFileSystem().createNewFile(innerFileName));
+ base = getCurrentWebResponses();
+
+ // Rename the directory
+ assertTrue(getFileSystem().rename(originalDirName, destDirName));
+
+ // At the time of writing this code it takes 11 requests/responses
+ // to rename the directory with one file. Check for range 1-20 for now.
+ logOpResponseCount("Renaming a directory", base);
+ base = assertWebResponsesInRange(base, 1, 20);
+
+ assertNoErrors();
+ }
+
+ /**
+ * Recursive discovery of path depth.
+ * @param path path to measure.
+ * @return depth, where "/" == 0.
+ */
+ int depth(Path path) {
+ if (path.isRoot()) {
+ return 0;
+ } else {
+ return 1 + depth(path.getParent());
+ }
+ }
+
+ @Test
+ public void testClientErrorMetrics() throws Exception {
+ String fileName = "metricsTestFile_ClientError";
+ Path filePath = new Path("/"+fileName);
+ final int FILE_SIZE = 100;
+ OutputStream outputStream = null;
+ String leaseID = null;
+ try {
+ // Create a file
+ outputStream = getFileSystem().create(filePath);
+ leaseID = getTestAccount().acquireShortLease(fileName);
+ try {
+ outputStream.write(new byte[FILE_SIZE]);
+ outputStream.close();
+ assertTrue("Should've thrown", false);
+ } catch (AzureException ex) {
+ assertTrue("Unexpected exception: " + ex,
+ ex.getMessage().contains("lease"));
+ }
+ assertEquals(1, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_CLIENT_ERRORS));
+ assertEquals(0, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_SERVER_ERRORS));
+ } finally {
+ if (leaseID != null) {
+ getTestAccount().releaseLease(leaseID, fileName);
+ }
+ IOUtils.closeStream(outputStream);
+ }
+ }
+
+ private void logOpResponseCount(String opName, long base) {
+ LOG.info("{} took {} web responses to complete.",
+ opName, getCurrentWebResponses() - base);
+ }
+
+ /**
+ * Gets (and asserts) the value of the wasb_web_responses counter just
+ * after the creation of the file system object.
+ */
+ private long getBaseWebResponses() {
+ // The number of requests should start at 0
+ return assertWebResponsesEquals(0, 0);
+ }
+
+ /**
+ * Gets the current value of the wasb_web_responses counter.
+ */
+ private long getCurrentWebResponses() {
+ return AzureMetricsTestUtil.getCurrentWebResponses(getInstrumentation());
+ }
+
+ /**
+ * Checks that the wasb_web_responses counter is at the given value.
+ * @param base The base value (before the operation of interest).
+ * @param expected The expected value for the operation of interest.
+ * @return The new base value now.
+ */
+ private long assertWebResponsesEquals(long base, long expected) {
+ assertCounter(WASB_WEB_RESPONSES, base + expected, getMyMetrics());
+ return base + expected;
+ }
+
+ private void assertNoErrors() {
+ assertEquals(0, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_CLIENT_ERRORS));
+ assertEquals(0, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_SERVER_ERRORS));
+ }
+
+ /**
+ * Checks that the wasb_web_responses counter is in the given range.
+ * @param base The base value (before the operation of interest).
+ * @param inclusiveLowerLimit The lower limit for what it should increase by.
+ * @param inclusiveUpperLimit The upper limit for what it should increase by.
+ * @return The new base value now.
+ */
+ private long assertWebResponsesInRange(long base,
+ long inclusiveLowerLimit,
+ long inclusiveUpperLimit) {
+ long currentResponses = getCurrentWebResponses();
+ long justOperation = currentResponses - base;
+ assertTrue(String.format(
+ "Web responses expected in range [%d, %d], but was %d.",
+ inclusiveLowerLimit, inclusiveUpperLimit, justOperation),
+ justOperation >= inclusiveLowerLimit &&
+ justOperation <= inclusiveUpperLimit);
+ return currentResponses;
+ }
+
+ /**
+ * Gets the metrics for the file system object.
+ * @return The metrics record.
+ */
+ private MetricsRecordBuilder getMyMetrics() {
+ return getMetrics(getInstrumentation());
+ }
+
+ private AzureFileSystemInstrumentation getInstrumentation() {
+ return getFileSystem().getInstrumentation();
+ }
+
+ /**
+ * A matcher class for asserting that we got a tag with a given
+ * value.
+ */
+ private static class TagMatcher extends TagExistsMatcher {
+ private final String tagValue;
+
+ public TagMatcher(String tagName, String tagValue) {
+ super(tagName);
+ this.tagValue = tagValue;
+ }
+
+ @Override
+ public boolean matches(MetricsTag toMatch) {
+ return toMatch.value().equals(tagValue);
+ }
+
+ @Override
+ public void describeTo(Description desc) {
+ super.describeTo(desc);
+ desc.appendText(" with value " + tagValue);
+ }
+ }
+
+ /**
+ * A matcher class for asserting that we got a tag with any value.
+ */
+ private static class TagExistsMatcher extends BaseMatcher<MetricsTag> {
+ private final String tagName;
+
+ public TagExistsMatcher(String tagName) {
+ this.tagName = tagName;
+ }
+
+ @Override
+ public boolean matches(Object toMatch) {
+ MetricsTag asTag = (MetricsTag)toMatch;
+ return asTag.name().equals(tagName) && matches(asTag);
+ }
+
+ protected boolean matches(MetricsTag toMatch) {
+ return true;
+ }
+
+ @Override
+ public void describeTo(Description desc) {
+ desc.appendText("Has tag " + tagName);
+ }
+ }
+
+ /**
+ * A matcher class for asserting that a long value is in a
+ * given range.
+ */
+ private static class InRange extends BaseMatcher<Long> {
+ private final long inclusiveLowerLimit;
+ private final long inclusiveUpperLimit;
+ private long obtained;
+
+ public InRange(long inclusiveLowerLimit, long inclusiveUpperLimit) {
+ this.inclusiveLowerLimit = inclusiveLowerLimit;
+ this.inclusiveUpperLimit = inclusiveUpperLimit;
+ }
+
+ @Override
+ public boolean matches(Object number) {
+ obtained = (Long)number;
+ return obtained >= inclusiveLowerLimit &&
+ obtained <= inclusiveUpperLimit;
+ }
+
+ @Override
+ public void describeTo(Description description) {
+ description.appendText("Between " + inclusiveLowerLimit +
+ " and " + inclusiveUpperLimit + " inclusively");
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/AzureTestConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/AzureTestConstants.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/AzureTestConstants.java
new file mode 100644
index 0000000..0b72f06
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/AzureTestConstants.java
@@ -0,0 +1,180 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure.integration;
+
+import org.apache.hadoop.fs.Path;
+
+/**
+ * Constants for the Azure tests.
+ */
+public interface AzureTestConstants {
+
+ /**
+ * Prefix for any cross-filesystem scale test options.
+ */
+ String SCALE_TEST = "scale.test.";
+
+ /**
+ * Prefix for wasb-specific scale tests.
+ */
+ String AZURE_SCALE_TEST = "fs.azure.scale.test.";
+
+ /**
+ * Prefix for FS wasb tests.
+ */
+ String TEST_FS_WASB = "test.fs.azure.";
+
+ /**
+ * Name of the test filesystem.
+ */
+ String TEST_FS_WASB_NAME = TEST_FS_WASB + "name";
+
+ /**
+ * Tell tests that they are being executed in parallel: {@value}.
+ */
+ String KEY_PARALLEL_TEST_EXECUTION = "test.parallel.execution";
+
+ /**
+ * A property set to true in maven if scale tests are enabled: {@value}.
+ */
+ String KEY_SCALE_TESTS_ENABLED = AZURE_SCALE_TEST + "enabled";
+
+ /**
+ * The number of operations to perform: {@value}.
+ */
+ String KEY_OPERATION_COUNT = SCALE_TEST + "operation.count";
+
+ /**
+ * The number of directory operations to perform: {@value}.
+ */
+ String KEY_DIRECTORY_COUNT = SCALE_TEST + "directory.count";
+
+ /**
+ * The readahead buffer: {@value}.
+ */
+ String KEY_READ_BUFFER_SIZE = AZURE_SCALE_TEST + "read.buffer.size";
+
+ int DEFAULT_READ_BUFFER_SIZE = 16384;
+
+ /**
+ * Key for a multi MB test file: {@value}.
+ */
+ String KEY_CSVTEST_FILE = AZURE_SCALE_TEST + "csvfile";
+
+ /**
+ * Default path for the multi MB test file: {@value}.
+ */
+ String DEFAULT_CSVTEST_FILE = "wasb://datasets@azuremlsampleexperiments.blob.core.windows.net/network_intrusion_detection.csv";
+
+ /**
+ * Name of the property to define the timeout for scale tests: {@value}.
+ * Measured in seconds.
+ */
+ String KEY_TEST_TIMEOUT = AZURE_SCALE_TEST + "timeout";
+
+ /**
+ * Name of the property to define the file size for the huge file
+ * tests: {@value}.
+ * Measured in KB; a suffix like "M", or "G" will change the unit.
+ */
+ String KEY_HUGE_FILESIZE = AZURE_SCALE_TEST + "huge.filesize";
+
+ /**
+ * Name of the property to define the partition size for the huge file
+ * tests: {@value}.
+ * Measured in bytes; a suffix such as "K", "M", or "G" changes the unit.
+ */
+ String KEY_HUGE_PARTITION_SIZE = AZURE_SCALE_TEST + "huge.partitionsize";
+
+ /**
+ * The default huge file size is small; full 5GB+ scale tests are
+ * something to run in long test runs on cloud VMs. {@value}.
+ */
+ String DEFAULT_HUGE_FILESIZE = "10M";
+
+ /**
+ * The default number of operations to perform: {@value}.
+ */
+ long DEFAULT_OPERATION_COUNT = 2005;
+
+ /**
+ * Default number of directories to create when performing
+ * directory performance/scale tests.
+ */
+ int DEFAULT_DIRECTORY_COUNT = 2;
+
+ /**
+ * Default policy on scale tests: {@value}.
+ */
+ boolean DEFAULT_SCALE_TESTS_ENABLED = false;
+
+ /**
+ * Fork ID passed down from maven if the test is running in parallel.
+ */
+ String TEST_UNIQUE_FORK_ID = "test.unique.fork.id";
+
+ /**
+ * Timeout in Milliseconds for standard tests: {@value}.
+ */
+ int AZURE_TEST_TIMEOUT = 10 * 60 * 1000;
+
+ /**
+ * Timeout in Seconds for Scale Tests: {@value}.
+ */
+ int SCALE_TEST_TIMEOUT_SECONDS = 30 * 60;
+
+ int SCALE_TEST_TIMEOUT_MILLIS = SCALE_TEST_TIMEOUT_SECONDS * 1000;
+
+ String ACCOUNT_KEY_PROPERTY_NAME
+ = "fs.azure.account.key.";
+ String SAS_PROPERTY_NAME = "fs.azure.sas.";
+ String TEST_CONFIGURATION_FILE_NAME = "azure-test.xml";
+ String TEST_ACCOUNT_NAME_PROPERTY_NAME
+ = "fs.azure.test.account.name";
+ String MOCK_ACCOUNT_NAME
+ = "mockAccount.blob.core.windows.net";
+ String MOCK_CONTAINER_NAME = "mockContainer";
+ String WASB_AUTHORITY_DELIMITER = "@";
+ String WASB_SCHEME = "wasb";
+ String PATH_DELIMITER = "/";
+ String AZURE_ROOT_CONTAINER = "$root";
+ String MOCK_WASB_URI = "wasb://" + MOCK_CONTAINER_NAME
+ + WASB_AUTHORITY_DELIMITER + MOCK_ACCOUNT_NAME + "/";
+ String USE_EMULATOR_PROPERTY_NAME
+ = "fs.azure.test.emulator";
+
+ String KEY_DISABLE_THROTTLING
+ = "fs.azure.disable.bandwidth.throttling";
+ String KEY_READ_TOLERATE_CONCURRENT_APPEND
+ = "fs.azure.io.read.tolerate.concurrent.append";
+ /**
+ * Path for page blobs: {@value}.
+ */
+ String DEFAULT_PAGE_BLOB_DIRECTORY = "pageBlobs";
+
+ String DEFAULT_ATOMIC_RENAME_DIRECTORIES
+ = "/atomicRenameDir1,/atomicRenameDir2";
+
+ /**
+ * Base directory for page blobs.
+ */
+ Path PAGE_BLOB_DIR = new Path("/" + DEFAULT_PAGE_BLOB_DIRECTORY);
+}
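
The size-with-suffix convention for KEY_HUGE_FILESIZE and
KEY_HUGE_PARTITION_SIZE is that of Hadoop's TraditionalBinaryPrefix, which
the test utilities in this patch use for parsing. A short sketch of the
expected values (class name is illustrative only):

    import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;

    public class SuffixDemo {
      public static void main(String[] args) {
        // no suffix: plain bytes
        System.out.println(TraditionalBinaryPrefix.string2long("4096")); // 4096
        // binary suffixes multiply by powers of 1024
        System.out.println(TraditionalBinaryPrefix.string2long("10M"));  // 10485760
        System.out.println(TraditionalBinaryPrefix.string2long("1G"));   // 1073741824
      }
    }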
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/AzureTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/AzureTestUtils.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/AzureTestUtils.java
new file mode 100644
index 0000000..2fbbcd1
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/AzureTestUtils.java
@@ -0,0 +1,479 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure.integration;
+
+import java.io.IOException;
+import java.net.URI;
+import java.util.List;
+
+import org.junit.Assert;
+import org.junit.Assume;
+import org.junit.internal.AssumptionViolatedException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.azure.AzureBlobStorageTestAccount;
+import org.apache.hadoop.fs.azure.NativeAzureFileSystem;
+
+import static org.apache.hadoop.fs.azure.integration.AzureTestConstants.*;
+import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
+import static org.apache.hadoop.test.MetricsAsserts.getLongGauge;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
+
+/**
+ * Utilities for the Azure tests. Based on {@code S3ATestUtils}, so
+ * (initially) has some unused methods.
+ */
+public final class AzureTestUtils extends Assert {
+ private static final Logger LOG = LoggerFactory.getLogger(
+ AzureTestUtils.class);
+
+ /**
+ * Value to set a system property to (in maven) to declare that
+ * a property has been unset.
+ */
+ public static final String UNSET_PROPERTY = "unset";
+
+ /**
+ * Create the test filesystem.
+ *
+ * If the test.fs.azure.name property is not set, this will
+ * raise a JUnit assumption exception.
+ *
+ * @param conf configuration
+ * @return the FS
+ * @throws IOException IO Problems
+ * @throws AssumptionViolatedException if the FS is not named
+ */
+ public static NativeAzureFileSystem createTestFileSystem(Configuration conf)
+ throws IOException {
+
+ String fsname = conf.getTrimmed(TEST_FS_WASB_NAME, "");
+
+ boolean liveTest = !StringUtils.isEmpty(fsname);
+ URI testURI = null;
+ if (liveTest) {
+ testURI = URI.create(fsname);
+ liveTest = testURI.getScheme().equals(WASB_SCHEME);
+ }
+ if (!liveTest) {
+ // Skip the test
+ throw new AssumptionViolatedException(
+ "No test filesystem in " + TEST_FS_WASB_NAME);
+ }
+ NativeAzureFileSystem fs1 = new NativeAzureFileSystem();
+ fs1.initialize(testURI, conf);
+ return fs1;
+ }
+
+ /**
+ * Create a file context for tests.
+ *
+ * If the test.fs.azure.name property is not set, this will
+ * raise a JUnit assumption exception, skipping the test.
+ * @param conf configuration
+ * @return the FS
+ * @throws IOException IO Problems
+ * @throws AssumptionViolatedException if the FS is not named
+ */
+ public static FileContext createTestFileContext(Configuration conf)
+ throws IOException {
+ String fsname = conf.getTrimmed(TEST_FS_WASB_NAME, "");
+
+ boolean liveTest = !StringUtils.isEmpty(fsname);
+ URI testURI = null;
+ if (liveTest) {
+ testURI = URI.create(fsname);
+ liveTest = testURI.getScheme().equals(WASB_SCHEME);
+ }
+ if (!liveTest) {
+ // Skip the test
+ throw new AssumptionViolatedException("No test filesystem in "
+ + TEST_FS_WASB_NAME);
+ }
+ FileContext fc = FileContext.getFileContext(testURI, conf);
+ return fc;
+ }
+
+ /**
+ * Get a long test property.
+ * <ol>
+ * <li>Look up configuration value (which can pick up core-default.xml),
+ * using {@code defVal} as the default value (if conf != null).
+ * </li>
+ * <li>Fetch the system property.</li>
+ * <li>If the system property is not empty or "(unset)":
+ * it overrides the conf value.
+ * </li>
+ * </ol>
+ * This puts the build properties in charge of everything. It's not a
+ * perfect design; having maven set properties based on a file, as ant let
+ * you do, is better for customization.
+ *
+ * As to why there's a special "unset" value, see
+ * <a href="http://stackoverflow.com/questions/7773134/null-versus-empty-arguments-in-maven">
+ * this Stack Overflow question</a>.
+ * @param conf config: may be null
+ * @param key key to look up
+ * @param defVal default value
+ * @return the evaluated test property.
+ */
+ public static long getTestPropertyLong(Configuration conf,
+ String key, long defVal) {
+ return Long.valueOf(
+ getTestProperty(conf, key, Long.toString(defVal)));
+ }
+
+ /**
+ * Get a test property value in bytes, using k, m, g, t, p, e suffixes,
+ * parsed with
+ * {@link org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix#string2long(String)}.
+ * Lookup algorithm as described in
+ * {@link #getTestPropertyLong(Configuration, String, long)}.
+ * @param conf config: may be null
+ * @param key key to look up
+ * @param defVal default value
+ * @return the evaluated test property.
+ */
+ public static long getTestPropertyBytes(Configuration conf,
+ String key, String defVal) {
+ return org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix
+ .string2long(getTestProperty(conf, key, defVal));
+ }
+
+ /**
+ * Get an integer test property; algorithm described in
+ * {@link #getTestPropertyLong(Configuration, String, long)}.
+ * @param key key to look up
+ * @param defVal default value
+ * @return the evaluated test property.
+ */
+ public static int getTestPropertyInt(Configuration conf,
+ String key, int defVal) {
+ return (int) getTestPropertyLong(conf, key, defVal);
+ }
+
+ /**
+ * Get a boolean test property; algorithm described in
+ * {@link #getTestPropertyLong(Configuration, String, long)}.
+ * @param key key to look up
+ * @param defVal default value
+ * @return the evaluated test property.
+ */
+ public static boolean getTestPropertyBool(Configuration conf,
+ String key,
+ boolean defVal) {
+ return Boolean.valueOf(
+ getTestProperty(conf, key, Boolean.toString(defVal)));
+ }
+
+ /**
+ * Get a string test property.
+ * <ol>
+ * <li>Look up configuration value (which can pick up core-default.xml),
+ * using {@code defVal} as the default value (if conf != null).
+ * </li>
+ * <li>Fetch the system property.</li>
+ * <li>If the system property is not empty or "(unset)":
+ * it overrides the conf value.
+ * </li>
+ * </ol>
+ * This puts the build properties in charge of everything. It's not a
+ * perfect design; having maven set properties based on a file, as ant let
+ * you do, is better for customization.
+ *
+ * As to why there's a special "unset" value, see
+ * <a href="http://stackoverflow.com/questions/7773134/null-versus-empty-arguments-in-maven">
+ * this Stack Overflow question</a>.
+ * @param conf config: may be null
+ * @param key key to look up
+ * @param defVal default value
+ * @return the evaluated test property.
+ */
+ public static String getTestProperty(Configuration conf,
+ String key,
+ String defVal) {
+ String confVal = conf != null
+ ? conf.getTrimmed(key, defVal)
+ : defVal;
+ String propval = System.getProperty(key);
+ return StringUtils.isNotEmpty(propval) && !UNSET_PROPERTY.equals(propval)
+ ? propval : confVal;
+ }
+
+ /**
+ * Verify the class of an exception. If it is not as expected, rethrow it.
+ * Comparison is on the exact class, not subclass-of inference as
+ * offered by {@code instanceof}.
+ * @param clazz the expected exception class
+ * @param ex the exception caught
+ * @return the exception, if it is of the expected class
+ * @throws Exception the exception passed in.
+ */
+ public static Exception verifyExceptionClass(Class clazz,
+ Exception ex)
+ throws Exception {
+ if (!(ex.getClass().equals(clazz))) {
+ throw ex;
+ }
+ return ex;
+ }
+
+ /**
+ * Turn off FS Caching: use if a filesystem with different options from
+ * the default is required.
+ * @param conf configuration to patch
+ */
+ public static void disableFilesystemCaching(Configuration conf) {
+ conf.setBoolean("fs.wasb.impl.disable.cache", true);
+ }
+
+ /**
+ * Create a test path, using the value of
+ * {@link AzureTestConstants#TEST_UNIQUE_FORK_ID} if it is set.
+ * @param defVal default value
+ * @return a path
+ */
+ public static Path createTestPath(Path defVal) {
+ String testUniqueForkId = System.getProperty(
+ AzureTestConstants.TEST_UNIQUE_FORK_ID);
+ return testUniqueForkId == null
+ ? defVal
+ : new Path("/" + testUniqueForkId, "test");
+ }
+
+ /**
+ * Create a test page blob path using the value of
+ * {@link AzureTestConstants#TEST_UNIQUE_FORK_ID} if it is set.
+ * @param fs filesystem whose URI qualifies the path
+ * @param filename filename at the end of the path
+ * @return an absolute path
+ */
+ public static Path blobPathForTests(FileSystem fs, String filename) {
+ String testUniqueForkId = System.getProperty(
+ AzureTestConstants.TEST_UNIQUE_FORK_ID);
+ return fs.makeQualified(new Path(PAGE_BLOB_DIR,
+ testUniqueForkId == null
+ ? filename
+ : (testUniqueForkId + "/" + filename)));
+ }
+
+ /**
+ * Create a test path using the value of
+ * {@link AzureTestConstants#TEST_UNIQUE_FORK_ID} if it is set.
+ * @param fs filesystem whose URI qualifies the path
+ * @param filename filename at the end of the path
+ * @return an absolute path
+ */
+ public static Path pathForTests(FileSystem fs, String filename) {
+ String testUniqueForkId = System.getProperty(
+ AzureTestConstants.TEST_UNIQUE_FORK_ID);
+ return fs.makeQualified(new Path(
+ testUniqueForkId == null
+ ? ("/test/" + filename)
+ : (testUniqueForkId + "/" + filename)));
+ }
+
+ /**
+ * Get a unique fork ID.
+ * Returns a default value for non-parallel tests.
+ * @return a string unique for all test VMs running in this maven build.
+ */
+ public static String getForkID() {
+ return System.getProperty(
+ AzureTestConstants.TEST_UNIQUE_FORK_ID, "fork-1");
+ }
+
+ /**
+ * Flag to indicate that this test is being executed in parallel.
+ * This is used by some of the scale tests to validate test time expectations.
+ * @return true if the build indicates this test is being run in parallel.
+ */
+ public static boolean isParallelExecution() {
+ return Boolean.getBoolean(KEY_PARALLEL_TEST_EXECUTION);
+ }
+
+ /**
+ * Asserts that {@code obj} is an instance of {@code expectedClass} using a
+ * descriptive assertion message.
+ * @param expectedClass class
+ * @param obj object to check
+ */
+ public static void assertInstanceOf(Class<?> expectedClass, Object obj) {
+ Assert.assertTrue(String.format("Expected instance of class %s, but is %s.",
+ expectedClass, obj.getClass()),
+ expectedClass.isAssignableFrom(obj.getClass()));
+ }
+
+ /**
+ * Builds a comma-separated list of class names.
+ * @param classes list of classes
+ * @return comma-separated list of class names
+ */
+ public static <T extends Class<?>> String buildClassListString(
+ List<T> classes) {
+ StringBuilder sb = new StringBuilder();
+ for (int i = 0; i < classes.size(); ++i) {
+ if (i > 0) {
+ sb.append(',');
+ }
+ sb.append(classes.get(i).getName());
+ }
+ return sb.toString();
+ }
+
+ /**
+ * This class should not be instantiated.
+ */
+ private AzureTestUtils() {
+ }
+
+ /**
+ * Assert that a configuration option matches the expected value.
+ * @param conf configuration
+ * @param key option key
+ * @param expected expected value
+ */
+ public static void assertOptionEquals(Configuration conf,
+ String key,
+ String expected) {
+ assertEquals("Value of " + key, expected, conf.get(key));
+ }
+
+ /**
+ * Assume that a condition is met. If not: log at WARN and
+ * then throw an {@link AssumptionViolatedException}.
+ * @param message message in an assumption
+ * @param condition condition to probe
+ */
+ public static void assume(String message, boolean condition) {
+ if (!condition) {
+ LOG.warn(message);
+ }
+ Assume.assumeTrue(message, condition);
+ }
+
+ /**
+ * Gets the current value of the given gauge.
+ * @param fs filesystem
+ * @param gaugeName gauge name
+ * @return the gauge value
+ */
+ public static long getLongGaugeValue(NativeAzureFileSystem fs,
+ String gaugeName) {
+ return getLongGauge(gaugeName, getMetrics(fs.getInstrumentation()));
+ }
+
+ /**
+ * Gets the current value of the given counter.
+ * @param fs filesystem
+ * @param counterName counter name
+ * @return the counter value
+ */
+ public static long getLongCounterValue(NativeAzureFileSystem fs,
+ String counterName) {
+ return getLongCounter(counterName, getMetrics(fs.getInstrumentation()));
+ }
+
+ /**
+ * Delete a path, catching any exception and downgrading to a log message.
+ * @param fs filesystem
+ * @param path path to delete
+ * @param recursive recursive delete?
+ * @throws IOException IO failure.
+ */
+ public static void deleteQuietly(FileSystem fs,
+ Path path,
+ boolean recursive) throws IOException {
+ if (fs != null && path != null) {
+ try {
+ fs.delete(path, recursive);
+ } catch (IOException e) {
+ LOG.warn("When deleting {}", path, e);
+ }
+ }
+ }
+
+ /**
+ * Clean up the test account if it is non-null; returns null so the
+ * caller can assign the result back to the field holding the account.
+ * @param testAccount test account to clean up
+ * @return null, always
+ * @throws Exception cleanup problems
+ */
+ public static AzureBlobStorageTestAccount cleanup(
+ AzureBlobStorageTestAccount testAccount) throws Exception {
+ if (testAccount != null) {
+ testAccount.cleanup();
+ }
+ return null;
+ }
+
+ /**
+ * Clean up the test account; any thrown exceptions are caught and
+ * logged.
+ * @param testAccount test account
+ * @return null, so that any fields can be reset.
+ */
+ public static AzureBlobStorageTestAccount cleanupTestAccount(
+ AzureBlobStorageTestAccount testAccount) {
+ if (testAccount != null) {
+ try {
+ testAccount.cleanup();
+ } catch (Exception e) {
+ LOG.error("While cleaning up test account: ", e);
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Assume that the scale tests are enabled by the relevant system property.
+ * @param conf configuration to probe for the enabling property
+ */
+ public static void assumeScaleTestsEnabled(Configuration conf) {
+ boolean enabled = getTestPropertyBool(
+ conf,
+ KEY_SCALE_TESTS_ENABLED,
+ DEFAULT_SCALE_TESTS_ENABLED);
+ assume("Scale test disabled: to enable set property "
+ + KEY_SCALE_TESTS_ENABLED,
+ enabled);
+ }
+}
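
To make the configuration-versus-system-property precedence of
getTestProperty() concrete, a hypothetical walk-through; the key is the
value of KEY_HUGE_FILESIZE from AzureTestConstants, the class name is
illustrative, and the assertions mirror the logic above:

    import org.apache.hadoop.conf.Configuration;
    import static org.apache.hadoop.fs.azure.integration.AzureTestUtils.getTestProperty;

    public class PrecedenceDemo {
      public static void main(String[] args) {
        String key = "fs.azure.scale.test.huge.filesize";
        Configuration conf = new Configuration();
        conf.set(key, "128M");

        // no system property set: the configuration value wins
        assert getTestProperty(conf, key, "10M").equals("128M");

        // a non-empty system property overrides the configuration...
        System.setProperty(key, "1G");
        assert getTestProperty(conf, key, "10M").equals("1G");

        // ...unless it is the marker "unset", which restores the conf value
        System.setProperty(key, "unset");
        assert getTestProperty(conf, key, "10M").equals("128M");
      }
    }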
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/CleanupTestContainers.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/CleanupTestContainers.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/CleanupTestContainers.java
new file mode 100644
index 0000000..059a8c4
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/CleanupTestContainers.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure.integration;
+
+import java.util.EnumSet;
+
+import com.microsoft.azure.storage.CloudStorageAccount;
+import com.microsoft.azure.storage.blob.CloudBlobClient;
+import com.microsoft.azure.storage.blob.CloudBlobContainer;
+import org.junit.Test;
+
+import org.apache.hadoop.fs.azure.AbstractWasbTestBase;
+import org.apache.hadoop.fs.azure.AzureBlobStorageTestAccount;
+
+/**
+ * This looks like a test, but it is really a command to invoke to
+ * clean up containers created in other test runs.
+ */
+public class CleanupTestContainers extends AbstractWasbTestBase {
+
+ private static final String CONTAINER_PREFIX = "wasbtests-";
+
+ @Override
+ protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+ return AzureBlobStorageTestAccount.create(
+ "CleanupTestContainers",
+ EnumSet.noneOf(AzureBlobStorageTestAccount.CreateOptions.class),
+ createConfiguration(),
+ true);
+ }
+
+ @Test
+ public void testEnumContainers() throws Throwable {
+ describe("Enumerating all the WASB test containers");
+
+ int count = 0;
+ CloudStorageAccount storageAccount = getTestAccount().getRealAccount();
+ CloudBlobClient blobClient = storageAccount.createCloudBlobClient();
+ Iterable<CloudBlobContainer> containers
+ = blobClient.listContainers(CONTAINER_PREFIX);
+ for (CloudBlobContainer container : containers) {
+ count++;
+ LOG.info("Container {} URI {}",
+ container.getName(),
+ container.getUri());
+ }
+ LOG.info("Found {} test containers", count);
+ }
+
+ @Test
+ public void testDeleteContainers() throws Throwable {
+ describe("Delete all the WASB test containers");
+ int count = 0;
+ CloudStorageAccount storageAccount = getTestAccount().getRealAccount();
+ CloudBlobClient blobClient = storageAccount.createCloudBlobClient();
+ Iterable<CloudBlobContainer> containers
+ = blobClient.listContainers(CONTAINER_PREFIX);
+ for (CloudBlobContainer container : containers) {
+ LOG.info("Container {} URI {}",
+ container.getName(),
+ container.getUri());
+ if (container.deleteIfExists()) {
+ count++;
+ }
+ }
+ LOG.info("Deleted {} test containers", count);
+ }
+
+
+}
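
Since the cleanup is packaged as JUnit test cases, it can be invoked on demand like any other test (for example via Maven's -Dtest=CleanupTestContainers against a configured test account). The same enumerate-and-delete pattern also works outside the test harness; a minimal standalone sketch, assuming a storage connection string is passed on the command line (the class name is hypothetical):

import com.microsoft.azure.storage.CloudStorageAccount;
import com.microsoft.azure.storage.blob.CloudBlobClient;
import com.microsoft.azure.storage.blob.CloudBlobContainer;

public final class WasbTestContainerPurge {
  public static void main(String[] args) throws Exception {
    // args[0]: the storage account connection string
    CloudStorageAccount account = CloudStorageAccount.parse(args[0]);
    CloudBlobClient client = account.createCloudBlobClient();
    int deleted = 0;
    // only containers with the test prefix are touched
    for (CloudBlobContainer container : client.listContainers("wasbtests-")) {
      if (container.deleteIfExists()) {
        deleted++;
      }
    }
    System.out.println("Deleted " + deleted + " test containers");
  }
}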
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/ITestAzureHugeFiles.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/ITestAzureHugeFiles.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/ITestAzureHugeFiles.java
new file mode 100644
index 0000000..850aca1
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/ITestAzureHugeFiles.java
@@ -0,0 +1,456 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure.integration;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.Iterator;
+
+import org.junit.Assert;
+import org.junit.Assume;
+import org.junit.FixMethodOrder;
+import org.junit.Test;
+import org.junit.runners.MethodSorters;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StorageStatistics;
+import org.apache.hadoop.fs.azure.AzureBlobStorageTestAccount;
+import org.apache.hadoop.fs.azure.NativeAzureFileSystem;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.io.IOUtils;
+
+import static org.apache.hadoop.fs.azure.integration.AzureTestUtils.*;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.*;
+
+
+/**
+ * Scale test which creates a huge file.
+ *
+ * <b>Important:</b> the order in which these tests execute is fixed to
+ * alphabetical order. Test cases are numbered {@code test_123_} to impose
+ * an ordering based on the numbers.
+ *
+ * Having this ordering allows the tests to assume that the huge file
+ * exists. Even so: they should all have a {@link #assumeHugeFileExists()}
+ * check at the start, in case an individual test is executed.
+ *
+ * <b>Ignore checkstyle complaints about naming: we need a scheme with visible
+ * ordering.</b>
+ */
+@FixMethodOrder(MethodSorters.NAME_ASCENDING)
+public class ITestAzureHugeFiles extends AbstractAzureScaleTest {
+
+ private static final Logger LOG = LoggerFactory.getLogger(
+ ITestAzureHugeFiles.class);
+
+ private Path scaleTestDir;
+ private Path hugefile;
+ private Path hugefileRenamed;
+ private AzureBlobStorageTestAccount testAccountForCleanup;
+
+ private static final int UPLOAD_BLOCKSIZE = 64 * S_1K;
+ private static final byte[] SOURCE_DATA;
+
+ static {
+ SOURCE_DATA = dataset(UPLOAD_BLOCKSIZE, 0, S_256);
+ }
+
+ private Path testPath;
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ testPath = path("ITestAzureHugeFiles");
+ scaleTestDir = new Path(testPath, "scale");
+ hugefile = new Path(scaleTestDir, "hugefile");
+ hugefileRenamed = new Path(scaleTestDir, "hugefileRenamed");
+ }
+
+ /**
+ * Only clean up the test account (and delete the container) if the account
+ * is set in the field {@code testAccountForCleanup}.
+ * @throws Exception on any teardown failure.
+ */
+ @Override
+ public void tearDown() throws Exception {
+ testAccount = null;
+ super.tearDown();
+ if (testAccountForCleanup != null) {
+ cleanupTestAccount(testAccountForCleanup);
+ }
+ }
+
+ @Override
+ protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+ return AzureBlobStorageTestAccount.create(
+ "testazurehugefiles",
+ EnumSet.of(AzureBlobStorageTestAccount.CreateOptions.CreateContainer),
+ createConfiguration(),
+ true);
+ }
+
+ /**
+ * Stop the test-case teardown from deleting the test path.
+ * @throws IOException never
+ */
+ protected void deleteTestDirInTeardown() throws IOException {
+ // this is a no-op, so the test file is preserved.
+ // the last test in the suite does the teardown
+ }
+
+ protected void deleteHugeFile() throws IOException {
+ describe("Deleting %s", hugefile);
+ ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer();
+ getFileSystem().delete(hugefile, false);
+ timer.end("time to delete %s", hugefile);
+ }
+
+ /**
+ * Log how long an IOP took, by dividing the total time by the
+ * count of operations, printing in a human-readable form.
+ * @param operation operation being measured
+ * @param timer timing data
+ * @param count IOP count.
+ */
+ protected void logTimePerIOP(String operation,
+ ContractTestUtils.NanoTimer timer,
+ long count) {
+ LOG.info("Time per {}: {} nS",
+ operation, toHuman(timer.duration() / count));
+ }
+
+ /**
+ * Assume that the huge file exists, skip if not/empty.
+ * @return the file status
+ * @throws IOException IO failure
+ */
+ FileStatus assumeHugeFileExists() throws IOException {
+ assertPathExists(getFileSystem(), "huge file not created", hugefile);
+ try {
+ FileStatus status = getFileSystem().getFileStatus(hugefile);
+ Assume.assumeTrue("Not a file: " + status, status.isFile());
+ Assume.assumeTrue("File " + hugefile + " is empty", status.getLen() > 0);
+ return status;
+ } catch (FileNotFoundException e) {
+ skip("huge file not created: " + hugefile);
+ }
+ return null;
+ }
+
+ /**
+ * Log the storage statistics of the filesystem. As
+ * {@link NativeAzureFileSystem#getStorageStatistics()} collects more
+ * statistics, this output becomes more interesting.
+ */
+ private void logFSState() {
+ StorageStatistics statistics = getFileSystem().getStorageStatistics();
+ Iterator<StorageStatistics.LongStatistic> longStatistics
+ = statistics.getLongStatistics();
+ while (longStatistics.hasNext()) {
+ StorageStatistics.LongStatistic next = longStatistics.next();
+ LOG.info("{} = {}", next.getName(), next.getValue());
+ }
+ }
+
+ @Test
+ public void test_010_CreateHugeFile() throws IOException {
+ long filesize = getTestPropertyBytes(getConfiguration(),
+ KEY_HUGE_FILESIZE,
+ DEFAULT_HUGE_FILESIZE);
+ long filesizeMB = filesize / S_1M;
+
+ // clean up from any previous attempts
+ deleteHugeFile();
+
+ describe("Creating file %s of size %d MB", hugefile, filesizeMB);
+
+ // now do a check of available upload time, with a pessimistic bandwidth
+ // (that of remote upload tests). If the test times out, not only is
+ // the test outcome lost: the follow-on tests will continue and overlap
+ // with the still-ongoing upload, causing much confusion.
+/*
+ int timeout = getTestTimeoutSeconds();
+ // assume 1 MB/s upload bandwidth
+ int bandwidth = _1MB;
+ long uploadTime = filesize / bandwidth;
+ assertTrue(String.format("Timeout set in %s seconds is too low;" +
+ " estimating upload time of %d seconds at 1 MB/s." +
+ " Rerun tests with -D%s=%d",
+ timeout, uploadTime, KEY_TEST_TIMEOUT, uploadTime * 2),
+ uploadTime < timeout);
+*/
+ assertEquals("File size set in " + KEY_HUGE_FILESIZE + " = " + filesize
+ + " is not a multiple of " + UPLOAD_BLOCKSIZE,
+ 0, filesize % UPLOAD_BLOCKSIZE);
+
+ byte[] data = SOURCE_DATA;
+
+ long blocks = filesize / UPLOAD_BLOCKSIZE;
+ long blocksPerMB = S_1M / UPLOAD_BLOCKSIZE;
+
+ // perform the upload.
+ // there's lots of logging here, so that a tail -f on the output log
+ // can give a view of what is happening.
+ NativeAzureFileSystem fs = getFileSystem();
+
+ ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer();
+ long blocksPer10MB = blocksPerMB * 10;
+ fs.mkdirs(hugefile.getParent());
+ try (FSDataOutputStream out = fs.create(hugefile,
+ true,
+ UPLOAD_BLOCKSIZE,
+ null)) {
+ for (long block = 1; block <= blocks; block++) {
+ out.write(data);
+ long written = block * UPLOAD_BLOCKSIZE;
+ // every 10 MB, and when the upload reaches 100%, print some stats
+ if (block % blocksPer10MB == 0 || written == filesize) {
+ long percentage = written * 100 / filesize;
+ double elapsedTime = timer.elapsedTime() / NANOSEC;
+ double writtenMB = 1.0 * written / S_1M;
+ LOG.info(String.format("[%02d%%] Buffered %.2f MB out of %d MB;"
+ + " elapsedTime=%.2fs; write to buffer bandwidth=%.2f MB/s",
+ percentage,
+ writtenMB,
+ filesizeMB,
+ elapsedTime,
+ writtenMB / elapsedTime));
+ }
+ }
+ // now close the file
+ LOG.info("Closing stream {}", out);
+ ContractTestUtils.NanoTimer closeTimer
+ = new ContractTestUtils.NanoTimer();
+ out.close();
+ closeTimer.end("time to close() output stream");
+ }
+
+ timer.end("time to write %d MB in blocks of %d",
+ filesizeMB, UPLOAD_BLOCKSIZE);
+ logFSState();
+ bandwidth(timer, filesize);
+ ContractTestUtils.assertPathExists(fs, "Huge file", hugefile);
+ FileStatus status = fs.getFileStatus(hugefile);
+ ContractTestUtils.assertIsFile(hugefile, status);
+ assertEquals("File size in " + status, filesize, status.getLen());
+ }
+
+ @Test
+ public void test_040_PositionedReadHugeFile() throws Throwable {
+ assumeHugeFileExists();
+ describe("Positioned reads of file %s", hugefile);
+ NativeAzureFileSystem fs = getFileSystem();
+ FileStatus status = fs.getFileStatus(hugefile);
+ long filesize = status.getLen();
+ int ops = 0;
+ final int bufferSize = 8192;
+ byte[] buffer = new byte[bufferSize];
+ long eof = filesize - 1;
+
+ ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer();
+ ContractTestUtils.NanoTimer readAtByte0, readAtByte0Again, readAtEOF;
+ try (FSDataInputStream in = openDataFile()) {
+ readAtByte0 = new ContractTestUtils.NanoTimer();
+ in.readFully(0, buffer);
+ readAtByte0.end("time to read data at start of file");
+ ops++;
+
+ readAtEOF = new ContractTestUtils.NanoTimer();
+ in.readFully(eof - bufferSize, buffer);
+ readAtEOF.end("time to read data at end of file");
+ ops++;
+
+ readAtByte0Again = new ContractTestUtils.NanoTimer();
+ in.readFully(0, buffer);
+ readAtByte0Again.end("time to read data at start of file again");
+ ops++;
+ LOG.info("Final stream state: {}", in);
+ }
+ long mb = Math.max(filesize / S_1M, 1);
+
+ logFSState();
+ timer.end("time to performed positioned reads of %d MB ", mb);
+ LOG.info("Time per positioned read = {} nS",
+ toHuman(timer.nanosPerOperation(ops)));
+ }
+
+ protected FSDataInputStream openDataFile() throws IOException {
+ NanoTimer openTimer = new NanoTimer();
+ FSDataInputStream inputStream = getFileSystem().open(hugefile,
+ UPLOAD_BLOCKSIZE);
+ openTimer.end("open data file");
+ return inputStream;
+ }
+
+
+ /**
+ * Work out the bandwidth in bytes/second.
+ * @param timer timer measuring the duration
+ * @param bytes bytes
+ * @return the number of bytes/second of the recorded operation
+ */
+ public static double bandwidthInBytes(NanoTimer timer, long bytes) {
+ return bytes * NANOSEC / timer.duration();
+ }
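+
+ // Worked example for bandwidthInBytes() (illustrative, not from a
+ // test run): reading 64 MB in 2 seconds gives
+ // 67108864 bytes * 1.0e9 / 2.0e9 nS = 33554432 bytes/second,
+ // i.e. 32 MB/s.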
+
+ @Test
+ public void test_050_readHugeFile() throws Throwable {
+ assumeHugeFileExists();
+ describe("Reading %s", hugefile);
+ NativeAzureFileSystem fs = getFileSystem();
+ FileStatus status = fs.getFileStatus(hugefile);
+ long filesize = status.getLen();
+ long blocks = filesize / UPLOAD_BLOCKSIZE;
+ byte[] data = new byte[UPLOAD_BLOCKSIZE];
+
+ ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer();
+ try (FSDataInputStream in = openDataFile()) {
+ for (long block = 0; block < blocks; block++) {
+ in.readFully(data);
+ }
+ LOG.info("Final stream state: {}", in);
+ }
+
+ long mb = Math.max(filesize / S_1M, 1);
+ timer.end("time to read file of %d MB ", mb);
+ LOG.info("Time per MB to read = {} nS",
+ toHuman(timer.nanosPerOperation(mb)));
+ bandwidth(timer, filesize);
+ logFSState();
+ }
+
+ @Test
+ public void test_060_openAndReadWholeFileBlocks() throws Throwable {
+ FileStatus status = assumeHugeFileExists();
+ int blockSize = S_1M;
+ describe("Open the test file and read it in blocks of size %d",
+ blockSize);
+ long len = status.getLen();
+ FSDataInputStream in = openDataFile();
+ NanoTimer timer2 = null;
+ long blockCount = 0;
+ long totalToRead = 0;
+ int resetCount = 0;
+ try {
+ byte[] block = new byte[blockSize];
+ timer2 = new NanoTimer();
+ long count = 0;
+ // implicitly rounding down here
+ blockCount = len / blockSize;
+ totalToRead = blockCount * blockSize;
+ long minimumBandwidth = S_128K;
+ int maxResetCount = 4;
+ resetCount = 0;
+ for (long i = 0; i < blockCount; i++) {
+ int offset = 0;
+ int remaining = blockSize;
+ long blockId = i + 1;
+ NanoTimer blockTimer = new NanoTimer();
+ int reads = 0;
+ while (remaining > 0) {
+ NanoTimer readTimer = new NanoTimer();
+ int bytesRead = in.read(block, offset, remaining);
+ reads++;
+ if (bytesRead < 0) {
+ // EOF: stop reading before the block is complete
+ break;
+ }
+ remaining -= bytesRead;
+ offset += bytesRead;
+ count += bytesRead;
+ readTimer.end();
+ if (bytesRead != 0) {
+ LOG.debug("Bytes in read #{}: {} , block bytes: {},"
+ + " remaining in block: {}"
+ + " duration={} nS; ns/byte: {}, bandwidth={} MB/s",
+ reads, bytesRead, blockSize - remaining, remaining,
+ readTimer.duration(),
+ readTimer.nanosPerOperation(bytesRead),
+ readTimer.bandwidthDescription(bytesRead));
+ } else {
+ LOG.warn("0 bytes returned by read() operation #{}", reads);
+ }
+ }
+ blockTimer.end("Reading block %d in %d reads", blockId, reads);
+ String bw = blockTimer.bandwidthDescription(blockSize);
+ LOG.info("Bandwidth of block {}: {} MB/s: ", blockId, bw);
+ if (bandwidthInBytes(blockTimer, blockSize) < minimumBandwidth) {
+ LOG.warn("Bandwidth {} too low on block {}: resetting connection",
+ bw, blockId);
+ Assert.assertTrue("Bandwidth of " + bw + " too low after "
+ + resetCount + " attempts", resetCount <= maxResetCount);
+ resetCount++;
+ // reset the connection
+ }
+ }
+ } finally {
+ IOUtils.closeStream(in);
+ }
+ timer2.end("Time to read %d bytes in %d blocks", totalToRead, blockCount);
+ LOG.info("Overall Bandwidth {} MB/s; reset connections {}",
+ timer2.bandwidth(totalToRead), resetCount);
+ }
+
+ @Test
+ public void test_100_renameHugeFile() throws Throwable {
+ assumeHugeFileExists();
+ describe("renaming %s to %s", hugefile, hugefileRenamed);
+ NativeAzureFileSystem fs = getFileSystem();
+ FileStatus status = fs.getFileStatus(hugefile);
+ long filesize = status.getLen();
+ fs.delete(hugefileRenamed, false);
+ ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer();
+ fs.rename(hugefile, hugefileRenamed);
+ long mb = Math.max(filesize / S_1M, 1);
+ timer.end("time to rename file of %d MB", mb);
+ LOG.info("Time per MB to rename = {} nS",
+ toHuman(timer.nanosPerOperation(mb)));
+ bandwidth(timer, filesize);
+ logFSState();
+ FileStatus destFileStatus = fs.getFileStatus(hugefileRenamed);
+ assertEquals(filesize, destFileStatus.getLen());
+
+ // rename back
+ ContractTestUtils.NanoTimer timer2 = new ContractTestUtils.NanoTimer();
+ fs.rename(hugefileRenamed, hugefile);
+ timer2.end("Renaming back");
+ LOG.info("Time per MB to rename = {} nS",
+ toHuman(timer2.nanosPerOperation(mb)));
+ bandwidth(timer2, filesize);
+ }
+
+ @Test
+ public void test_999_deleteHugeFiles() throws IOException {
+ // mark the test account for cleanup after this test
+ testAccountForCleanup = testAccount;
+ deleteHugeFile();
+ ContractTestUtils.NanoTimer timer2 = new ContractTestUtils.NanoTimer();
+ NativeAzureFileSystem fs = getFileSystem();
+ fs.delete(hugefileRenamed, false);
+ timer2.end("time to delete %s", hugefileRenamed);
+ rm(fs, testPath, true, false);
+ assertPathDoesNotExist(fs, "deleted huge file", testPath);
+ }
+
+}
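
The fixed-ordering technique this suite relies on (alphabetical method sorting plus numbered test names) is plain JUnit 4, and can be reused wherever a multi-stage scale test needs the output of earlier stages. A minimal sketch of the pattern, with illustrative names and empty bodies:

import org.junit.FixMethodOrder;
import org.junit.Test;
import org.junit.runners.MethodSorters;

@FixMethodOrder(MethodSorters.NAME_ASCENDING)
public class ExampleOrderedSuite {

  @Test
  public void test_010_create() {
    // runs first: create the expensive artifact
  }

  @Test
  public void test_020_read() {
    // runs second: may assume test_010 ran, but should still
    // probe for the artifact in case it is executed alone
  }

  @Test
  public void test_999_cleanup() {
    // runs last: delete the artifact
  }
}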
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/Sizes.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/Sizes.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/Sizes.java
new file mode 100644
index 0000000..92b10cf
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/Sizes.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure.integration;
+
+/**
+ * Sizes of data.
+ * Checkstyle doesn't like the naming scheme or the fact that it's an interface.
+ */
+public interface Sizes {
+
+ int S_256 = 256;
+ int S_512 = 512;
+ int S_1K = 1024;
+ int S_4K = 4 * S_1K;
+ int S_8K = 8 * S_1K;
+ int S_16K = 16 * S_1K;
+ int S_32K = 32 * S_1K;
+ int S_64K = 64 * S_1K;
+ int S_128K = 128 * S_1K;
+ int S_256K = 256 * S_1K;
+ int S_1M = S_1K * S_1K;
+ int S_2M = 2 * S_1M;
+ int S_5M = 5 * S_1M;
+ int S_10M = 10 * S_1M;
+ double NANOSEC = 1.0e9;
+
+}
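
As a constants interface, Sizes can be pulled in either with "implements Sizes" or a static import. A short illustration of both the size constants and the NANOSEC conversion factor (the class name is hypothetical):

import static org.apache.hadoop.fs.azure.integration.Sizes.*;

public class SizesDemo {
  public static void main(String[] args) {
    byte[] block = new byte[S_64K];            // 65536 bytes
    long blocksPerGB = (1024L * S_1M) / S_64K; // 16384 blocks per gigabyte
    double seconds = 1.5e9 / NANOSEC;          // 1.5e9 nS is 1.5 seconds
    System.out.println(block.length + " " + blocksPerGB + " " + seconds);
  }
}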
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/ITestAzureFileSystemInstrumentation.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/ITestAzureFileSystemInstrumentation.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/ITestAzureFileSystemInstrumentation.java
new file mode 100644
index 0000000..60e24ee
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/ITestAzureFileSystemInstrumentation.java
@@ -0,0 +1,586 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure.metrics;
+
+import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_CLIENT_ERRORS;
+import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_DIRECTORIES_CREATED;
+import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_DOWNLOAD_LATENCY;
+import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_DOWNLOAD_RATE;
+import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_FILES_CREATED;
+import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_FILES_DELETED;
+import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_SERVER_ERRORS;
+import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_UPLOAD_LATENCY;
+import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_UPLOAD_RATE;
+import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_WEB_RESPONSES;
+import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.mockito.Matchers.argThat;
+import static org.mockito.Mockito.verify;
+
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.Arrays;
+import java.util.Date;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.azure.AbstractWasbTestBase;
+import org.apache.hadoop.fs.azure.AzureBlobStorageTestAccount;
+import org.apache.hadoop.fs.azure.AzureException;
+import org.apache.hadoop.fs.azure.AzureNativeFileSystemStore;
+import org.apache.hadoop.fs.azure.NativeAzureFileSystem;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.metrics2.MetricsTag;
+import org.hamcrest.BaseMatcher;
+import org.hamcrest.Description;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Instrumentation test: changes the state of the filesystem and verifies
+ * that the metrics are consistent.
+ */
+public class ITestAzureFileSystemInstrumentation extends AbstractWasbTestBase {
+
+ protected static final Logger LOG =
+ LoggerFactory.getLogger(ITestAzureFileSystemInstrumentation.class);
+
+ @Override
+ protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+ return AzureBlobStorageTestAccount.create();
+ }
+
+ @Test
+ public void testMetricTags() throws Exception {
+ String accountName =
+ getTestAccount().getRealAccount().getBlobEndpoint()
+ .getAuthority();
+ String containerName =
+ getTestAccount().getRealContainer().getName();
+ MetricsRecordBuilder myMetrics = getMyMetrics();
+ verify(myMetrics).add(argThat(
+ new TagMatcher("accountName", accountName)
+ ));
+ verify(myMetrics).add(argThat(
+ new TagMatcher("containerName", containerName)
+ ));
+ verify(myMetrics).add(argThat(
+ new TagMatcher("Context", "azureFileSystem")
+ ));
+ verify(myMetrics).add(argThat(
+ new TagExistsMatcher("wasbFileSystemId")
+ ));
+ }
+
+
+ @Test
+ public void testMetricsOnMkdirList() throws Exception {
+ long base = getBaseWebResponses();
+
+ // Create a directory
+ assertTrue(fs.mkdirs(new Path("a")));
+ // At the time of writing
+ // getAncestor uses 2 calls for each folder level /user/<name>/a
+ // plus 1 call made by checkContainer
+ // mkdir checks the hierarchy with 2 calls per level
+ // mkdirs calls storeEmptyDir to create the empty folder, which makes 5 calls
+ // For a total of 7 + 6 + 5 = 18 web responses
+ base = assertWebResponsesInRange(base, 1, 18);
+ assertEquals(1,
+ AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_DIRECTORIES_CREATED));
+
+ // List the root contents
+ assertEquals(1, getFileSystem().listStatus(new Path("/")).length);
+ base = assertWebResponsesEquals(base, 1);
+
+ assertNoErrors();
+ }
+
+ private BandwidthGaugeUpdater getBandwidthGaugeUpdater() {
+ NativeAzureFileSystem azureFs = (NativeAzureFileSystem) getFileSystem();
+ AzureNativeFileSystemStore azureStore = azureFs.getStore();
+ return azureStore.getBandwidthGaugeUpdater();
+ }
+
+ private static byte[] nonZeroByteArray(int size) {
+ byte[] data = new byte[size];
+ Arrays.fill(data, (byte)5);
+ return data;
+ }
+
+ @Test
+ public void testMetricsOnFileCreateRead() throws Exception {
+ long base = getBaseWebResponses();
+
+ assertEquals(0, AzureMetricsTestUtil.getCurrentBytesWritten(getInstrumentation()));
+
+ Path filePath = new Path("/metricsTest_webResponses");
+ final int FILE_SIZE = 1000;
+
+ // Suppress auto-update of bandwidth metrics so we get
+ // to update them exactly when we want to.
+ getBandwidthGaugeUpdater().suppressAutoUpdate();
+
+ // Create a file
+ Date start = new Date();
+ OutputStream outputStream = getFileSystem().create(filePath);
+ outputStream.write(nonZeroByteArray(FILE_SIZE));
+ outputStream.close();
+ long uploadDurationMs = new Date().getTime() - start.getTime();
+
+ // The exact number of requests/responses that happen to create a file
+ // can vary - at the time of writing this code it takes 10
+ // requests/responses for the 1000 byte file (33 for 100 MB),
+ // plus the initial container-check request but that
+ // can very easily change in the future. Just assert that we do roughly
+ // more than 2 but less than 15.
+ logOpResponseCount("Creating a 1K file", base);
+ base = assertWebResponsesInRange(base, 2, 15);
+ getBandwidthGaugeUpdater().triggerUpdate(true);
+ long bytesWritten = AzureMetricsTestUtil.getCurrentBytesWritten(getInstrumentation());
+ assertTrue("The bytes written in the last second " + bytesWritten +
+ " is pretty far from the expected range of around " + FILE_SIZE +
+ " bytes plus a little overhead.",
+ bytesWritten > (FILE_SIZE / 2) && bytesWritten < (FILE_SIZE * 2));
+ long totalBytesWritten = AzureMetricsTestUtil.getCurrentTotalBytesWritten(getInstrumentation());
+ assertTrue("The total bytes written " + totalBytesWritten +
+ " is pretty far from the expected range of around " + FILE_SIZE +
+ " bytes plus a little overhead.",
+ totalBytesWritten >= FILE_SIZE && totalBytesWritten < (FILE_SIZE * 2));
+ long uploadRate = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(), WASB_UPLOAD_RATE);
+ LOG.info("Upload rate: " + uploadRate + " bytes/second.");
+ long expectedRate = (FILE_SIZE * 1000L) / uploadDurationMs;
+ assertTrue("The upload rate " + uploadRate +
+ " is below the expected range of around " + expectedRate +
+ " bytes/second that the unit test observed. This should never be" +
+ " the case since the test underestimates the rate by looking at " +
+ " end-to-end time instead of just block upload time.",
+ uploadRate >= expectedRate);
+ long uploadLatency = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),
+ WASB_UPLOAD_LATENCY);
+ LOG.info("Upload latency: {}", uploadLatency);
+ long expectedLatency = uploadDurationMs; // We're uploading less than a block.
+ assertTrue("The upload latency " + uploadLatency +
+ " should be greater than zero now that I've just uploaded a file.",
+ uploadLatency > 0);
+ assertTrue("The upload latency " + uploadLatency +
+ " is more than the expected range of around " + expectedLatency +
+ " milliseconds that the unit test observed. This should never be" +
+ " the case since the test overestimates the latency by looking at " +
+ " end-to-end time instead of just block upload time.",
+ uploadLatency <= expectedLatency);
+
+ // Read the file
+ start = new Date();
+ InputStream inputStream = getFileSystem().open(filePath);
+ int count = 0;
+ while (inputStream.read() >= 0) {
+ count++;
+ }
+ inputStream.close();
+ long downloadDurationMs = new Date().getTime() - start.getTime();
+ assertEquals(FILE_SIZE, count);
+
+ // Again, exact number varies. At the time of writing this code
+ // it takes 4 request/responses, so just assert a rough range between
+ // 1 and 10.
+ logOpResponseCount("Reading a 1K file", base);
+ base = assertWebResponsesInRange(base, 1, 10);
+ getBandwidthGaugeUpdater().triggerUpdate(false);
+ long totalBytesRead = AzureMetricsTestUtil.getCurrentTotalBytesRead(getInstrumentation());
+ assertEquals(FILE_SIZE, totalBytesRead);
+ long bytesRead = AzureMetricsTestUtil.getCurrentBytesRead(getInstrumentation());
+ assertTrue("The bytes read in the last second " + bytesRead +
+ " is pretty far from the expected range of around " + FILE_SIZE +
+ " bytes plus a little overhead.",
+ bytesRead > (FILE_SIZE / 2) && bytesRead < (FILE_SIZE * 2));
+ long downloadRate = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(), WASB_DOWNLOAD_RATE);
+ LOG.info("Download rate: " + downloadRate + " bytes/second.");
+ expectedRate = (FILE_SIZE * 1000L) / downloadDurationMs;
+ assertTrue("The download rate " + downloadRate +
+ " is below the expected range of around " + expectedRate +
+ " bytes/second that the unit test observed. This should never be" +
+ " the case since the test underestimates the rate by looking at " +
+ " end-to-end time instead of just block download time.",
+ downloadRate >= expectedRate);
+ long downloadLatency = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),
+ WASB_DOWNLOAD_LATENCY);
+ LOG.info("Download latency: " + downloadLatency);
+ expectedLatency = downloadDurationMs; // We're downloading less than a block.
+ assertTrue("The download latency " + downloadLatency +
+ " should be greater than zero now that I've just downloaded a file.",
+ downloadLatency > 0);
+ assertTrue("The download latency " + downloadLatency +
+ " is more than the expected range of around " + expectedLatency +
+ " milliseconds that the unit test observed. This should never be" +
+ " the case since the test overestimates the latency by looking at " +
+ " end-to-end time instead of just block download time.",
+ downloadLatency <= expectedLatency);
+
+ assertNoErrors();
+ }
+
+ @Test
+ public void testMetricsOnBigFileCreateRead() throws Exception {
+ long base = getBaseWebResponses();
+
+ assertEquals(0, AzureMetricsTestUtil.getCurrentBytesWritten(getInstrumentation()));
+
+ Path filePath = new Path("/metricsTest_webResponses");
+ final int FILE_SIZE = 100 * 1024 * 1024;
+
+ // Suppress auto-update of bandwidth metrics so we get
+ // to update them exactly when we want to.
+ getBandwidthGaugeUpdater().suppressAutoUpdate();
+
+ // Create a file
+ OutputStream outputStream = getFileSystem().create(filePath);
+ outputStream.write(new byte[FILE_SIZE]);
+ outputStream.close();
+
+ // The exact number of requests/responses that happen to create a file
+ // can vary - at the time of writing this code it takes 34
+ // requests/responses for the 100 MB file,
+ // plus the initial container check request, but that
+ // can very easily change in the future. Just assert that we do roughly
+ // more than 20 but less than 50.
+ logOpResponseCount("Creating a 100 MB file", base);
+ base = assertWebResponsesInRange(base, 20, 50);
+ getBandwidthGaugeUpdater().triggerUpdate(true);
+ long totalBytesWritten = AzureMetricsTestUtil.getCurrentTotalBytesWritten(getInstrumentation());
+ assertTrue("The total bytes written " + totalBytesWritten +
+ " is pretty far from the expected range of around " + FILE_SIZE +
+ " bytes plus a little overhead.",
+ totalBytesWritten >= FILE_SIZE && totalBytesWritten < (FILE_SIZE * 2));
+ long uploadRate = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(), WASB_UPLOAD_RATE);
+ LOG.info("Upload rate: " + uploadRate + " bytes/second.");
+ long uploadLatency = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),
+ WASB_UPLOAD_LATENCY);
+ LOG.info("Upload latency: " + uploadLatency);
+ assertTrue("The upload latency " + uploadLatency +
+ " should be greater than zero now that I've just uploaded a file.",
+ uploadLatency > 0);
+
+ // Read the file
+ InputStream inputStream = getFileSystem().open(filePath);
+ int count = 0;
+ while (inputStream.read() >= 0) {
+ count++;
+ }
+ inputStream.close();
+ assertEquals(FILE_SIZE, count);
+
+ // Again, exact number varies. At the time of writing this code
+ // it takes 27 request/responses, so just assert a rough range between
+ // 20 and 40.
+ logOpResponseCount("Reading a 100 MB file", base);
+ base = assertWebResponsesInRange(base, 20, 40);
+ getBandwidthGaugeUpdater().triggerUpdate(false);
+ long totalBytesRead = AzureMetricsTestUtil.getCurrentTotalBytesRead(getInstrumentation());
+ assertEquals(FILE_SIZE, totalBytesRead);
+ long downloadRate = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(), WASB_DOWNLOAD_RATE);
+ LOG.info("Download rate: " + downloadRate + " bytes/second.");
+ long downloadLatency = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),
+ WASB_DOWNLOAD_LATENCY);
+ LOG.info("Download latency: " + downloadLatency);
+ assertTrue("The download latency " + downloadLatency +
+ " should be greater than zero now that I've just downloaded a file.",
+ downloadLatency > 0);
+ }
+
+ @Test
+ public void testMetricsOnFileRename() throws Exception {
+ long base = getBaseWebResponses();
+
+ Path originalPath = new Path("/metricsTest_RenameStart");
+ Path destinationPath = new Path("/metricsTest_RenameFinal");
+
+ // Create an empty file
+ assertEquals(0, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_FILES_CREATED));
+ assertTrue(getFileSystem().createNewFile(originalPath));
+ logOpResponseCount("Creating an empty file", base);
+ base = assertWebResponsesInRange(base, 2, 20);
+ assertEquals(1, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_FILES_CREATED));
+
+ // Rename the file
+ assertTrue(
+ ((FileSystem) getFileSystem()).rename(originalPath, destinationPath));
+ // Varies: at the time of writing this code it takes 7 requests/responses.
+ logOpResponseCount("Renaming a file", base);
+ base = assertWebResponsesInRange(base, 2, 15);
+
+ assertNoErrors();
+ }
+
+ @Test
+ public void testMetricsOnFileExistsDelete() throws Exception {
+ long base = getBaseWebResponses();
+
+ Path filePath = new Path("/metricsTest_delete");
+
+ // Check existence
+ assertFalse(getFileSystem().exists(filePath));
+ // At the time of writing this code it takes 2 requests/responses to
+ // check existence, which seems excessive, plus initial request for
+ // container check.
+ logOpResponseCount("Checking file existence for non-existent file", base);
+ base = assertWebResponsesInRange(base, 1, 3);
+
+ // Create an empty file
+ assertTrue(getFileSystem().createNewFile(filePath));
+ base = getCurrentWebResponses();
+
+ // Check existence again
+ assertTrue(getFileSystem().exists(filePath));
+ logOpResponseCount("Checking file existence for existent file", base);
+ base = assertWebResponsesInRange(base, 1, 2);
+
+ // Delete the file
+ assertEquals(0, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_FILES_DELETED));
+ assertTrue(getFileSystem().delete(filePath, false));
+ // At the time of writing this code it takes 4 requests/responses to
+ // delete, which seems excessive. Check for range 1-4 for now.
+ logOpResponseCount("Deleting a file", base);
+ base = assertWebResponsesInRange(base, 1, 4);
+ assertEquals(1, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_FILES_DELETED));
+
+ assertNoErrors();
+ }
+
+ @Test
+ public void testMetricsOnDirRename() throws Exception {
+ long base = getBaseWebResponses();
+
+ Path originalDirName = new Path("/metricsTestDirectory_RenameStart");
+ Path innerFileName = new Path(originalDirName, "innerFile");
+ Path destDirName = new Path("/metricsTestDirectory_RenameFinal");
+
+ // Create an empty directory
+ assertTrue(getFileSystem().mkdirs(originalDirName));
+ base = getCurrentWebResponses();
+
+ // Create an inner file
+ assertTrue(getFileSystem().createNewFile(innerFileName));
+ base = getCurrentWebResponses();
+
+ // Rename the directory
+ assertTrue(getFileSystem().rename(originalDirName, destDirName));
+
+ // At the time of writing this code it takes 11 requests/responses
+ // to rename the directory with one file. Check for range 1-20 for now.
+ logOpResponseCount("Renaming a directory", base);
+ base = assertWebResponsesInRange(base, 1, 20);
+
+ assertNoErrors();
+ }
+
+ /**
+ * Recursive discovery of path depth
+ * @param path path to measure.
+ * @return depth, where "/" == 0.
+ */
+ int depth(Path path) {
+ if (path.isRoot()) {
+ return 0;
+ } else {
+ return 1 + depth(path.getParent());
+ }
+ }
+
+ @Test
+ public void testClientErrorMetrics() throws Exception {
+ String fileName = "metricsTestFile_ClientError";
+ Path filePath = new Path("/"+fileName);
+ final int FILE_SIZE = 100;
+ OutputStream outputStream = null;
+ String leaseID = null;
+ try {
+ // Create a file
+ outputStream = getFileSystem().create(filePath);
+ leaseID = getTestAccount().acquireShortLease(fileName);
+ try {
+ outputStream.write(new byte[FILE_SIZE]);
+ outputStream.close();
+ assertTrue("Should've thrown", false);
+ } catch (AzureException ex) {
+ assertTrue("Unexpected exception: " + ex,
+ ex.getMessage().contains("lease"));
+ }
+ assertEquals(1, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_CLIENT_ERRORS));
+ assertEquals(0, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_SERVER_ERRORS));
+ } finally {
+ if (leaseID != null) {
+ getTestAccount().releaseLease(leaseID, fileName);
+ }
+ IOUtils.closeStream(outputStream);
+ }
+ }
+
+ private void logOpResponseCount(String opName, long base) {
+ LOG.info("{} took {} web responses to complete.",
+ opName, getCurrentWebResponses() - base);
+ }
+
+ /**
+ * Gets (and asserts) the value of the wasb_web_responses counter just
+ * after the creation of the file system object.
+ */
+ private long getBaseWebResponses() {
+ // The number of requests should start at 0
+ return assertWebResponsesEquals(0, 0);
+ }
+
+ /**
+ * Gets the current value of the wasb_web_responses counter.
+ */
+ private long getCurrentWebResponses() {
+ return AzureMetricsTestUtil.getCurrentWebResponses(getInstrumentation());
+ }
+
+ /**
+ * Checks that the wasb_web_responses counter is at the given value.
+ * @param base The base value (before the operation of interest).
+ * @param expected The expected value for the operation of interest.
+ * @return The new base value now.
+ */
+ private long assertWebResponsesEquals(long base, long expected) {
+ assertCounter(WASB_WEB_RESPONSES, base + expected, getMyMetrics());
+ return base + expected;
+ }
+
+ private void assertNoErrors() {
+ assertEquals(0, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_CLIENT_ERRORS));
+ assertEquals(0, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_SERVER_ERRORS));
+ }
+
+ /**
+ * Checks that the wasb_web_responses counter is in the given range.
+ * @param base The base value (before the operation of interest).
+ * @param inclusiveLowerLimit The lower limit for what it should increase by.
+ * @param inclusiveUpperLimit The upper limit for what it should increase by.
+ * @return The new base value now.
+ */
+ private long assertWebResponsesInRange(long base,
+ long inclusiveLowerLimit,
+ long inclusiveUpperLimit) {
+ long currentResponses = getCurrentWebResponses();
+ long justOperation = currentResponses - base;
+ assertTrue(String.format(
+ "Web responses expected in range [%d, %d], but was %d.",
+ inclusiveLowerLimit, inclusiveUpperLimit, justOperation),
+ justOperation >= inclusiveLowerLimit &&
+ justOperation <= inclusiveUpperLimit);
+ return currentResponses;
+ }
+
+ /**
+ * Gets the metrics for the file system object.
+ * @return The metrics record.
+ */
+ private MetricsRecordBuilder getMyMetrics() {
+ return getMetrics(getInstrumentation());
+ }
+
+ private AzureFileSystemInstrumentation getInstrumentation() {
+ return getFileSystem().getInstrumentation();
+ }
+
+ /**
+ * A matcher class for asserting that we got a tag with a given
+ * value.
+ */
+ private static class TagMatcher extends TagExistsMatcher {
+ private final String tagValue;
+
+ public TagMatcher(String tagName, String tagValue) {
+ super(tagName);
+ this.tagValue = tagValue;
+ }
+
+ @Override
+ public boolean matches(MetricsTag toMatch) {
+ return toMatch.value().equals(tagValue);
+ }
+
+ @Override
+ public void describeTo(Description desc) {
+ super.describeTo(desc);
+ desc.appendText(" with value " + tagValue);
+ }
+ }
+
+ /**
+ * A matcher class for asserting that we got a tag with any value.
+ */
+ private static class TagExistsMatcher extends BaseMatcher<MetricsTag> {
+ private final String tagName;
+
+ public TagExistsMatcher(String tagName) {
+ this.tagName = tagName;
+ }
+
+ @Override
+ public boolean matches(Object toMatch) {
+ MetricsTag asTag = (MetricsTag)toMatch;
+ return asTag.name().equals(tagName) && matches(asTag);
+ }
+
+ protected boolean matches(MetricsTag toMatch) {
+ return true;
+ }
+
+ @Override
+ public void describeTo(Description desc) {
+ desc.appendText("Has tag " + tagName);
+ }
+ }
+
+ /**
+ * A matcher class for asserting that a long value is in a
+ * given range.
+ */
+ private static class InRange extends BaseMatcher<Long> {
+ private final long inclusiveLowerLimit;
+ private final long inclusiveUpperLimit;
+ private long obtained;
+
+ public InRange(long inclusiveLowerLimit, long inclusiveUpperLimit) {
+ this.inclusiveLowerLimit = inclusiveLowerLimit;
+ this.inclusiveUpperLimit = inclusiveUpperLimit;
+ }
+
+ @Override
+ public boolean matches(Object number) {
+ obtained = (Long)number;
+ return obtained >= inclusiveLowerLimit &&
+ obtained <= inclusiveUpperLimit;
+ }
+
+ @Override
+ public void describeTo(Description description) {
+ description.appendText("Between " + inclusiveLowerLimit +
+ " and " + inclusiveUpperLimit + " inclusively");
+ }
+ }
+}
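
Every assertion above follows the same idiom: capture a baseline counter, perform one operation, assert that the increase lies within an inclusive range (since exact request counts drift between releases), then roll the baseline forward. That idiom is independent of WASB; a minimal, framework-free sketch, with illustrative names:

public final class CounterAssert {

  private CounterAssert() {
  }

  /**
   * Assert a counter rose by an amount within [lo, hi] of a baseline;
   * returns the current value as the new baseline.
   */
  public static long assertDeltaInRange(String name, long base,
      long current, long lo, long hi) {
    long delta = current - base;
    if (delta < lo || delta > hi) {
      throw new AssertionError(String.format(
          "%s expected a rise in range [%d, %d], but was %d",
          name, lo, hi, delta));
    }
    return current;
  }
}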
[11/20] hadoop git commit: HADOOP-14553. Add (parallelized)
integration tests to hadoop-azure Contributed by Steve Loughran
Posted by st...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestAzureFileSystemInstrumentation.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestAzureFileSystemInstrumentation.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestAzureFileSystemInstrumentation.java
deleted file mode 100644
index 818a844..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestAzureFileSystemInstrumentation.java
+++ /dev/null
@@ -1,579 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure.metrics;
-
-import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_CLIENT_ERRORS;
-import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_DIRECTORIES_CREATED;
-import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_DOWNLOAD_LATENCY;
-import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_DOWNLOAD_RATE;
-import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_FILES_CREATED;
-import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_FILES_DELETED;
-import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_SERVER_ERRORS;
-import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_UPLOAD_LATENCY;
-import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_UPLOAD_RATE;
-import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_WEB_RESPONSES;
-import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
-import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assume.assumeNotNull;
-import static org.mockito.Matchers.argThat;
-import static org.mockito.Mockito.verify;
-
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.util.Arrays;
-import java.util.Date;
-
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.azure.AzureBlobStorageTestAccount;
-import org.apache.hadoop.fs.azure.AzureException;
-import org.apache.hadoop.fs.azure.AzureNativeFileSystemStore;
-import org.apache.hadoop.fs.azure.NativeAzureFileSystem;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.metrics2.MetricsRecordBuilder;
-import org.apache.hadoop.metrics2.MetricsTag;
-import org.hamcrest.BaseMatcher;
-import org.hamcrest.Description;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-public class TestAzureFileSystemInstrumentation {
- private FileSystem fs;
- private AzureBlobStorageTestAccount testAccount;
-
- @Before
- public void setUp() throws Exception {
- testAccount = AzureBlobStorageTestAccount.create();
- if (testAccount != null) {
- fs = testAccount.getFileSystem();
- }
- assumeNotNull(testAccount);
- }
-
- @After
- public void tearDown() throws Exception {
- if (testAccount != null) {
- testAccount.cleanup();
- testAccount = null;
- fs = null;
- }
- }
-
- @Test
- public void testMetricTags() throws Exception {
- String accountName =
- testAccount.getRealAccount().getBlobEndpoint()
- .getAuthority();
- String containerName =
- testAccount.getRealContainer().getName();
- MetricsRecordBuilder myMetrics = getMyMetrics();
- verify(myMetrics).add(argThat(
- new TagMatcher("accountName", accountName)
- ));
- verify(myMetrics).add(argThat(
- new TagMatcher("containerName", containerName)
- ));
- verify(myMetrics).add(argThat(
- new TagMatcher("Context", "azureFileSystem")
- ));
- verify(myMetrics).add(argThat(
- new TagExistsMatcher("wasbFileSystemId")
- ));
- }
-
-
- @Test
- public void testMetricsOnMkdirList() throws Exception {
- long base = getBaseWebResponses();
-
- // Create a directory
- assertTrue(fs.mkdirs(new Path("a")));
- // At the time of writing
- // getAncestor uses 2 calls for each folder level /user/<name>/a
- // plus 1 call made by checkContainer
- // mkdir checks the hierarchy with 2 calls per level
- // mkdirs calls storeEmptyDir to create the empty folder, which makes 5 calls
- // For a total of 7 + 6 + 5 = 18 web responses
- base = assertWebResponsesInRange(base, 1, 18);
- assertEquals(1,
- AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_DIRECTORIES_CREATED));
-
- // List the root contents
- assertEquals(1, fs.listStatus(new Path("/")).length);
- base = assertWebResponsesEquals(base, 1);
-
- assertNoErrors();
- }
-
- private BandwidthGaugeUpdater getBandwidthGaugeUpdater() {
- NativeAzureFileSystem azureFs = (NativeAzureFileSystem)fs;
- AzureNativeFileSystemStore azureStore = azureFs.getStore();
- return azureStore.getBandwidthGaugeUpdater();
- }
-
- private static byte[] nonZeroByteArray(int size) {
- byte[] data = new byte[size];
- Arrays.fill(data, (byte)5);
- return data;
- }
-
- @Test
- public void testMetricsOnFileCreateRead() throws Exception {
- long base = getBaseWebResponses();
-
- assertEquals(0, AzureMetricsTestUtil.getCurrentBytesWritten(getInstrumentation()));
-
- Path filePath = new Path("/metricsTest_webResponses");
- final int FILE_SIZE = 1000;
-
- // Suppress auto-update of bandwidth metrics so we get
- // to update them exactly when we want to.
- getBandwidthGaugeUpdater().suppressAutoUpdate();
-
- // Create a file
- Date start = new Date();
- OutputStream outputStream = fs.create(filePath);
- outputStream.write(nonZeroByteArray(FILE_SIZE));
- outputStream.close();
- long uploadDurationMs = new Date().getTime() - start.getTime();
-
- // The exact number of requests/responses that happen to create a file
- // can vary - at the time of writing this code it takes 10
- // requests/responses for the 1000 byte file (33 for 100 MB),
- // plus the initial container-check request but that
- // can very easily change in the future. Just assert that we do roughly
- // more than 2 but less than 15.
- logOpResponseCount("Creating a 1K file", base);
- base = assertWebResponsesInRange(base, 2, 15);
- getBandwidthGaugeUpdater().triggerUpdate(true);
- long bytesWritten = AzureMetricsTestUtil.getCurrentBytesWritten(getInstrumentation());
- assertTrue("The bytes written in the last second " + bytesWritten +
- " is pretty far from the expected range of around " + FILE_SIZE +
- " bytes plus a little overhead.",
- bytesWritten > (FILE_SIZE / 2) && bytesWritten < (FILE_SIZE * 2));
- long totalBytesWritten = AzureMetricsTestUtil.getCurrentTotalBytesWritten(getInstrumentation());
- assertTrue("The total bytes written " + totalBytesWritten +
- " is pretty far from the expected range of around " + FILE_SIZE +
- " bytes plus a little overhead.",
- totalBytesWritten >= FILE_SIZE && totalBytesWritten < (FILE_SIZE * 2));
- long uploadRate = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(), WASB_UPLOAD_RATE);
- System.out.println("Upload rate: " + uploadRate + " bytes/second.");
- long expectedRate = (FILE_SIZE * 1000L) / uploadDurationMs;
- assertTrue("The upload rate " + uploadRate +
- " is below the expected range of around " + expectedRate +
- " bytes/second that the unit test observed. This should never be" +
- " the case since the test underestimates the rate by looking at " +
- " end-to-end time instead of just block upload time.",
- uploadRate >= expectedRate);
- long uploadLatency = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),
- WASB_UPLOAD_LATENCY);
- System.out.println("Upload latency: " + uploadLatency);
- long expectedLatency = uploadDurationMs; // We're uploading less than a block.
- assertTrue("The upload latency " + uploadLatency +
- " should be greater than zero now that I've just uploaded a file.",
- uploadLatency > 0);
- assertTrue("The upload latency " + uploadLatency +
- " is more than the expected range of around " + expectedLatency +
- " milliseconds that the unit test observed. This should never be" +
- " the case since the test overestimates the latency by looking at " +
- " end-to-end time instead of just block upload time.",
- uploadLatency <= expectedLatency);
-
- // Read the file
- start = new Date();
- InputStream inputStream = fs.open(filePath);
- int count = 0;
- while (inputStream.read() >= 0) {
- count++;
- }
- inputStream.close();
- long downloadDurationMs = new Date().getTime() - start.getTime();
- assertEquals(FILE_SIZE, count);
-
- // Again, exact number varies. At the time of writing this code
- // it takes 4 request/responses, so just assert a rough range between
- // 1 and 10.
- logOpResponseCount("Reading a 1K file", base);
- base = assertWebResponsesInRange(base, 1, 10);
- getBandwidthGaugeUpdater().triggerUpdate(false);
- long totalBytesRead = AzureMetricsTestUtil.getCurrentTotalBytesRead(getInstrumentation());
- assertEquals(FILE_SIZE, totalBytesRead);
- long bytesRead = AzureMetricsTestUtil.getCurrentBytesRead(getInstrumentation());
- assertTrue("The bytes read in the last second " + bytesRead +
- " is pretty far from the expected range of around " + FILE_SIZE +
- " bytes plus a little overhead.",
- bytesRead > (FILE_SIZE / 2) && bytesRead < (FILE_SIZE * 2));
- long downloadRate = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(), WASB_DOWNLOAD_RATE);
- System.out.println("Download rate: " + downloadRate + " bytes/second.");
- expectedRate = (FILE_SIZE * 1000L) / downloadDurationMs;
- assertTrue("The download rate " + downloadRate +
- " is below the expected range of around " + expectedRate +
- " bytes/second that the unit test observed. This should never be" +
- " the case since the test underestimates the rate by looking at " +
- " end-to-end time instead of just block download time.",
- downloadRate >= expectedRate);
- long downloadLatency = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),
- WASB_DOWNLOAD_LATENCY);
- System.out.println("Download latency: " + downloadLatency);
- expectedLatency = downloadDurationMs; // We're downloading less than a block.
- assertTrue("The download latency " + downloadLatency +
- " should be greater than zero now that I've just downloaded a file.",
- downloadLatency > 0);
- assertTrue("The download latency " + downloadLatency +
- " is more than the expected range of around " + expectedLatency +
- " milliseconds that the unit test observed. This should never be" +
- " the case since the test overestimates the latency by looking at " +
- " end-to-end time instead of just block download time.",
- downloadLatency <= expectedLatency);
-
- assertNoErrors();
- }
-
- @Test
- public void testMetricsOnBigFileCreateRead() throws Exception {
- long base = getBaseWebResponses();
-
- assertEquals(0, AzureMetricsTestUtil.getCurrentBytesWritten(getInstrumentation()));
-
- Path filePath = new Path("/metricsTest_webResponses");
- final int FILE_SIZE = 100 * 1024 * 1024;
-
- // Suppress auto-update of bandwidth metrics so we get
- // to update them exactly when we want to.
- getBandwidthGaugeUpdater().suppressAutoUpdate();
-
- // Create a file
- OutputStream outputStream = fs.create(filePath);
- outputStream.write(new byte[FILE_SIZE]);
- outputStream.close();
-
- // The exact number of requests/responses that happen to create a file
- // can vary - at the time of writing this code it takes 34
- // requests/responses for the 100 MB file,
- // plus the initial container check request, but that
- // can very easily change in the future. Just assert that we do roughly
- // more than 20 but less than 50.
- logOpResponseCount("Creating a 100 MB file", base);
- base = assertWebResponsesInRange(base, 20, 50);
- getBandwidthGaugeUpdater().triggerUpdate(true);
- long totalBytesWritten = AzureMetricsTestUtil.getCurrentTotalBytesWritten(getInstrumentation());
- assertTrue("The total bytes written " + totalBytesWritten +
- " is pretty far from the expected range of around " + FILE_SIZE +
- " bytes plus a little overhead.",
- totalBytesWritten >= FILE_SIZE && totalBytesWritten < (FILE_SIZE * 2));
- long uploadRate = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(), WASB_UPLOAD_RATE);
- System.out.println("Upload rate: " + uploadRate + " bytes/second.");
- long uploadLatency = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),
- WASB_UPLOAD_LATENCY);
- System.out.println("Upload latency: " + uploadLatency);
- assertTrue("The upload latency " + uploadLatency +
- " should be greater than zero now that I've just uploaded a file.",
- uploadLatency > 0);
-
- // Read the file
- InputStream inputStream = fs.open(filePath);
- int count = 0;
- while (inputStream.read() >= 0) {
- count++;
- }
- inputStream.close();
- assertEquals(FILE_SIZE, count);
-
- // Again, exact number varies. At the time of writing this code
- // it takes 27 request/responses, so just assert a rough range between
- // 20 and 40.
- logOpResponseCount("Reading a 100 MB file", base);
- base = assertWebResponsesInRange(base, 20, 40);
- getBandwidthGaugeUpdater().triggerUpdate(false);
- long totalBytesRead = AzureMetricsTestUtil.getCurrentTotalBytesRead(getInstrumentation());
- assertEquals(FILE_SIZE, totalBytesRead);
- long downloadRate = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(), WASB_DOWNLOAD_RATE);
- System.out.println("Download rate: " + downloadRate + " bytes/second.");
- long downloadLatency = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),
- WASB_DOWNLOAD_LATENCY);
- System.out.println("Download latency: " + downloadLatency);
- assertTrue("The download latency " + downloadLatency +
- " should be greater than zero now that I've just downloaded a file.",
- downloadLatency > 0);
- }
-
- @Test
- public void testMetricsOnFileRename() throws Exception {
- long base = getBaseWebResponses();
-
- Path originalPath = new Path("/metricsTest_RenameStart");
- Path destinationPath = new Path("/metricsTest_RenameFinal");
-
- // Create an empty file
- assertEquals(0, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_FILES_CREATED));
- assertTrue(fs.createNewFile(originalPath));
- logOpResponseCount("Creating an empty file", base);
- base = assertWebResponsesInRange(base, 2, 20);
- assertEquals(1, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_FILES_CREATED));
-
- // Rename the file
- assertTrue(fs.rename(originalPath, destinationPath));
- // Varies: at the time of writing this code it takes 7 requests/responses.
- logOpResponseCount("Renaming a file", base);
- base = assertWebResponsesInRange(base, 2, 15);
-
- assertNoErrors();
- }
-
- @Test
- public void testMetricsOnFileExistsDelete() throws Exception {
- long base = getBaseWebResponses();
-
- Path filePath = new Path("/metricsTest_delete");
-
- // Check existence
- assertFalse(fs.exists(filePath));
- // At the time of writing this code it takes 2 requests/responses to
- // check existence, which seems excessive, plus initial request for
- // container check.
- logOpResponseCount("Checking file existence for non-existent file", base);
- base = assertWebResponsesInRange(base, 1, 3);
-
- // Create an empty file
- assertTrue(fs.createNewFile(filePath));
- base = getCurrentWebResponses();
-
- // Check existence again
- assertTrue(fs.exists(filePath));
- logOpResponseCount("Checking file existence for existent file", base);
- base = assertWebResponsesInRange(base, 1, 2);
-
- // Delete the file
- assertEquals(0, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_FILES_DELETED));
- assertTrue(fs.delete(filePath, false));
- // At the time of writing this code it takes 4 requests/responses to
- // delete, which seems excessive. Check for range 1-4 for now.
- logOpResponseCount("Deleting a file", base);
- base = assertWebResponsesInRange(base, 1, 4);
- assertEquals(1, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_FILES_DELETED));
-
- assertNoErrors();
- }
-
- @Test
- public void testMetricsOnDirRename() throws Exception {
- long base = getBaseWebResponses();
-
- Path originalDirName = new Path("/metricsTestDirectory_RenameStart");
- Path innerFileName = new Path(originalDirName, "innerFile");
- Path destDirName = new Path("/metricsTestDirectory_RenameFinal");
-
- // Create an empty directory
- assertTrue(fs.mkdirs(originalDirName));
- base = getCurrentWebResponses();
-
- // Create an inner file
- assertTrue(fs.createNewFile(innerFileName));
- base = getCurrentWebResponses();
-
- // Rename the directory
- assertTrue(fs.rename(originalDirName, destDirName));
- // At the time of writing this code it takes 11 requests/responses
- // to rename the directory with one file. Check for range 1-20 for now.
- logOpResponseCount("Renaming a directory", base);
- base = assertWebResponsesInRange(base, 1, 20);
-
- assertNoErrors();
- }
-
- @Test
- public void testClientErrorMetrics() throws Exception {
- String fileName = "metricsTestFile_ClientError";
- Path filePath = new Path("/"+fileName);
- final int FILE_SIZE = 100;
- OutputStream outputStream = null;
- String leaseID = null;
- try {
- // Create a file
- outputStream = fs.create(filePath);
- leaseID = testAccount.acquireShortLease(fileName);
- try {
- outputStream.write(new byte[FILE_SIZE]);
- outputStream.close();
- assertTrue("Should've thrown", false);
- } catch (AzureException ex) {
- assertTrue("Unexpected exception: " + ex,
- ex.getMessage().contains("lease"));
- }
- assertEquals(1, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_CLIENT_ERRORS));
- assertEquals(0, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_SERVER_ERRORS));
- } finally {
- if(leaseID != null){
- testAccount.releaseLease(leaseID, fileName);
- }
- IOUtils.closeStream(outputStream);
- }
- }
-
- private void logOpResponseCount(String opName, long base) {
- System.out.println(opName + " took " + (getCurrentWebResponses() - base) +
- " web responses to complete.");
- }
-
- /**
- * Gets (and asserts) the value of the wasb_web_responses counter just
- * after the creation of the file system object.
- */
- private long getBaseWebResponses() {
- // The number of requests should start at 0
- return assertWebResponsesEquals(0, 0);
- }
-
- /**
- * Gets the current value of the wasb_web_responses counter.
- */
- private long getCurrentWebResponses() {
- return AzureMetricsTestUtil.getCurrentWebResponses(getInstrumentation());
- }
-
- /**
- * Checks that the wasb_web_responses counter is at the given value.
- * @param base The base value (before the operation of interest).
- * @param expected The expected value for the operation of interest.
- * @return The new base value now.
- */
- private long assertWebResponsesEquals(long base, long expected) {
- assertCounter(WASB_WEB_RESPONSES, base + expected, getMyMetrics());
- return base + expected;
- }
-
- private void assertNoErrors() {
- assertEquals(0, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_CLIENT_ERRORS));
- assertEquals(0, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_SERVER_ERRORS));
- }
-
- /**
- * Checks that the wasb_web_responses counter is in the given range.
- * @param base The base value (before the operation of interest).
- * @param inclusiveLowerLimit The lower limit for what it should increase by.
- * @param inclusiveUpperLimit The upper limit for what it should increase by.
- * @return The new base value now.
- */
- private long assertWebResponsesInRange(long base,
- long inclusiveLowerLimit,
- long inclusiveUpperLimit) {
- long currentResponses = getCurrentWebResponses();
- long justOperation = currentResponses - base;
- assertTrue(String.format(
- "Web responses expected in range [%d, %d], but was %d.",
- inclusiveLowerLimit, inclusiveUpperLimit, justOperation),
- justOperation >= inclusiveLowerLimit &&
- justOperation <= inclusiveUpperLimit);
- return currentResponses;
- }
-
- /**
- * Gets the metrics for the file system object.
- * @return The metrics record.
- */
- private MetricsRecordBuilder getMyMetrics() {
- return getMetrics(getInstrumentation());
- }
-
- private AzureFileSystemInstrumentation getInstrumentation() {
- return ((NativeAzureFileSystem)fs).getInstrumentation();
- }
-
- /**
- * A matcher class for asserting that we got a tag with a given
- * value.
- */
- private static class TagMatcher extends TagExistsMatcher {
- private final String tagValue;
-
- public TagMatcher(String tagName, String tagValue) {
- super(tagName);
- this.tagValue = tagValue;
- }
-
- @Override
- public boolean matches(MetricsTag toMatch) {
- return toMatch.value().equals(tagValue);
- }
-
- @Override
- public void describeTo(Description desc) {
- super.describeTo(desc);
- desc.appendText(" with value " + tagValue);
- }
- }
-
- /**
- * A matcher class for asserting that we got a tag with any value.
- */
- private static class TagExistsMatcher extends BaseMatcher<MetricsTag> {
- private final String tagName;
-
- public TagExistsMatcher(String tagName) {
- this.tagName = tagName;
- }
-
- @Override
- public boolean matches(Object toMatch) {
- MetricsTag asTag = (MetricsTag)toMatch;
- return asTag.name().equals(tagName) && matches(asTag);
- }
-
- protected boolean matches(MetricsTag toMatch) {
- return true;
- }
-
- @Override
- public void describeTo(Description desc) {
- desc.appendText("Has tag " + tagName);
- }
- }
-
- /**
- * A matcher class for asserting that a long value is in a
- * given range.
- */
- private static class InRange extends BaseMatcher<Long> {
- private final long inclusiveLowerLimit;
- private final long inclusiveUpperLimit;
- private long obtained;
-
- public InRange(long inclusiveLowerLimit, long inclusiveUpperLimit) {
- this.inclusiveLowerLimit = inclusiveLowerLimit;
- this.inclusiveUpperLimit = inclusiveUpperLimit;
- }
-
- @Override
- public boolean matches(Object number) {
- obtained = (Long)number;
- return obtained >= inclusiveLowerLimit &&
- obtained <= inclusiveUpperLimit;
- }
-
- @Override
- public void describeTo(Description description) {
- description.appendText("Between " + inclusiveLowerLimit +
- " and " + inclusiveUpperLimit + " inclusively");
- }
- }
-}
[07/20] hadoop git commit: HADOOP-14553. Add (parallelized) integration tests to hadoop-azure. Contributed by Steve Loughran.
Posted by st...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemLive.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemLive.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemLive.java
new file mode 100644
index 0000000..f969968
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemLive.java
@@ -0,0 +1,236 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import java.util.concurrent.CountDownLatch;
+
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+
+import org.junit.Test;
+
+import com.microsoft.azure.storage.StorageException;
+
+/**
+ * Tests the Native Azure file system (WASB) against an actual blob store.
+ */
+public class ITestNativeAzureFileSystemLive extends
+ NativeAzureFileSystemBaseTest {
+
+ @Override
+ protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+ return AzureBlobStorageTestAccount.create();
+ }
+
+ @Test
+ public void testLazyRenamePendingCanOverwriteExistingFile()
+ throws Exception {
+ final String srcFile = "srcFile";
+ final String dstFile = "dstFile";
+ Path srcPath = path(srcFile);
+ FSDataOutputStream srcStream = fs.create(srcPath);
+ assertTrue(fs.exists(srcPath));
+ Path dstPath = path(dstFile);
+ FSDataOutputStream dstStream = fs.create(dstPath);
+ assertTrue(fs.exists(dstPath));
+ NativeAzureFileSystem nfs = fs;
+ final String fullSrcKey = nfs.pathToKey(nfs.makeAbsolute(srcPath));
+ final String fullDstKey = nfs.pathToKey(nfs.makeAbsolute(dstPath));
+ nfs.getStoreInterface().rename(fullSrcKey, fullDstKey, true, null);
+ assertTrue(fs.exists(dstPath));
+ assertFalse(fs.exists(srcPath));
+ IOUtils.cleanupWithLogger(null, srcStream);
+ IOUtils.cleanupWithLogger(null, dstStream);
+ }
+
+ /**
+ * Tests the fs.delete() call on a blob while another client holds a
+ * lease on it. delete(), if called without the lease, should fail and
+ * throw an appropriate lease-related exception.
+ * This is the scenario HMaster startup hits when it tries to clean up
+ * its temp dirs while the earlier, killed HMaster process still holds a
+ * lease on the blob from a DDL operation.
+ */
+ @Test
+ public void testDeleteThrowsExceptionWithLeaseExistsErrorMessage()
+ throws Exception {
+ LOG.info("Starting test");
+ // Create the file
+ Path path = methodPath();
+ fs.create(path);
+ assertPathExists("test file", path);
+ NativeAzureFileSystem nfs = fs;
+ final String fullKey = nfs.pathToKey(nfs.makeAbsolute(path));
+ final AzureNativeFileSystemStore store = nfs.getStore();
+
+ // Acquire the lease on the file in a background thread
+ final CountDownLatch leaseAttemptComplete = new CountDownLatch(1);
+ final CountDownLatch beginningDeleteAttempt = new CountDownLatch(1);
+ Thread t = new Thread() {
+ @Override
+ public void run() {
+ // Acquire the lease and then signal the main test thread.
+ SelfRenewingLease lease = null;
+ try {
+ lease = store.acquireLease(fullKey);
+ LOG.info("Lease acquired: " + lease.getLeaseID());
+ } catch (AzureException e) {
+ LOG.warn("Lease acqusition thread unable to acquire lease", e);
+ } finally {
+ leaseAttemptComplete.countDown();
+ }
+
+ // Wait for the main test thread to signal it will attempt the delete.
+ try {
+ beginningDeleteAttempt.await();
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ }
+
+ // Keep holding the lease past the lease acquisition retry interval, so
+ // the test covers the case of delete retrying to acquire the lease.
+ try {
+ Thread.sleep(SelfRenewingLease.LEASE_ACQUIRE_RETRY_INTERVAL * 3);
+ } catch (InterruptedException ex) {
+ Thread.currentThread().interrupt();
+ }
+
+ try {
+ if (lease != null) {
+ LOG.info("Freeing lease");
+ lease.free();
+ }
+ } catch (StorageException se) {
+ LOG.warn("Unable to free lease.", se);
+ }
+ }
+ };
+
+ // Start the background thread and wait for it to signal the lease is held.
+ t.start();
+ try {
+ leaseAttemptComplete.await();
+ } catch (InterruptedException ex) {
+ Thread.currentThread().interrupt();
+ }
+
+ // Try to delete the same file
+ beginningDeleteAttempt.countDown();
+ store.delete(fullKey);
+
+ // At this point file SHOULD BE DELETED
+ assertPathDoesNotExist("Leased path", path);
+ }
+
+ /**
+ * Check that isPageBlobKey works as expected. This assumes that
+ * in the test configuration, the list of supported page blob directories
+ * only includes "pageBlobs". That's why this test is made specific
+ * to this subclass.
+ */
+ @Test
+ public void testIsPageBlobKey() {
+ AzureNativeFileSystemStore store = fs.getStore();
+
+ // Use literal strings so it's easier to understand the tests.
+ // In case the constant changes, we want to know about it so we can update this test.
+ assertEquals("pageBlobs", AzureBlobStorageTestAccount.DEFAULT_PAGE_BLOB_DIRECTORY);
+
+ // URI prefix for test environment.
+ String uriPrefix = "file:///";
+
+ // negative tests
+ String[] negativeKeys = { "", "/", "bar", "bar/", "bar/pageBlobs", "bar/pageBlobs/foo",
+ "bar/pageBlobs/foo/", "/pageBlobs/", "/pageBlobs", "pageBlobs", "pageBlobsxyz/" };
+ for (String s : negativeKeys) {
+ assertFalse(store.isPageBlobKey(s));
+ assertFalse(store.isPageBlobKey(uriPrefix + s));
+ }
+
+ // positive tests
+ String[] positiveKeys = { "pageBlobs/", "pageBlobs/foo/", "pageBlobs/foo/bar/" };
+ for (String s : positiveKeys) {
+ assertTrue(store.isPageBlobKey(s));
+ assertTrue(store.isPageBlobKey(uriPrefix + s));
+ }
+ }
+
+ /**
+ * Test that isAtomicRenameKey() works as expected.
+ */
+ @Test
+ public void testIsAtomicRenameKey() {
+
+ AzureNativeFileSystemStore store = fs.getStore();
+
+ // We want to know if the default configuration changes so we can fix
+ // this test.
+ assertEquals("/atomicRenameDir1,/atomicRenameDir2",
+ AzureBlobStorageTestAccount.DEFAULT_ATOMIC_RENAME_DIRECTORIES);
+
+ // URI prefix for test environment.
+ String uriPrefix = "file:///";
+
+ // negative tests
+ String[] negativeKeys = { "", "/", "bar", "bar/", "bar/hbase",
+ "bar/hbase/foo", "bar/hbase/foo/", "/hbase/", "/hbase", "hbase",
+ "hbasexyz/", "foo/atomicRenameDir1/"};
+ for (String s : negativeKeys) {
+ assertFalse(store.isAtomicRenameKey(s));
+ assertFalse(store.isAtomicRenameKey(uriPrefix + s));
+ }
+
+ // Positive tests. The directories for atomic rename are /hbase
+ // plus the ones in the configuration (DEFAULT_ATOMIC_RENAME_DIRECTORIES
+ // for this test).
+ String[] positiveKeys = { "hbase/", "hbase/foo/", "hbase/foo/bar/",
+ "atomicRenameDir1/foo/", "atomicRenameDir2/bar/"};
+ for (String s : positiveKeys) {
+ assertTrue(store.isAtomicRenameKey(s));
+ assertTrue(store.isAtomicRenameKey(uriPrefix + s));
+ }
+ }
+
+ /**
+ * Tests fs.mkdirs() creating a target folder while another thread holds
+ * the lease on the underlying blob. mkdirs should not fail, since the
+ * blob already exists.
+ * This is a scenario that happens in HBase distributed log splitting:
+ * multiple threads try to create and update the "recovered.edits" folder
+ * under the same path.
+ */
+ @Test
+ public void testMkdirOnExistingFolderWithLease() throws Exception {
+ SelfRenewingLease lease;
+ // Create the folder
+ Path path = methodPath();
+ fs.mkdirs(path);
+ NativeAzureFileSystem nfs = fs;
+ String fullKey = nfs.pathToKey(nfs.makeAbsolute(path));
+ AzureNativeFileSystemStore store = nfs.getStore();
+ // Acquire the lease on the folder
+ lease = store.acquireLease(fullKey);
+ assertNotNull("lease ID", lease.getLeaseID() != null);
+ // Try to create the same folder
+ store.storeEmptyFolder(fullKey,
+ nfs.createPermissionStatus(FsPermission.getDirDefault()));
+ lease.free();
+ }
+}
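A note on the two key-classification tests above: both assume the directory
lists that AzureBlobStorageTestAccount wires into the test configuration. As a
rough sketch of how those lists are declared (fs.azure.page.blob.dir and
fs.azure.atomic.rename.dir are the configuration keys hadoop-azure reads; the
values here mirror the defaults asserted in the tests):

  import org.apache.hadoop.conf.Configuration;

  /** Sketch only: directory-classification keys read by hadoop-azure. */
  public class WasbDirectoryConfigSketch {
    public static Configuration classify() {
      Configuration conf = new Configuration();
      // Comma-separated directories whose files are stored as page blobs.
      conf.set("fs.azure.page.blob.dir", "/pageBlobs");
      // Extra atomic-rename directories; /hbase is always treated as one.
      conf.set("fs.azure.atomic.rename.dir",
          "/atomicRenameDir1,/atomicRenameDir2");
      return conf;
    }
  }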
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestOutOfBandAzureBlobOperationsLive.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestOutOfBandAzureBlobOperationsLive.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestOutOfBandAzureBlobOperationsLive.java
new file mode 100644
index 0000000..b63aaf0
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestOutOfBandAzureBlobOperationsLive.java
@@ -0,0 +1,185 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.Test;
+
+import com.microsoft.azure.storage.blob.BlobOutputStream;
+import com.microsoft.azure.storage.blob.CloudBlockBlob;
+
+/**
+ * Live blob operations.
+ */
+public class ITestOutOfBandAzureBlobOperationsLive extends AbstractWasbTestBase {
+
+ @Override
+ protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+ return AzureBlobStorageTestAccount.create();
+ }
+
+ // Scenario for this particular test described at MONARCH-HADOOP-764:
+ // creating a file out-of-band would confuse mkdirs("<oobfilesUncleFolder>").
+ // E.g. after out-of-band creation of "user/<name>/testFolder/a/input/file",
+ // the WASB creation of "user/<name>/testFolder/a/output" would fail.
+ @Test
+ public void outOfBandFolder_uncleMkdirs() throws Exception {
+
+ // NOTE: manual use of CloudBlockBlob targets working directory explicitly.
+ // WASB driver methods prepend working directory implicitly.
+ String workingDir = "user/"
+ + UserGroupInformation.getCurrentUser().getShortUserName() + "/";
+
+ CloudBlockBlob blob = testAccount.getBlobReference(workingDir
+ + "testFolder1/a/input/file");
+ BlobOutputStream s = blob.openOutputStream();
+ s.close();
+ assertTrue(fs.exists(new Path("testFolder1/a/input/file")));
+
+ Path targetFolder = new Path("testFolder1/a/output");
+ assertTrue(fs.mkdirs(targetFolder));
+ }
+
+ // scenario for this particular test described at MONARCH-HADOOP-764
+ @Test
+ public void outOfBandFolder_parentDelete() throws Exception {
+
+ // NOTE: manual use of CloudBlockBlob targets working directory explicitly.
+ // WASB driver methods prepend working directory implicitly.
+ String workingDir = "user/"
+ + UserGroupInformation.getCurrentUser().getShortUserName() + "/";
+ CloudBlockBlob blob = testAccount.getBlobReference(workingDir
+ + "testFolder2/a/input/file");
+ BlobOutputStream s = blob.openOutputStream();
+ s.close();
+ assertTrue(fs.exists(new Path("testFolder2/a/input/file")));
+
+ Path targetFolder = new Path("testFolder2/a/input");
+ assertTrue(fs.delete(targetFolder, true));
+ }
+
+ @Test
+ public void outOfBandFolder_rootFileDelete() throws Exception {
+
+ CloudBlockBlob blob = testAccount.getBlobReference("fileY");
+ BlobOutputStream s = blob.openOutputStream();
+ s.close();
+ assertTrue(fs.exists(new Path("/fileY")));
+ assertTrue(fs.delete(new Path("/fileY"), true));
+ }
+
+ @Test
+ public void outOfBandFolder_firstLevelFolderDelete() throws Exception {
+
+ CloudBlockBlob blob = testAccount.getBlobReference("folderW/file");
+ BlobOutputStream s = blob.openOutputStream();
+ s.close();
+ assertTrue(fs.exists(new Path("/folderW")));
+ assertTrue(fs.exists(new Path("/folderW/file")));
+ assertTrue(fs.delete(new Path("/folderW"), true));
+ }
+
+ // scenario for this particular test described at MONARCH-HADOOP-764
+ @Test
+ public void outOfBandFolder_siblingCreate() throws Exception {
+
+ // NOTE: manual use of CloudBlockBlob targets working directory explicitly.
+ // WASB driver methods prepend working directory implicitly.
+ String workingDir = "user/"
+ + UserGroupInformation.getCurrentUser().getShortUserName() + "/";
+ CloudBlockBlob blob = testAccount.getBlobReference(workingDir
+ + "testFolder3/a/input/file");
+ BlobOutputStream s = blob.openOutputStream();
+ s.close();
+ assertTrue(fs.exists(new Path("testFolder3/a/input/file")));
+
+ Path targetFile = new Path("testFolder3/a/input/file2");
+ FSDataOutputStream s2 = fs.create(targetFile);
+ s2.close();
+ }
+
+ // scenario for this particular test described at MONARCH-HADOOP-764
+ // creating a new file in the root folder
+ @Test
+ public void outOfBandFolder_create_rootDir() throws Exception {
+ Path targetFile = new Path("/newInRoot");
+ FSDataOutputStream s2 = fs.create(targetFile);
+ s2.close();
+ }
+
+ // scenario for this particular test described at MONARCH-HADOOP-764
+ @Test
+ public void outOfBandFolder_rename() throws Exception {
+
+ // NOTE: manual use of CloudBlockBlob targets working directory explicitly.
+ // WASB driver methods prepend working directory implicitly.
+ String workingDir = "user/"
+ + UserGroupInformation.getCurrentUser().getShortUserName() + "/";
+ CloudBlockBlob blob = testAccount.getBlobReference(workingDir
+ + "testFolder4/a/input/file");
+ BlobOutputStream s = blob.openOutputStream();
+ s.close();
+
+ Path srcFilePath = new Path("testFolder4/a/input/file");
+ assertTrue(fs.exists(srcFilePath));
+
+ Path destFilePath = new Path("testFolder4/a/input/file2");
+ fs.rename(srcFilePath, destFilePath);
+ }
+
+ // Verify that you can rename a file which is the only file in an implicit folder in the
+ // WASB file system.
+ // scenario for this particular test described at MONARCH-HADOOP-892
+ @Test
+ public void outOfBandSingleFile_rename() throws Exception {
+
+ // NOTE: manual use of CloudBlockBlob targets working directory explicitly.
+ // WASB driver methods prepend working directory implicitly.
+ String workingDir = "user/" + UserGroupInformation.getCurrentUser().getShortUserName() + "/";
+ CloudBlockBlob blob = testAccount.getBlobReference(workingDir + "testFolder5/a/input/file");
+ BlobOutputStream s = blob.openOutputStream();
+ s.close();
+
+ Path srcFilePath = new Path("testFolder5/a/input/file");
+ assertTrue(fs.exists(srcFilePath));
+
+ Path destFilePath = new Path("testFolder5/file2");
+ fs.rename(srcFilePath, destFilePath);
+ }
+
+ // WASB must force explicit parent directories in create, delete, mkdirs, rename.
+ // scenario for this particular test described at MONARCH-HADOOP-764
+ @Test
+ public void outOfBandFolder_rename_rootLevelFiles() throws Exception {
+
+ // NOTE: manual use of CloudBlockBlob targets working directory explicitly.
+ // WASB driver methods prepend working directory implicitly.
+ CloudBlockBlob blob = testAccount.getBlobReference("fileX");
+ BlobOutputStream s = blob.openOutputStream();
+ s.close();
+
+ Path srcFilePath = new Path("/fileX");
+ assertTrue(fs.exists(srcFilePath));
+
+ Path destFilePath = new Path("/fileXrename");
+ fs.rename(srcFilePath, destFilePath);
+ }
+}
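The tests above all lean on the same mapping, so a compact restatement may
help: the storage SDK addresses a blob by its full container-relative key,
while the WASB FileSystem resolves relative Paths against the working
directory /user/<shortUserName>. A minimal sketch, assuming the fs and
testAccount fixtures from AbstractWasbTestBase plus a java.io.OutputStream
import (the "demo/file" name is illustrative):

  void sketchKeyToPathMapping() throws Exception {
    String user = UserGroupInformation.getCurrentUser().getShortUserName();
    // SDK view: the key is taken verbatim, container-relative.
    CloudBlockBlob blob =
        testAccount.getBlobReference("user/" + user + "/demo/file");
    try (OutputStream s = blob.openOutputStream()) {
      // zero-byte out-of-band blob
    }
    // WASB view: the relative Path resolves to the same object.
    assertTrue(fs.exists(new Path("demo/file")));
  }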
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestReadAndSeekPageBlobAfterWrite.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestReadAndSeekPageBlobAfterWrite.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestReadAndSeekPageBlobAfterWrite.java
new file mode 100644
index 0000000..f2af116
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestReadAndSeekPageBlobAfterWrite.java
@@ -0,0 +1,341 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.Arrays;
+import java.util.Random;
+
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.azure.integration.AbstractAzureScaleTest;
+import org.apache.hadoop.util.Time;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.hadoop.fs.azure.integration.AzureTestUtils.*;
+
+/**
+ * Write data into a page blob and verify you can read back all of it
+ * or just a part of it.
+ */
+public class ITestReadAndSeekPageBlobAfterWrite extends AbstractAzureScaleTest {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(ITestReadAndSeekPageBlobAfterWrite.class);
+
+ private FileSystem fs;
+ private byte[] randomData;
+
+ // Page blob physical page size
+ private static final int PAGE_SIZE = PageBlobFormatHelpers.PAGE_SIZE;
+
+ // Size of data on page (excluding header)
+ private static final int PAGE_DATA_SIZE = PAGE_SIZE - PageBlobFormatHelpers.PAGE_HEADER_SIZE;
+ private static final int MAX_BYTES = 33554432; // maximum bytes in a file that we'll test
+ private static final int MAX_PAGES = MAX_BYTES / PAGE_SIZE; // maximum number of pages we'll test
+ private Random rand = new Random();
+
+ // A key with a prefix under /pageBlobs, which for the test file system will
+ // force use of a page blob.
+ private static final String KEY = "/pageBlobs/file.dat";
+
+ // path of page blob file to read and write
+ private Path blobPath;
+
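+ // For reference, with the values PageBlobFormatHelpers used at the time
+ // of writing (treat as illustrative): PAGE_SIZE = 512 bytes and
+ // PAGE_HEADER_SIZE = 2, so PAGE_DATA_SIZE = 510 payload bytes per page,
+ // and MAX_BYTES = 33554432 = 32 MB = 65536 pages (MAX_PAGES).
+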
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ fs = getTestAccount().getFileSystem();
+ // Make sure we are using an integral number of pages.
+ assertEquals(0, MAX_BYTES % PAGE_SIZE);
+
+ // load an in-memory array of random data
+ randomData = new byte[PAGE_SIZE * MAX_PAGES];
+ rand.nextBytes(randomData);
+
+ blobPath = blobPath("ITestReadAndSeekPageBlobAfterWrite");
+ }
+
+ @Override
+ public void tearDown() throws Exception {
+ deleteQuietly(fs, blobPath, true);
+ super.tearDown();
+ }
+
+ /**
+ * Make sure the file name (key) is a page blob file name. If anybody changes that,
+ * we need to come back and update this test class.
+ */
+ @Test
+ public void testIsPageBlobFileName() {
+ AzureNativeFileSystemStore store = ((NativeAzureFileSystem) fs).getStore();
+ String[] a = blobPath.toUri().getPath().split("/");
+ String key2 = a[1] + "/";
+ assertTrue("Not a page blob: " + blobPath, store.isPageBlobKey(key2));
+ }
+
+ /**
+ * For a set of different file sizes, write some random data to a page blob,
+ * read it back, and compare that what was read is the same as what was written.
+ */
+ @Test
+ public void testReadAfterWriteRandomData() throws IOException {
+
+ // local shorthand
+ final int pds = PAGE_DATA_SIZE;
+
+ // Test for sizes at and near page boundaries
+ int[] dataSizes = {
+
+ // on first page
+ 0, 1, 2, 3,
+
+ // Near first physical page boundary (because the implementation
+ // stores PDS + the page header size bytes on each page).
+ pds - 1, pds, pds + 1, pds + 2, pds + 3,
+
+ // near second physical page boundary
+ (2 * pds) - 1, (2 * pds), (2 * pds) + 1, (2 * pds) + 2, (2 * pds) + 3,
+
+ // near tenth physical page boundary
+ (10 * pds) - 1, (10 * pds), (10 * pds) + 1, (10 * pds) + 2, (10 * pds) + 3,
+
+ // test one big size, >> 4MB (an internal buffer size in the code)
+ MAX_BYTES
+ };
+
+ for (int i : dataSizes) {
+ testReadAfterWriteRandomData(i);
+ }
+ }
+
+ private void testReadAfterWriteRandomData(int size) throws IOException {
+ writeRandomData(size);
+ readRandomDataAndVerify(size);
+ }
+
+ /**
+ * Read "size" bytes of data and verify that what was read and what was written
+ * are the same.
+ */
+ private void readRandomDataAndVerify(int size) throws AzureException, IOException {
+ byte[] b = new byte[size];
+ FSDataInputStream stream = fs.open(blobPath);
+ int bytesRead = stream.read(b);
+ stream.close();
+ assertEquals(size, bytesRead);
+
+ // compare the data read to the data written
+ assertTrue(comparePrefix(randomData, b, size));
+ }
+
+ // return true if the beginning "size" values of the arrays are the same
+ private boolean comparePrefix(byte[] a, byte[] b, int size) {
+ if (a.length < size || b.length < size) {
+ return false;
+ }
+ for (int i = 0; i < size; i++) {
+ if (a[i] != b[i]) {
+ return false;
+ }
+ }
+ return true;
+ }
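+ // (On Java 9+, Arrays.equals(a, 0, size, b, 0, size) would express the
+ // same prefix check in one call; the explicit loop keeps the test Java 8
+ // compatible.)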
+
+ // Write a specified amount of random data to the file path for this test class.
+ private void writeRandomData(int size) throws IOException {
+ OutputStream output = fs.create(blobPath);
+ output.write(randomData, 0, size);
+ output.close();
+ }
+
+ /**
+ * Write data to a page blob, open it, seek, and then read a range of data.
+ * Then compare that the data read from that range is the same as the data originally written.
+ */
+ @Test
+ public void testPageBlobSeekAndReadAfterWrite() throws IOException {
+ writeRandomData(PAGE_SIZE * MAX_PAGES);
+ int recordSize = 100;
+ byte[] b = new byte[recordSize];
+
+ try (FSDataInputStream stream = fs.open(blobPath)) {
+ // Seek to a boundary around the middle of the 6th page
+ int seekPosition = 5 * PAGE_SIZE + 250;
+ stream.seek(seekPosition);
+
+ // Read a record's worth of bytes and verify results
+ int bytesRead = stream.read(b);
+ verifyReadRandomData(b, bytesRead, seekPosition, recordSize);
+
+ // Seek to another spot and read a record greater than a page
+ seekPosition = 10 * PAGE_SIZE + 250;
+ stream.seek(seekPosition);
+ recordSize = 1000;
+ b = new byte[recordSize];
+ bytesRead = stream.read(b);
+ verifyReadRandomData(b, bytesRead, seekPosition, recordSize);
+
+ // Read the last 100 bytes of the file
+ recordSize = 100;
+ seekPosition = PAGE_SIZE * MAX_PAGES - recordSize;
+ stream.seek(seekPosition);
+ b = new byte[recordSize];
+ bytesRead = stream.read(b);
+ verifyReadRandomData(b, bytesRead, seekPosition, recordSize);
+
+ // Read past the end of the file and we should get only partial data.
+ recordSize = 100;
+ seekPosition = PAGE_SIZE * MAX_PAGES - recordSize + 50;
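+ // (i.e. seek to 50 bytes before EOF, so a 100-byte read returns only 50)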
+ stream.seek(seekPosition);
+ b = new byte[recordSize];
+ bytesRead = stream.read(b);
+ assertEquals(50, bytesRead);
+
+ // compare last 50 bytes written with those read
+ byte[] tail = Arrays.copyOfRange(randomData, seekPosition, randomData.length);
+ assertTrue(comparePrefix(tail, b, 50));
+ }
+ }
+
+ // Verify that reading a record of data after seeking gives the expected data.
+ private void verifyReadRandomData(byte[] b, int bytesRead, int seekPosition, int recordSize) {
+ byte[] originalRecordData =
+ Arrays.copyOfRange(randomData, seekPosition, seekPosition + recordSize + 1);
+ assertEquals(recordSize, bytesRead);
+ assertTrue(comparePrefix(originalRecordData, b, recordSize));
+ }
+
+ // Test many small flushed writes interspersed with periodic hflush calls.
+ // For manual testing, increase NUM_WRITES to a large number.
+ // The goal for a long-running manual test is to make sure that it finishes
+ // and the close() call does not time out. It also facilitates debugging into
+ // hflush/hsync.
+ @Test
+ public void testManySmallWritesWithHFlush() throws IOException {
+ writeAndReadOneFile(50, 100, 20);
+ }
+
+ /**
+ * Write a total of numWrites * recordLength data to a file, read it back,
+ * and check to make sure what was read is the same as what was written.
+ * The syncInterval is the number of writes after which to call hflush to
+ * force the data to storage.
+ */
+ private void writeAndReadOneFile(int numWrites,
+ int recordLength, int syncInterval) throws IOException {
+
+ // A lower bound on the minimum time we think it will take to do
+ // a write to Azure storage.
+ final long MINIMUM_EXPECTED_TIME = 20;
+ LOG.info("Writing " + numWrites * recordLength + " bytes to " + blobPath.getName());
+ FSDataOutputStream output = fs.create(blobPath);
+ int writesSinceHFlush = 0;
+ try {
+
+ // Do a flush and hflush to exercise case for empty write queue in PageBlobOutputStream,
+ // to test concurrent execution gates.
+ output.flush();
+ output.hflush();
+ for (int i = 0; i < numWrites; i++) {
+ output.write(randomData, i * recordLength, recordLength);
+ writesSinceHFlush++;
+ output.flush();
+ if ((i % syncInterval) == 0) {
+ output.hflush();
+ writesSinceHFlush = 0;
+ }
+ }
+ } finally {
+ long start = Time.monotonicNow();
+ output.close();
+ long end = Time.monotonicNow();
+ LOG.debug("close duration = " + (end - start) + " msec.");
+ if (writesSinceHFlush > 0) {
+ assertTrue(String.format(
+ "close duration with >= 1 pending write is %d, less than minimum expected of %d",
+ end - start, MINIMUM_EXPECTED_TIME),
+ end - start >= MINIMUM_EXPECTED_TIME);
+ }
+ }
+
+ // Read the data back and check it.
+ FSDataInputStream stream = fs.open(blobPath);
+ int SIZE = numWrites * recordLength;
+ byte[] b = new byte[SIZE];
+ try {
+ stream.seek(0);
+ stream.read(b, 0, SIZE);
+ verifyReadRandomData(b, SIZE, 0, SIZE);
+ } finally {
+ stream.close();
+ }
+
+ // delete the file
+ fs.delete(blobPath, false);
+ }
+
+ // Test writing to a large file repeatedly as a stress test.
+ // Set the repetitions to a larger number for manual testing
+ // for a longer stress run.
+ @Test
+ public void testLargeFileStress() throws IOException {
+ int numWrites = 32;
+ int recordSize = 1024 * 1024;
+ int syncInterval = 10;
+ int repetitions = 1;
+ for (int i = 0; i < repetitions; i++) {
+ writeAndReadOneFile(numWrites, recordSize, syncInterval);
+ }
+ }
+
+ // Write to a file repeatedly to verify that it extends.
+ // The page blob file should start out at 128MB and finish at 256MB.
+ public void testFileSizeExtension() throws IOException {
+ final int writeSize = 1024 * 1024;
+ final int numWrites = 129;
+ final byte dataByte = 5;
+ byte[] data = new byte[writeSize];
+ Arrays.fill(data, dataByte);
+ try (FSDataOutputStream output = fs.create(blobPath)) {
+ for (int i = 0; i < numWrites; i++) {
+ output.write(data);
+ output.hflush();
+ LOG.debug("total writes = " + (i + 1));
+ }
+ }
+
+ // Show that we wrote more than the default page blob file size.
+ assertTrue(numWrites * writeSize > PageBlobOutputStream.PAGE_BLOB_MIN_SIZE);
+
+ // Verify we can list the new size. That will prove we expanded the file.
+ FileStatus[] status = fs.listStatus(blobPath);
+ assertEquals("File size hasn't changed " + status,
+ numWrites * writeSize, status[0].getLen());
+ LOG.debug("Total bytes written to " + blobPath + " = " + status[0].getLen());
+ fs.delete(blobPath, false);
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestWasbRemoteCallHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestWasbRemoteCallHelper.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestWasbRemoteCallHelper.java
new file mode 100644
index 0000000..062bc36
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestWasbRemoteCallHelper.java
@@ -0,0 +1,568 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.io.retry.RetryUtils;
+import org.apache.http.Header;
+import org.apache.http.HttpResponse;
+import org.apache.http.HttpEntity;
+import org.apache.http.HttpStatus;
+import org.apache.http.StatusLine;
+import org.apache.http.ProtocolVersion;
+import org.apache.http.ParseException;
+import org.apache.http.HeaderElement;
+import org.apache.http.client.HttpClient;
+import org.apache.http.client.methods.HttpGet;
+import org.hamcrest.Description;
+import org.hamcrest.TypeSafeMatcher;
+import org.junit.Assume;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+import org.mockito.ArgumentMatcher;
+import org.mockito.Mockito;
+
+import java.io.ByteArrayInputStream;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.nio.charset.StandardCharsets;
+
+import static org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.KEY_USE_SECURE_MODE;
+import static org.mockito.Matchers.argThat;
+import static org.mockito.Mockito.atLeast;
+import static org.mockito.Mockito.times;
+
+/**
+ * Test class to hold all WasbRemoteCallHelper tests.
+ */
+public class ITestWasbRemoteCallHelper
+ extends AbstractWasbTestBase {
+ public static final String EMPTY_STRING = "";
+ private static final int INVALID_HTTP_STATUS_CODE_999 = 999;
+
+ @Override
+ protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+ Configuration conf = new Configuration();
+ conf.set(NativeAzureFileSystem.KEY_AZURE_AUTHORIZATION, "true");
+ conf.set(RemoteWasbAuthorizerImpl.KEY_REMOTE_AUTH_SERVICE_URLS, "http://localhost1/,http://localhost2/,http://localhost:8080");
+ return AzureBlobStorageTestAccount.create(conf);
+ }
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ boolean useSecureMode = fs.getConf().getBoolean(KEY_USE_SECURE_MODE, false);
+ boolean useAuthorization = fs.getConf()
+ .getBoolean(NativeAzureFileSystem.KEY_AZURE_AUTHORIZATION, false);
+ Assume.assumeTrue("Test valid when both SecureMode and Authorization are enabled .. skipping",
+ useSecureMode && useAuthorization);
+ }
+
+ @Rule
+ public ExpectedException expectedEx = ExpectedException.none();
+
+ /**
+ * Test invalid status-code.
+ * @throws Throwable
+ */
+ @Test // (expected = WasbAuthorizationException.class)
+ public void testInvalidStatusCode() throws Throwable {
+
+ setupExpectations();
+
+ // set up mocks
+ HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
+ HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class);
+ Mockito.when(mockHttpClient.execute(Mockito.<HttpGet>any()))
+ .thenReturn(mockHttpResponse);
+ Mockito.when(mockHttpResponse.getStatusLine())
+ .thenReturn(newStatusLine(INVALID_HTTP_STATUS_CODE_999));
+ // finished setting up mocks
+
+ performop(mockHttpClient);
+ }
+
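+ // newStatusLine()/newHeader() are small factories defined further down this
+ // class, outside this excerpt. Illustrative equivalents, assuming they wrap
+ // httpcore's Basic* message classes (the example* names are hypothetical,
+ // chosen not to clash with the real helpers):
+ private StatusLine exampleStatusLine(int statusCode) {
+ return new org.apache.http.message.BasicStatusLine(
+ new ProtocolVersion("HTTP", 1, 1), statusCode, "");
+ }
+
+ private Header exampleHeader(String name, String value) {
+ return new org.apache.http.message.BasicHeader(name, value);
+ }
+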
+ /**
+ * Test invalid Content-Type.
+ * @throws Throwable
+ */
+ @Test // (expected = WasbAuthorizationException.class)
+ public void testInvalidContentType() throws Throwable {
+
+ setupExpectations();
+
+ // set up mocks
+ HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
+ HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class);
+ Mockito.when(mockHttpClient.execute(Mockito.<HttpGet>any())).thenReturn(mockHttpResponse);
+ Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(HttpStatus.SC_OK));
+ Mockito.when(mockHttpResponse.getFirstHeader("Content-Type"))
+ .thenReturn(newHeader("Content-Type", "text/plain"));
+ // finished setting up mocks
+
+ performop(mockHttpClient);
+ }
+
+ /**
+ * Test missing Content-Length.
+ * @throws Throwable
+ */
+ @Test // (expected = WasbAuthorizationException.class)
+ public void testMissingContentLength() throws Throwable {
+
+ setupExpectations();
+
+ // set up mocks
+ HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
+ HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class);
+ Mockito.when(mockHttpClient.execute(Mockito.<HttpGet>any())).thenReturn(mockHttpResponse);
+ Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(HttpStatus.SC_OK));
+ Mockito.when(mockHttpResponse.getFirstHeader("Content-Type"))
+ .thenReturn(newHeader("Content-Type", "application/json"));
+ // finished setting up mocks
+
+ performop(mockHttpClient);
+ }
+
+ /**
+ * Test Content-Length exceeds max.
+ * @throws Throwable
+ */
+ @Test // (expected = WasbAuthorizationException.class)
+ public void testContentLengthExceedsMax() throws Throwable {
+
+ setupExpectations();
+
+ // set up mocks
+ HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
+ HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class);
+ Mockito.when(mockHttpClient.execute(Mockito.<HttpGet>any())).thenReturn(mockHttpResponse);
+ Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(HttpStatus.SC_OK));
+ Mockito.when(mockHttpResponse.getFirstHeader("Content-Type"))
+ .thenReturn(newHeader("Content-Type", "application/json"));
+ Mockito.when(mockHttpResponse.getFirstHeader("Content-Length"))
+ .thenReturn(newHeader("Content-Length", "2048"));
+ // finished setting up mocks
+
+ performop(mockHttpClient);
+ }
+
+ /**
+ * Test invalid Content-Length value
+ * @throws Throwable
+ */
+ @Test // (expected = WasbAuthorizationException.class)
+ public void testInvalidContentLengthValue() throws Throwable {
+
+ setupExpectations();
+
+ // set up mocks
+ HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
+ HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class);
+ Mockito.when(mockHttpClient.execute(Mockito.<HttpGet>any())).thenReturn(mockHttpResponse);
+ Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(HttpStatus.SC_OK));
+ Mockito.when(mockHttpResponse.getFirstHeader("Content-Type"))
+ .thenReturn(newHeader("Content-Type", "application/json"));
+ Mockito.when(mockHttpResponse.getFirstHeader("Content-Length"))
+ .thenReturn(newHeader("Content-Length", "20abc48"));
+ // finished setting up mocks
+
+ performop(mockHttpClient);
+ }
+
+ /**
+ * Test valid JSON response.
+ * @throws Throwable
+ */
+ @Test
+ public void testValidJSONResponse() throws Throwable {
+
+ // set up mocks
+ HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
+
+ HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class);
+ HttpEntity mockHttpEntity = Mockito.mock(HttpEntity.class);
+
+ Mockito.when(mockHttpClient.execute(Mockito.<HttpGet>any())).thenReturn(mockHttpResponse);
+ Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(HttpStatus.SC_OK));
+ Mockito.when(mockHttpResponse.getFirstHeader("Content-Type"))
+ .thenReturn(newHeader("Content-Type", "application/json"));
+ Mockito.when(mockHttpResponse.getFirstHeader("Content-Length"))
+ .thenReturn(newHeader("Content-Length", "1024"));
+ Mockito.when(mockHttpResponse.getEntity()).thenReturn(mockHttpEntity);
+ Mockito.when(mockHttpEntity.getContent())
+ .thenReturn(new ByteArrayInputStream(validJsonResponse().getBytes(StandardCharsets.UTF_8)))
+ .thenReturn(new ByteArrayInputStream(validJsonResponse().getBytes(StandardCharsets.UTF_8)))
+ .thenReturn(new ByteArrayInputStream(validJsonResponse().getBytes(StandardCharsets.UTF_8)));
+ // finished setting up mocks
+
+ performop(mockHttpClient);
+ }
+
+ /**
+ * Test malformed JSON response.
+ * @throws Throwable
+ */
+ @Test // (expected = WasbAuthorizationException.class)
+ public void testMalFormedJSONResponse() throws Throwable {
+
+ expectedEx.expect(WasbAuthorizationException.class);
+ expectedEx.expectMessage("com.fasterxml.jackson.core.JsonParseException: Unexpected end-of-input in FIELD_NAME");
+
+ // set up mocks
+ HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
+
+ HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class);
+ HttpEntity mockHttpEntity = Mockito.mock(HttpEntity.class);
+
+ Mockito.when(mockHttpClient.execute(Mockito.<HttpGet>any())).thenReturn(mockHttpResponse);
+ Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(HttpStatus.SC_OK));
+ Mockito.when(mockHttpResponse.getFirstHeader("Content-Type"))
+ .thenReturn(newHeader("Content-Type", "application/json"));
+ Mockito.when(mockHttpResponse.getFirstHeader("Content-Length"))
+ .thenReturn(newHeader("Content-Length", "1024"));
+ Mockito.when(mockHttpResponse.getEntity()).thenReturn(mockHttpEntity);
+ Mockito.when(mockHttpEntity.getContent())
+ .thenReturn(new ByteArrayInputStream(malformedJsonResponse().getBytes(StandardCharsets.UTF_8)));
+ // finished setting up mocks
+
+ performop(mockHttpClient);
+ }
+
+ /**
+ * Test valid JSON response failure response code.
+ * @throws Throwable
+ */
+ @Test // (expected = WasbAuthorizationException.class)
+ public void testFailureCodeJSONResponse() throws Throwable {
+
+ expectedEx.expect(WasbAuthorizationException.class);
+ expectedEx.expectMessage("Remote authorization service encountered an error Unauthorized");
+
+ // set up mocks
+ HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
+
+ HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class);
+ HttpEntity mockHttpEntity = Mockito.mock(HttpEntity.class);
+
+ Mockito.when(mockHttpClient.execute(Mockito.<HttpGet>any())).thenReturn(mockHttpResponse);
+ Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(HttpStatus.SC_OK));
+ Mockito.when(mockHttpResponse.getFirstHeader("Content-Type"))
+ .thenReturn(newHeader("Content-Type", "application/json"));
+ Mockito.when(mockHttpResponse.getFirstHeader("Content-Length"))
+ .thenReturn(newHeader("Content-Length", "1024"));
+ Mockito.when(mockHttpResponse.getEntity()).thenReturn(mockHttpEntity);
+ Mockito.when(mockHttpEntity.getContent())
+ .thenReturn(new ByteArrayInputStream(failureCodeJsonResponse().getBytes(StandardCharsets.UTF_8)));
+ // finished setting up mocks
+
+ performop(mockHttpClient);
+ }
+
+ @Test
+ public void testWhenOneInstanceIsDown() throws Throwable {
+
+ boolean isAuthorizationCachingEnabled = fs.getConf().getBoolean(CachingAuthorizer.KEY_AUTH_SERVICE_CACHING_ENABLE, false);
+
+ // set up mocks
+ HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
+ HttpEntity mockHttpEntity = Mockito.mock(HttpEntity.class);
+
+ HttpResponse mockHttpResponseService1 = Mockito.mock(HttpResponse.class);
+ Mockito.when(mockHttpResponseService1.getStatusLine())
+ .thenReturn(newStatusLine(HttpStatus.SC_INTERNAL_SERVER_ERROR));
+ Mockito.when(mockHttpResponseService1.getFirstHeader("Content-Type"))
+ .thenReturn(newHeader("Content-Type", "application/json"));
+ Mockito.when(mockHttpResponseService1.getFirstHeader("Content-Length"))
+ .thenReturn(newHeader("Content-Length", "1024"));
+ Mockito.when(mockHttpResponseService1.getEntity())
+ .thenReturn(mockHttpEntity);
+
+ HttpResponse mockHttpResponseService2 = Mockito.mock(HttpResponse.class);
+ Mockito.when(mockHttpResponseService2.getStatusLine())
+ .thenReturn(newStatusLine(HttpStatus.SC_OK));
+ Mockito.when(mockHttpResponseService2.getFirstHeader("Content-Type"))
+ .thenReturn(newHeader("Content-Type", "application/json"));
+ Mockito.when(mockHttpResponseService2.getFirstHeader("Content-Length"))
+ .thenReturn(newHeader("Content-Length", "1024"));
+ Mockito.when(mockHttpResponseService2.getEntity())
+ .thenReturn(mockHttpEntity);
+
+ HttpResponse mockHttpResponseServiceLocal = Mockito.mock(HttpResponse.class);
+ Mockito.when(mockHttpResponseServiceLocal.getStatusLine())
+ .thenReturn(newStatusLine(HttpStatus.SC_INTERNAL_SERVER_ERROR));
+ Mockito.when(mockHttpResponseServiceLocal.getFirstHeader("Content-Type"))
+ .thenReturn(newHeader("Content-Type", "application/json"));
+ Mockito.when(mockHttpResponseServiceLocal.getFirstHeader("Content-Length"))
+ .thenReturn(newHeader("Content-Length", "1024"));
+ Mockito.when(mockHttpResponseServiceLocal.getEntity())
+ .thenReturn(mockHttpEntity);
+
+ class HttpGetForService1 extends ArgumentMatcher<HttpGet> {
+ @Override public boolean matches(Object o) {
+ return checkHttpGetMatchHost((HttpGet) o, "localhost1");
+ }
+ }
+ class HttpGetForService2 extends ArgumentMatcher<HttpGet> {
+ @Override public boolean matches(Object o) {
+ return checkHttpGetMatchHost((HttpGet) o, "localhost2");
+ }
+ }
+ class HttpGetForServiceLocal extends ArgumentMatcher<HttpGet> {
+ @Override public boolean matches(Object o) {
+ try {
+ return checkHttpGetMatchHost((HttpGet) o, InetAddress.getLocalHost().getCanonicalHostName());
+ } catch (UnknownHostException e) {
+ return checkHttpGetMatchHost((HttpGet) o, "localhost");
+ }
+ }
+ }
+ Mockito.when(mockHttpClient.execute(argThat(new HttpGetForService1())))
+ .thenReturn(mockHttpResponseService1);
+ Mockito.when(mockHttpClient.execute(argThat(new HttpGetForService2())))
+ .thenReturn(mockHttpResponseService2);
+ Mockito.when(mockHttpClient.execute(argThat(new HttpGetForServiceLocal())))
+ .thenReturn(mockHttpResponseServiceLocal);
+
+ // Need 2 times because performop() does 2 fs operations.
+ Mockito.when(mockHttpEntity.getContent())
+ .thenReturn(new ByteArrayInputStream(validJsonResponse()
+ .getBytes(StandardCharsets.UTF_8)))
+ .thenReturn(new ByteArrayInputStream(validJsonResponse()
+ .getBytes(StandardCharsets.UTF_8)))
+ .thenReturn(new ByteArrayInputStream(validJsonResponse()
+ .getBytes(StandardCharsets.UTF_8)));
+ // finished setting up mocks
+
+ performop(mockHttpClient);
+
+ int expectedNumberOfInvocations = isAuthorizationCachingEnabled ? 1 : 2;
+ Mockito.verify(mockHttpClient, times(expectedNumberOfInvocations)).execute(Mockito.argThat(new HttpGetForServiceLocal()));
+ Mockito.verify(mockHttpClient, times(expectedNumberOfInvocations)).execute(Mockito.argThat(new HttpGetForService2()));
+ }
+
+ @Test
+ public void testWhenServiceInstancesAreDown() throws Throwable {
+ //expectedEx.expect(WasbAuthorizationException.class);
+ // set up mocks
+ HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
+ HttpEntity mockHttpEntity = Mockito.mock(HttpEntity.class);
+
+ HttpResponse mockHttpResponseService1 = Mockito.mock(HttpResponse.class);
+ Mockito.when(mockHttpResponseService1.getStatusLine())
+ .thenReturn(newStatusLine(HttpStatus.SC_INTERNAL_SERVER_ERROR));
+ Mockito.when(mockHttpResponseService1.getFirstHeader("Content-Type"))
+ .thenReturn(newHeader("Content-Type", "application/json"));
+ Mockito.when(mockHttpResponseService1.getFirstHeader("Content-Length"))
+ .thenReturn(newHeader("Content-Length", "1024"));
+ Mockito.when(mockHttpResponseService1.getEntity())
+ .thenReturn(mockHttpEntity);
+
+ HttpResponse mockHttpResponseService2 = Mockito.mock(HttpResponse.class);
+ Mockito.when(mockHttpResponseService2.getStatusLine())
+ .thenReturn(newStatusLine(
+ HttpStatus.SC_INTERNAL_SERVER_ERROR));
+ Mockito.when(mockHttpResponseService2.getFirstHeader("Content-Type"))
+ .thenReturn(newHeader("Content-Type", "application/json"));
+ Mockito.when(mockHttpResponseService2.getFirstHeader("Content-Length"))
+ .thenReturn(newHeader("Content-Length", "1024"));
+ Mockito.when(mockHttpResponseService2.getEntity())
+ .thenReturn(mockHttpEntity);
+
+ HttpResponse mockHttpResponseService3 = Mockito.mock(HttpResponse.class);
+ Mockito.when(mockHttpResponseService3.getStatusLine())
+ .thenReturn(newStatusLine(
+ HttpStatus.SC_INTERNAL_SERVER_ERROR));
+ Mockito.when(mockHttpResponseService3.getFirstHeader("Content-Type"))
+ .thenReturn(newHeader("Content-Type", "application/json"));
+ Mockito.when(mockHttpResponseService3.getFirstHeader("Content-Length"))
+ .thenReturn(newHeader("Content-Length", "1024"));
+ Mockito.when(mockHttpResponseService3.getEntity())
+ .thenReturn(mockHttpEntity);
+
+ class HttpGetForService1 extends ArgumentMatcher<HttpGet> {
+ @Override public boolean matches(Object o) {
+ return checkHttpGetMatchHost((HttpGet) o, "localhost1");
+ }
+ }
+ class HttpGetForService2 extends ArgumentMatcher<HttpGet> {
+ @Override public boolean matches(Object o) {
+ return checkHttpGetMatchHost((HttpGet) o, "localhost2");
+ }
+ }
+ class HttpGetForService3 extends ArgumentMatcher<HttpGet> {
+ @Override public boolean matches(Object o) {
+ try {
+ return checkHttpGetMatchHost((HttpGet) o, InetAddress.getLocalHost().getCanonicalHostName());
+ } catch (UnknownHostException e) {
+ return checkHttpGetMatchHost((HttpGet) o, "localhost");
+ }
+ }
+ }
+ Mockito.when(mockHttpClient.execute(argThat(new HttpGetForService1())))
+ .thenReturn(mockHttpResponseService1);
+ Mockito.when(mockHttpClient.execute(argThat(new HttpGetForService2())))
+ .thenReturn(mockHttpResponseService2);
+ Mockito.when(mockHttpClient.execute(argThat(new HttpGetForService3())))
+ .thenReturn(mockHttpResponseService3);
+
+ //Need 3 times because performop() does 3 fs operations.
+ Mockito.when(mockHttpEntity.getContent())
+ .thenReturn(new ByteArrayInputStream(
+ validJsonResponse().getBytes(StandardCharsets.UTF_8)))
+ .thenReturn(new ByteArrayInputStream(
+ validJsonResponse().getBytes(StandardCharsets.UTF_8)))
+ .thenReturn(new ByteArrayInputStream(
+ validJsonResponse().getBytes(StandardCharsets.UTF_8)));
+ // finished setting up mocks
+ try {
+ performop(mockHttpClient);
+ } catch (WasbAuthorizationException e) {
+ e.printStackTrace();
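+ // All three service instances returned 500, so the retry policy
+ // should have cycled through each of them before surfacing the
+ // failure; seven executions are expected in total.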
+ Mockito.verify(mockHttpClient, atLeast(2))
+ .execute(argThat(new HttpGetForService1()));
+ Mockito.verify(mockHttpClient, atLeast(2))
+ .execute(argThat(new HttpGetForService2()));
+ Mockito.verify(mockHttpClient, atLeast(3))
+ .execute(argThat(new HttpGetForService3()));
+ Mockito.verify(mockHttpClient, times(7)).execute(Mockito.<HttpGet>any());
+ }
+ }
+
+ private void setupExpectations() {
+ expectedEx.expect(WasbAuthorizationException.class);
+
+ class MatchesPattern extends TypeSafeMatcher<String> {
+ private String pattern;
+
+ MatchesPattern(String pattern) {
+ this.pattern = pattern;
+ }
+
+ @Override protected boolean matchesSafely(String item) {
+ return item.matches(pattern);
+ }
+
+ @Override public void describeTo(Description description) {
+ description.appendText("matches pattern ").appendValue(pattern);
+ }
+
+ @Override protected void describeMismatchSafely(String item,
+ Description mismatchDescription) {
+ mismatchDescription.appendText("does not match");
+ }
+ }
+
+ expectedEx.expectMessage(new MatchesPattern(
+ "org\\.apache\\.hadoop\\.fs\\.azure\\.WasbRemoteCallException: "
+ + "Encountered error while making remote call to "
+ + "http:\\/\\/localhost1\\/,http:\\/\\/localhost2\\/,http:\\/\\/localhost:8080 retried 6 time\\(s\\)\\."));
+ }
+
+ private void performop(HttpClient mockHttpClient) throws Throwable {
+
+ Path testPath = new Path("/", "test.dat");
+
+ RemoteWasbAuthorizerImpl authorizer = new RemoteWasbAuthorizerImpl();
+ authorizer.init(fs.getConf());
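+ // The retry spec is (sleep-millis, retry-count) pairs: three retries
+ // roughly 1s apart, then two more roughly 10s apart.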
+ WasbRemoteCallHelper mockWasbRemoteCallHelper = new WasbRemoteCallHelper(
+ RetryUtils.getMultipleLinearRandomRetry(new Configuration(),
+ EMPTY_STRING, true,
+ EMPTY_STRING, "1000,3,10000,2"));
+ mockWasbRemoteCallHelper.updateHttpClient(mockHttpClient);
+ authorizer.updateWasbRemoteCallHelper(mockWasbRemoteCallHelper);
+ fs.updateWasbAuthorizer(authorizer);
+
+ fs.create(testPath);
+ ContractTestUtils.assertPathExists(fs, "testPath was not created", testPath);
+ fs.delete(testPath, false);
+ }
+
+ private String validJsonResponse() {
+ return "{"
+ + "\"responseCode\": 0,"
+ + "\"authorizationResult\": true,"
+ + "\"responseMessage\": \"Authorized\""
+ + "}";
+ }
+
+ private String malformedJsonResponse() {
+ return "{"
+ + "\"responseCode\": 0,"
+ + "\"authorizationResult\": true,"
+ + "\"responseMessage\":";
+ }
+
+ private String failureCodeJsonResponse() {
+ return "{"
+ + "\"responseCode\": 1,"
+ + "\"authorizationResult\": false,"
+ + "\"responseMessage\": \"Unauthorized\""
+ + "}";
+ }
+
+ private StatusLine newStatusLine(int statusCode) {
+ return new StatusLine() {
+ @Override
+ public ProtocolVersion getProtocolVersion() {
+ return new ProtocolVersion("HTTP", 1, 1);
+ }
+
+ @Override
+ public int getStatusCode() {
+ return statusCode;
+ }
+
+ @Override
+ public String getReasonPhrase() {
+ return "Reason Phrase";
+ }
+ };
+ }
+
+ private Header newHeader(String name, String value) {
+ return new Header() {
+ @Override
+ public String getName() {
+ return name;
+ }
+
+ @Override
+ public String getValue() {
+ return value;
+ }
+
+ @Override
+ public HeaderElement[] getElements() throws ParseException {
+ return new HeaderElement[0];
+ }
+ };
+ }
+
+ /** Check whether an HttpGet request targets the given remote host. */
+ private static boolean checkHttpGetMatchHost(HttpGet g, String h) {
+ return g != null && g.getURI().getHost().equals(h);
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestWasbUriAndConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestWasbUriAndConfiguration.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestWasbUriAndConfiguration.java
new file mode 100644
index 0000000..bee0220
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestWasbUriAndConfiguration.java
@@ -0,0 +1,610 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
+import static org.junit.Assume.assumeNotNull;
+
+import java.io.ByteArrayInputStream;
+import java.io.DataInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.URI;
+import java.util.Date;
+import java.util.EnumSet;
+import java.io.File;
+
+import org.apache.hadoop.fs.azure.integration.AzureTestUtils;
+import org.apache.hadoop.security.ProviderUtils;
+import org.apache.hadoop.security.alias.CredentialProvider;
+import org.apache.hadoop.security.alias.CredentialProviderFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.AbstractFileSystem;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
+import org.apache.hadoop.fs.azure.AzureBlobStorageTestAccount.CreateOptions;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Assume;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import com.microsoft.azure.storage.blob.CloudBlobContainer;
+import com.microsoft.azure.storage.blob.CloudBlockBlob;
+
+public class ITestWasbUriAndConfiguration extends AbstractWasbTestWithTimeout {
+
+ private static final int FILE_SIZE = 4096;
+ private static final String PATH_DELIMITER = "/";
+
+ protected String accountName;
+ protected String accountKey;
+ protected static Configuration conf = null;
+ private boolean runningInSASMode = false;
+ @Rule
+ public final TemporaryFolder tempDir = new TemporaryFolder();
+
+ private AzureBlobStorageTestAccount testAccount;
+
+ @After
+ public void tearDown() throws Exception {
+ testAccount = AzureTestUtils.cleanupTestAccount(testAccount);
+ }
+
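+ // Record whether the test configuration runs in secure (SAS) mode, so
+ // that tests needing account-key credentials can be skipped.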
+ @Before
+ public void setMode() {
+ runningInSASMode = AzureBlobStorageTestAccount.createTestConfiguration().
+ getBoolean(AzureNativeFileSystemStore.KEY_USE_SECURE_MODE, false);
+ }
+
+ private boolean validateIOStreams(Path filePath) throws IOException {
+ // Capture the file system from the test account.
+ FileSystem fs = testAccount.getFileSystem();
+ return validateIOStreams(fs, filePath);
+ }
+
+ private boolean validateIOStreams(FileSystem fs, Path filePath)
+ throws IOException {
+
+ // Create and write a file
+ OutputStream outputStream = fs.create(filePath);
+ outputStream.write(new byte[FILE_SIZE]);
+ outputStream.close();
+
+ // Return true if the count is equivalent to the file size.
+ return (FILE_SIZE == readInputStream(fs, filePath));
+ }
+
+ private int readInputStream(Path filePath) throws IOException {
+ // Capture the file system from the test account.
+ FileSystem fs = testAccount.getFileSystem();
+ return readInputStream(fs, filePath);
+ }
+
+ private int readInputStream(FileSystem fs, Path filePath) throws IOException {
+ // Read the file
+ InputStream inputStream = fs.open(filePath);
+ int count = 0;
+ while (inputStream.read() >= 0) {
+ count++;
+ }
+ inputStream.close();
+
+ // Return the number of bytes read from the file.
+ return count;
+ }
+
+ // Positive tests to exercise making a connection to an Azure account using
+ // account key.
+ @Test
+ public void testConnectUsingKey() throws Exception {
+
+ testAccount = AzureBlobStorageTestAccount.create();
+ assumeNotNull(testAccount);
+
+ // Validate input and output on the connection.
+ assertTrue(validateIOStreams(new Path("/wasb_scheme")));
+ }
+
+ @Test
+ public void testConnectUsingSAS() throws Exception {
+
+ Assume.assumeFalse(runningInSASMode);
+ // Create the test account with SAS credentials.
+ testAccount = AzureBlobStorageTestAccount.create("",
+ EnumSet.of(CreateOptions.UseSas, CreateOptions.CreateContainer));
+ assumeNotNull(testAccount);
+ // Validate input and output on the connection.
+ // NOTE: As of 4/15/2013, Azure Storage has a deficiency that prevents the
+ // full scenario from working (CopyFromBlob doesn't work with SAS), so
+ // just do a minor check until that is corrected.
+ assertFalse(testAccount.getFileSystem().exists(new Path("/IDontExist")));
+ //assertTrue(validateIOStreams(new Path("/sastest.txt")));
+ }
+
+ @Test
+ public void testConnectUsingSASReadonly() throws Exception {
+
+ Assume.assumeFalse(runningInSASMode);
+ // Create the test account with SAS credentials.
+ testAccount = AzureBlobStorageTestAccount.create("", EnumSet.of(
+ CreateOptions.UseSas, CreateOptions.CreateContainer,
+ CreateOptions.Readonly));
+ assumeNotNull(testAccount);
+
+ // Create a blob in there
+ final String blobKey = "blobForReadonly";
+ CloudBlobContainer container = testAccount.getRealContainer();
+ CloudBlockBlob blob = container.getBlockBlobReference(blobKey);
+ ByteArrayInputStream inputStream = new ByteArrayInputStream(new byte[] { 1,
+ 2, 3 });
+ blob.upload(inputStream, 3);
+ inputStream.close();
+
+ // Make sure we can read it from the file system
+ Path filePath = new Path("/" + blobKey);
+ FileSystem fs = testAccount.getFileSystem();
+ assertTrue(fs.exists(filePath));
+ byte[] obtained = new byte[3];
+ DataInputStream obtainedInputStream = fs.open(filePath);
+ obtainedInputStream.readFully(obtained);
+ obtainedInputStream.close();
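+ // The blob was uploaded as bytes {1, 2, 3}; verify the last byte.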
+ assertEquals(3, obtained[2]);
+ }
+
+ @Test
+ public void testConnectUsingAnonymous() throws Exception {
+
+ // Create test account with anonymous credentials
+ testAccount = AzureBlobStorageTestAccount.createAnonymous("testWasb.txt",
+ FILE_SIZE);
+ assumeNotNull(testAccount);
+
+ // Read the file from the public folder using anonymous credentials.
+ assertEquals(FILE_SIZE, readInputStream(new Path("/testWasb.txt")));
+ }
+
+ @Test
+ public void testConnectToEmulator() throws Exception {
+ testAccount = AzureBlobStorageTestAccount.createForEmulator();
+ assumeNotNull(testAccount);
+ assertTrue(validateIOStreams(new Path("/testFile")));
+ }
+
+ /**
+ * Tests that we can connect to fully qualified accounts outside of
+ * blob.core.windows.net.
+ */
+ @Test
+ public void testConnectToFullyQualifiedAccountMock() throws Exception {
+ Configuration conf = new Configuration();
+ AzureBlobStorageTestAccount.setMockAccountKey(conf,
+ "mockAccount.mock.authority.net");
+ AzureNativeFileSystemStore store = new AzureNativeFileSystemStore();
+ MockStorageInterface mockStorage = new MockStorageInterface();
+ store.setAzureStorageInteractionLayer(mockStorage);
+ NativeAzureFileSystem fs = new NativeAzureFileSystem(store);
+ fs.initialize(
+ new URI("wasb://mockContainer@mockAccount.mock.authority.net"), conf);
+ fs.createNewFile(new Path("/x"));
+ assertTrue(mockStorage.getBackingStore().exists(
+ "http://mockAccount.mock.authority.net/mockContainer/x"));
+ fs.close();
+ }
+
+ @Test
+ public void testConnectToRoot() throws Exception {
+
+ // Set up blob names.
+ final String blobPrefix = String.format("wasbtests-%s-%tQ-blob",
+ System.getProperty("user.name"), new Date());
+ final String inblobName = blobPrefix + "_In" + ".txt";
+ final String outblobName = blobPrefix + "_Out" + ".txt";
+
+ // Create test account with default root access.
+ testAccount = AzureBlobStorageTestAccount.createRoot(inblobName, FILE_SIZE);
+ assumeNotNull(testAccount);
+
+ // Read the file from the default container.
+ assertEquals(FILE_SIZE, readInputStream(new Path(PATH_DELIMITER
+ + inblobName)));
+
+ try {
+ // Capture file system.
+ FileSystem fs = testAccount.getFileSystem();
+
+ // Create output path and open an output stream to the root folder.
+ Path outputPath = new Path(PATH_DELIMITER + outblobName);
+ OutputStream outputStream = fs.create(outputPath);
+ outputStream.write(new byte[FILE_SIZE]);
+ outputStream.close();
+ fail("Expected an AzureException when writing to root folder.");
+ } catch (AzureException e) {
+ // Expected: writing to the root container should be rejected.
+ } catch (Exception e) {
+ String errMsg = String.format(
+ "Expected AzureException but got %s instead.", e);
+ fail(errMsg);
+ }
+ }
+
+ // Positive tests to exercise throttling I/O path. Connections are made to an
+ // Azure account using account key.
+ //
+ @Test
+ public void testConnectWithThrottling() throws Exception {
+
+ testAccount = AzureBlobStorageTestAccount.createThrottled();
+
+ // Validate input and output on the connection.
+ assertTrue(validateIOStreams(new Path("/wasb_scheme")));
+ }
+
+ /**
+ * Creates a file and writes a single byte with the given value in it.
+ */
+ private static void writeSingleByte(FileSystem fs, Path testFile, int toWrite)
+ throws Exception {
+ OutputStream outputStream = fs.create(testFile);
+ outputStream.write(toWrite);
+ outputStream.close();
+ }
+
+ /**
+ * Reads the file given and makes sure that it's a single-byte file with the
+ * given value in it.
+ */
+ private static void assertSingleByteValue(FileSystem fs, Path testFile,
+ int expectedValue) throws Exception {
+ InputStream inputStream = fs.open(testFile);
+ int byteRead = inputStream.read();
+ assertTrue("File unexpectedly empty: " + testFile, byteRead >= 0);
+ assertTrue("File has more than a single byte: " + testFile,
+ inputStream.read() < 0);
+ inputStream.close();
+ assertEquals("Unxpected content in: " + testFile, expectedValue, byteRead);
+ }
+
+ @Test
+ public void testMultipleContainers() throws Exception {
+ AzureBlobStorageTestAccount firstAccount = AzureBlobStorageTestAccount
+ .create("first"), secondAccount = AzureBlobStorageTestAccount
+ .create("second");
+ assumeNotNull(firstAccount);
+ assumeNotNull(secondAccount);
+ try {
+ FileSystem firstFs = firstAccount.getFileSystem(),
+ secondFs = secondAccount.getFileSystem();
+ Path testFile = new Path("/testWasb");
+ assertTrue(validateIOStreams(firstFs, testFile));
+ assertTrue(validateIOStreams(secondFs, testFile));
+ // Make sure that we're really dealing with two file systems here.
+ writeSingleByte(firstFs, testFile, 5);
+ writeSingleByte(secondFs, testFile, 7);
+ assertSingleByteValue(firstFs, testFile, 5);
+ assertSingleByteValue(secondFs, testFile, 7);
+ } finally {
+ firstAccount.cleanup();
+ secondAccount.cleanup();
+ }
+ }
+
+ @Test
+ public void testDefaultKeyProvider() throws Exception {
+ Configuration conf = new Configuration();
+ String account = "testacct";
+ String key = "testkey";
+
+ conf.set(SimpleKeyProvider.KEY_ACCOUNT_KEY_PREFIX + account, key);
+
+ String result = AzureNativeFileSystemStore.getAccountKeyFromConfiguration(
+ account, conf);
+ assertEquals(key, result);
+ }
+
+ @Test
+ public void testCredsFromCredentialProvider() throws Exception {
+
+ Assume.assumeFalse(runningInSASMode);
+ String account = "testacct";
+ String key = "testkey";
+ // set up conf to have a cred provider
+ final Configuration conf = new Configuration();
+ final File file = tempDir.newFile("test.jks");
+ final URI jks = ProviderUtils.nestURIForLocalJavaKeyStoreProvider(
+ file.toURI());
+ conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,
+ jks.toString());
+
+ provisionAccountKey(conf, account, key);
+
+ // also add to configuration as clear text that should be overridden
+ conf.set(SimpleKeyProvider.KEY_ACCOUNT_KEY_PREFIX + account,
+ key + "cleartext");
+
+ String result = AzureNativeFileSystemStore.getAccountKeyFromConfiguration(
+ account, conf);
+ // result should contain the credential provider key not the config key
+ assertEquals("AccountKey incorrect.", key, result);
+ }
+
+ void provisionAccountKey(
+ final Configuration conf, String account, String key) throws Exception {
+ // add our creds to the provider
+ final CredentialProvider provider =
+ CredentialProviderFactory.getProviders(conf).get(0);
+ provider.createCredentialEntry(
+ SimpleKeyProvider.KEY_ACCOUNT_KEY_PREFIX + account, key.toCharArray());
+ provider.flush();
+ }
+
+ @Test
+ public void testValidKeyProvider() throws Exception {
+ Configuration conf = new Configuration();
+ String account = "testacct";
+ String key = "testkey";
+
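+ // Point the per-account key provider at SimpleKeyProvider, which reads
+ // the account key directly from the configuration.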
+ conf.set(SimpleKeyProvider.KEY_ACCOUNT_KEY_PREFIX + account, key);
+ conf.setClass("fs.azure.account.keyprovider." + account,
+ SimpleKeyProvider.class, KeyProvider.class);
+ String result = AzureNativeFileSystemStore.getAccountKeyFromConfiguration(
+ account, conf);
+ assertEquals(key, result);
+ }
+
+ @Test
+ public void testInvalidKeyProviderNonexistantClass() throws Exception {
+ Configuration conf = new Configuration();
+ String account = "testacct";
+
+ conf.set("fs.azure.account.keyprovider." + account,
+ "org.apache.Nonexistant.Class");
+ try {
+ AzureNativeFileSystemStore.getAccountKeyFromConfiguration(account, conf);
+ Assert.fail("Nonexistant key provider class should have thrown a "
+ + "KeyProviderException");
+ } catch (KeyProviderException e) {
+ }
+ }
+
+ @Test
+ public void testInvalidKeyProviderWrongClass() throws Exception {
+ Configuration conf = new Configuration();
+ String account = "testacct";
+
+ conf.set("fs.azure.account.keyprovider." + account, "java.lang.String");
+ try {
+ AzureNativeFileSystemStore.getAccountKeyFromConfiguration(account, conf);
+ Assert.fail("Key provider class that doesn't implement KeyProvider "
+ + "should have thrown a KeyProviderException");
+ } catch (KeyProviderException e) {
+ }
+ }
+
+ /**
+ * Tests the cases when the URI is specified with no authority, i.e.
+ * wasb:///path/to/file.
+ */
+ @Test
+ public void testNoUriAuthority() throws Exception {
+ // For any combination of default FS being asv(s)/wasb(s)://c@a/ and
+ // the actual URI being asv(s)/wasb(s):///, it should work.
+
+ String[] wasbAliases = new String[] { "wasb", "wasbs" };
+ for (String defaultScheme : wasbAliases) {
+ for (String wantedScheme : wasbAliases) {
+ testAccount = AzureBlobStorageTestAccount.createMock();
+ Configuration conf = testAccount.getFileSystem().getConf();
+ String authority = testAccount.getFileSystem().getUri().getAuthority();
+ URI defaultUri = new URI(defaultScheme, authority, null, null, null);
+ conf.set(FS_DEFAULT_NAME_KEY, defaultUri.toString());
+ // Add references to file system implementations for wasb and wasbs.
+ conf.addResource("azure-test.xml");
+ URI wantedUri = new URI(wantedScheme + ":///random/path");
+ NativeAzureFileSystem obtained = (NativeAzureFileSystem) FileSystem
+ .get(wantedUri, conf);
+ assertNotNull(obtained);
+ assertEquals(new URI(wantedScheme, authority, null, null, null),
+ obtained.getUri());
+ // Make sure makeQualified works as expected
+ Path qualified = obtained.makeQualified(new Path(wantedUri));
+ assertEquals(new URI(wantedScheme, authority, wantedUri.getPath(),
+ null, null), qualified.toUri());
+ // Cleanup for the next iteration to not cache anything in FS
+ testAccount.cleanup();
+ FileSystem.closeAll();
+ }
+ }
+ // If the default FS is not a WASB FS, then specifying a URI without
+ // authority for the Azure file system should throw.
+ testAccount = AzureBlobStorageTestAccount.createMock();
+ Configuration conf = testAccount.getFileSystem().getConf();
+ conf.set(FS_DEFAULT_NAME_KEY, "file:///");
+ try {
+ FileSystem.get(new URI("wasb:///random/path"), conf);
+ fail("Should've thrown.");
+ } catch (IllegalArgumentException e) {
+ }
+ }
+
+ @Test
+ public void testWasbAsDefaultFileSystemHasNoPort() throws Exception {
+ try {
+ testAccount = AzureBlobStorageTestAccount.createMock();
+ Configuration conf = testAccount.getFileSystem().getConf();
+ String authority = testAccount.getFileSystem().getUri().getAuthority();
+ URI defaultUri = new URI("wasb", authority, null, null, null);
+ conf.set(FS_DEFAULT_NAME_KEY, defaultUri.toString());
+ conf.addResource("azure-test.xml");
+
+ FileSystem fs = FileSystem.get(conf);
+ assertTrue(fs instanceof NativeAzureFileSystem);
+ assertEquals(-1, fs.getUri().getPort());
+
+ AbstractFileSystem afs = FileContext.getFileContext(conf)
+ .getDefaultFileSystem();
+ assertTrue(afs instanceof Wasb);
+ assertEquals(-1, afs.getUri().getPort());
+ } finally {
+ testAccount.cleanup();
+ FileSystem.closeAll();
+ }
+ }
+
+ /**
+ * Tests the cases when the scheme specified is 'wasbs'.
+ */
+ @Test
+ public void testAbstractFileSystemImplementationForWasbsScheme() throws Exception {
+ try {
+ testAccount = AzureBlobStorageTestAccount.createMock();
+ Configuration conf = testAccount.getFileSystem().getConf();
+ String authority = testAccount.getFileSystem().getUri().getAuthority();
+ URI defaultUri = new URI("wasbs", authority, null, null, null);
+ conf.set(FS_DEFAULT_NAME_KEY, defaultUri.toString());
+ conf.set("fs.AbstractFileSystem.wasbs.impl", "org.apache.hadoop.fs.azure.Wasbs");
+ conf.addResource("azure-test.xml");
+
+ FileSystem fs = FileSystem.get(conf);
+ assertTrue(fs instanceof NativeAzureFileSystem);
+ assertEquals("wasbs", fs.getScheme());
+
+ AbstractFileSystem afs = FileContext.getFileContext(conf)
+ .getDefaultFileSystem();
+ assertTrue(afs instanceof Wasbs);
+ assertEquals(-1, afs.getUri().getPort());
+ assertEquals("wasbs", afs.getUri().getScheme());
+ } finally {
+ testAccount.cleanup();
+ FileSystem.closeAll();
+ }
+ }
+
+ @Test
+ public void testNoAbstractFileSystemImplementationSpecifiedForWasbsScheme() throws Exception {
+ try {
+ testAccount = AzureBlobStorageTestAccount.createMock();
+ Configuration conf = testAccount.getFileSystem().getConf();
+ String authority = testAccount.getFileSystem().getUri().getAuthority();
+ URI defaultUri = new URI("wasbs", authority, null, null, null);
+ conf.set(FS_DEFAULT_NAME_KEY, defaultUri.toString());
+
+ FileSystem fs = FileSystem.get(conf);
+ assertTrue(fs instanceof NativeAzureFileSystem);
+ assertEquals("wasbs", fs.getScheme());
+
+ // should throw if 'fs.AbstractFileSystem.wasbs.impl' is not specified
+ try {
+ FileContext.getFileContext(conf).getDefaultFileSystem();
+ fail("Should've thrown.");
+ } catch (UnsupportedFileSystemException e) {
+ }
+
+ } finally {
+ testAccount.cleanup();
+ FileSystem.closeAll();
+ }
+ }
+
+ @Test
+ public void testCredentialProviderPathExclusions() throws Exception {
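+ // Providers stored on wasb itself must be excluded: resolving them
+ // would require a WASB filesystem, which in turn needs the very
+ // credentials being looked up.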
+ String providerPath =
+ "user:///,jceks://wasb/user/hrt_qa/sqoopdbpasswd.jceks," +
+ "jceks://hdfs@nn1.example.com/my/path/test.jceks";
+ Configuration config = new Configuration();
+ config.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,
+ providerPath);
+ String newPath = "user:///,jceks://hdfs@nn1.example.com/my/path/test.jceks";
+
+ excludeAndTestExpectations(config, newPath);
+ }
+
+ @Test
+ public void testExcludeAllProviderTypesFromConfig() throws Exception {
+ String providerPath =
+ "jceks://wasb/tmp/test.jceks," +
+ "jceks://wasb@/my/path/test.jceks";
+ Configuration config = new Configuration();
+ config.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,
+ providerPath);
+ String newPath = null;
+
+ excludeAndTestExpectations(config, newPath);
+ }
+
+ void excludeAndTestExpectations(Configuration config, String newPath)
+ throws Exception {
+ Configuration conf = ProviderUtils.excludeIncompatibleCredentialProviders(
+ config, NativeAzureFileSystem.class);
+ String effectivePath = conf.get(
+ CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, null);
+ assertEquals(newPath, effectivePath);
+ }
+
+ @Test
+ public void testUserAgentConfig() throws Exception {
+ // Set the user agent
+ try {
+ testAccount = AzureBlobStorageTestAccount.createMock();
+ Configuration conf = testAccount.getFileSystem().getConf();
+ String authority = testAccount.getFileSystem().getUri().getAuthority();
+ URI defaultUri = new URI("wasbs", authority, null, null, null);
+ conf.set(FS_DEFAULT_NAME_KEY, defaultUri.toString());
+ conf.set("fs.AbstractFileSystem.wasbs.impl", "org.apache.hadoop.fs.azure.Wasbs");
+
+ conf.set(AzureNativeFileSystemStore.USER_AGENT_ID_KEY, "TestClient");
+
+ FileSystem fs = FileSystem.get(conf);
+ AbstractFileSystem afs = FileContext.getFileContext(conf).getDefaultFileSystem();
+
+ assertTrue(afs instanceof Wasbs);
+ assertEquals(-1, afs.getUri().getPort());
+ assertEquals("wasbs", afs.getUri().getScheme());
+
+ } finally {
+ testAccount.cleanup();
+ FileSystem.closeAll();
+ }
+
+ // Unset the user agent
+ try {
+ testAccount = AzureBlobStorageTestAccount.createMock();
+ Configuration conf = testAccount.getFileSystem().getConf();
+ String authority = testAccount.getFileSystem().getUri().getAuthority();
+ URI defaultUri = new URI("wasbs", authority, null, null, null);
+ conf.set(FS_DEFAULT_NAME_KEY, defaultUri.toString());
+ conf.set("fs.AbstractFileSystem.wasbs.impl", "org.apache.hadoop.fs.azure.Wasbs");
+
+ conf.unset(AzureNativeFileSystemStore.USER_AGENT_ID_KEY);
+
+ FileSystem fs = FileSystem.get(conf);
+ AbstractFileSystem afs = FileContext.getFileContext(conf).getDefaultFileSystem();
+ assertTrue(afs instanceof Wasbs);
+ assertEquals(-1, afs.getUri().getPort());
+ assertEquals("wasbs", afs.getUri().getScheme());
+
+ } finally {
+ testAccount.cleanup();
+ FileSystem.closeAll();
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockWasbAuthorizerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockWasbAuthorizerImpl.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockWasbAuthorizerImpl.java
index 9fbab49..7354499 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockWasbAuthorizerImpl.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockWasbAuthorizerImpl.java
@@ -38,11 +38,12 @@ public class MockWasbAuthorizerImpl implements WasbAuthorizerInterface {
private boolean performOwnerMatch;
private CachingAuthorizer<CachedAuthorizerEntry, Boolean> cache;
- // The full qualified URL to the root directory
+ // The fully qualified URL to the root directory.
private String qualifiedPrefixUrl;
public MockWasbAuthorizerImpl(NativeAzureFileSystem fs) {
- qualifiedPrefixUrl = new Path("/").makeQualified(fs.getUri(), fs.getWorkingDirectory())
+ qualifiedPrefixUrl = new Path("/").makeQualified(fs.getUri(),
+ fs.getWorkingDirectory())
.toString().replaceAll("/$", "");
cache = new CachingAuthorizer<>(TimeUnit.MINUTES.convert(5L, TimeUnit.MINUTES), "AUTHORIZATION");
}
@@ -64,19 +65,23 @@ public class MockWasbAuthorizerImpl implements WasbAuthorizerInterface {
public void addAuthRule(String wasbAbsolutePath,
String accessType, boolean access) {
- wasbAbsolutePath = qualifiedPrefixUrl + wasbAbsolutePath;
- AuthorizationComponent component = wasbAbsolutePath.endsWith("*")
- ? new AuthorizationComponent("^" + wasbAbsolutePath.replace("*", ".*"), accessType)
+ wasbAbsolutePath = qualifiedPrefixUrl + wasbAbsolutePath;
+ AuthorizationComponent component = wasbAbsolutePath.endsWith("*")
+ ? new AuthorizationComponent("^" + wasbAbsolutePath.replace("*", ".*"),
+ accessType)
: new AuthorizationComponent(wasbAbsolutePath, accessType);
this.authRules.put(component, access);
}
@Override
- public boolean authorize(String wasbAbsolutePath, String accessType, String owner)
+ public boolean authorize(String wasbAbsolutePath,
+ String accessType,
+ String owner)
throws WasbAuthorizationException {
- if (wasbAbsolutePath.endsWith(NativeAzureFileSystem.FolderRenamePending.SUFFIX)) {
+ if (wasbAbsolutePath.endsWith(
+ NativeAzureFileSystem.FolderRenamePending.SUFFIX)) {
return true;
}
@@ -108,20 +113,23 @@ public class MockWasbAuthorizerImpl implements WasbAuthorizerInterface {
// In case of root("/"), owner match does not happen because owner is returned as empty string.
// we try to force owner match just for the purpose of tests to make sure all operations work seamlessly with owner.
if (this.performOwnerMatch
- && StringUtils.equalsIgnoreCase(wasbAbsolutePath, qualifiedPrefixUrl + "/")) {
+ && StringUtils.equalsIgnoreCase(wasbAbsolutePath,
+ qualifiedPrefixUrl + "/")) {
owner = currentUserShortName;
}
boolean shouldEvaluateOwnerAccess = owner != null && !owner.isEmpty()
- && this.performOwnerMatch;
+ && this.performOwnerMatch;
- boolean isOwnerMatch = StringUtils.equalsIgnoreCase(currentUserShortName, owner);
+ boolean isOwnerMatch = StringUtils.equalsIgnoreCase(currentUserShortName,
+ owner);
AuthorizationComponent component =
new AuthorizationComponent(wasbAbsolutePath, accessType);
if (authRules.containsKey(component)) {
- return shouldEvaluateOwnerAccess ? isOwnerMatch && authRules.get(component) : authRules.get(component);
+ return shouldEvaluateOwnerAccess ? isOwnerMatch && authRules.get(
+ component) : authRules.get(component);
} else {
// Regex-pattern match if we don't have a straight match
for (Map.Entry<AuthorizationComponent, Boolean> entry : authRules.entrySet()) {
@@ -129,8 +137,11 @@ public class MockWasbAuthorizerImpl implements WasbAuthorizerInterface {
String keyPath = key.getWasbAbsolutePath();
String keyAccess = key.getAccessType();
- if (keyPath.endsWith("*") && Pattern.matches(keyPath, wasbAbsolutePath) && keyAccess.equals(accessType)) {
- return shouldEvaluateOwnerAccess ? isOwnerMatch && entry.getValue() : entry.getValue();
+ if (keyPath.endsWith("*") && Pattern.matches(keyPath, wasbAbsolutePath)
+ && keyAccess.equals(accessType)) {
+ return shouldEvaluateOwnerAccess
+ ? isOwnerMatch && entry.getValue()
+ : entry.getValue();
}
}
return false;
@@ -141,47 +152,47 @@ public class MockWasbAuthorizerImpl implements WasbAuthorizerInterface {
authRules.clear();
cache.clear();
}
-}
-class AuthorizationComponent {
+ private static class AuthorizationComponent {
- private String wasbAbsolutePath;
- private String accessType;
+ private final String wasbAbsolutePath;
+ private final String accessType;
- public AuthorizationComponent(String wasbAbsolutePath,
- String accessType) {
- this.wasbAbsolutePath = wasbAbsolutePath;
- this.accessType = accessType;
- }
+ AuthorizationComponent(String wasbAbsolutePath,
+ String accessType) {
+ this.wasbAbsolutePath = wasbAbsolutePath;
+ this.accessType = accessType;
+ }
- @Override
- public int hashCode() {
- return this.wasbAbsolutePath.hashCode() ^ this.accessType.hashCode();
- }
+ @Override
+ public int hashCode() {
+ return this.wasbAbsolutePath.hashCode() ^ this.accessType.hashCode();
+ }
- @Override
- public boolean equals(Object obj) {
+ @Override
+ public boolean equals(Object obj) {
- if (obj == this) {
- return true;
- }
+ if (obj == this) {
+ return true;
+ }
- if (obj == null
- || !(obj instanceof AuthorizationComponent)) {
- return false;
- }
+ if (obj == null
+ || !(obj instanceof AuthorizationComponent)) {
+ return false;
+ }
- return ((AuthorizationComponent)obj).
- getWasbAbsolutePath().equals(this.wasbAbsolutePath)
- && ((AuthorizationComponent)obj).
- getAccessType().equals(this.accessType);
- }
+ return ((AuthorizationComponent) obj).
+ getWasbAbsolutePath().equals(this.wasbAbsolutePath)
+ && ((AuthorizationComponent) obj).
+ getAccessType().equals(this.accessType);
+ }
- public String getWasbAbsolutePath() {
- return this.wasbAbsolutePath;
- }
+ public String getWasbAbsolutePath() {
+ return this.wasbAbsolutePath;
+ }
- public String getAccessType() {
- return accessType;
+ public String getAccessType() {
+ return accessType;
+ }
}
-}
\ No newline at end of file
+}
[14/20] hadoop git commit: HADOOP-14553. Add (parallelized)
integration tests to hadoop-azure Contributed by Steve Loughran
Posted by st...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java
index a3f2843..4bf6f04 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java
@@ -29,7 +29,6 @@ import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.StringUtils;
import org.junit.Assume;
-import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
@@ -49,8 +48,8 @@ public class TestNativeAzureFileSystemAuthorization
protected MockWasbAuthorizerImpl authorizer;
@Override
- public Configuration getConfiguration() {
- Configuration conf = super.getConfiguration();
+ public Configuration createConfiguration() {
+ Configuration conf = super.createConfiguration();
conf.set(NativeAzureFileSystem.KEY_AZURE_AUTHORIZATION, "true");
conf.set(RemoteWasbAuthorizerImpl.KEY_REMOTE_AUTH_SERVICE_URLS, "http://localhost/");
conf.set(NativeAzureFileSystem.AZURE_CHOWN_USERLIST_PROPERTY_NAME, "user1 , user2");
@@ -59,13 +58,12 @@ public class TestNativeAzureFileSystemAuthorization
@Override
protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
- Configuration conf = getConfiguration();
- return AzureBlobStorageTestAccount.create(conf);
+ return AzureBlobStorageTestAccount.create(createConfiguration());
}
-
- @Before
- public void beforeMethod() {
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
boolean useSecureMode = fs.getConf().getBoolean(KEY_USE_SECURE_MODE, false);
boolean useAuthorization = fs.getConf().getBoolean(NativeAzureFileSystem.KEY_AZURE_AUTHORIZATION, false);
Assume.assumeTrue("Test valid when both SecureMode and Authorization are enabled .. skipping",
@@ -76,7 +74,6 @@ public class TestNativeAzureFileSystemAuthorization
fs.updateWasbAuthorizer(authorizer);
}
-
@Rule
public ExpectedException expectedEx = ExpectedException.none();
@@ -95,7 +92,7 @@ public class TestNativeAzureFileSystemAuthorization
}
/**
- * Setup the expected exception class, and exception message that the test is supposed to fail with
+ * Setup the expected exception class, and exception message that the test is supposed to fail with.
*/
protected void setExpectedFailureMessage(String operation, Path path) {
expectedEx.expect(WasbAuthorizationException.class);
@@ -104,7 +101,7 @@ public class TestNativeAzureFileSystemAuthorization
}
/**
- * Positive test to verify Create access check
+ * Positive test to verify Create access check.
* The file is created directly under an existing folder.
* No intermediate folders need to be created.
* @throws Throwable
@@ -128,7 +125,7 @@ public class TestNativeAzureFileSystemAuthorization
}
/**
- * Positive test to verify Create access check
+ * Positive test to verify Create access check.
* The test tries to create a file whose parent is non-existent to ensure that
* the intermediate folders between ancestor and direct parent are being created
* when proper ranger policies are configured.
@@ -155,7 +152,7 @@ public class TestNativeAzureFileSystemAuthorization
/**
- * Negative test to verify that create fails when trying to overwrite an existing file
+ * Negative test to verify that create fails when trying to overwrite an existing file.
* without proper write permissions on the file being overwritten.
* @throws Throwable
*/
@@ -181,7 +178,7 @@ public class TestNativeAzureFileSystemAuthorization
}
/**
- * Positive test to verify that create succeeds when trying to overwrite an existing file
+ * Positive test to verify that create succeeds when trying to overwrite an existing file.
* when proper write permissions on the file being overwritten are provided.
* @throws Throwable
*/
@@ -232,7 +229,7 @@ public class TestNativeAzureFileSystemAuthorization
}
/**
- * Positive test to verify listStatus access check
+ * Positive test to verify listStatus access check.
* @throws Throwable
*/
@Test
@@ -257,7 +254,7 @@ public class TestNativeAzureFileSystemAuthorization
}
/**
- * Negative test to verify listStatus access check
+ * Negative test to verify listStatus access check.
* @throws Throwable
*/
@@ -342,7 +339,7 @@ public class TestNativeAzureFileSystemAuthorization
}
/**
- * Negative test to verify rename access check - the dstFolder disallows rename
+ * Negative test to verify rename access check - the dstFolder disallows rename.
* @throws Throwable
*/
@Test //(expected=WasbAuthorizationException.class)
@@ -373,7 +370,7 @@ public class TestNativeAzureFileSystemAuthorization
}
/**
- * Positive test to verify rename access check - the dstFolder allows rename
+ * Positive test to verify rename access check - the dstFolder allows rename.
* @throws Throwable
*/
@Test
@@ -484,7 +481,7 @@ public class TestNativeAzureFileSystemAuthorization
}
/**
- * Positive test to verify file delete access check
+ * Positive test to verify file delete access check.
* @throws Throwable
*/
@Test
@@ -506,7 +503,7 @@ public class TestNativeAzureFileSystemAuthorization
}
/**
- * Negative test to verify file delete access check
+ * Negative test to verify file delete access check.
* @throws Throwable
*/
@Test //(expected=WasbAuthorizationException.class)
@@ -544,7 +541,7 @@ public class TestNativeAzureFileSystemAuthorization
/**
* Positive test to verify file delete access check, with intermediate folders
- * Uses wildcard recursive permissions
+ * Uses wildcard recursive permissions.
* @throws Throwable
*/
@Test
@@ -582,7 +579,7 @@ public class TestNativeAzureFileSystemAuthorization
}
/**
- * Positive test for mkdirs access check
+ * Positive test for mkdirs access check.
* @throws Throwable
*/
@Test
@@ -668,7 +665,7 @@ public class TestNativeAzureFileSystemAuthorization
}
}
/**
- * Negative test for mkdirs access check
+ * Negative test for mkdirs access check.
* @throws Throwable
*/
@Test //(expected=WasbAuthorizationException.class)
@@ -692,7 +689,7 @@ public class TestNativeAzureFileSystemAuthorization
}
/**
- * Positive test triple slash format (wasb:///) access check
+ * Positive test triple slash format (wasb:///) access check.
* @throws Throwable
*/
@Test
@@ -708,7 +705,7 @@ public class TestNativeAzureFileSystemAuthorization
}
/**
- * Negative test for setOwner when Authorization is enabled
+ * Negative test for setOwner when Authorization is enabled.
*/
@Test
public void testSetOwnerThrowsForUnauthorisedUsers() throws Throwable {
@@ -744,7 +741,7 @@ public class TestNativeAzureFileSystemAuthorization
/**
* Test for setOwner when Authorization is enabled and
- * the user is specified in chown allowed user list
+ * the user is specified in chown allowed user list.
* */
@Test
public void testSetOwnerSucceedsForAuthorisedUsers() throws Throwable {
@@ -785,7 +782,7 @@ public class TestNativeAzureFileSystemAuthorization
/**
* Test for setOwner when Authorization is enabled and
- * the userlist is specified as '*'
+ * the userlist is specified as '*'.
* */
@Test
public void testSetOwnerSucceedsForAnyUserWhenWildCardIsSpecified() throws Throwable {
@@ -829,7 +826,7 @@ public class TestNativeAzureFileSystemAuthorization
}
/** Test for setOwner throws for illegal setup of chown
- * allowed testSetOwnerSucceedsForAuthorisedUsers
+ * allowed testSetOwnerSucceedsForAuthorisedUsers.
*/
@Test
public void testSetOwnerFailsForIllegalSetup() throws Throwable {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorizationWithOwner.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorizationWithOwner.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorizationWithOwner.java
deleted file mode 100644
index 4bd4633..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorizationWithOwner.java
+++ /dev/null
@@ -1,122 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import org.apache.hadoop.fs.contract.ContractTestUtils;
-import org.apache.hadoop.security.UserGroupInformation;
-import java.security.PrivilegedExceptionAction;
-
-import org.apache.hadoop.fs.Path;
-import org.junit.Test;
-import org.junit.Before;
-import static org.junit.Assert.assertEquals;
-
-/**
- * Test class that runs wasb authorization tests with owner check enabled.
- */
-public class TestNativeAzureFileSystemAuthorizationWithOwner
- extends TestNativeAzureFileSystemAuthorization {
-
- @Before
- public void beforeMethod() {
- super.beforeMethod();
- authorizer.init(fs.getConf(), true);
- }
-
- /**
- * Test case when owner matches current user
- */
- @Test
- public void testOwnerPermissionPositive() throws Throwable {
-
- Path parentDir = new Path("/testOwnerPermissionPositive");
- Path testPath = new Path(parentDir, "test.data");
-
- authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true);
- authorizer.addAuthRule(testPath.toString(), WasbAuthorizationOperations.READ.toString(), true);
- authorizer.addAuthRule(parentDir.toString(), WasbAuthorizationOperations.WRITE.toString(), true);
- // additional rule used for assertPathExists
- authorizer.addAuthRule(parentDir.toString(), WasbAuthorizationOperations.READ.toString(), true);
- fs.updateWasbAuthorizer(authorizer);
-
- try {
- // creates parentDir with owner as current user
- fs.mkdirs(parentDir);
- ContractTestUtils.assertPathExists(fs, "parentDir does not exist", parentDir);
-
- fs.create(testPath);
- fs.getFileStatus(testPath);
- ContractTestUtils.assertPathExists(fs, "testPath does not exist", testPath);
-
- } finally {
- allowRecursiveDelete(fs, parentDir.toString());
- fs.delete(parentDir, true);
- }
- }
-
- /**
- * Negative test case for owner does not match current user
- */
- @Test
- public void testOwnerPermissionNegative() throws Throwable {
- expectedEx.expect(WasbAuthorizationException.class);
-
- Path parentDir = new Path("/testOwnerPermissionNegative");
- Path childDir = new Path(parentDir, "childDir");
-
- setExpectedFailureMessage("mkdirs", childDir);
-
- authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true);
- authorizer.addAuthRule(parentDir.toString(), WasbAuthorizationOperations.WRITE.toString(), true);
-
- fs.updateWasbAuthorizer(authorizer);
-
- try{
- fs.mkdirs(parentDir);
- UserGroupInformation ugiSuperUser = UserGroupInformation.createUserForTesting(
- "testuser", new String[] {});
-
- ugiSuperUser.doAs(new PrivilegedExceptionAction<Void>() {
- @Override
- public Void run() throws Exception {
- fs.mkdirs(childDir);
- return null;
- }
- });
-
- } finally {
- allowRecursiveDelete(fs, parentDir.toString());
- fs.delete(parentDir, true);
- }
- }
-
- /**
- * Test to verify that retrieving owner information does not
- * throw when file/folder does not exist
- */
- @Test
- public void testRetrievingOwnerDoesNotFailWhenFileDoesNotExist() throws Throwable {
-
- Path testdirectory = new Path("/testDirectory123454565");
-
- String owner = fs.getOwnerForPath(testdirectory);
- assertEquals("", owner);
- }
-}
-
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemBlockLocations.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemBlockLocations.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemBlockLocations.java
index b2660bb..b280cac 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemBlockLocations.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemBlockLocations.java
@@ -18,8 +18,6 @@
package org.apache.hadoop.fs.azure;
-import static org.junit.Assert.assertEquals;
-
import java.io.OutputStream;
import org.apache.hadoop.conf.Configuration;
@@ -29,7 +27,11 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.Test;
-public class TestNativeAzureFileSystemBlockLocations {
+/**
+ * Test block location logic.
+ */
+public class TestNativeAzureFileSystemBlockLocations
+ extends AbstractWasbTestWithTimeout {
@Test
public void testNumberOfBlocks() throws Exception {
Configuration conf = new Configuration();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemClientLogging.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemClientLogging.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemClientLogging.java
deleted file mode 100644
index 4114e60..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemClientLogging.java
+++ /dev/null
@@ -1,140 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.net.URI;
-import java.util.StringTokenizer;
-
-import org.apache.commons.logging.impl.Log4JLogger;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
-import org.apache.log4j.Logger;
-import org.junit.Test;
-
-/**
- * Test to validate Azure storage client side logging. Tests works only when
- * testing with Live Azure storage because Emulator does not have support for
- * client-side logging.
- *
- */
-public class TestNativeAzureFileSystemClientLogging
- extends AbstractWasbTestBase {
-
- private AzureBlobStorageTestAccount testAccount;
-
- // Core-site config controlling Azure Storage Client logging
- private static final String KEY_LOGGING_CONF_STRING = "fs.azure.storage.client.logging";
-
- // Temporary directory created using WASB.
- private static final String TEMP_DIR = "tempDir";
-
- /*
- * Helper method to verify the client logging is working. This check primarily
- * checks to make sure we see a line in the logs corresponding to the entity
- * that is created during test run.
- */
- private boolean verifyStorageClientLogs(String capturedLogs, String entity)
- throws Exception {
-
- URI uri = testAccount.getRealAccount().getBlobEndpoint();
- String container = testAccount.getRealContainer().getName();
- String validateString = uri + Path.SEPARATOR + container + Path.SEPARATOR
- + entity;
- boolean entityFound = false;
-
- StringTokenizer tokenizer = new StringTokenizer(capturedLogs, "\n");
-
- while (tokenizer.hasMoreTokens()) {
- String token = tokenizer.nextToken();
- if (token.contains(validateString)) {
- entityFound = true;
- break;
- }
- }
- return entityFound;
- }
-
- /*
- * Helper method that updates the core-site config to enable/disable logging.
- */
- private void updateFileSystemConfiguration(Boolean loggingFlag)
- throws Exception {
-
- Configuration conf = fs.getConf();
- conf.set(KEY_LOGGING_CONF_STRING, loggingFlag.toString());
- URI uri = fs.getUri();
- fs.initialize(uri, conf);
- }
-
- // Using WASB code to communicate with Azure Storage.
- private void performWASBOperations() throws Exception {
-
- Path tempDir = new Path(Path.SEPARATOR + TEMP_DIR);
- fs.mkdirs(tempDir);
- fs.delete(tempDir, true);
- }
-
- @Test
- public void testLoggingEnabled() throws Exception {
-
- LogCapturer logs = LogCapturer.captureLogs(new Log4JLogger(Logger
- .getRootLogger()));
-
- // Update configuration based on the Test.
- updateFileSystemConfiguration(true);
-
- performWASBOperations();
-
- String output = getLogOutput(logs);
- assertTrue("Log entry " + TEMP_DIR + " not found in " + output,
- verifyStorageClientLogs(output, TEMP_DIR));
- }
-
- protected String getLogOutput(LogCapturer logs) {
- String output = logs.getOutput();
- assertTrue("No log created/captured", !output.isEmpty());
- return output;
- }
-
- @Test
- public void testLoggingDisabled() throws Exception {
-
- LogCapturer logs = LogCapturer.captureLogs(new Log4JLogger(Logger
- .getRootLogger()));
-
- // Update configuration based on the Test.
- updateFileSystemConfiguration(false);
-
- performWASBOperations();
- String output = getLogOutput(logs);
-
- assertFalse("Log entry " + TEMP_DIR + " found in " + output,
- verifyStorageClientLogs(output, TEMP_DIR));
- }
-
- @Override
- protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
- testAccount = AzureBlobStorageTestAccount.create();
- return testAccount;
- }
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrency.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrency.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrency.java
index cbfc563..655ae90 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrency.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrency.java
@@ -18,11 +18,6 @@
package org.apache.hadoop.fs.azure;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-
import java.io.OutputStream;
import java.io.PrintWriter;
import java.io.StringWriter;
@@ -33,32 +28,30 @@ import java.util.concurrent.ConcurrentLinkedQueue;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.StringUtils;
-import org.junit.After;
-import org.junit.Before;
import org.junit.Test;
-public class TestNativeAzureFileSystemConcurrency {
- private AzureBlobStorageTestAccount testAccount;
- private FileSystem fs;
+public class TestNativeAzureFileSystemConcurrency extends AbstractWasbTestBase {
private InMemoryBlockBlobStore backingStore;
- @Before
+ @Override
public void setUp() throws Exception {
- testAccount = AzureBlobStorageTestAccount.createMock();
- fs = testAccount.getFileSystem();
- backingStore = testAccount.getMockStorage().getBackingStore();
+ super.setUp();
+ backingStore = getTestAccount().getMockStorage().getBackingStore();
}
- @After
+ @Override
public void tearDown() throws Exception {
- testAccount.cleanup();
- fs = null;
+ super.tearDown();
backingStore = null;
}
+ @Override
+ protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+ return AzureBlobStorageTestAccount.createMock();
+ }
+
@Test
public void testLinkBlobs() throws Exception {
Path filePath = new Path("/inProgress");
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrencyLive.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrencyLive.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrencyLive.java
deleted file mode 100644
index 7c5899d..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrencyLive.java
+++ /dev/null
@@ -1,184 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-
-/***
- * Test class to hold all Live Azure storage concurrency tests.
- */
-public class TestNativeAzureFileSystemConcurrencyLive
- extends AbstractWasbTestBase {
-
- private static final int THREAD_COUNT = 102;
- private static final int TEST_EXECUTION_TIMEOUT = 5000;
- @Override
- protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
- return AzureBlobStorageTestAccount.create();
- }
-
- /**
- * Validate contract for FileSystem.create when overwrite is true and there
- * are concurrent callers of FileSystem.delete. An existing file should be
- * overwritten, even if the original destination exists but is deleted by an
- * external agent during the create operation.
- */
- @Test(timeout = TEST_EXECUTION_TIMEOUT)
- public void testConcurrentCreateDeleteFile() throws Exception {
- Path testFile = new Path("test.dat");
-
- List<CreateFileTask> tasks = new ArrayList<>(THREAD_COUNT);
-
- for (int i = 0; i < THREAD_COUNT; i++) {
- tasks.add(new CreateFileTask(fs, testFile));
- }
-
- ExecutorService es = null;
-
- try {
- es = Executors.newFixedThreadPool(THREAD_COUNT);
-
- List<Future<Void>> futures = es.invokeAll(tasks);
-
- for (Future<Void> future : futures) {
- Assert.assertTrue(future.isDone());
-
- // we are using Callable<V>, so if an exception
- // occurred during the operation, it will be thrown
- // when we call get
- Assert.assertEquals(null, future.get());
- }
- } finally {
- if (es != null) {
- es.shutdownNow();
- }
- }
- }
-
- /**
- * Validate contract for FileSystem.delete when invoked concurrently.
- * One of the threads should successfully delete the file and return true;
- * all other threads should return false.
- */
- @Test(timeout = TEST_EXECUTION_TIMEOUT)
- public void testConcurrentDeleteFile() throws Exception {
- Path testFile = new Path("test.dat");
- fs.create(testFile).close();
-
- List<DeleteFileTask> tasks = new ArrayList<>(THREAD_COUNT);
-
- for (int i = 0; i < THREAD_COUNT; i++) {
- tasks.add(new DeleteFileTask(fs, testFile));
- }
-
- ExecutorService es = null;
- try {
- es = Executors.newFixedThreadPool(THREAD_COUNT);
-
- List<Future<Boolean>> futures = es.invokeAll(tasks);
-
- int successCount = 0;
- for (Future<Boolean> future : futures) {
- Assert.assertTrue(future.isDone());
-
- // we are using Callable<V>, so if an exception
- // occurred during the operation, it will be thrown
- // when we call get
- Boolean success = future.get();
- if (success) {
- successCount++;
- }
- }
-
- Assert.assertEquals(
- "Exactly one delete operation should return true.",
- 1,
- successCount);
- } finally {
- if (es != null) {
- es.shutdownNow();
- }
- }
- }
-}
-
-abstract class FileSystemTask<V> implements Callable<V> {
- private final FileSystem fileSystem;
- private final Path path;
-
- protected FileSystem getFileSystem() {
- return this.fileSystem;
- }
-
- protected Path getFilePath() {
- return this.path;
- }
-
- FileSystemTask(FileSystem fs, Path p) {
- this.fileSystem = fs;
- this.path = p;
- }
-
- public abstract V call() throws Exception;
-}
-
-class DeleteFileTask extends FileSystemTask<Boolean> {
-
- DeleteFileTask(FileSystem fs, Path p) {
- super(fs, p);
- }
-
- @Override
- public Boolean call() throws Exception {
- return this.getFileSystem().delete(this.getFilePath(), false);
- }
-}
-
-class CreateFileTask extends FileSystemTask<Void> {
- CreateFileTask(FileSystem fs, Path p) {
- super(fs, p);
- }
-
- public Void call() throws Exception {
- FileSystem fs = getFileSystem();
- Path p = getFilePath();
-
- // Create an empty file and close the stream.
- FSDataOutputStream stream = fs.create(p, true);
- stream.close();
-
- // Delete the file. We don't care if delete returns true or false.
- // We just want to ensure the file does not exist.
- this.getFileSystem().delete(this.getFilePath(), false);
-
- return null;
- }
-}
\ No newline at end of file
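The live concurrency test deleted above keeps the same harness shape wherever it resurfaces: N identical Callables submitted through ExecutorService.invokeAll, with Future.get() used to propagate any per-thread failure into the test. A generic sketch under illustrative names (ConcurrencyHarness is not a class in the patch):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

final class ConcurrencyHarness {

  /**
   * Run {@code threads} copies of the same task concurrently and collect
   * the results. Future.get() rethrows any task failure wrapped in an
   * ExecutionException, which is what makes the assertions above work.
   */
  static <V> List<V> runConcurrently(Callable<V> task, int threads)
      throws Exception {
    List<Callable<V>> tasks = new ArrayList<>(threads);
    for (int i = 0; i < threads; i++) {
      tasks.add(task);
    }
    ExecutorService es = Executors.newFixedThreadPool(threads);
    try {
      // invokeAll blocks until every task has completed, so each Future
      // returned here is already done.
      List<V> results = new ArrayList<>(threads);
      for (Future<V> future : es.invokeAll(tasks)) {
        results.add(future.get());
      }
      return results;
    } finally {
      es.shutdownNow();
    }
  }
}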
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractEmulator.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractEmulator.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractEmulator.java
deleted file mode 100644
index 217ca81..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractEmulator.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import static org.junit.Assume.assumeNotNull;
-
-import org.apache.hadoop.fs.FileSystemContractBaseTest;
-import org.junit.After;
-import org.junit.Before;
-
-public class TestNativeAzureFileSystemContractEmulator extends
- FileSystemContractBaseTest {
- private AzureBlobStorageTestAccount testAccount;
-
- @Before
- public void setUp() throws Exception {
- testAccount = AzureBlobStorageTestAccount.createForEmulator();
- if (testAccount != null) {
- fs = testAccount.getFileSystem();
- }
- assumeNotNull(fs);
- }
-
- @After
- public void tearDown() throws Exception {
- if (testAccount != null) {
- testAccount.cleanup();
- testAccount = null;
- fs = null;
- }
- }
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractLive.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractLive.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractLive.java
deleted file mode 100644
index b546009..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractLive.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import static org.junit.Assume.assumeNotNull;
-
-import org.apache.hadoop.fs.FileSystemContractBaseTest;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Ignore;
-import org.junit.Test;
-
-public class TestNativeAzureFileSystemContractLive extends
- FileSystemContractBaseTest {
- private AzureBlobStorageTestAccount testAccount;
-
- @Before
- public void setUp() throws Exception {
- testAccount = AzureBlobStorageTestAccount.create();
- if (testAccount != null) {
- fs = testAccount.getFileSystem();
- }
- assumeNotNull(fs);
- }
-
- @After
- public void tearDown() throws Exception {
- if (testAccount != null) {
- testAccount.cleanup();
- testAccount = null;
- fs = null;
- }
- }
-
- /**
- * The following tests are failing on Azure and the Azure
- * file system code needs to be modified to make them pass.
- * A separate work item has been opened for this.
- */
- @Ignore
- @Test
- public void testMoveFileUnderParent() throws Throwable {
- }
-
- @Ignore
- @Test
- public void testRenameFileToSelf() throws Throwable {
- }
-
- @Ignore
- @Test
- public void testRenameChildDirForbidden() throws Exception {
- }
-
- @Ignore
- @Test
- public void testMoveDirUnderParent() throws Throwable {
- }
-
- @Ignore
- @Test
- public void testRenameDirToSelf() throws Throwable {
- }
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractMocked.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractMocked.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractMocked.java
index f458bb3..2809260 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractMocked.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractMocked.java
@@ -23,6 +23,9 @@ import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
+/**
+ * Mocked testing of FileSystemContractBaseTest.
+ */
public class TestNativeAzureFileSystemContractMocked extends
FileSystemContractBaseTest {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractPageBlobLive.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractPageBlobLive.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractPageBlobLive.java
deleted file mode 100644
index 2a88ad2..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractPageBlobLive.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystemContractBaseTest;
-import org.junit.After;
-import static org.junit.Assume.assumeNotNull;
-import org.junit.Before;
-import org.junit.Ignore;
-import org.junit.Test;
-
-public class TestNativeAzureFileSystemContractPageBlobLive extends
- FileSystemContractBaseTest {
- private AzureBlobStorageTestAccount testAccount;
-
- private AzureBlobStorageTestAccount createTestAccount()
- throws Exception {
- Configuration conf = new Configuration();
-
- // Configure the page blob directories key so every file created is a page blob.
- conf.set(AzureNativeFileSystemStore.KEY_PAGE_BLOB_DIRECTORIES, "/");
-
- // Configure the atomic rename directories key so every folder will have
- // atomic rename applied.
- conf.set(AzureNativeFileSystemStore.KEY_ATOMIC_RENAME_DIRECTORIES, "/");
- return AzureBlobStorageTestAccount.create(conf);
- }
-
- @Before
- public void setUp() throws Exception {
- testAccount = createTestAccount();
- if (testAccount != null) {
- fs = testAccount.getFileSystem();
- }
- assumeNotNull(fs);
- }
-
- @After
- public void tearDown() throws Exception {
- if (testAccount != null) {
- testAccount.cleanup();
- testAccount = null;
- fs = null;
- }
- }
-
- /**
- * The following tests are failing on Azure and the Azure
- * file system code needs to be modified to make them pass.
- * A separate work item has been opened for this.
- */
- @Ignore
- @Test
- public void testMoveFileUnderParent() throws Throwable {
- }
-
- @Ignore
- @Test
- public void testRenameFileToSelf() throws Throwable {
- }
-
- @Ignore
- @Test
- public void testRenameChildDirForbidden() throws Exception {
- }
-
- @Ignore
- @Test
- public void testMoveDirUnderParent() throws Throwable {
- }
-
- @Ignore
- @Test
- public void testRenameDirToSelf() throws Throwable {
- }
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemFileNameCheck.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemFileNameCheck.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemFileNameCheck.java
index 82eabaa..0dfbb37 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemFileNameCheck.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemFileNameCheck.java
@@ -18,17 +18,11 @@
package org.apache.hadoop.fs.azure;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
import java.io.IOException;
import java.util.HashMap;
-import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.junit.After;
-import org.junit.Before;
+
import org.junit.Test;
/**
@@ -38,24 +32,18 @@ import org.junit.Test;
* creation/rename of files/directories through WASB that have colons in the
* names.
*/
-public class TestNativeAzureFileSystemFileNameCheck {
- private FileSystem fs = null;
- private AzureBlobStorageTestAccount testAccount = null;
+public class TestNativeAzureFileSystemFileNameCheck extends AbstractWasbTestBase {
private String root = null;
- @Before
+ @Override
public void setUp() throws Exception {
- testAccount = AzureBlobStorageTestAccount.createMock();
- fs = testAccount.getFileSystem();
+ super.setUp();
root = fs.getUri().toString();
}
- @After
- public void tearDown() throws Exception {
- testAccount.cleanup();
- root = null;
- fs = null;
- testAccount = null;
+ @Override
+ protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+ return AzureBlobStorageTestAccount.createMock();
}
@Test
@@ -138,4 +126,4 @@ public class TestNativeAzureFileSystemFileNameCheck {
fsck.run(new String[] { p.toString() });
return fsck.getPathNameWarning();
}
-}
\ No newline at end of file
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemLive.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemLive.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemLive.java
deleted file mode 100644
index 6baba33..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemLive.java
+++ /dev/null
@@ -1,242 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.util.concurrent.CountDownLatch;
-
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-
-import org.junit.Test;
-
-import com.microsoft.azure.storage.StorageException;
-
-/*
- * Tests the Native Azure file system (WASB) against an actual blob store if
- * provided in the environment.
- */
-public class TestNativeAzureFileSystemLive extends
- NativeAzureFileSystemBaseTest {
-
- @Override
- protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
- return AzureBlobStorageTestAccount.create();
- }
-
- @Test
- public void testLazyRenamePendingCanOverwriteExistingFile()
- throws Exception {
- final String SRC_FILE_KEY = "srcFile";
- final String DST_FILE_KEY = "dstFile";
- Path srcPath = new Path(SRC_FILE_KEY);
- FSDataOutputStream srcStream = fs.create(srcPath);
- assertTrue(fs.exists(srcPath));
- Path dstPath = new Path(DST_FILE_KEY);
- FSDataOutputStream dstStream = fs.create(dstPath);
- assertTrue(fs.exists(dstPath));
- NativeAzureFileSystem nfs = (NativeAzureFileSystem)fs;
- final String fullSrcKey = nfs.pathToKey(nfs.makeAbsolute(srcPath));
- final String fullDstKey = nfs.pathToKey(nfs.makeAbsolute(dstPath));
- nfs.getStoreInterface().rename(fullSrcKey, fullDstKey, true, null);
- assertTrue(fs.exists(dstPath));
- assertFalse(fs.exists(srcPath));
- IOUtils.cleanup(null, srcStream);
- IOUtils.cleanup(null, dstStream);
- }
- /**
- * Tests fs.delete() on a blob while another process holds a lease on it.
- * A delete issued without the lease should fail with an appropriate
- * exception for as long as the lease is held.
- * This is the scenario seen in HMaster startup when it cleans up the temp
- * dirs while an earlier, killed HMaster process still holds the lease it
- * took on the blob during a DDL operation.
- */
- @Test
- public void testDeleteThrowsExceptionWithLeaseExistsErrorMessage()
- throws Exception {
- LOG.info("Starting test");
- final String FILE_KEY = "fileWithLease";
- // Create the file
- Path path = new Path(FILE_KEY);
- fs.create(path);
- assertTrue(fs.exists(path));
- NativeAzureFileSystem nfs = (NativeAzureFileSystem)fs;
- final String fullKey = nfs.pathToKey(nfs.makeAbsolute(path));
- final AzureNativeFileSystemStore store = nfs.getStore();
-
- // Acquire the lease on the file in a background thread
- final CountDownLatch leaseAttemptComplete = new CountDownLatch(1);
- final CountDownLatch beginningDeleteAttempt = new CountDownLatch(1);
- Thread t = new Thread() {
- @Override
- public void run() {
- // Acquire the lease and then signal the main test thread.
- SelfRenewingLease lease = null;
- try {
- lease = store.acquireLease(fullKey);
- LOG.info("Lease acquired: " + lease.getLeaseID());
- } catch (AzureException e) {
- LOG.warn("Lease acqusition thread unable to acquire lease", e);
- } finally {
- leaseAttemptComplete.countDown();
- }
-
- // Wait for the main test thread to signal it will attempt the delete.
- try {
- beginningDeleteAttempt.await();
- } catch (InterruptedException e) {
- Thread.currentThread().interrupt();
- }
-
- // Keep holding the lease past the lease acquisition retry interval, so
- // the test covers the case of delete retrying to acquire the lease.
- try {
- Thread.sleep(SelfRenewingLease.LEASE_ACQUIRE_RETRY_INTERVAL * 3);
- } catch (InterruptedException ex) {
- Thread.currentThread().interrupt();
- }
-
- try {
- if (lease != null){
- LOG.info("Freeing lease");
- lease.free();
- }
- } catch (StorageException se) {
- LOG.warn("Unable to free lease.", se);
- }
- }
- };
-
- // Start the background thread and wait for it to signal the lease is held.
- t.start();
- try {
- leaseAttemptComplete.await();
- } catch (InterruptedException ex) {
- Thread.currentThread().interrupt();
- }
-
- // Try to delete the same file
- beginningDeleteAttempt.countDown();
- store.delete(fullKey);
-
- // At this point file SHOULD BE DELETED
- assertFalse(fs.exists(path));
- }
-
- /**
- * Check that isPageBlobKey works as expected. This assumes that
- * in the test configuration, the list of supported page blob directories
- * only includes "pageBlobs". That's why this test is made specific
- * to this subclass.
- */
- @Test
- public void testIsPageBlobKey() {
- AzureNativeFileSystemStore store = ((NativeAzureFileSystem) fs).getStore();
-
- // Use literal strings so it's easier to understand the tests.
- // In case the constant changes, we want to know about it so we can update this test.
- assertEquals(AzureBlobStorageTestAccount.DEFAULT_PAGE_BLOB_DIRECTORY, "pageBlobs");
-
- // URI prefix for test environment.
- String uriPrefix = "file:///";
-
- // negative tests
- String[] negativeKeys = { "", "/", "bar", "bar/", "bar/pageBlobs", "bar/pageBlobs/foo",
- "bar/pageBlobs/foo/", "/pageBlobs/", "/pageBlobs", "pageBlobs", "pageBlobsxyz/" };
- for (String s : negativeKeys) {
- assertFalse(store.isPageBlobKey(s));
- assertFalse(store.isPageBlobKey(uriPrefix + s));
- }
-
- // positive tests
- String[] positiveKeys = { "pageBlobs/", "pageBlobs/foo/", "pageBlobs/foo/bar/" };
- for (String s : positiveKeys) {
- assertTrue(store.isPageBlobKey(s));
- assertTrue(store.isPageBlobKey(uriPrefix + s));
- }
- }
-
- /**
- * Test that isAtomicRenameKey() works as expected.
- */
- @Test
- public void testIsAtomicRenameKey() {
-
- AzureNativeFileSystemStore store = ((NativeAzureFileSystem) fs).getStore();
-
- // We want to know if the default configuration changes so we can fix
- // this test.
- assertEquals(AzureBlobStorageTestAccount.DEFAULT_ATOMIC_RENAME_DIRECTORIES,
- "/atomicRenameDir1,/atomicRenameDir2");
-
- // URI prefix for test environment.
- String uriPrefix = "file:///";
-
- // negative tests
- String[] negativeKeys = { "", "/", "bar", "bar/", "bar/hbase",
- "bar/hbase/foo", "bar/hbase/foo/", "/hbase/", "/hbase", "hbase",
- "hbasexyz/", "foo/atomicRenameDir1/"};
- for (String s : negativeKeys) {
- assertFalse(store.isAtomicRenameKey(s));
- assertFalse(store.isAtomicRenameKey(uriPrefix + s));
- }
-
- // Positive tests. The directories for atomic rename are /hbase
- // plus the ones in the configuration (DEFAULT_ATOMIC_RENAME_DIRECTORIES
- // for this test).
- String[] positiveKeys = { "hbase/", "hbase/foo/", "hbase/foo/bar/",
- "atomicRenameDir1/foo/", "atomicRenameDir2/bar/"};
- for (String s : positiveKeys) {
- assertTrue(store.isAtomicRenameKey(s));
- assertTrue(store.isAtomicRenameKey(uriPrefix + s));
- }
- }
-
- /**
- * Tests fs.mkdir() function to create a target blob while another thread
- * is holding the lease on the blob. mkdir should not fail since the blob
- * already exists.
- * This is a scenario that would happen in HBase distributed log splitting.
- * Multiple threads will try to create and update "recovered.edits" folder
- * under the same path.
- */
- @Test
- public void testMkdirOnExistingFolderWithLease() throws Exception {
- SelfRenewingLease lease;
- final String FILE_KEY = "folderWithLease";
- // Create the folder
- fs.mkdirs(new Path(FILE_KEY));
- NativeAzureFileSystem nfs = (NativeAzureFileSystem) fs;
- String fullKey = nfs.pathToKey(nfs.makeAbsolute(new Path(FILE_KEY)));
- AzureNativeFileSystemStore store = nfs.getStore();
- // Acquire the lease on the folder
- lease = store.acquireLease(fullKey);
- assertTrue(lease.getLeaseID() != null);
- // Try to create the same folder
- store.storeEmptyFolder(fullKey,
- nfs.createPermissionStatus(FsPermission.getDirDefault()));
- lease.free();
- }
-}
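The two CountDownLatches in the deleted lease test above form a simple cross-thread handshake that is worth calling out, since the replacement test keeps it. Stripped to its shape (class and method names here are illustrative, not from the patch):

import java.util.concurrent.CountDownLatch;

final class LeaseHandshake {
  // One latch per direction; each is counted down exactly once.
  private final CountDownLatch leaseHeld = new CountDownLatch(1);
  private final CountDownLatch deleteStarting = new CountDownLatch(1);

  /** Background thread: acquire the lease, then hold it while delete runs. */
  void leaseHolder(Runnable acquireLease, Runnable freeLease)
      throws InterruptedException {
    try {
      acquireLease.run();
    } finally {
      leaseHeld.countDown();    // unblock the test thread even on failure
    }
    deleteStarting.await();     // keep the lease until the delete is underway
    freeLease.run();            // now delete's lease-acquisition retry can win
  }

  /** Test thread: wait for the lease, then contend with it via delete. */
  void deleter(Runnable delete) throws InterruptedException {
    leaseHeld.await();
    deleteStarting.countDown();
    delete.run();               // must retry internally until the lease frees
  }
}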
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemMocked.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemMocked.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemMocked.java
index aa1e4f7..20d45b2 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemMocked.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemMocked.java
@@ -21,6 +21,10 @@ package org.apache.hadoop.fs.azure;
import java.io.IOException;
import org.junit.Ignore;
+/**
+ * Run {@link NativeAzureFileSystemBaseTest} tests against a mocked store,
+ * skipping tests of unsupported features.
+ */
public class TestNativeAzureFileSystemMocked extends
NativeAzureFileSystemBaseTest {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemUploadLogic.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemUploadLogic.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemUploadLogic.java
index 4c2df8d..7f63295 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemUploadLogic.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemUploadLogic.java
@@ -18,41 +18,27 @@
package org.apache.hadoop.fs.azure;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-
import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.io.OutputStream;
import org.apache.hadoop.fs.Path;
-import org.junit.After;
-import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
/**
* Tests for the upload, buffering and flush logic in WASB.
*/
-public class TestNativeAzureFileSystemUploadLogic {
- private AzureBlobStorageTestAccount testAccount;
+public class TestNativeAzureFileSystemUploadLogic extends AbstractWasbTestBase {
// Just an arbitrary number so that the values I write have a predictable
// pattern: 0, 1, 2, .. , 45, 46, 0, 1, 2, ...
static final int byteValuePeriod = 47;
- @Before
- public void setUp() throws Exception {
- testAccount = AzureBlobStorageTestAccount.createMock();
- }
-
- @After
- public void tearDown() throws Exception {
- if (testAccount != null) {
- testAccount.cleanup();
- testAccount = null;
- }
+ @Override
+ protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+ return AzureBlobStorageTestAccount.createMock();
}
/**
@@ -126,9 +112,9 @@ public class TestNativeAzureFileSystemUploadLogic {
* @param expectedSize The expected size of the data in there.
*/
private void assertDataInFile(Path file, int expectedSize) throws Exception {
- InputStream inStream = testAccount.getFileSystem().open(file);
- assertDataInStream(inStream, expectedSize);
- inStream.close();
+ try(InputStream inStream = getFileSystem().open(file)) {
+ assertDataInStream(inStream, expectedSize);
+ }
}
/**
@@ -139,7 +125,7 @@ public class TestNativeAzureFileSystemUploadLogic {
private void assertDataInTempBlob(int expectedSize) throws Exception {
// Look for the temporary upload blob in the backing store.
InMemoryBlockBlobStore backingStore =
- testAccount.getMockStorage().getBackingStore();
+ getTestAccount().getMockStorage().getBackingStore();
String tempKey = null;
for (String key : backingStore.getKeys()) {
if (key.contains(NativeAzureFileSystem.AZURE_TEMP_FOLDER)) {
@@ -149,9 +135,10 @@ public class TestNativeAzureFileSystemUploadLogic {
}
}
assertNotNull(tempKey);
- InputStream inStream = new ByteArrayInputStream(backingStore.getContent(tempKey));
- assertDataInStream(inStream, expectedSize);
- inStream.close();
+ try (InputStream inStream = new ByteArrayInputStream(
+ backingStore.getContent(tempKey))) {
+ assertDataInStream(inStream, expectedSize);
+ }
}
/**
@@ -162,25 +149,30 @@ public class TestNativeAzureFileSystemUploadLogic {
*/
private void testConsistencyAfterManyFlushes(FlushFrequencyVariation variation)
throws Exception {
- Path uploadedFile = new Path("/uploadedFile");
- OutputStream outStream = testAccount.getFileSystem().create(uploadedFile);
- final int totalSize = 9123;
- int flushPeriod;
- switch (variation) {
- case BeforeSingleBufferFull: flushPeriod = 300; break;
- case AfterSingleBufferFull: flushPeriod = 600; break;
- case AfterAllRingBufferFull: flushPeriod = 1600; break;
- default:
- throw new IllegalArgumentException("Unknown variation: " + variation);
- }
- for (int i = 0; i < totalSize; i++) {
- outStream.write(i % byteValuePeriod);
- if ((i + 1) % flushPeriod == 0) {
- outStream.flush();
- assertDataInTempBlob(i + 1);
+ Path uploadedFile = methodPath();
+ try {
+ OutputStream outStream = getFileSystem().create(uploadedFile);
+ final int totalSize = 9123;
+ int flushPeriod;
+ switch (variation) {
+ case BeforeSingleBufferFull: flushPeriod = 300; break;
+ case AfterSingleBufferFull: flushPeriod = 600; break;
+ case AfterAllRingBufferFull: flushPeriod = 1600; break;
+ default:
+ throw new IllegalArgumentException("Unknown variation: " + variation);
}
+ for (int i = 0; i < totalSize; i++) {
+ outStream.write(i % byteValuePeriod);
+ if ((i + 1) % flushPeriod == 0) {
+ outStream.flush();
+ assertDataInTempBlob(i + 1);
+ }
+ }
+ outStream.close();
+ assertDataInFile(uploadedFile, totalSize);
+ } finally {
+ getFileSystem().delete(uploadedFile, false);
+
}
- outStream.close();
- assertDataInFile(uploadedFile, totalSize);
}
}
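The recurring mechanical change in this file is replacing open/assert/close sequences with try-with-resources, so the stream is closed even when an assertion throws. Condensed from the hunks above:

// Before (as deleted above): the stream leaks if the assertion throws.
InputStream inStream = getFileSystem().open(file);
assertDataInStream(inStream, expectedSize);
inStream.close();

// After (as added above): the stream closes on every exit path.
try (InputStream inStream = getFileSystem().open(file)) {
  assertDataInStream(inStream, expectedSize);
}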
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperations.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperations.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperations.java
index 544d6ab..303a89a 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperations.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperations.java
@@ -18,11 +18,6 @@
package org.apache.hadoop.fs.azure;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
import java.util.HashMap;
import org.apache.hadoop.fs.FileStatus;
@@ -37,7 +32,8 @@ import org.junit.Test;
* Tests that WASB handles things gracefully when users add blobs to the Azure
* Storage container from outside WASB's control.
*/
-public class TestOutOfBandAzureBlobOperations {
+public class TestOutOfBandAzureBlobOperations
+ extends AbstractWasbTestWithTimeout {
private AzureBlobStorageTestAccount testAccount;
private FileSystem fs;
private InMemoryBlockBlobStore backingStore;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperationsLive.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperationsLive.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperationsLive.java
deleted file mode 100644
index 60b01c6..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperationsLive.java
+++ /dev/null
@@ -1,203 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assume.assumeNotNull;
-
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-import com.microsoft.azure.storage.blob.BlobOutputStream;
-import com.microsoft.azure.storage.blob.CloudBlockBlob;
-
-public class TestOutOfBandAzureBlobOperationsLive {
- private FileSystem fs;
- private AzureBlobStorageTestAccount testAccount;
-
- @Before
- public void setUp() throws Exception {
- testAccount = AzureBlobStorageTestAccount.create();
- if (testAccount != null) {
- fs = testAccount.getFileSystem();
- }
- assumeNotNull(testAccount);
- }
-
- @After
- public void tearDown() throws Exception {
- if (testAccount != null) {
- testAccount.cleanup();
- testAccount = null;
- fs = null;
- }
- }
-
- // scenario for this particular test described at MONARCH-HADOOP-764
- // creating a file out-of-band would confuse mkdirs("<oobfilesUncleFolder>")
- // eg oob creation of "user/<name>/testFolder/a/input/file"
- // Then wasb creation of "user/<name>/testFolder/a/output" fails
- @Test
- public void outOfBandFolder_uncleMkdirs() throws Exception {
-
- // NOTE: manual use of CloudBlockBlob targets working directory explicitly.
- // WASB driver methods prepend working directory implicitly.
- String workingDir = "user/"
- + UserGroupInformation.getCurrentUser().getShortUserName() + "/";
-
- CloudBlockBlob blob = testAccount.getBlobReference(workingDir
- + "testFolder1/a/input/file");
- BlobOutputStream s = blob.openOutputStream();
- s.close();
- assertTrue(fs.exists(new Path("testFolder1/a/input/file")));
-
- Path targetFolder = new Path("testFolder1/a/output");
- assertTrue(fs.mkdirs(targetFolder));
- }
-
- // scenario for this particular test described at MONARCH-HADOOP-764
- @Test
- public void outOfBandFolder_parentDelete() throws Exception {
-
- // NOTE: manual use of CloudBlockBlob targets working directory explicitly.
- // WASB driver methods prepend working directory implicitly.
- String workingDir = "user/"
- + UserGroupInformation.getCurrentUser().getShortUserName() + "/";
- CloudBlockBlob blob = testAccount.getBlobReference(workingDir
- + "testFolder2/a/input/file");
- BlobOutputStream s = blob.openOutputStream();
- s.close();
- assertTrue(fs.exists(new Path("testFolder2/a/input/file")));
-
- Path targetFolder = new Path("testFolder2/a/input");
- assertTrue(fs.delete(targetFolder, true));
- }
-
- @Test
- public void outOfBandFolder_rootFileDelete() throws Exception {
-
- CloudBlockBlob blob = testAccount.getBlobReference("fileY");
- BlobOutputStream s = blob.openOutputStream();
- s.close();
- assertTrue(fs.exists(new Path("/fileY")));
- assertTrue(fs.delete(new Path("/fileY"), true));
- }
-
- @Test
- public void outOfBandFolder_firstLevelFolderDelete() throws Exception {
-
- CloudBlockBlob blob = testAccount.getBlobReference("folderW/file");
- BlobOutputStream s = blob.openOutputStream();
- s.close();
- assertTrue(fs.exists(new Path("/folderW")));
- assertTrue(fs.exists(new Path("/folderW/file")));
- assertTrue(fs.delete(new Path("/folderW"), true));
- }
-
- // scenario for this particular test described at MONARCH-HADOOP-764
- @Test
- public void outOfBandFolder_siblingCreate() throws Exception {
-
- // NOTE: manual use of CloudBlockBlob targets working directory explicitly.
- // WASB driver methods prepend working directory implicitly.
- String workingDir = "user/"
- + UserGroupInformation.getCurrentUser().getShortUserName() + "/";
- CloudBlockBlob blob = testAccount.getBlobReference(workingDir
- + "testFolder3/a/input/file");
- BlobOutputStream s = blob.openOutputStream();
- s.close();
- assertTrue(fs.exists(new Path("testFolder3/a/input/file")));
-
- Path targetFile = new Path("testFolder3/a/input/file2");
- FSDataOutputStream s2 = fs.create(targetFile);
- s2.close();
- }
-
- // scenario for this particular test described at MONARCH-HADOOP-764
- // creating a new file in the root folder
- @Test
- public void outOfBandFolder_create_rootDir() throws Exception {
- Path targetFile = new Path("/newInRoot");
- FSDataOutputStream s2 = fs.create(targetFile);
- s2.close();
- }
-
- // scenario for this particular test described at MONARCH-HADOOP-764
- @Test
- public void outOfBandFolder_rename() throws Exception {
-
- // NOTE: manual use of CloudBlockBlob targets working directory explicitly.
- // WASB driver methods prepend working directory implicitly.
- String workingDir = "user/"
- + UserGroupInformation.getCurrentUser().getShortUserName() + "/";
- CloudBlockBlob blob = testAccount.getBlobReference(workingDir
- + "testFolder4/a/input/file");
- BlobOutputStream s = blob.openOutputStream();
- s.close();
-
- Path srcFilePath = new Path("testFolder4/a/input/file");
- assertTrue(fs.exists(srcFilePath));
-
- Path destFilePath = new Path("testFolder4/a/input/file2");
- fs.rename(srcFilePath, destFilePath);
- }
-
- // Verify that you can rename a file which is the only file in an implicit folder in the
- // WASB file system.
- // scenario for this particular test described at MONARCH-HADOOP-892
- @Test
- public void outOfBandSingleFile_rename() throws Exception {
-
- // NOTE: manual use of CloudBlockBlob targets working directory explicitly.
- // WASB driver methods prepend working directory implicitly.
- String workingDir = "user/" + UserGroupInformation.getCurrentUser().getShortUserName() + "/";
- CloudBlockBlob blob = testAccount.getBlobReference(workingDir + "testFolder5/a/input/file");
- BlobOutputStream s = blob.openOutputStream();
- s.close();
-
- Path srcFilePath = new Path("testFolder5/a/input/file");
- assertTrue(fs.exists(srcFilePath));
-
- Path destFilePath = new Path("testFolder5/file2");
- fs.rename(srcFilePath, destFilePath);
- }
-
- // WASB must force explicit parent directories in create, delete, mkdirs, rename.
- // scenario for this particular test described at MONARCH-HADOOP-764
- @Test
- public void outOfBandFolder_rename_rootLevelFiles() throws Exception {
-
- // NOTE: manual use of CloudBlockBlob targets working directory explicitly.
- // WASB driver methods prepend working directory implicitly.
- CloudBlockBlob blob = testAccount.getBlobReference("fileX");
- BlobOutputStream s = blob.openOutputStream();
- s.close();
-
- Path srcFilePath = new Path("/fileX");
- assertTrue(fs.exists(srcFilePath));
-
- Path destFilePath = new Path("/fileXrename");
- fs.rename(srcFilePath, destFilePath);
- }
-}
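Every deleted test above follows the same out-of-band pattern: write a blob directly through the Azure SDK, bypassing the WASB driver, then verify that WASB still resolves it. Condensed from those tests, and assuming the same setUp as the deleted class (testAccount and fs initialized):

@Test
public void outOfBandCreateThenStat() throws Exception {
  // The SDK call needs the full key; WASB prepends the working
  // directory to relative paths itself.
  String workingDir = "user/"
      + UserGroupInformation.getCurrentUser().getShortUserName() + "/";
  CloudBlockBlob blob = testAccount.getBlobReference(
      workingDir + "testFolder1/a/input/file");
  blob.openOutputStream().close();   // zero-byte out-of-band blob
  // WASB must still see the file and its implicit parent directories.
  assertTrue(fs.exists(new Path("testFolder1/a/input/file")));
}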
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestReadAndSeekPageBlobAfterWrite.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestReadAndSeekPageBlobAfterWrite.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestReadAndSeekPageBlobAfterWrite.java
deleted file mode 100644
index 41b8386..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestReadAndSeekPageBlobAfterWrite.java
+++ /dev/null
@@ -1,355 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assume.assumeNotNull;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.Arrays;
-import java.util.Random;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.azure.AzureException;
-import org.apache.hadoop.util.Time;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-/**
- * Write data into a page blob and verify you can read back all of it
- * or just a part of it.
- */
-public class TestReadAndSeekPageBlobAfterWrite {
- private static final Log LOG = LogFactory.getLog(TestReadAndSeekPageBlobAfterWrite.class);
-
- private FileSystem fs;
- private AzureBlobStorageTestAccount testAccount;
- private byte[] randomData;
-
- // Page blob physical page size
- private static final int PAGE_SIZE = PageBlobFormatHelpers.PAGE_SIZE;
-
- // Size of data on page (excluding header)
- private static final int PAGE_DATA_SIZE = PAGE_SIZE - PageBlobFormatHelpers.PAGE_HEADER_SIZE;
- private static final int MAX_BYTES = 33554432; // maximum bytes in a file that we'll test
- private static final int MAX_PAGES = MAX_BYTES / PAGE_SIZE; // maximum number of pages we'll test
- private Random rand = new Random();
-
- // A key with a prefix under /pageBlobs, which for the test file system will
- // force use of a page blob.
- private static final String KEY = "/pageBlobs/file.dat";
- private static final Path PATH = new Path(KEY); // path of page blob file to read and write
-
- protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
- return AzureBlobStorageTestAccount.create();
- }
-
- @Before
- public void setUp() throws Exception {
- testAccount = createTestAccount();
- if (testAccount != null) {
- fs = testAccount.getFileSystem();
- }
- assumeNotNull(testAccount);
-
- // Make sure we are using an integral number of pages.
- assertEquals(0, MAX_BYTES % PAGE_SIZE);
-
- // load an in-memory array of random data
- randomData = new byte[PAGE_SIZE * MAX_PAGES];
- rand.nextBytes(randomData);
- }
-
- @After
- public void tearDown() throws Exception {
- if (testAccount != null) {
- testAccount.cleanup();
- testAccount = null;
- fs = null;
- }
- }
-
- /**
- * Make sure the file name (key) is a page blob file name. If anybody changes that,
- * we need to come back and update this test class.
- */
- @Test
- public void testIsPageBlobFileName() {
- AzureNativeFileSystemStore store = ((NativeAzureFileSystem) fs).getStore();
- String[] a = KEY.split("/");
- String key2 = a[1] + "/";
- assertTrue(store.isPageBlobKey(key2));
- }
-
- /**
- * For a set of different file sizes, write some random data to a page blob,
- * read it back, and compare that what was read is the same as what was written.
- */
- @Test
- public void testReadAfterWriteRandomData() throws IOException {
-
- // local shorthand
- final int PDS = PAGE_DATA_SIZE;
-
- // Test for sizes at and near page boundaries
- int[] dataSizes = {
-
- // on first page
- 0, 1, 2, 3,
-
- // Near first physical page boundary (because the implementation
- // stores PDS + the page header size bytes on each page).
- PDS - 1, PDS, PDS + 1, PDS + 2, PDS + 3,
-
- // near second physical page boundary
- (2 * PDS) - 1, (2 * PDS), (2 * PDS) + 1, (2 * PDS) + 2, (2 * PDS) + 3,
-
- // near tenth physical page boundary
- (10 * PDS) - 1, (10 * PDS), (10 * PDS) + 1, (10 * PDS) + 2, (10 * PDS) + 3,
-
- // test one big size, >> 4MB (an internal buffer size in the code)
- MAX_BYTES
- };
-
- for (int i : dataSizes) {
- testReadAfterWriteRandomData(i);
- }
- }
-
- private void testReadAfterWriteRandomData(int size) throws IOException {
- writeRandomData(size);
- readRandomDataAndVerify(size);
- }
-
- /**
- * Read "size" bytes of data and verify that what was read and what was written
- * are the same.
- */
- private void readRandomDataAndVerify(int size) throws AzureException, IOException {
- byte[] b = new byte[size];
- FSDataInputStream stream = fs.open(PATH);
- int bytesRead = stream.read(b);
- stream.close();
- assertEquals(bytesRead, size);
-
- // compare the data read to the data written
- assertTrue(comparePrefix(randomData, b, size));
- }
-
- // return true if the beginning "size" values of the arrays are the same
- private boolean comparePrefix(byte[] a, byte[] b, int size) {
- if (a.length < size || b.length < size) {
- return false;
- }
- for (int i = 0; i < size; i++) {
- if (a[i] != b[i]) {
- return false;
- }
- }
- return true;
- }
-
- // Write a specified amount of random data to the file path for this test class.
- private void writeRandomData(int size) throws IOException {
- OutputStream output = fs.create(PATH);
- output.write(randomData, 0, size);
- output.close();
- }
-
- /**
- * Write data to a page blob, open it, seek, and then read a range of data.
- * Then compare that the data read from that range is the same as the data originally written.
- */
- @Test
- public void testPageBlobSeekAndReadAfterWrite() throws IOException {
- writeRandomData(PAGE_SIZE * MAX_PAGES);
- int recordSize = 100;
- byte[] b = new byte[recordSize];
- FSDataInputStream stream = fs.open(PATH);
-
- // Seek to a boundary around the middle of the 6th page
- int seekPosition = 5 * PAGE_SIZE + 250;
- stream.seek(seekPosition);
-
- // Read a record's worth of bytes and verify results
- int bytesRead = stream.read(b);
- verifyReadRandomData(b, bytesRead, seekPosition, recordSize);
-
- // Seek to another spot and read a record greater than a page
- seekPosition = 10 * PAGE_SIZE + 250;
- stream.seek(seekPosition);
- recordSize = 1000;
- b = new byte[recordSize];
- bytesRead = stream.read(b);
- verifyReadRandomData(b, bytesRead, seekPosition, recordSize);
-
- // Read the last 100 bytes of the file
- recordSize = 100;
- seekPosition = PAGE_SIZE * MAX_PAGES - recordSize;
- stream.seek(seekPosition);
- b = new byte[recordSize];
- bytesRead = stream.read(b);
- verifyReadRandomData(b, bytesRead, seekPosition, recordSize);
-
- // Read past the end of the file and we should get only partial data.
- recordSize = 100;
- seekPosition = PAGE_SIZE * MAX_PAGES - recordSize + 50;
- stream.seek(seekPosition);
- b = new byte[recordSize];
- bytesRead = stream.read(b);
- assertEquals(50, bytesRead);
-
- // compare last 50 bytes written with those read
- byte[] tail = Arrays.copyOfRange(randomData, seekPosition, randomData.length);
- assertTrue(comparePrefix(tail, b, 50));
- }
-
- // Verify that reading a record of data after seeking gives the expected data.
- private void verifyReadRandomData(byte[] b, int bytesRead, int seekPosition, int recordSize) {
- byte[] originalRecordData =
- Arrays.copyOfRange(randomData, seekPosition, seekPosition + recordSize + 1);
- assertEquals(recordSize, bytesRead);
- assertTrue(comparePrefix(originalRecordData, b, recordSize));
- }
-
- // Test many small flushed writes interspersed with periodic hflush calls.
- // For manual testing, increase NUM_WRITES to a large number.
- // The goal for a long-running manual test is to make sure that it finishes
- // and the close() call does not time out. It also facilitates debugging into
- // hflush/hsync.
- @Test
- public void testManySmallWritesWithHFlush() throws IOException {
- writeAndReadOneFile(50, 100, 20);
- }
-
- /**
- * Write a total of numWrites * recordLength data to a file, read it back,
- * and check to make sure what was read is the same as what was written.
- * The syncInterval is the number of writes after which to call hflush to
- * force the data to storage.
- */
- private void writeAndReadOneFile(int numWrites, int recordLength, int syncInterval) throws IOException {
- final int NUM_WRITES = numWrites;
- final int RECORD_LENGTH = recordLength;
- final int SYNC_INTERVAL = syncInterval;
-
- // A lower bound on the minimum time we think it will take to do
- // a write to Azure storage.
- final long MINIMUM_EXPECTED_TIME = 20;
- LOG.info("Writing " + NUM_WRITES * RECORD_LENGTH + " bytes to " + PATH.getName());
- FSDataOutputStream output = fs.create(PATH);
- int writesSinceHFlush = 0;
- try {
-
- // Do a flush and hflush to exercise case for empty write queue in PageBlobOutputStream,
- // to test concurrent execution gates.
- output.flush();
- output.hflush();
- for (int i = 0; i < NUM_WRITES; i++) {
- output.write(randomData, i * RECORD_LENGTH, RECORD_LENGTH);
- writesSinceHFlush++;
- output.flush();
- if ((i % SYNC_INTERVAL) == 0) {
- output.hflush();
- writesSinceHFlush = 0;
- }
- }
- } finally {
- long start = Time.monotonicNow();
- output.close();
- long end = Time.monotonicNow();
- LOG.debug("close duration = " + (end - start) + " msec.");
- if (writesSinceHFlush > 0) {
- assertTrue(String.format(
- "close duration with >= 1 pending write is %d, less than minimum expected of %d",
- end - start, MINIMUM_EXPECTED_TIME),
- end - start >= MINIMUM_EXPECTED_TIME);
- }
- }
-
- // Read the data back and check it.
- FSDataInputStream stream = fs.open(PATH);
- int SIZE = NUM_WRITES * RECORD_LENGTH;
- byte[] b = new byte[SIZE];
- try {
- stream.seek(0);
- stream.read(b, 0, SIZE);
- verifyReadRandomData(b, SIZE, 0, SIZE);
- } finally {
- stream.close();
- }
-
- // delete the file
- fs.delete(PATH, false);
- }
-
- // Test writing to a large file repeatedly as a stress test.
- // Set the repetitions to a larger number for manual testing
- // for a longer stress run.
- @Test
- public void testLargeFileStress() throws IOException {
- int numWrites = 32;
- int recordSize = 1024 * 1024;
- int syncInterval = 10;
- int repetitions = 1;
- for (int i = 0; i < repetitions; i++) {
- writeAndReadOneFile(numWrites, recordSize, syncInterval);
- }
- }
-
- // Write to a file repeatedly to verify that it extends.
- // The page blob file should start out at 128MB and finish at 256MB.
- @Test(timeout=300000)
- public void testFileSizeExtension() throws IOException {
- final int writeSize = 1024 * 1024;
- final int numWrites = 129;
- final byte dataByte = 5;
- byte[] data = new byte[writeSize];
- Arrays.fill(data, dataByte);
- FSDataOutputStream output = fs.create(PATH);
- try {
- for (int i = 0; i < numWrites; i++) {
- output.write(data);
- output.hflush();
- LOG.debug("total writes = " + (i + 1));
- }
- } finally {
- output.close();
- }
-
- // Show that we wrote more than the default page blob file size.
- assertTrue(numWrites * writeSize > PageBlobOutputStream.PAGE_BLOB_MIN_SIZE);
-
- // Verify we can list the new size. That will prove we expanded the file.
- FileStatus[] status = fs.listStatus(PATH);
- assertTrue(status[0].getLen() == numWrites * writeSize);
- LOG.debug("Total bytes written to " + PATH + " = " + status[0].getLen());
- fs.delete(PATH, false);
- }
-
-}
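The deleted seek tests above all reduce to one verification step: seek into the page blob, read one record, and compare it against the matching slice of the data that was written. Condensed, reusing the deleted class's fields and helpers (fs, PATH, PAGE_SIZE, randomData, comparePrefix):

FSDataInputStream stream = fs.open(PATH);
try {
  int seekPosition = 5 * PAGE_SIZE + 250;   // middle of the 6th physical page
  stream.seek(seekPosition);
  byte[] record = new byte[100];
  int bytesRead = stream.read(record);
  assertEquals(record.length, bytesRead);
  // Compare what was read with the same range of the written data.
  assertTrue(comparePrefix(
      Arrays.copyOfRange(randomData, seekPosition, seekPosition + 100),
      record, 100));
} finally {
  stream.close();
}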
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestShellDecryptionKeyProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestShellDecryptionKeyProvider.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestShellDecryptionKeyProvider.java
index 0bf33d8..0334c39 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestShellDecryptionKeyProvider.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestShellDecryptionKeyProvider.java
@@ -19,20 +19,23 @@
package org.apache.hadoop.fs.azure;
import static org.apache.hadoop.test.PlatformAssumptions.assumeWindows;
-import static org.junit.Assert.assertEquals;
import java.io.File;
import org.apache.commons.io.FileUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.junit.Assert;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
-public class TestShellDecryptionKeyProvider {
- public static final Log LOG = LogFactory
- .getLog(TestShellDecryptionKeyProvider.class);
+/**
+ * Windows-only tests of shell scripts that provide decryption keys.
+ */
+public class TestShellDecryptionKeyProvider
+ extends AbstractWasbTestWithTimeout {
+ public static final Logger LOG = LoggerFactory
+ .getLogger(TestShellDecryptionKeyProvider.class);
private static File TEST_ROOT_DIR = new File(System.getProperty(
"test.build.data", "/tmp"), "TestShellDecryptionKeyProvider");
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbFsck.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbFsck.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbFsck.java
index 467424b..9d32fb2 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbFsck.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbFsck.java
@@ -18,10 +18,6 @@
package org.apache.hadoop.fs.azure;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
@@ -32,7 +28,10 @@ import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
-public class TestWasbFsck {
+/**
+ * Tests which look at fsck recovery.
+ */
+public class TestWasbFsck extends AbstractWasbTestWithTimeout {
private AzureBlobStorageTestAccount testAccount;
private FileSystem fs;
private InMemoryBlockBlobStore backingStore;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestBlobDataValidation.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestBlobDataValidation.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestBlobDataValidation.java
new file mode 100644
index 0000000..0aa9393
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestBlobDataValidation.java
@@ -0,0 +1,244 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import static org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.KEY_CHECK_BLOCK_MD5;
+import static org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.KEY_STORE_BLOB_MD5;
+import static org.junit.Assume.assumeNotNull;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.HttpURLConnection;
+import java.util.Arrays;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.TestHookOperationContext;
+import org.apache.hadoop.fs.azure.integration.AzureTestUtils;
+
+import org.junit.After;
+import org.junit.Test;
+
+import com.microsoft.azure.storage.Constants;
+import com.microsoft.azure.storage.OperationContext;
+import com.microsoft.azure.storage.ResponseReceivedEvent;
+import com.microsoft.azure.storage.StorageErrorCodeStrings;
+import com.microsoft.azure.storage.StorageEvent;
+import com.microsoft.azure.storage.StorageException;
+import com.microsoft.azure.storage.blob.BlockEntry;
+import com.microsoft.azure.storage.blob.BlockSearchMode;
+import com.microsoft.azure.storage.blob.CloudBlockBlob;
+import com.microsoft.azure.storage.core.Base64;
+
+/**
+ * Test that we do proper data integrity validation with MD5 checks as
+ * configured.
+ */
+public class ITestBlobDataValidation extends AbstractWasbTestWithTimeout {
+ private AzureBlobStorageTestAccount testAccount;
+
+ @After
+ public void tearDown() throws Exception {
+ testAccount = AzureTestUtils.cleanupTestAccount(testAccount);
+ }
+
+ /**
+ * Test that by default we don't store the blob-level MD5.
+ */
+ @Test
+ public void testBlobMd5StoreOffByDefault() throws Exception {
+ testAccount = AzureBlobStorageTestAccount.create();
+ testStoreBlobMd5(false);
+ }
+
+ /**
+ * Test that we get blob-level MD5 storage and validation if we specify that
+ * in the configuration.
+ */
+ @Test
+ public void testStoreBlobMd5() throws Exception {
+ Configuration conf = new Configuration();
+ conf.setBoolean(KEY_STORE_BLOB_MD5, true);
+ testAccount = AzureBlobStorageTestAccount.create(conf);
+ testStoreBlobMd5(true);
+ }
+
+ /**
+ * Trims a prefix and suffix from the given string. For example, if
+ * s is "/xy" and toTrim is "/", this method returns "xy".
+ */
+ private static String trim(String s, String toTrim) {
+ return StringUtils.removeEnd(StringUtils.removeStart(s, toTrim),
+ toTrim);
+ }
+
+ private void testStoreBlobMd5(boolean expectMd5Stored) throws Exception {
+ assumeNotNull(testAccount);
+ // Write a test file.
+ NativeAzureFileSystem fs = testAccount.getFileSystem();
+ Path testFilePath = AzureTestUtils.pathForTests(fs,
+ methodName.getMethodName());
+ String testFileKey = trim(testFilePath.toUri().getPath(), "/");
+ OutputStream outStream = fs.create(testFilePath);
+ outStream.write(new byte[] { 5, 15 });
+ outStream.close();
+
+ // Check that we stored/didn't store the MD5 field as configured.
+ CloudBlockBlob blob = testAccount.getBlobReference(testFileKey);
+ blob.downloadAttributes();
+ String obtainedMd5 = blob.getProperties().getContentMD5();
+ if (expectMd5Stored) {
+ assertNotNull(obtainedMd5);
+ } else {
+ assertNull("Expected no MD5, found: " + obtainedMd5, obtainedMd5);
+ }
+
+ // Mess with the content so it doesn't match the MD5.
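+ // Writing a block through the SDK blob reference bypasses the WASB
+ // client, so any blob-level MD5 stored at create time goes stale.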
+ String newBlockId = Base64.encode(new byte[] { 55, 44, 33, 22 });
+ blob.uploadBlock(newBlockId,
+ new ByteArrayInputStream(new byte[] { 6, 45 }), 2);
+ blob.commitBlockList(Arrays.asList(new BlockEntry[] { new BlockEntry(
+ newBlockId, BlockSearchMode.UNCOMMITTED) }));
+
+ // Now read back the content. If we stored the MD5 for the blob content
+ // we should get a data corruption error.
+ InputStream inStream = fs.open(testFilePath);
+ try {
+ byte[] inBuf = new byte[100];
+ while (inStream.read(inBuf) > 0) {
+ // Drain the stream; if the blob MD5 was stored, the mismatch
+ // surfaces as an IOException during these reads.
+ }
+ inStream.close();
+ if (expectMd5Stored) {
+ fail("Should've thrown because of data corruption.");
+ }
+ } catch (IOException ex) {
+ if (!expectMd5Stored) {
+ throw ex;
+ }
+ StorageException cause = (StorageException)ex.getCause();
+ assertNotNull(cause);
+ assertEquals("Unexpected cause: " + cause,
+ StorageErrorCodeStrings.INVALID_MD5, cause.getErrorCode());
+ }
+ }
+
+ /**
+ * Test that by default we check block-level MD5.
+ */
+ @Test
+ public void testCheckBlockMd5() throws Exception {
+ testAccount = AzureBlobStorageTestAccount.create();
+ testCheckBlockMd5(true);
+ }
+
+ /**
+ * Test that we don't check block-level MD5 if we specify that in the
+ * configuration.
+ */
+ @Test
+ public void testDontCheckBlockMd5() throws Exception {
+ Configuration conf = new Configuration();
+ conf.setBoolean(KEY_CHECK_BLOCK_MD5, false);
+ testAccount = AzureBlobStorageTestAccount.create(conf);
+ testCheckBlockMd5(false);
+ }
+
+ /**
+ * Connection inspector to check that the MD5 fields for content are
+ * set or not set as expected.
+ */
+ private static class ContentMD5Checker extends
+ StorageEvent<ResponseReceivedEvent> {
+ private final boolean expectMd5;
+
+ public ContentMD5Checker(boolean expectMd5) {
+ this.expectMd5 = expectMd5;
+ }
+
+ @Override
+ public void eventOccurred(ResponseReceivedEvent eventArg) {
+ HttpURLConnection connection = (HttpURLConnection) eventArg
+ .getConnectionObject();
+ if (isGetRange(connection)) {
+ checkObtainedMd5(connection
+ .getHeaderField(Constants.HeaderConstants.CONTENT_MD5));
+ } else if (isPutBlock(connection)) {
+ checkObtainedMd5(connection
+ .getRequestProperty(Constants.HeaderConstants.CONTENT_MD5));
+ }
+ }
+
+ private void checkObtainedMd5(String obtainedMd5) {
+ if (expectMd5) {
+ assertNotNull(obtainedMd5);
+ } else {
+ assertNull("Expected no MD5, found: " + obtainedMd5, obtainedMd5);
+ }
+ }
+
+ private static boolean isPutBlock(HttpURLConnection connection) {
+ return connection.getRequestMethod().equals("PUT")
+ && connection.getURL().getQuery() != null
+ && connection.getURL().getQuery().contains("blockid");
+ }
+
+ private static boolean isGetRange(HttpURLConnection connection) {
+ return connection.getRequestMethod().equals("GET")
+ && connection
+ .getHeaderField(Constants.HeaderConstants.STORAGE_RANGE_HEADER) != null;
+ }
+ }
+
+ private void testCheckBlockMd5(final boolean expectMd5Checked)
+ throws Exception {
+ assumeNotNull(testAccount);
+ Path testFilePath = new Path("/testFile");
+
+ // Add a hook to check that for GET/PUT requests we set/don't set
+ // the block-level MD5 field as configured. I tried to do clever
+ // testing by also messing with the raw data to see if we actually
+ // validate the data as expected, but the HttpURLConnection wasn't
+ // pluggable enough for me to do that.
+ testAccount.getFileSystem().getStore()
+ .addTestHookToOperationContext(new TestHookOperationContext() {
+ @Override
+ public OperationContext modifyOperationContext(
+ OperationContext original) {
+ original.getResponseReceivedEventHandler().addListener(
+ new ContentMD5Checker(expectMd5Checked));
+ return original;
+ }
+ });
+
+ OutputStream outStream = testAccount.getFileSystem().create(testFilePath);
+ outStream.write(new byte[] { 5, 15 });
+ outStream.close();
+
+ InputStream inStream = testAccount.getFileSystem().open(testFilePath);
+ byte[] inBuf = new byte[100];
+ while (inStream.read(inBuf) > 0) {
+ // Drain the stream so the ranged GET requests (and their MD5
+ // headers) are issued and inspected by the hook.
+ }
+ inStream.close();
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestBlobTypeSpeedDifference.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestBlobTypeSpeedDifference.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestBlobTypeSpeedDifference.java
new file mode 100644
index 0000000..b46ad5b
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestBlobTypeSpeedDifference.java
@@ -0,0 +1,163 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.Arrays;
+import java.util.Date;
+
+import org.junit.Test;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.azure.integration.AzureTestUtils;
+import org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation;
+
+
+/**
+ * A simple benchmark to find out the difference in speed between block
+ * and page blobs.
+ */
+public class ITestBlobTypeSpeedDifference extends AbstractWasbTestBase {
+
+ @Override
+ protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+ return AzureBlobStorageTestAccount.create();
+ }
+
+ /**
+ * Writes the given number of bytes to the given stream, flushing after
+ * every flushInterval bytes.
+ */
+ private static void writeTestFile(OutputStream writeStream,
+ long size, long flushInterval) throws IOException {
+ int bufferSize = (int) Math.min(1000, flushInterval);
+ byte[] buffer = new byte[bufferSize];
+ Arrays.fill(buffer, (byte) 7);
+ int bytesWritten = 0;
+ int bytesUnflushed = 0;
+ while (bytesWritten < size) {
+ int numberToWrite = (int) Math.min(bufferSize, size - bytesWritten);
+ writeStream.write(buffer, 0, numberToWrite);
+ bytesWritten += numberToWrite;
+ bytesUnflushed += numberToWrite;
+ if (bytesUnflushed >= flushInterval) {
+ writeStream.flush();
+ bytesUnflushed = 0;
+ }
+ }
+ }
+
+ private static class TestResult {
+ final long timeTakenInMs;
+ final long totalNumberOfRequests;
+
+ TestResult(long timeTakenInMs, long totalNumberOfRequests) {
+ this.timeTakenInMs = timeTakenInMs;
+ this.totalNumberOfRequests = totalNumberOfRequests;
+ }
+ }
+
+ /**
+ * Writes the given number of bytes to the given file, flushing after
+ * every flushInterval bytes; measures elapsed time and request count.
+ */
+ private static TestResult writeTestFile(NativeAzureFileSystem fs, Path path,
+ long size, long flushInterval) throws IOException {
+ AzureFileSystemInstrumentation instrumentation =
+ fs.getInstrumentation();
+ long initialRequests = instrumentation.getCurrentWebResponses();
+ Date start = new Date();
+ OutputStream output = fs.create(path);
+ writeTestFile(output, size, flushInterval);
+ output.close();
+ long finalRequests = instrumentation.getCurrentWebResponses();
+ return new TestResult(new Date().getTime() - start.getTime(),
+ finalRequests - initialRequests);
+ }
+
+ /**
+ * Writes a block blob of the given size, flushing after every
+ * flushInterval bytes; measures elapsed time and request count.
+ */
+ private static TestResult writeBlockBlobTestFile(NativeAzureFileSystem fs,
+ long size, long flushInterval) throws IOException {
+ return writeTestFile(fs, new Path("/blockBlob"), size, flushInterval);
+ }
+
+ /**
+ * Writes a page blob of the given size, flushing after every
+ * flushInterval bytes; measures elapsed time and request count.
+ */
+ private static TestResult writePageBlobTestFile(NativeAzureFileSystem fs,
+ long size, long flushInterval) throws IOException {
+ Path testFile = AzureTestUtils.blobPathForTests(fs,
+ "writePageBlobTestFile");
+ return writeTestFile(fs,
+ testFile,
+ size, flushInterval);
+ }
+
+ /**
+ * Runs the benchmark over a small 10 KB file, flushing every 500 bytes.
+ */
+ @Test
+ public void testTenKbFileFrequentFlush() throws Exception {
+ testForSizeAndFlushInterval(getFileSystem(), 10 * 1000, 500);
+ }
+
+ /**
+ * Runs the benchmark for the given file size and flush frequency.
+ */
+ private static void testForSizeAndFlushInterval(NativeAzureFileSystem fs,
+ final long size, final long flushInterval) throws IOException {
+ for (int i = 0; i < 5; i++) {
+ TestResult pageBlobResults = writePageBlobTestFile(fs, size, flushInterval);
+ System.out.printf(
+ "Page blob upload took %d ms. Total number of requests: %d.\n",
+ pageBlobResults.timeTakenInMs, pageBlobResults.totalNumberOfRequests);
+ TestResult blockBlobResults = writeBlockBlobTestFile(fs, size, flushInterval);
+ System.out.printf(
+ "Block blob upload took %d ms. Total number of requests: %d.\n",
+ blockBlobResults.timeTakenInMs, blockBlobResults.totalNumberOfRequests);
+ }
+ }
+
+ /**
+ * Runs the benchmark for the given file size and flush frequency from the
+ * command line.
+ */
+ public static void main(String[] argv) throws Exception {
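+ // Optional arguments: <size-in-bytes> <flush-interval-bytes>;
+ // the defaults below are 10 MB, flushing every 2000 bytes.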
+ Configuration conf = new Configuration();
+ long size = 10 * 1000 * 1000;
+ long flushInterval = 2000;
+ if (argv.length > 0) {
+ size = Long.parseLong(argv[0]);
+ }
+ if (argv.length > 1) {
+ flushInterval = Long.parseLong(argv[1]);
+ }
+ testForSizeAndFlushInterval(
+ (NativeAzureFileSystem) FileSystem.get(conf),
+ size,
+ flushInterval);
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestBlockBlobInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestBlockBlobInputStream.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestBlockBlobInputStream.java
new file mode 100644
index 0000000..07a13df
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestBlockBlobInputStream.java
@@ -0,0 +1,874 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import java.io.EOFException;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.Random;
+import java.util.concurrent.Callable;
+
+import org.junit.FixMethodOrder;
+import org.junit.Test;
+import org.junit.runners.MethodSorters;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FSExceptionMessages;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.azure.integration.AbstractAzureScaleTest;
+import org.apache.hadoop.fs.azure.integration.AzureTestUtils;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.fs.contract.ContractTestUtils.NanoTimer;
+
+import static org.junit.Assume.assumeNotNull;
+
+import static org.apache.hadoop.test.LambdaTestUtils.*;
+
+/**
+ * Test semantics and performance of the original block blob input stream
+ * (KEY_INPUT_STREAM_VERSION=1) and the new
+ * <code>BlockBlobInputStream</code> (KEY_INPUT_STREAM_VERSION=2).
+ */
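+// NAME_ASCENDING ordering guarantees that test_0100 creates the shared test
+// file before the read tests run, and that test_999 deletes it at the end.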
+@FixMethodOrder(MethodSorters.NAME_ASCENDING)
+public class ITestBlockBlobInputStream extends AbstractAzureScaleTest {
+ private static final Logger LOG = LoggerFactory.getLogger(
+ ITestBlockBlobInputStream.class);
+ private static final int KILOBYTE = 1024;
+ private static final int MEGABYTE = KILOBYTE * KILOBYTE;
+ private static final int TEST_FILE_SIZE = 6 * MEGABYTE;
+ private static final Path TEST_FILE_PATH = new Path(
+ "TestBlockBlobInputStream.txt");
+
+ private AzureBlobStorageTestAccount accountUsingInputStreamV1;
+ private AzureBlobStorageTestAccount accountUsingInputStreamV2;
+ private long testFileLength;
+
+ private FileStatus testFileStatus;
+ private Path hugefile;
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ Configuration conf = new Configuration();
+ conf.setInt(AzureNativeFileSystemStore.KEY_INPUT_STREAM_VERSION, 1);
+
+ accountUsingInputStreamV1 = AzureBlobStorageTestAccount.create(
+ "testblockblobinputstream",
+ EnumSet.of(AzureBlobStorageTestAccount.CreateOptions.CreateContainer),
+ conf,
+ true);
+
+ accountUsingInputStreamV2 = AzureBlobStorageTestAccount.create(
+ "testblockblobinputstream",
+ EnumSet.noneOf(AzureBlobStorageTestAccount.CreateOptions.class),
+ null,
+ true);
+
+ assumeNotNull(accountUsingInputStreamV1);
+ assumeNotNull(accountUsingInputStreamV2);
+ hugefile = fs.makeQualified(TEST_FILE_PATH);
+ try {
+ testFileStatus = fs.getFileStatus(TEST_FILE_PATH);
+ testFileLength = testFileStatus.getLen();
+ } catch (FileNotFoundException e) {
+ // file doesn't exist
+ testFileLength = 0;
+ }
+ }
+
+ @Override
+ protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+ Configuration conf = new Configuration();
+ conf.setInt(AzureNativeFileSystemStore.KEY_INPUT_STREAM_VERSION, 1);
+
+ accountUsingInputStreamV1 = AzureBlobStorageTestAccount.create(
+ "testblockblobinputstream",
+ EnumSet.of(AzureBlobStorageTestAccount.CreateOptions.CreateContainer),
+ conf,
+ true);
+
+ accountUsingInputStreamV2 = AzureBlobStorageTestAccount.create(
+ "testblockblobinputstream",
+ EnumSet.noneOf(AzureBlobStorageTestAccount.CreateOptions.class),
+ null,
+ true);
+
+ assumeNotNull(accountUsingInputStreamV1);
+ assumeNotNull(accountUsingInputStreamV2);
+ return accountUsingInputStreamV1;
+ }
+
+ /**
+ * Create a test file by repeating the characters in the alphabet.
+ * @throws IOException
+ */
+ private void createTestFileAndSetLength() throws IOException {
+ FileSystem fs = accountUsingInputStreamV1.getFileSystem();
+
+ // To reduce test run time, the test file can be reused.
+ if (fs.exists(TEST_FILE_PATH)) {
+ testFileStatus = fs.getFileStatus(TEST_FILE_PATH);
+ testFileLength = testFileStatus.getLen();
+ LOG.info("Reusing test file: {}", testFileStatus);
+ return;
+ }
+
+ int sizeOfAlphabet = ('z' - 'a' + 1);
+ byte[] buffer = new byte[26 * KILOBYTE];
+ char character = 'a';
+ for (int i = 0; i < buffer.length; i++) {
+ buffer[i] = (byte) character;
+ character = (character == 'z') ? 'a' : (char) ((int) character + 1);
+ }
+
+ LOG.info("Creating test file {} of size: {}", TEST_FILE_PATH,
+ TEST_FILE_SIZE);
+ ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer();
+
+ try(FSDataOutputStream outputStream = fs.create(TEST_FILE_PATH)) {
+ int bytesWritten = 0;
+ while (bytesWritten < TEST_FILE_SIZE) {
+ outputStream.write(buffer);
+ bytesWritten += buffer.length;
+ }
+ LOG.info("Closing stream {}", outputStream);
+ ContractTestUtils.NanoTimer closeTimer
+ = new ContractTestUtils.NanoTimer();
+ outputStream.close();
+ closeTimer.end("time to close() output stream");
+ }
+ timer.end("time to write %d KB", TEST_FILE_SIZE / 1024);
+ testFileLength = fs.getFileStatus(TEST_FILE_PATH).getLen();
+ }
+
+ void assumeHugeFileExists() throws IOException {
+ ContractTestUtils.assertPathExists(fs, "huge file not created", hugefile);
+ FileStatus status = fs.getFileStatus(hugefile);
+ ContractTestUtils.assertIsFile(hugefile, status);
+ assertTrue("File " + hugefile + " is empty", status.getLen() > 0);
+ }
+
+ /**
+ * Calculate megabits per second from the specified values for bytes and
+ * milliseconds.
+ * @param bytes The number of bytes.
+ * @param milliseconds The number of milliseconds.
+ * @return The number of megabits per second.
+ */
+ private static double toMbps(long bytes, long milliseconds) {
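+ // bytes * 8 / 1000 yields kilobits; kilobits per millisecond equal
+ // megabits per second. E.g. 6291456 bytes in 500 ms is ~100.7 Mbps.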
+ return bytes / 1000.0 * 8 / milliseconds;
+ }
+
+ @Test
+ public void test_0100_CreateHugeFile() throws IOException {
+ createTestFileAndSetLength();
+ }
+
+ @Test
+ public void test_0200_BasicReadTest() throws Exception {
+ assumeHugeFileExists();
+
+ try (
+ FSDataInputStream inputStreamV1
+ = accountUsingInputStreamV1.getFileSystem().open(TEST_FILE_PATH);
+
+ FSDataInputStream inputStreamV2
+ = accountUsingInputStreamV2.getFileSystem().open(TEST_FILE_PATH);
+ ) {
+ byte[] bufferV1 = new byte[3 * MEGABYTE];
+ byte[] bufferV2 = new byte[bufferV1.length];
+
+ // v1 forward seek and read a kilobyte into first kilobyte of bufferV1
+ inputStreamV1.seek(5 * MEGABYTE);
+ int numBytesReadV1 = inputStreamV1.read(bufferV1, 0, KILOBYTE);
+ assertEquals(KILOBYTE, numBytesReadV1);
+
+ // v2 forward seek and read a kilobyte into first kilobyte of bufferV2
+ inputStreamV2.seek(5 * MEGABYTE);
+ int numBytesReadV2 = inputStreamV2.read(bufferV2, 0, KILOBYTE);
+ assertEquals(KILOBYTE, numBytesReadV2);
+
+ assertArrayEquals(bufferV1, bufferV2);
+
+ int len = MEGABYTE;
+ int offset = bufferV1.length - len;
+
+ // v1 reverse seek and read a megabyte into last megabyte of bufferV1
+ inputStreamV1.seek(3 * MEGABYTE);
+ numBytesReadV1 = inputStreamV1.read(bufferV1, offset, len);
+ assertEquals(len, numBytesReadV1);
+
+ // v2 reverse seek and read a megabyte into last megabyte of bufferV2
+ inputStreamV2.seek(3 * MEGABYTE);
+ numBytesReadV2 = inputStreamV2.read(bufferV2, offset, len);
+ assertEquals(len, numBytesReadV2);
+
+ assertArrayEquals(bufferV1, bufferV2);
+ }
+ }
+
+ @Test
+ public void test_0201_RandomReadTest() throws Exception {
+ assumeHugeFileExists();
+
+ try (
+ FSDataInputStream inputStreamV1
+ = accountUsingInputStreamV1.getFileSystem().open(TEST_FILE_PATH);
+
+ FSDataInputStream inputStreamV2
+ = accountUsingInputStreamV2.getFileSystem().open(TEST_FILE_PATH);
+ ) {
+ final int bufferSize = 4 * KILOBYTE;
+ byte[] bufferV1 = new byte[bufferSize];
+ byte[] bufferV2 = new byte[bufferV1.length];
+
+ verifyConsistentReads(inputStreamV1, inputStreamV2, bufferV1, bufferV2);
+
+ inputStreamV1.seek(0);
+ inputStreamV2.seek(0);
+
+ verifyConsistentReads(inputStreamV1, inputStreamV2, bufferV1, bufferV2);
+
+ verifyConsistentReads(inputStreamV1, inputStreamV2, bufferV1, bufferV2);
+
+ int seekPosition = 2 * KILOBYTE;
+ inputStreamV1.seek(seekPosition);
+ inputStreamV2.seek(seekPosition);
+
+ inputStreamV1.seek(0);
+ inputStreamV2.seek(0);
+
+ verifyConsistentReads(inputStreamV1, inputStreamV2, bufferV1, bufferV2);
+
+ verifyConsistentReads(inputStreamV1, inputStreamV2, bufferV1, bufferV2);
+
+ verifyConsistentReads(inputStreamV1, inputStreamV2, bufferV1, bufferV2);
+
+ seekPosition = 5 * KILOBYTE;
+ inputStreamV1.seek(seekPosition);
+ inputStreamV2.seek(seekPosition);
+
+ verifyConsistentReads(inputStreamV1, inputStreamV2, bufferV1, bufferV2);
+
+ seekPosition = 10 * KILOBYTE;
+ inputStreamV1.seek(seekPosition);
+ inputStreamV2.seek(seekPosition);
+
+ verifyConsistentReads(inputStreamV1, inputStreamV2, bufferV1, bufferV2);
+
+ verifyConsistentReads(inputStreamV1, inputStreamV2, bufferV1, bufferV2);
+
+ seekPosition = 4100 * KILOBYTE;
+ inputStreamV1.seek(seekPosition);
+ inputStreamV2.seek(seekPosition);
+
+ verifyConsistentReads(inputStreamV1, inputStreamV2, bufferV1, bufferV2);
+ }
+ }
+
+ private void verifyConsistentReads(FSDataInputStream inputStreamV1,
+ FSDataInputStream inputStreamV2,
+ byte[] bufferV1,
+ byte[] bufferV2) throws IOException {
+ int size = bufferV1.length;
+ final int numBytesReadV1 = inputStreamV1.read(bufferV1, 0, size);
+ assertEquals("Bytes read from V1 stream", size, numBytesReadV1);
+
+ final int numBytesReadV2 = inputStreamV2.read(bufferV2, 0, size);
+ assertEquals("Bytes read from V2 stream", size, numBytesReadV2);
+
+ assertArrayEquals("Mismatch in read data", bufferV1, bufferV2);
+ }
+
+ /**
+ * Validates the implementation of InputStream.markSupported.
+ * @throws IOException
+ */
+ @Test
+ public void test_0301_MarkSupportedV1() throws IOException {
+ validateMarkSupported(accountUsingInputStreamV1.getFileSystem());
+ }
+
+ /**
+ * Validates the implementation of InputStream.markSupported.
+ * @throws IOException
+ */
+ @Test
+ public void test_0302_MarkSupportedV2() throws IOException {
+ validateMarkSupported(accountUsingInputStreamV2.getFileSystem());
+ }
+
+ private void validateMarkSupported(FileSystem fs) throws IOException {
+ assumeHugeFileExists();
+ try (FSDataInputStream inputStream = fs.open(TEST_FILE_PATH)) {
+ assertTrue("mark is not supported", inputStream.markSupported());
+ }
+ }
+
+ /**
+ * Validates the implementation of InputStream.mark and reset
+ * for version 1 of the block blob input stream.
+ * @throws Exception
+ */
+ @Test
+ public void test_0303_MarkAndResetV1() throws Exception {
+ validateMarkAndReset(accountUsingInputStreamV1.getFileSystem());
+ }
+
+ /**
+ * Validates the implementation of InputStream.mark and reset
+ * for version 2 of the block blob input stream.
+ * @throws Exception
+ */
+ @Test
+ public void test_0304_MarkAndResetV2() throws Exception {
+ validateMarkAndReset(accountUsingInputStreamV2.getFileSystem());
+ }
+
+ private void validateMarkAndReset(FileSystem fs) throws Exception {
+ assumeHugeFileExists();
+ try (FSDataInputStream inputStream = fs.open(TEST_FILE_PATH)) {
+ inputStream.mark(KILOBYTE - 1);
+
+ byte[] buffer = new byte[KILOBYTE];
+ int bytesRead = inputStream.read(buffer);
+ assertEquals(buffer.length, bytesRead);
+
+ inputStream.reset();
+ assertEquals("rest -> pos 0", 0, inputStream.getPos());
+
+ inputStream.mark(8 * KILOBYTE - 1);
+
+ buffer = new byte[8 * KILOBYTE];
+ bytesRead = inputStream.read(buffer);
+ assertEquals(buffer.length, bytesRead);
+
+ intercept(IOException.class,
+ "Resetting to invalid mark",
+ new Callable<FSDataInputStream>() {
+ @Override
+ public FSDataInputStream call() throws Exception {
+ inputStream.reset();
+ return inputStream;
+ }
+ }
+ );
+ }
+ }
+
+ /**
+ * Validates the implementation of Seekable.seekToNewSource, which should
+ * return false for version 1 of the block blob input stream.
+ * @throws IOException
+ */
+ @Test
+ public void test_0305_SeekToNewSourceV1() throws IOException {
+ validateSeekToNewSource(accountUsingInputStreamV1.getFileSystem());
+ }
+
+ /**
+ * Validates the implementation of Seekable.seekToNewSource, which should
+ * return false for version 2 of the block blob input stream.
+ * @throws IOException
+ */
+ @Test
+ public void test_0306_SeekToNewSourceV2() throws IOException {
+ validateSeekToNewSource(accountUsingInputStreamV2.getFileSystem());
+ }
+
+ private void validateSeekToNewSource(FileSystem fs) throws IOException {
+ assumeHugeFileExists();
+ try (FSDataInputStream inputStream = fs.open(TEST_FILE_PATH)) {
+ assertFalse(inputStream.seekToNewSource(0));
+ }
+ }
+
+ /**
+ * Validates the implementation of InputStream.skip and ensures there is no
+ * network I/O for version 1 of the block blob input stream.
+ * @throws Exception
+ */
+ @Test
+ public void test_0307_SkipBoundsV1() throws Exception {
+ validateSkipBounds(accountUsingInputStreamV1.getFileSystem());
+ }
+
+ /**
+ * Validates the implementation of InputStream.skip and ensures there is no
+ * network I/O for version 2 of the block blob input stream.
+ * @throws Exception
+ */
+ @Test
+ public void test_0308_SkipBoundsV2() throws Exception {
+ validateSkipBounds(accountUsingInputStreamV2.getFileSystem());
+ }
+
+ private void validateSkipBounds(FileSystem fs) throws Exception {
+ assumeHugeFileExists();
+ try (FSDataInputStream inputStream = fs.open(TEST_FILE_PATH)) {
+ NanoTimer timer = new NanoTimer();
+
+ long skipped = inputStream.skip(-1);
+ assertEquals(0, skipped);
+
+ skipped = inputStream.skip(0);
+ assertEquals(0, skipped);
+
+ assertTrue(testFileLength > 0);
+
+ skipped = inputStream.skip(testFileLength);
+ assertEquals(testFileLength, skipped);
+
+ intercept(EOFException.class,
+ new Callable<Long>() {
+ @Override
+ public Long call() throws Exception {
+ return inputStream.skip(1);
+ }
+ }
+ );
+ long elapsedTimeMs = timer.elapsedTimeMs();
+ assertTrue(
+ String.format(
+ "There should not be any network I/O (elapsedTimeMs=%1$d).",
+ elapsedTimeMs),
+ elapsedTimeMs < 20);
+ }
+ }
+
+ /**
+ * Validates the implementation of Seekable.seek and ensures there is no
+ * network I/O for forward seek.
+ * @throws Exception
+ */
+ @Test
+ public void test_0309_SeekBoundsV1() throws Exception {
+ validateSeekBounds(accountUsingInputStreamV1.getFileSystem());
+ }
+
+ /**
+ * Validates the implementation of Seekable.seek and ensures there is no
+ * network I/O for forward seek.
+ * @throws Exception
+ */
+ @Test
+ public void test_0310_SeekBoundsV2() throws Exception {
+ validateSeekBounds(accountUsingInputStreamV2.getFileSystem());
+ }
+
+ private void validateSeekBounds(FileSystem fs) throws Exception {
+ assumeHugeFileExists();
+ try (
+ FSDataInputStream inputStream = fs.open(TEST_FILE_PATH);
+ ) {
+ NanoTimer timer = new NanoTimer();
+
+ inputStream.seek(0);
+ assertEquals(0, inputStream.getPos());
+
+ intercept(EOFException.class,
+ FSExceptionMessages.NEGATIVE_SEEK,
+ new Callable<FSDataInputStream>() {
+ @Override
+ public FSDataInputStream call() throws Exception {
+ inputStream.seek(-1);
+ return inputStream;
+ }
+ }
+ );
+
+ assertTrue("Test file length only " + testFileLength, testFileLength > 0);
+ inputStream.seek(testFileLength);
+ assertEquals(testFileLength, inputStream.getPos());
+
+ intercept(EOFException.class,
+ FSExceptionMessages.CANNOT_SEEK_PAST_EOF,
+ new Callable<FSDataInputStream>() {
+ @Override
+ public FSDataInputStream call() throws Exception {
+ inputStream.seek(testFileLength + 1);
+ return inputStream;
+ }
+ }
+ );
+
+ long elapsedTimeMs = timer.elapsedTimeMs();
+ assertTrue(
+ String.format(
+ "There should not be any network I/O (elapsedTimeMs=%1$d).",
+ elapsedTimeMs),
+ elapsedTimeMs < 20);
+ }
+ }
+
+ /**
+ * Validates the implementation of Seekable.seek, Seekable.getPos,
+ * and InputStream.available.
+ * @throws Exception
+ */
+ @Test
+ public void test_0311_SeekAndAvailableAndPositionV1() throws Exception {
+ validateSeekAndAvailableAndPosition(
+ accountUsingInputStreamV1.getFileSystem());
+ }
+
+ /**
+ * Validates the implementation of Seekable.seek, Seekable.getPos,
+ * and InputStream.available.
+ * @throws Exception
+ */
+ @Test
+ public void test_0312_SeekAndAvailableAndPositionV2() throws Exception {
+ validateSeekAndAvailableAndPosition(
+ accountUsingInputStreamV2.getFileSystem());
+ }
+
+ private void validateSeekAndAvailableAndPosition(FileSystem fs)
+ throws Exception {
+ assumeHugeFileExists();
+ try (FSDataInputStream inputStream = fs.open(TEST_FILE_PATH)) {
+ byte[] expected1 = {(byte) 'a', (byte) 'b', (byte) 'c'};
+ byte[] expected2 = {(byte) 'd', (byte) 'e', (byte) 'f'};
+ byte[] expected3 = {(byte) 'b', (byte) 'c', (byte) 'd'};
+ byte[] expected4 = {(byte) 'g', (byte) 'h', (byte) 'i'};
+ byte[] buffer = new byte[3];
+
+ int bytesRead = inputStream.read(buffer);
+ assertEquals(buffer.length, bytesRead);
+ assertArrayEquals(expected1, buffer);
+ assertEquals(buffer.length, inputStream.getPos());
+ assertEquals(testFileLength - inputStream.getPos(),
+ inputStream.available());
+
+ bytesRead = inputStream.read(buffer);
+ assertEquals(buffer.length, bytesRead);
+ assertArrayEquals(expected2, buffer);
+ assertEquals(2 * buffer.length, inputStream.getPos());
+ assertEquals(testFileLength - inputStream.getPos(),
+ inputStream.available());
+
+ // reverse seek
+ int seekPos = 0;
+ inputStream.seek(seekPos);
+
+ bytesRead = inputStream.read(buffer);
+ assertEquals(buffer.length, bytesRead);
+ assertArrayEquals(expected1, buffer);
+ assertEquals(buffer.length + seekPos, inputStream.getPos());
+ assertEquals(testFileLength - inputStream.getPos(),
+ inputStream.available());
+
+ // reverse seek
+ seekPos = 1;
+ inputStream.seek(seekPos);
+
+ bytesRead = inputStream.read(buffer);
+ assertEquals(buffer.length, bytesRead);
+ assertArrayEquals(expected3, buffer);
+ assertEquals(buffer.length + seekPos, inputStream.getPos());
+ assertEquals(testFileLength - inputStream.getPos(),
+ inputStream.available());
+
+ // forward seek
+ seekPos = 6;
+ inputStream.seek(seekPos);
+
+ bytesRead = inputStream.read(buffer);
+ assertEquals(buffer.length, bytesRead);
+ assertArrayEquals(expected4, buffer);
+ assertEquals(buffer.length + seekPos, inputStream.getPos());
+ assertEquals(testFileLength - inputStream.getPos(),
+ inputStream.available());
+ }
+ }
+
+ /**
+ * Validates the implementation of InputStream.skip, Seekable.getPos,
+ * and InputStream.available.
+ * @throws IOException
+ */
+ @Test
+ public void test_0313_SkipAndAvailableAndPositionV1() throws IOException {
+ validateSkipAndAvailableAndPosition(
+ accountUsingInputStreamV1.getFileSystem());
+ }
+
+ /**
+ * Validates the implementation of InputStream.skip, Seekable.getPos,
+ * and InputStream.available.
+ * @throws IOException
+ */
+ @Test
+ public void test_0314_SkipAndAvailableAndPositionV2() throws IOException {
+ validateSkipAndAvailableAndPosition(
+ accountUsingInputStreamV2.getFileSystem());
+ }
+
+ private void validateSkipAndAvailableAndPosition(FileSystem fs)
+ throws IOException {
+ assumeHugeFileExists();
+ try (
+ FSDataInputStream inputStream = fs.open(TEST_FILE_PATH);
+ ) {
+ byte[] expected1 = {(byte) 'a', (byte) 'b', (byte) 'c'};
+ byte[] expected2 = {(byte) 'd', (byte) 'e', (byte) 'f'};
+ byte[] expected3 = {(byte) 'b', (byte) 'c', (byte) 'd'};
+ byte[] expected4 = {(byte) 'g', (byte) 'h', (byte) 'i'};
+
+ assertEquals(testFileLength, inputStream.available());
+ assertEquals(0, inputStream.getPos());
+
+ int n = 3;
+ long skipped = inputStream.skip(n);
+
+ assertEquals(skipped, inputStream.getPos());
+ assertEquals(testFileLength - inputStream.getPos(),
+ inputStream.available());
+ assertEquals(skipped, n);
+
+ byte[] buffer = new byte[3];
+ int bytesRead = inputStream.read(buffer);
+ assertEquals(buffer.length, bytesRead);
+ assertArrayEquals(expected2, buffer);
+ assertEquals(buffer.length + skipped, inputStream.getPos());
+ assertEquals(testFileLength - inputStream.getPos(),
+ inputStream.available());
+
+ // does skip still work after seek?
+ int seekPos = 1;
+ inputStream.seek(seekPos);
+
+ bytesRead = inputStream.read(buffer);
+ assertEquals(buffer.length, bytesRead);
+ assertArrayEquals(expected3, buffer);
+ assertEquals(buffer.length + seekPos, inputStream.getPos());
+ assertEquals(testFileLength - inputStream.getPos(),
+ inputStream.available());
+
+ long currentPosition = inputStream.getPos();
+ n = 2;
+ skipped = inputStream.skip(n);
+
+ assertEquals(currentPosition + skipped, inputStream.getPos());
+ assertEquals(testFileLength - inputStream.getPos(),
+ inputStream.available());
+ assertEquals(skipped, n);
+
+ bytesRead = inputStream.read(buffer);
+ assertEquals(buffer.length, bytesRead);
+ assertArrayEquals(expected4, buffer);
+ assertEquals(buffer.length + skipped + currentPosition,
+ inputStream.getPos());
+ assertEquals(testFileLength - inputStream.getPos(),
+ inputStream.available());
+ }
+ }
+
+ /**
+ * Ensures parity in the performance of sequential read for
+ * version 1 and version 2 of the block blob input stream.
+ * @throws IOException
+ */
+ @Test
+ public void test_0315_SequentialReadPerformance() throws IOException {
+ assumeHugeFileExists();
+ final int maxAttempts = 10;
+ final double maxAcceptableRatio = 1.01;
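+ // Individual runs are noisy, so retry up to maxAttempts times and pass
+ // as soon as one attempt shows v2 within 1% of v1.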
+ double v1ElapsedMs = 0, v2ElapsedMs = 0;
+ double ratio = Double.MAX_VALUE;
+ for (int i = 0; i < maxAttempts && ratio >= maxAcceptableRatio; i++) {
+ v1ElapsedMs = sequentialRead(1,
+ accountUsingInputStreamV1.getFileSystem(), false);
+ v2ElapsedMs = sequentialRead(2,
+ accountUsingInputStreamV2.getFileSystem(), false);
+ ratio = v2ElapsedMs / v1ElapsedMs;
+ LOG.info(String.format(
+ "v1ElapsedMs=%1$d, v2ElapsedMs=%2$d, ratio=%3$.2f",
+ (long) v1ElapsedMs,
+ (long) v2ElapsedMs,
+ ratio));
+ }
+ assertTrue(String.format(
+ "Performance of version 2 is not acceptable: v1ElapsedMs=%1$d,"
+ + " v2ElapsedMs=%2$d, ratio=%3$.2f",
+ (long) v1ElapsedMs,
+ (long) v2ElapsedMs,
+ ratio),
+ ratio < maxAcceptableRatio);
+ }
+
+ /**
+ * Ensures parity in the performance of sequential read after reverse seek for
+ * version 2 of the block blob input stream.
+ * @throws IOException
+ */
+ @Test
+ public void test_0316_SequentialReadAfterReverseSeekPerformanceV2()
+ throws IOException {
+ assumeHugeFileExists();
+ final int maxAttempts = 10;
+ final double maxAcceptableRatio = 1.01;
+ double beforeSeekElapsedMs = 0, afterSeekElapsedMs = 0;
+ double ratio = Double.MAX_VALUE;
+ for (int i = 0; i < maxAttempts && ratio >= maxAcceptableRatio; i++) {
+ beforeSeekElapsedMs = sequentialRead(2,
+ accountUsingInputStreamV2.getFileSystem(), false);
+ afterSeekElapsedMs = sequentialRead(2,
+ accountUsingInputStreamV2.getFileSystem(), true);
+ ratio = afterSeekElapsedMs / beforeSeekElapsedMs;
+ LOG.info(String.format(
+ "beforeSeekElapsedMs=%1$d, afterSeekElapsedMs=%2$d, ratio=%3$.2f",
+ (long) beforeSeekElapsedMs,
+ (long) afterSeekElapsedMs,
+ ratio));
+ }
+ assertTrue(String.format(
+ "Performance of version 2 after reverse seek is not acceptable:"
+ + " beforeSeekElapsedMs=%1$d, afterSeekElapsedMs=%2$d,"
+ + " ratio=%3$.2f",
+ (long) beforeSeekElapsedMs,
+ (long) afterSeekElapsedMs,
+ ratio),
+ ratio < maxAcceptableRatio);
+ }
+
+ private long sequentialRead(int version,
+ FileSystem fs,
+ boolean afterReverseSeek) throws IOException {
+ byte[] buffer = new byte[16 * KILOBYTE];
+ long totalBytesRead = 0;
+ long bytesRead = 0;
+
+ try(FSDataInputStream inputStream = fs.open(TEST_FILE_PATH)) {
+ if (afterReverseSeek) {
+ // Warm read of the first few megabytes, then seek back to the start.
+ // A do/while is required here: bytesRead is initialized to 0, so a
+ // plain while loop would never execute.
+ do {
+ bytesRead = inputStream.read(buffer);
+ totalBytesRead += bytesRead;
+ } while (bytesRead > 0 && totalBytesRead < 4 * MEGABYTE);
+ totalBytesRead = 0;
+ inputStream.seek(0);
+ }
+
+ NanoTimer timer = new NanoTimer();
+ while ((bytesRead = inputStream.read(buffer)) > 0) {
+ totalBytesRead += bytesRead;
+ }
+ long elapsedTimeMs = timer.elapsedTimeMs();
+
+ LOG.info(String.format(
+ "v%1$d: bytesRead=%2$d, elapsedMs=%3$d, Mbps=%4$.2f,"
+ + " afterReverseSeek=%5$s",
+ version,
+ totalBytesRead,
+ elapsedTimeMs,
+ toMbps(totalBytesRead, elapsedTimeMs),
+ afterReverseSeek));
+
+ assertEquals(testFileLength, totalBytesRead);
+ inputStream.close();
+ return elapsedTimeMs;
+ }
+ }
+
+ @Test
+ public void test_0317_RandomReadPerformance() throws IOException {
+ assumeHugeFileExists();
+ final int maxAttempts = 10;
+ final double maxAcceptableRatio = 0.10;
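+ // v2 is expected to be far faster at random reads: require at least one
+ // attempt where it takes under 10% of the v1 time.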
+ double v1ElapsedMs = 0, v2ElapsedMs = 0;
+ double ratio = Double.MAX_VALUE;
+ for (int i = 0; i < maxAttempts && ratio >= maxAcceptableRatio; i++) {
+ v1ElapsedMs = randomRead(1,
+ accountUsingInputStreamV1.getFileSystem());
+ v2ElapsedMs = randomRead(2,
+ accountUsingInputStreamV2.getFileSystem());
+ ratio = v2ElapsedMs / v1ElapsedMs;
+ LOG.info(String.format(
+ "v1ElapsedMs=%1$d, v2ElapsedMs=%2$d, ratio=%3$.2f",
+ (long) v1ElapsedMs,
+ (long) v2ElapsedMs,
+ ratio));
+ }
+ assertTrue(String.format(
+ "Performance of version 2 is not acceptable: v1ElapsedMs=%1$d,"
+ + " v2ElapsedMs=%2$d, ratio=%3$.2f",
+ (long) v1ElapsedMs,
+ (long) v2ElapsedMs,
+ ratio),
+ ratio < maxAcceptableRatio);
+ }
+
+ private long randomRead(int version, FileSystem fs) throws IOException {
+ assumeHugeFileExists();
+ final int minBytesToRead = 2 * MEGABYTE;
+ Random random = new Random();
+ byte[] buffer = new byte[8 * KILOBYTE];
+ long totalBytesRead = 0;
+ long bytesRead = 0;
+ try(FSDataInputStream inputStream = fs.open(TEST_FILE_PATH)) {
+ NanoTimer timer = new NanoTimer();
+
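+ // Read one buffer, hop to a random offset, and repeat until at least
+ // minBytesToRead bytes have been read.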
+ do {
+ bytesRead = inputStream.read(buffer);
+ totalBytesRead += bytesRead;
+ inputStream.seek(random.nextInt(
+ (int) (testFileLength - buffer.length)));
+ } while (bytesRead > 0 && totalBytesRead < minBytesToRead);
+
+ long elapsedTimeMs = timer.elapsedTimeMs();
+
+ inputStream.close();
+
+ LOG.info(String.format(
+ "v%1$d: totalBytesRead=%2$d, elapsedTimeMs=%3$d, Mbps=%4$.2f",
+ version,
+ totalBytesRead,
+ elapsedTimeMs,
+ toMbps(totalBytesRead, elapsedTimeMs)));
+
+ assertTrue(minBytesToRead <= totalBytesRead);
+
+ return elapsedTimeMs;
+ }
+ }
+
+ @Test
+ public void test_999_DeleteHugeFiles() throws IOException {
+ try {
+ NanoTimer timer = new NanoTimer();
+ NativeAzureFileSystem fs = getFileSystem();
+ fs.delete(TEST_FILE_PATH, false);
+ timer.end("time to delete %s", TEST_FILE_PATH);
+ } finally {
+ // clean up the test account
+ AzureTestUtils.cleanupTestAccount(accountUsingInputStreamV1);
+ }
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestContainerChecks.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestContainerChecks.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestContainerChecks.java
new file mode 100644
index 0000000..cc3baf5
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestContainerChecks.java
@@ -0,0 +1,194 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import static org.junit.Assume.assumeNotNull;
+
+import java.io.FileNotFoundException;
+import java.util.EnumSet;
+import java.util.concurrent.Callable;
+
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.azure.AzureBlobStorageTestAccount.CreateOptions;
+import org.apache.hadoop.fs.azure.integration.AzureTestUtils;
+import org.apache.hadoop.test.LambdaTestUtils;
+
+import org.junit.After;
+import org.junit.Assume;
+import org.junit.Before;
+import org.junit.Test;
+
+import com.microsoft.azure.storage.blob.BlobOutputStream;
+import com.microsoft.azure.storage.blob.CloudBlobContainer;
+import com.microsoft.azure.storage.blob.CloudBlockBlob;
+
+/**
+ * Tests that WASB creates containers only if needed.
+ */
+public class ITestContainerChecks extends AbstractWasbTestWithTimeout {
+ private AzureBlobStorageTestAccount testAccount;
+ private boolean runningInSASMode = false;
+
+ @After
+ public void tearDown() throws Exception {
+ testAccount = AzureTestUtils.cleanup(testAccount);
+ }
+
+ @Before
+ public void setMode() {
+ runningInSASMode = AzureBlobStorageTestAccount.createTestConfiguration().
+ getBoolean(AzureNativeFileSystemStore.KEY_USE_SECURE_MODE, false);
+ }
+
+ @Test
+ public void testContainerExistAfterDoesNotExist() throws Exception {
+ testAccount = blobStorageTestAccount();
+ assumeNotNull(testAccount);
+ CloudBlobContainer container = testAccount.getRealContainer();
+ FileSystem fs = testAccount.getFileSystem();
+
+ // Starting off with the container not there
+ assertFalse(container.exists());
+
+ // A list shouldn't create the container and will set file system store
+ // state to DoesNotExist
+ try {
+ fs.listStatus(new Path("/"));
+ assertTrue("Should've thrown.", false);
+ } catch (FileNotFoundException ex) {
+ assertTrue("Unexpected exception: " + ex,
+ ex.getMessage().contains("does not exist."));
+ }
+ assertFalse(container.exists());
+
+ // Create a container outside of the WASB FileSystem
+ container.create();
+ // Add a file to the container outside of the WASB FileSystem
+ CloudBlockBlob blob = testAccount.getBlobReference("foo");
+ BlobOutputStream outputStream = blob.openOutputStream();
+ outputStream.write(new byte[10]);
+ outputStream.close();
+
+ // Make sure the file is visible
+ assertTrue(fs.exists(new Path("/foo")));
+ assertTrue(container.exists());
+ }
+
+ protected AzureBlobStorageTestAccount blobStorageTestAccount()
+ throws Exception {
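+ // No CreateOptions: bind to the account without creating the container,
+ // so each test starts with container.exists() == false.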
+ return AzureBlobStorageTestAccount.create("",
+ EnumSet.noneOf(CreateOptions.class));
+ }
+
+ @Test
+ public void testContainerCreateAfterDoesNotExist() throws Exception {
+ testAccount = blobStorageTestAccount();
+ assumeNotNull(testAccount);
+ CloudBlobContainer container = testAccount.getRealContainer();
+ FileSystem fs = testAccount.getFileSystem();
+
+ // Starting off with the container not there
+ assertFalse(container.exists());
+
+ // A list shouldn't create the container and will set file system store
+ // state to DoesNotExist
+ try {
+ fs.listStatus(new Path("/"));
+ fail("Should've thrown.");
+ } catch (FileNotFoundException ex) {
+ assertTrue("Unexpected exception: " + ex,
+ ex.getMessage().contains("does not exist."));
+ }
+ assertFalse(container.exists());
+
+ // Create a container outside of the WASB FileSystem
+ container.create();
+
+ // Write should succeed
+ assertTrue(fs.createNewFile(new Path("/foo")));
+ assertTrue(container.exists());
+ }
+
+ @Test
+ public void testContainerCreateOnWrite() throws Exception {
+ testAccount = blobStorageTestAccount();
+ assumeNotNull(testAccount);
+ CloudBlobContainer container = testAccount.getRealContainer();
+ FileSystem fs = testAccount.getFileSystem();
+
+ // Starting off with the container not there
+ assertFalse(container.exists());
+
+ // A list shouldn't create the container.
+ try {
+ fs.listStatus(new Path("/"));
+ assertTrue("Should've thrown.", false);
+ } catch (FileNotFoundException ex) {
+ assertTrue("Unexpected exception: " + ex,
+ ex.getMessage().contains("does not exist."));
+ }
+ assertFalse(container.exists());
+
+ // Neither should a read.
+ Path foo = new Path("/testContainerCreateOnWrite-foo");
+ Path bar = new Path("/testContainerCreateOnWrite-bar");
+ LambdaTestUtils.intercept(FileNotFoundException.class,
+ new Callable<String>() {
+ @Override
+ public String call() throws Exception {
+ fs.open(foo).close();
+ return "Stream to " + foo;
+ }
+ }
+ );
+ assertFalse(container.exists());
+
+ // Neither should a rename
+ assertFalse(fs.rename(foo, bar));
+ assertFalse(container.exists());
+
+ // But a write should.
+ assertTrue(fs.createNewFile(foo));
+ assertTrue(container.exists());
+ }
+
+ @Test
+ public void testContainerChecksWithSas() throws Exception {
+
+ Assume.assumeFalse(runningInSASMode);
+ testAccount = AzureBlobStorageTestAccount.create("",
+ EnumSet.of(CreateOptions.UseSas));
+ assumeNotNull(testAccount);
+ CloudBlobContainer container = testAccount.getRealContainer();
+ FileSystem fs = testAccount.getFileSystem();
+
+ // The container shouldn't be there
+ assertFalse(container.exists());
+
+ // A write should just fail
+ try {
+ fs.createNewFile(new Path("/testContainerChecksWithSas-foo"));
+ assertFalse("Should've thrown.", true);
+ } catch (AzureException ex) {
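+ // Expected: the write fails while the container is absent.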
+ }
+ assertFalse(container.exists());
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationExceptionHandling.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationExceptionHandling.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationExceptionHandling.java
new file mode 100644
index 0000000..a45dae4
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationExceptionHandling.java
@@ -0,0 +1,283 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import java.io.FileNotFoundException;
+
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.junit.After;
+import org.junit.Test;
+
+import static org.apache.hadoop.fs.azure.ExceptionHandlingTestHelper.*;
+
+/**
+ * Single threaded exception handling.
+ */
+public class ITestFileSystemOperationExceptionHandling
+ extends AbstractWasbTestBase {
+
+ private FSDataInputStream inputStream = null;
+
+ private Path testPath;
+ private Path testFolderPath;
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ testPath = path("testfile.dat");
+ testFolderPath = path("testfolder");
+ }
+
+ /**
+ * Helper method that creates an InputStream to validate exceptions
+ * for various scenarios.
+ */
+ private void setupInputStreamToTest(AzureBlobStorageTestAccount testAccount)
+ throws Exception {
+
+ FileSystem fs = testAccount.getFileSystem();
+
+ // Step 1: Create a file and write dummy data.
+ Path base = methodPath();
+ Path testFilePath1 = new Path(base, "test1.dat");
+ Path testFilePath2 = new Path(base, "test2.dat");
+ FSDataOutputStream outputStream = fs.create(testFilePath1);
+ String testString = "This is a test string";
+ outputStream.write(testString.getBytes());
+ outputStream.close();
+
+ // Step 2: Open a read stream on the file.
+ inputStream = fs.open(testFilePath1);
+
+ // Step 3: Rename the file
+ fs.rename(testFilePath1, testFilePath2);
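+ // The open stream now refers to a blob that no longer exists under its
+ // original name; subsequent reads and seeks on it should raise
+ // FileNotFoundException.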
+ }
+
+ /**
+ * Tests a basic single threaded read scenario for Page blobs.
+ */
+ @Test(expected=FileNotFoundException.class)
+ public void testSingleThreadedPageBlobReadScenario() throws Throwable {
+ AzureBlobStorageTestAccount testAccount = getPageBlobTestStorageAccount();
+ setupInputStreamToTest(testAccount);
+ byte[] readBuffer = new byte[512];
+ inputStream.read(readBuffer);
+ }
+
+ /**
+ * Tests a basic single threaded seek scenario for Page blobs.
+ */
+ @Test(expected=FileNotFoundException.class)
+ public void testSingleThreadedPageBlobSeekScenario() throws Throwable {
+ AzureBlobStorageTestAccount testAccount = getPageBlobTestStorageAccount();
+ setupInputStreamToTest(testAccount);
+ inputStream.seek(5);
+ }
+
+ /**
+ * Tests a basic single threaded seek scenario for Block blobs.
+ */
+ @Test(expected=FileNotFoundException.class)
+ public void testSingleThreadBlockBlobSeekScenario() throws Throwable {
+
+ AzureBlobStorageTestAccount testAccount = createTestAccount();
+ setupInputStreamToTest(testAccount);
+ inputStream.seek(5);
+ inputStream.read();
+ }
+
+ /**
+ * Tests a basic single threaded read scenario for Block blobs.
+ */
+ @Test(expected=FileNotFoundException.class)
+ public void testSingleThreadedBlockBlobReadScenario() throws Throwable {
+ AzureBlobStorageTestAccount testAccount = createTestAccount();
+ setupInputStreamToTest(testAccount);
+ byte[] readBuffer = new byte[512];
+ inputStream.read(readBuffer);
+ }
+
+ /**
+ * Tests basic single threaded setPermission scenario.
+ */
+ @Test(expected = FileNotFoundException.class)
+ public void testSingleThreadedBlockBlobSetPermissionScenario() throws Throwable {
+
+ createEmptyFile(createTestAccount(), testPath);
+ fs.delete(testPath, true);
+ fs.setPermission(testPath,
+ new FsPermission(FsAction.EXECUTE, FsAction.READ, FsAction.READ));
+ }
+
+ /**
+ * Tests basic single threaded setPermission scenario.
+ */
+ @Test(expected = FileNotFoundException.class)
+ public void testSingleThreadedPageBlobSetPermissionScenario()
+ throws Throwable {
+ createEmptyFile(getPageBlobTestStorageAccount(), testPath);
+ fs.delete(testPath, true);
+ fs.setPermission(testPath,
+ new FsPermission(FsAction.EXECUTE, FsAction.READ, FsAction.READ));
+ }
+
+ /**
+ * Tests basic single threaded setOwner scenario.
+ */
+ @Test(expected = FileNotFoundException.class)
+ public void testSingleThreadedBlockBlobSetOwnerScenario() throws Throwable {
+
+ createEmptyFile(createTestAccount(), testPath);
+ fs.delete(testPath, true);
+ fs.setOwner(testPath, "testowner", "testgroup");
+ }
+
+ /**
+ * Tests basic single threaded setOwner scenario.
+ */
+ @Test(expected = FileNotFoundException.class)
+ public void testSingleThreadedPageBlobSetOwnerScenario() throws Throwable {
+ createEmptyFile(getPageBlobTestStorageAccount(),
+ testPath);
+ fs.delete(testPath, true);
+ fs.setOwner(testPath, "testowner", "testgroup");
+ }
+
+ /**
+ * Test basic single threaded listStatus scenario.
+ */
+ @Test(expected = FileNotFoundException.class)
+ public void testSingleThreadedBlockBlobListStatusScenario() throws Throwable {
+ createTestFolder(createTestAccount(),
+ testFolderPath);
+ fs.delete(testFolderPath, true);
+ fs.listStatus(testFolderPath);
+ }
+
+ /**
+ * Test basic single threaded listStatus scenario.
+ */
+ @Test(expected = FileNotFoundException.class)
+ public void testSingleThreadedPageBlobListStatusScenario() throws Throwable {
+ createTestFolder(getPageBlobTestStorageAccount(),
+ testFolderPath);
+ fs.delete(testFolderPath, true);
+ fs.listStatus(testFolderPath);
+ }
+
+ /**
+ * Test basic single threaded rename scenario.
+ */
+ @Test
+ public void testSingleThreadedBlockBlobRenameScenario() throws Throwable {
+
+ createEmptyFile(createTestAccount(),
+ testPath);
+ Path dstPath = new Path("dstFile.dat");
+ fs.delete(testPath, true);
+ boolean renameResult = fs.rename(testPath, dstPath);
+ assertFalse(renameResult);
+ }
+
+ /**
+ * Test basic single threaded rename scenario.
+ */
+ @Test
+ public void testSingleThreadedPageBlobRenameScenario() throws Throwable {
+
+ createEmptyFile(getPageBlobTestStorageAccount(),
+ testPath);
+ Path dstPath = new Path("dstFile.dat");
+ fs.delete(testPath, true);
+ boolean renameResult = fs.rename(testPath, dstPath);
+ assertFalse(renameResult);
+ }
+
+ /**
+ * Test basic single threaded delete scenario.
+ */
+ @Test
+ public void testSingleThreadedBlockBlobDeleteScenario() throws Throwable {
+
+ createEmptyFile(createTestAccount(),
+ testPath);
+ fs.delete(testPath, true);
+ boolean deleteResult = fs.delete(testPath, true);
+ assertFalse(deleteResult);
+ }
+
+ /**
+ * Test basic single threaded delete scenario.
+ */
+ @Test
+ public void testSingleThreadedPageBlobDeleteScenario() throws Throwable {
+
+ createEmptyFile(getPageBlobTestStorageAccount(),
+ testPath);
+ fs.delete(testPath, true);
+ boolean deleteResult = fs.delete(testPath, true);
+ assertFalse(deleteResult);
+ }
+
+ /**
+ * Test delete then open a file.
+ */
+ @Test(expected = FileNotFoundException.class)
+ public void testSingleThreadedBlockBlobOpenScenario() throws Throwable {
+
+ createEmptyFile(createTestAccount(),
+ testPath);
+ fs.delete(testPath, true);
+ inputStream = fs.open(testPath);
+ }
+
+ /**
+ * Test delete then open a file.
+ */
+ @Test(expected = FileNotFoundException.class)
+ public void testSingleThreadedPageBlobOpenScenario() throws Throwable {
+
+ createEmptyFile(getPageBlobTestStorageAccount(),
+ testPath);
+ fs.delete(testPath, true);
+ inputStream = fs.open(testPath);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ if (inputStream != null) {
+ inputStream.close();
+ }
+
+ ContractTestUtils.rm(fs, testPath, true, true);
+ super.tearDown();
+ }
+
+ @Override
+ protected AzureBlobStorageTestAccount createTestAccount()
+ throws Exception {
+ return AzureBlobStorageTestAccount.create();
+ }
+}
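Taken together, the single-threaded scenarios above all follow one shape: create a file or folder, delete it, then invoke the operation under test and expect FileNotFoundException. For readers who want to try the shape outside WASB, here is a minimal sketch against the local filesystem; it assumes only hadoop-common on the classpath, and the class name DeleteThenOpenSketch is invented for illustration.

import java.io.FileNotFoundException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class DeleteThenOpenSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    Path p = new Path(System.getProperty("java.io.tmpdir"), "testfile.dat");
    fs.create(p).close();          // create an empty file
    fs.delete(p, true);            // remove it again
    try {
      fs.open(p);                  // operating on the deleted path...
      System.out.println("unexpected: open succeeded");
    } catch (FileNotFoundException expected) {
      // ...fails exactly the way the tests above expect
      System.out.println("got the expected FileNotFoundException");
    }
  }
}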
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationExceptionMessage.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationExceptionMessage.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationExceptionMessage.java
new file mode 100644
index 0000000..6d5e72e
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationExceptionMessage.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import java.net.URI;
+import java.util.UUID;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.azure.integration.AzureTestUtils;
+import org.apache.hadoop.test.GenericTestUtils;
+
+import com.microsoft.azure.storage.CloudStorageAccount;
+import org.junit.Test;
+
+import static org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.NO_ACCESS_TO_CONTAINER_MSG;
+
+/**
+ * Test for error messages coming from the SDK.
+ */
+public class ITestFileSystemOperationExceptionMessage
+ extends AbstractWasbTestWithTimeout {
+
+ @Test
+ public void testAnonymousCredentialExceptionMessage() throws Throwable {
+
+ Configuration conf = AzureBlobStorageTestAccount.createTestConfiguration();
+ CloudStorageAccount account =
+ AzureBlobStorageTestAccount.createTestAccount(conf);
+ AzureTestUtils.assume("No test account", account != null);
+
+ String testStorageAccount = conf.get("fs.azure.test.account.name");
+ conf = new Configuration();
+ conf.set("fs.AbstractFileSystem.wasb.impl",
+ "org.apache.hadoop.fs.azure.Wasb");
+ conf.set("fs.azure.skip.metrics", "true");
+
+ String testContainer = UUID.randomUUID().toString();
+ String wasbUri = String.format("wasb://%s@%s",
+ testContainer, testStorageAccount);
+
+ try (NativeAzureFileSystem filesystem = new NativeAzureFileSystem()) {
+ filesystem.initialize(new URI(wasbUri), conf);
+ fail("Expected an exception, got " + filesystem);
+ } catch (Exception ex) {
+
+ Throwable innerException = ex.getCause();
+ while (innerException != null
+ && !(innerException instanceof AzureException)) {
+ innerException = innerException.getCause();
+ }
+
+ if (innerException != null) {
+ GenericTestUtils.assertExceptionContains(String.format(
+ NO_ACCESS_TO_CONTAINER_MSG, testStorageAccount, testContainer),
+ ex);
+ } else {
+ fail("No inner azure exception");
+ }
+ }
+ }
+}
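The catch block above walks getCause() until it finds an AzureException; the same walk recurs in the older test further down. A reusable form is easy to sketch; ExceptionChains is a name invented here, not an existing Hadoop class.

/** Hypothetical helper: first throwable of a given type in a cause chain. */
public final class ExceptionChains {
  private ExceptionChains() {
  }

  public static <T extends Throwable> T findCause(Throwable t, Class<T> type) {
    Throwable cause = t;
    while (cause != null && !type.isInstance(cause)) {
      cause = cause.getCause();
    }
    return type.cast(cause);   // Class.cast(null) is just null
  }
}

With such a helper the assertion reduces to a null check on ExceptionChains.findCause(ex, AzureException.class).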
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsExceptionHandlingMultiThreaded.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsExceptionHandlingMultiThreaded.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsExceptionHandlingMultiThreaded.java
new file mode 100644
index 0000000..175a9ec
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsExceptionHandlingMultiThreaded.java
@@ -0,0 +1,366 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import java.io.FileNotFoundException;
+
+import org.junit.Test;
+
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.io.IOUtils;
+
+import static org.apache.hadoop.fs.azure.ExceptionHandlingTestHelper.*;
+
+/**
+ * Multithreaded operations on the filesystem, verifying that failures
+ * surface as the expected exceptions.
+ */
+public class ITestFileSystemOperationsExceptionHandlingMultiThreaded
+ extends AbstractWasbTestBase {
+
+ FSDataInputStream inputStream = null;
+
+ private Path testPath;
+ private Path testFolderPath;
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ testPath = path("testfile.dat");
+ testFolderPath = path("testfolder");
+ }
+
+ @Override
+ protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+ return AzureBlobStorageTestAccount.create();
+ }
+
+ @Override
+ public void tearDown() throws Exception {
+
+ IOUtils.closeStream(inputStream);
+ ContractTestUtils.rm(fs, testPath, true, false);
+ ContractTestUtils.rm(fs, testFolderPath, true, false);
+ super.tearDown();
+ }
+
+ /**
+ * Helper method that creates an input stream for the various scenarios.
+ */
+ private void getInputStreamToTest(FileSystem fs, Path testPath)
+ throws Throwable {
+
+ FSDataOutputStream outputStream = fs.create(testPath);
+ String testString = "This is a test string";
+ outputStream.write(testString.getBytes());
+ outputStream.close();
+
+ inputStream = fs.open(testPath);
+ }
+
+ /**
+ * Test to validate that the correct exception is thrown for the
+ * multithreaded read scenario for block blobs.
+ */
+ @Test(expected = FileNotFoundException.class)
+ public void testMultiThreadedBlockBlobReadScenario() throws Throwable {
+
+ AzureBlobStorageTestAccount testAccount = createTestAccount();
+ NativeAzureFileSystem fs = testAccount.getFileSystem();
+ Path base = methodPath();
+ Path testFilePath1 = new Path(base, "test1.dat");
+ Path renamePath = new Path(base, "test2.dat");
+ getInputStreamToTest(fs, testFilePath1);
+ Thread renameThread = new Thread(
+ new RenameThread(fs, testFilePath1, renamePath));
+ renameThread.start();
+
+ renameThread.join();
+
+ byte[] readBuffer = new byte[512];
+ inputStream.read(readBuffer);
+ }
+
+ /**
+ * Test to validate that the correct exception is thrown for the
+ * multithreaded seek scenario for block blobs.
+ */
+ @Test(expected = FileNotFoundException.class)
+ public void testMultiThreadBlockBlobSeekScenario() throws Throwable {
+
+ Path base = methodPath();
+ Path testFilePath1 = new Path(base, "test1.dat");
+ Path renamePath = new Path(base, "test2.dat");
+
+ getInputStreamToTest(fs, testFilePath1);
+ Thread renameThread = new Thread(
+ new RenameThread(fs, testFilePath1, renamePath));
+ renameThread.start();
+
+ renameThread.join();
+
+ inputStream.seek(5);
+ inputStream.read();
+ }
+
+ /**
+ * Tests basic multi threaded setPermission scenario.
+ */
+ @Test(expected = FileNotFoundException.class)
+ public void testMultiThreadedPageBlobSetPermissionScenario()
+ throws Throwable {
+ createEmptyFile(
+ getPageBlobTestStorageAccount(),
+ testPath);
+ Thread t = new Thread(new DeleteThread(fs, testPath));
+ t.start();
+ while (t.isAlive()) {
+ fs.setPermission(testPath,
+ new FsPermission(FsAction.EXECUTE, FsAction.READ, FsAction.READ));
+ }
+ fs.setPermission(testPath,
+ new FsPermission(FsAction.EXECUTE, FsAction.READ, FsAction.READ));
+ }
+
+ /**
+ * Tests basic multi threaded setPermission scenario.
+ */
+ @Test(expected = FileNotFoundException.class)
+ public void testMultiThreadedBlockBlobSetPermissionScenario()
+ throws Throwable {
+ createEmptyFile(createTestAccount(),
+ testPath);
+ Thread t = new Thread(new DeleteThread(fs, testPath));
+ t.start();
+ while (t.isAlive()) {
+ fs.setPermission(testPath,
+ new FsPermission(FsAction.EXECUTE, FsAction.READ, FsAction.READ));
+ }
+ fs.setPermission(testPath,
+ new FsPermission(FsAction.EXECUTE, FsAction.READ, FsAction.READ));
+ }
+
+ /**
+ * Tests basic multi threaded open scenario.
+ */
+ @Test(expected = FileNotFoundException.class)
+ public void testMultiThreadedPageBlobOpenScenario() throws Throwable {
+
+ createEmptyFile(getPageBlobTestStorageAccount(),
+ testPath);
+ Thread t = new Thread(new DeleteThread(fs, testPath));
+ t.start();
+ while (t.isAlive()) {
+ inputStream = fs.open(testPath);
+ inputStream.close();
+ }
+
+ inputStream = fs.open(testPath);
+ inputStream.close();
+ }
+
+ /**
+ * Tests basic multi threaded open scenario.
+ */
+ @Test(expected = FileNotFoundException.class)
+ public void testMultiThreadedBlockBlobOpenScenario() throws Throwable {
+
+ createEmptyFile(createTestAccount(),
+ testPath);
+ Thread t = new Thread(new DeleteThread(fs, testPath));
+ t.start();
+
+ while (t.isAlive()) {
+ inputStream = fs.open(testPath);
+ inputStream.close();
+ }
+ inputStream = fs.open(testPath);
+ inputStream.close();
+ }
+
+ /**
+ * Tests basic multi threaded setOwner scenario.
+ */
+ @Test(expected = FileNotFoundException.class)
+ public void testMultiThreadedBlockBlobSetOwnerScenario() throws Throwable {
+
+ createEmptyFile(createTestAccount(), testPath);
+ Thread t = new Thread(new DeleteThread(fs, testPath));
+ t.start();
+ while (t.isAlive()) {
+ fs.setOwner(testPath, "testowner", "testgroup");
+ }
+ fs.setOwner(testPath, "testowner", "testgroup");
+ }
+
+ /**
+ * Tests basic multi threaded setOwner scenario.
+ */
+ @Test(expected = FileNotFoundException.class)
+ public void testMultiThreadedPageBlobSetOwnerScenario() throws Throwable {
+ createEmptyFile(
+ getPageBlobTestStorageAccount(),
+ testPath);
+ Thread t = new Thread(new DeleteThread(fs, testPath));
+ t.start();
+ while (t.isAlive()) {
+ fs.setOwner(testPath, "testowner", "testgroup");
+ }
+ fs.setOwner(testPath, "testowner", "testgroup");
+ }
+
+ /**
+ * Tests basic multi threaded listStatus scenario.
+ */
+ @Test(expected = FileNotFoundException.class)
+ public void testMultiThreadedBlockBlobListStatusScenario() throws Throwable {
+
+ createTestFolder(createTestAccount(),
+ testFolderPath);
+ Thread t = new Thread(new DeleteThread(fs, testFolderPath));
+ t.start();
+ while (t.isAlive()) {
+ fs.listStatus(testFolderPath);
+ }
+ fs.listStatus(testFolderPath);
+ }
+
+ /**
+ * Tests basic multi threaded listStatus scenario.
+ */
+ @Test(expected = FileNotFoundException.class)
+ public void testMultiThreadedPageBlobListStatusScenario() throws Throwable {
+
+ createTestFolder(
+ getPageBlobTestStorageAccount(),
+ testFolderPath);
+ Thread t = new Thread(new DeleteThread(fs, testFolderPath));
+ t.start();
+ while (t.isAlive()) {
+ fs.listStatus(testFolderPath);
+ }
+ fs.listStatus(testFolderPath);
+ }
+
+ /**
+ * Test to validate that the correct exception is thrown for the
+ * multithreaded read scenario for page blobs.
+ */
+ @Test(expected = FileNotFoundException.class)
+ public void testMultiThreadedPageBlobReadScenario() throws Throwable {
+
+ bindToTestAccount(getPageBlobTestStorageAccount());
+ Path base = methodPath();
+ Path testFilePath1 = new Path(base, "test1.dat");
+ Path renamePath = new Path(base, "test2.dat");
+
+ getInputStreamToTest(fs, testFilePath1);
+ Thread renameThread = new Thread(
+ new RenameThread(fs, testFilePath1, renamePath));
+ renameThread.start();
+
+ renameThread.join();
+ byte[] readBuffer = new byte[512];
+ inputStream.read(readBuffer);
+ }
+
+ /**
+ * Test to validate that the correct exception is thrown for the
+ * multithreaded seek scenario for page blobs.
+ */
+ @Test(expected = FileNotFoundException.class)
+ public void testMultiThreadedPageBlobSeekScenario() throws Throwable {
+
+ bindToTestAccount(getPageBlobTestStorageAccount());
+
+ Path base = methodPath();
+ Path testFilePath1 = new Path(base, "test1.dat");
+ Path renamePath = new Path(base, "test2.dat");
+
+ getInputStreamToTest(fs, testFilePath1);
+ Thread renameThread = new Thread(
+ new RenameThread(fs, testFilePath1, renamePath));
+ renameThread.start();
+
+ renameThread.join();
+ inputStream.seek(5);
+ }
+
+ /**
+ * Helper thread that just renames the test file.
+ */
+ private static class RenameThread implements Runnable {
+
+ private final FileSystem fs;
+ private final Path testPath;
+ private final Path renamePath;
+
+ RenameThread(FileSystem fs,
+ Path testPath,
+ Path renamePath) {
+ this.fs = fs;
+ this.testPath = testPath;
+ this.renamePath = renamePath;
+ }
+
+ @Override
+ public void run() {
+ try {
+ fs.rename(testPath, renamePath);
+ } catch (Exception e) {
+ // Swallowing the exception as the
+ // correctness of the test is controlled
+ // by the other thread
+ }
+ }
+ }
+
+ private static class DeleteThread implements Runnable {
+ private final FileSystem fs;
+ private final Path testPath;
+
+ DeleteThread(FileSystem fs, Path testPath) {
+ this.fs = fs;
+ this.testPath = testPath;
+ }
+
+ @Override
+ public void run() {
+ try {
+ fs.delete(testPath, true);
+ } catch (Exception e) {
+ // Swallowing the exception as the
+ // correctness of the test is controlled
+ // by the other thread
+ }
+ }
+ }
+}
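The DeleteThread and RenameThread helpers race a mutation against the operation under test, and the expected FileNotFoundException is what breaks the polling loop. A condensed, self-contained version of the delete race, again against the local filesystem (timing-dependent by nature, like the tests themselves):

import java.io.FileNotFoundException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class DeleteRaceSketch {
  public static void main(String[] args) throws Exception {
    final FileSystem fs = FileSystem.getLocal(new Configuration());
    final Path p = new Path(System.getProperty("java.io.tmpdir"), "race.dat");
    fs.create(p).close();

    Thread deleter = new Thread(new Runnable() {
      @Override
      public void run() {
        try {
          fs.delete(p, true);
        } catch (Exception e) {
          // swallowed, as in the helpers above: the main thread asserts
        }
      }
    });
    deleter.start();

    try {
      while (deleter.isAlive()) {
        fs.getFileStatus(p);     // may still succeed before the delete lands
      }
      fs.getFileStatus(p);       // after the delete this must fail
      System.out.println("unexpected: path still present");
    } catch (FileNotFoundException expected) {
      System.out.println("got the expected FileNotFoundException");
    }
    deleter.join();
  }
}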
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestContainerChecks.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestContainerChecks.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestContainerChecks.java
deleted file mode 100644
index f6ab94d..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestContainerChecks.java
+++ /dev/null
@@ -1,185 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assume.assumeNotNull;
-
-import java.io.FileNotFoundException;
-import java.util.EnumSet;
-
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.azure.AzureBlobStorageTestAccount.CreateOptions;
-import org.junit.After;
-import org.junit.Assume;
-import org.junit.Before;
-import org.junit.Test;
-
-import com.microsoft.azure.storage.blob.BlobOutputStream;
-import com.microsoft.azure.storage.blob.CloudBlobContainer;
-import com.microsoft.azure.storage.blob.CloudBlockBlob;
-
-/**
- * Tests that WASB creates containers only if needed.
- */
-public class TestContainerChecks {
- private AzureBlobStorageTestAccount testAccount;
- private boolean runningInSASMode = false;
- @After
- public void tearDown() throws Exception {
- if (testAccount != null) {
- testAccount.cleanup();
- testAccount = null;
- }
- }
-
- @Before
- public void setMode() {
- runningInSASMode = AzureBlobStorageTestAccount.createTestConfiguration().
- getBoolean(AzureNativeFileSystemStore.KEY_USE_SECURE_MODE, false);
- }
-
- @Test
- public void testContainerExistAfterDoesNotExist() throws Exception {
- testAccount = AzureBlobStorageTestAccount.create("",
- EnumSet.noneOf(CreateOptions.class));
- assumeNotNull(testAccount);
- CloudBlobContainer container = testAccount.getRealContainer();
- FileSystem fs = testAccount.getFileSystem();
-
- // Starting off with the container not there
- assertFalse(container.exists());
-
- // A list shouldn't create the container and will set file system store
- // state to DoesNotExist
- try {
- fs.listStatus(new Path("/"));
- assertTrue("Should've thrown.", false);
- } catch (FileNotFoundException ex) {
- assertTrue("Unexpected exception: " + ex,
- ex.getMessage().contains("does not exist."));
- }
- assertFalse(container.exists());
-
- // Create a container outside of the WASB FileSystem
- container.create();
- // Add a file to the container outside of the WASB FileSystem
- CloudBlockBlob blob = testAccount.getBlobReference("foo");
- BlobOutputStream outputStream = blob.openOutputStream();
- outputStream.write(new byte[10]);
- outputStream.close();
-
- // Make sure the file is visible
- assertTrue(fs.exists(new Path("/foo")));
- assertTrue(container.exists());
- }
-
- @Test
- public void testContainerCreateAfterDoesNotExist() throws Exception {
- testAccount = AzureBlobStorageTestAccount.create("",
- EnumSet.noneOf(CreateOptions.class));
- assumeNotNull(testAccount);
- CloudBlobContainer container = testAccount.getRealContainer();
- FileSystem fs = testAccount.getFileSystem();
-
- // Starting off with the container not there
- assertFalse(container.exists());
-
- // A list shouldn't create the container and will set file system store
- // state to DoesNotExist
- try {
- assertNull(fs.listStatus(new Path("/")));
- assertTrue("Should've thrown.", false);
- } catch (FileNotFoundException ex) {
- assertTrue("Unexpected exception: " + ex,
- ex.getMessage().contains("does not exist."));
- }
- assertFalse(container.exists());
-
- // Create a container outside of the WASB FileSystem
- container.create();
-
- // Write should succeed
- assertTrue(fs.createNewFile(new Path("/foo")));
- assertTrue(container.exists());
- }
-
- @Test
- public void testContainerCreateOnWrite() throws Exception {
- testAccount = AzureBlobStorageTestAccount.create("",
- EnumSet.noneOf(CreateOptions.class));
- assumeNotNull(testAccount);
- CloudBlobContainer container = testAccount.getRealContainer();
- FileSystem fs = testAccount.getFileSystem();
-
- // Starting off with the container not there
- assertFalse(container.exists());
-
- // A list shouldn't create the container.
- try {
- fs.listStatus(new Path("/"));
- assertTrue("Should've thrown.", false);
- } catch (FileNotFoundException ex) {
- assertTrue("Unexpected exception: " + ex,
- ex.getMessage().contains("does not exist."));
- }
- assertFalse(container.exists());
-
- // Neither should a read.
- try {
- fs.open(new Path("/foo"));
- assertFalse("Should've thrown.", true);
- } catch (FileNotFoundException ex) {
- }
- assertFalse(container.exists());
-
- // Neither should a rename
- assertFalse(fs.rename(new Path("/foo"), new Path("/bar")));
- assertFalse(container.exists());
-
- // But a write should.
- assertTrue(fs.createNewFile(new Path("/foo")));
- assertTrue(container.exists());
- }
-
- @Test
- public void testContainerChecksWithSas() throws Exception {
-
- Assume.assumeFalse(runningInSASMode);
- testAccount = AzureBlobStorageTestAccount.create("",
- EnumSet.of(CreateOptions.UseSas));
- assumeNotNull(testAccount);
- CloudBlobContainer container = testAccount.getRealContainer();
- FileSystem fs = testAccount.getFileSystem();
-
- // The container shouldn't be there
- assertFalse(container.exists());
-
- // A write should just fail
- try {
- fs.createNewFile(new Path("/foo"));
- assertFalse("Should've thrown.", true);
- } catch (AzureException ex) {
- }
- assertFalse(container.exists());
- }
-}
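The deleted tests assert "should have thrown" with assertTrue("Should've thrown.", false) inside a try block. The hadoop-common test module of this era carries LambdaTestUtils.intercept, which expresses the same check in one call; a sketch, assuming Java 8 and that the (Class, String, Callable) overload is available in the version being built against:

import java.io.FileNotFoundException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.test.LambdaTestUtils;

final class ContainerAssertions {
  private ContainerAssertions() {
  }

  /** Expect listStatus("/") to fail when the container does not exist. */
  static void assertListFailsWithNoContainer(final FileSystem fs)
      throws Exception {
    LambdaTestUtils.intercept(FileNotFoundException.class, "does not exist",
        () -> fs.listStatus(new Path("/")));
  }
}

intercept rethrows anything of the wrong type and fails if nothing is thrown, which is exactly what the hand-rolled try/catch blocks above encode.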
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationExceptionHandling.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationExceptionHandling.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationExceptionHandling.java
deleted file mode 100644
index 9ac25dd..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationExceptionHandling.java
+++ /dev/null
@@ -1,269 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import java.io.FileNotFoundException;
-
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Test;
-
-
-public class TestFileSystemOperationExceptionHandling
- extends AbstractWasbTestBase {
-
- private FSDataInputStream inputStream = null;
-
- private static Path testPath = new Path("testfile.dat");
-
- private static Path testFolderPath = new Path("testfolder");
-
- /*
- * Helper method that creates a InputStream to validate exceptions
- * for various scenarios
- */
- private void setupInputStreamToTest(AzureBlobStorageTestAccount testAccount)
- throws Exception {
-
- FileSystem fs = testAccount.getFileSystem();
-
- // Step 1: Create a file and write dummy data.
- Path testFilePath1 = new Path("test1.dat");
- Path testFilePath2 = new Path("test2.dat");
- FSDataOutputStream outputStream = fs.create(testFilePath1);
- String testString = "This is a test string";
- outputStream.write(testString.getBytes());
- outputStream.close();
-
- // Step 2: Open a read stream on the file.
- inputStream = fs.open(testFilePath1);
-
- // Step 3: Rename the file
- fs.rename(testFilePath1, testFilePath2);
- }
-
- /*
- * Tests a basic single threaded read scenario for Page blobs.
- */
- @Test(expected=FileNotFoundException.class)
- public void testSingleThreadedPageBlobReadScenario() throws Throwable {
- AzureBlobStorageTestAccount testAccount = ExceptionHandlingTestHelper.getPageBlobTestStorageAccount();
- setupInputStreamToTest(testAccount);
- byte[] readBuffer = new byte[512];
- inputStream.read(readBuffer);
- }
-
- /*
- * Tests a basic single threaded seek scenario for Page blobs.
- */
- @Test(expected=FileNotFoundException.class)
- public void testSingleThreadedPageBlobSeekScenario() throws Throwable {
- AzureBlobStorageTestAccount testAccount = ExceptionHandlingTestHelper.getPageBlobTestStorageAccount();
- setupInputStreamToTest(testAccount);
- inputStream.seek(5);
- }
-
- /*
- * Test a basic single thread seek scenario for Block blobs.
- */
- @Test(expected=FileNotFoundException.class)
- public void testSingleThreadBlockBlobSeekScenario() throws Throwable {
-
- AzureBlobStorageTestAccount testAccount = createTestAccount();
- setupInputStreamToTest(testAccount);
- inputStream.seek(5);
- inputStream.read();
- }
-
- /*
- * Tests a basic single threaded read scenario for Block blobs.
- */
- @Test(expected=FileNotFoundException.class)
- public void testSingledThreadBlockBlobReadScenario() throws Throwable{
- AzureBlobStorageTestAccount testAccount = createTestAccount();
- setupInputStreamToTest(testAccount);
- byte[] readBuffer = new byte[512];
- inputStream.read(readBuffer);
- }
-
- @Test(expected=FileNotFoundException.class)
- /*
- * Tests basic single threaded setPermission scenario
- */
- public void testSingleThreadedBlockBlobSetPermissionScenario() throws Throwable {
-
- ExceptionHandlingTestHelper.createEmptyFile(createTestAccount(), testPath);
- fs.delete(testPath, true);
- fs.setPermission(testPath, new FsPermission(FsAction.EXECUTE, FsAction.READ, FsAction.READ));
- }
-
- @Test(expected=FileNotFoundException.class)
- /*
- * Tests basic single threaded setPermission scenario
- */
- public void testSingleThreadedPageBlobSetPermissionScenario() throws Throwable {
- ExceptionHandlingTestHelper.createEmptyFile(ExceptionHandlingTestHelper.getPageBlobTestStorageAccount(),
- testPath);
- fs.delete(testPath, true);
- fs.setOwner(testPath, "testowner", "testgroup");
- }
-
- @Test(expected=FileNotFoundException.class)
- /*
- * Tests basic single threaded setPermission scenario
- */
- public void testSingleThreadedBlockBlobSetOwnerScenario() throws Throwable {
-
- ExceptionHandlingTestHelper.createEmptyFile(createTestAccount(), testPath);
- fs.delete(testPath, true);
- fs.setOwner(testPath, "testowner", "testgroup");
- }
-
- @Test(expected=FileNotFoundException.class)
- /*
- * Tests basic single threaded setPermission scenario
- */
- public void testSingleThreadedPageBlobSetOwnerScenario() throws Throwable {
- ExceptionHandlingTestHelper.createEmptyFile(ExceptionHandlingTestHelper.getPageBlobTestStorageAccount(),
- testPath);
- fs.delete(testPath, true);
- fs.setPermission(testPath, new FsPermission(FsAction.EXECUTE, FsAction.READ, FsAction.READ));
- }
-
- @Test(expected=FileNotFoundException.class)
- /*
- * Test basic single threaded listStatus scenario
- */
- public void testSingleThreadedBlockBlobListStatusScenario() throws Throwable {
- ExceptionHandlingTestHelper.createTestFolder(createTestAccount(), testFolderPath);
- fs.delete(testFolderPath, true);
- fs.listStatus(testFolderPath);
- }
-
- @Test(expected=FileNotFoundException.class)
- /*
- * Test basica single threaded listStatus scenario
- */
- public void testSingleThreadedPageBlobListStatusScenario() throws Throwable {
- ExceptionHandlingTestHelper.createTestFolder(ExceptionHandlingTestHelper.getPageBlobTestStorageAccount(),
- testFolderPath);
- fs.delete(testFolderPath, true);
- fs.listStatus(testFolderPath);
- }
-
- @Test
- /*
- * Test basic single threaded listStatus scenario
- */
- public void testSingleThreadedBlockBlobRenameScenario() throws Throwable {
-
- ExceptionHandlingTestHelper.createEmptyFile(createTestAccount(),
- testPath);
- Path dstPath = new Path("dstFile.dat");
- fs.delete(testPath, true);
- boolean renameResult = fs.rename(testPath, dstPath);
- Assert.assertFalse(renameResult);
- }
-
- @Test
- /*
- * Test basic single threaded listStatus scenario
- */
- public void testSingleThreadedPageBlobRenameScenario() throws Throwable {
-
- ExceptionHandlingTestHelper.createEmptyFile(ExceptionHandlingTestHelper.getPageBlobTestStorageAccount(),
- testPath);
- Path dstPath = new Path("dstFile.dat");
- fs.delete(testPath, true);
- boolean renameResult = fs.rename(testPath, dstPath);
- Assert.assertFalse(renameResult);
- }
-
- @Test
- /*
- * Test basic single threaded listStatus scenario
- */
- public void testSingleThreadedBlockBlobDeleteScenario() throws Throwable {
-
- ExceptionHandlingTestHelper.createEmptyFile(createTestAccount(),
- testPath);
- fs.delete(testPath, true);
- boolean deleteResult = fs.delete(testPath, true);
- Assert.assertFalse(deleteResult);
- }
-
- @Test
- /*
- * Test basic single threaded listStatus scenario
- */
- public void testSingleThreadedPageBlobDeleteScenario() throws Throwable {
-
- ExceptionHandlingTestHelper.createEmptyFile(ExceptionHandlingTestHelper.getPageBlobTestStorageAccount(),
- testPath);
- fs.delete(testPath, true);
- boolean deleteResult = fs.delete(testPath, true);
- Assert.assertFalse(deleteResult);
- }
-
- @Test(expected=FileNotFoundException.class)
- /*
- * Test basic single threaded listStatus scenario
- */
- public void testSingleThreadedBlockBlobOpenScenario() throws Throwable {
-
- ExceptionHandlingTestHelper.createEmptyFile(createTestAccount(),
- testPath);
- fs.delete(testPath, true);
- inputStream = fs.open(testPath);
- }
-
- @Test(expected=FileNotFoundException.class)
- /*
- * Test basic single threaded listStatus scenario
- */
- public void testSingleThreadedPageBlobOpenScenario() throws Throwable {
-
- ExceptionHandlingTestHelper.createEmptyFile(ExceptionHandlingTestHelper.getPageBlobTestStorageAccount(),
- testPath);
- fs.delete(testPath, true);
- inputStream = fs.open(testPath);
- }
-
- @After
- public void tearDown() throws Exception {
- if (inputStream != null) {
- inputStream.close();
- }
-
- if (fs != null && fs.exists(testPath)) {
- fs.delete(testPath, true);
- }
- }
-
- @Override
- protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
- return AzureBlobStorageTestAccount.create();
- }
-}
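Note that this deleted class placed its comments between @Test(expected = ...) and the method signature; the replacement file moves them into proper Javadoc position. Where a test needs to check the message as well as the exception type, the JUnit 4 ExpectedException rule (already used elsewhere in this module) is one option; a self-contained sketch, with the thrown exception standing in for a real WASB call:

import java.io.FileNotFoundException;

import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;

public class ExpectedExceptionSketch {
  @Rule
  public ExpectedException exception = ExpectedException.none();

  @Test
  public void testOpenDeletedFile() throws Exception {
    exception.expect(FileNotFoundException.class);
    exception.expectMessage("does not exist");
    // A real WASB test would call fs.open(deletedPath) here.
    throw new FileNotFoundException("/testfile.dat: does not exist.");
  }
}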
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationExceptionMessage.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationExceptionMessage.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationExceptionMessage.java
deleted file mode 100644
index e619817..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationExceptionMessage.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-import java.net.URI;
-import java.util.UUID;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.test.GenericTestUtils;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-import static org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.NO_ACCESS_TO_CONTAINER_MSG;
-
-
-public class TestFileSystemOperationExceptionMessage extends
- NativeAzureFileSystemBaseTest {
-
- @Test
- public void testAnonymouseCredentialExceptionMessage() throws Throwable{
-
- Configuration conf = AzureBlobStorageTestAccount.createTestConfiguration();
- String testStorageAccount = conf.get("fs.azure.test.account.name");
- conf = new Configuration();
- conf.set("fs.AbstractFileSystem.wasb.impl", "org.apache.hadoop.fs.azure.Wasb");
- conf.set("fs.azure.skip.metrics", "true");
-
- String testContainer = UUID.randomUUID().toString();
- String wasbUri = String.format("wasb://%s@%s",
- testContainer, testStorageAccount);
-
- fs = new NativeAzureFileSystem();
- try {
- fs.initialize(new URI(wasbUri), conf);
- } catch (Exception ex) {
-
- Throwable innerException = ex.getCause();
- while (innerException != null
- && !(innerException instanceof AzureException)) {
- innerException = innerException.getCause();
- }
-
- if (innerException != null) {
- String exceptionMessage = innerException.getMessage();
- if (exceptionMessage == null
- || exceptionMessage.length() == 0) {
- Assert.fail();}
- else {
- GenericTestUtils.assertExceptionContains(String.format(
- NO_ACCESS_TO_CONTAINER_MSG, testStorageAccount, testContainer),
- ex);
- }
- } else {
- Assert.fail();
- }
- }
- }
-
- @Override
- protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
- return AzureBlobStorageTestAccount.create();
- }
-}
\ No newline at end of file
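For readers without the Hadoop test sources at hand: GenericTestUtils.assertExceptionContains, used in both versions of this test, amounts to a substring check on the exception text. A simplified standalone equivalent (the real helper is more thorough about how it stringifies the failure; treat this as an approximation):

/** Simplified stand-in for GenericTestUtils.assertExceptionContains. */
final class ExceptionAsserts {
  private ExceptionAsserts() {
  }

  static void assertExceptionContains(String expected, Throwable t) {
    String text = String.valueOf(t);    // exception class plus message
    if (!text.contains(expected)) {
      throw new AssertionError(
          "Expected to find '" + expected + "' in: " + text, t);
    }
  }
}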
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationsExceptionHandlingMultiThreaded.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationsExceptionHandlingMultiThreaded.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationsExceptionHandlingMultiThreaded.java
deleted file mode 100644
index 1cd18ee..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationsExceptionHandlingMultiThreaded.java
+++ /dev/null
@@ -1,330 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import java.io.FileNotFoundException;
-
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.junit.After;
-import org.junit.Test;
-
-public class TestFileSystemOperationsExceptionHandlingMultiThreaded
- extends AbstractWasbTestBase {
-
- FSDataInputStream inputStream = null;
-
- private static Path testPath = new Path("testfile.dat");
- private static Path testFolderPath = new Path("testfolder");
-
-
- /*
- * Helper method to creates an input stream to test various scenarios.
- */
- private void getInputStreamToTest(FileSystem fs, Path testPath) throws Throwable {
-
- FSDataOutputStream outputStream = fs.create(testPath);
- String testString = "This is a test string";
- outputStream.write(testString.getBytes());
- outputStream.close();
-
- inputStream = fs.open(testPath);
- }
-
- /*
- * Test to validate correct exception is thrown for Multithreaded read
- * scenario for block blobs
- */
- @Test(expected=FileNotFoundException.class)
- public void testMultiThreadedBlockBlobReadScenario() throws Throwable {
-
- AzureBlobStorageTestAccount testAccount = createTestAccount();
- fs = testAccount.getFileSystem();
- Path testFilePath1 = new Path("test1.dat");
-
- getInputStreamToTest(fs, testFilePath1);
- Thread renameThread = new Thread(new RenameThread(fs, testFilePath1));
- renameThread.start();
-
- renameThread.join();
-
- byte[] readBuffer = new byte[512];
- inputStream.read(readBuffer);
- }
-
- /*
- * Test to validate correct exception is thrown for Multithreaded seek
- * scenario for block blobs
- */
-
- @Test(expected=FileNotFoundException.class)
- public void testMultiThreadBlockBlobSeekScenario() throws Throwable {
-
- AzureBlobStorageTestAccount testAccount = createTestAccount();
- fs = testAccount.getFileSystem();
- Path testFilePath1 = new Path("test1.dat");
-
- getInputStreamToTest(fs, testFilePath1);
- Thread renameThread = new Thread(new RenameThread(fs, testFilePath1));
- renameThread.start();
-
- renameThread.join();
-
- inputStream.seek(5);
- inputStream.read();
- }
-
- @Test(expected=FileNotFoundException.class)
- /*
- * Tests basic multi threaded setPermission scenario
- */
- public void testMultiThreadedPageBlobSetPermissionScenario() throws Throwable {
- ExceptionHandlingTestHelper.createEmptyFile(ExceptionHandlingTestHelper.getPageBlobTestStorageAccount(),
- testPath);
- Thread t = new Thread(new DeleteThread(fs, testPath));
- t.start();
- while (t.isAlive()) {
- fs.setPermission(testPath, new FsPermission(FsAction.EXECUTE, FsAction.READ, FsAction.READ));
- }
- fs.setPermission(testPath, new FsPermission(FsAction.EXECUTE, FsAction.READ, FsAction.READ));
- }
-
- @Test(expected=FileNotFoundException.class)
- /*
- * Tests basic multi threaded setPermission scenario
- */
- public void testMultiThreadedBlockBlobSetPermissionScenario() throws Throwable {
- ExceptionHandlingTestHelper.createEmptyFile(createTestAccount(),
- testPath);
- Thread t = new Thread(new DeleteThread(fs, testPath));
- t.start();
- while (t.isAlive()) {
- fs.setPermission(testPath, new FsPermission(FsAction.EXECUTE, FsAction.READ, FsAction.READ));
- }
- fs.setPermission(testPath, new FsPermission(FsAction.EXECUTE, FsAction.READ, FsAction.READ));
- }
-
- @Test(expected=FileNotFoundException.class)
- /*
- * Tests basic multi threaded setPermission scenario
- */
- public void testMultiThreadedPageBlobOpenScenario() throws Throwable {
-
- ExceptionHandlingTestHelper.createEmptyFile(createTestAccount(),
- testPath);
- Thread t = new Thread(new DeleteThread(fs, testPath));
- t.start();
- while (t.isAlive()) {
- inputStream = fs.open(testPath);
- inputStream.close();
- }
-
- inputStream = fs.open(testPath);
- inputStream.close();
- }
-
- @Test(expected=FileNotFoundException.class)
- /*
- * Tests basic multi threaded setPermission scenario
- */
- public void testMultiThreadedBlockBlobOpenScenario() throws Throwable {
-
- ExceptionHandlingTestHelper.createEmptyFile(ExceptionHandlingTestHelper.getPageBlobTestStorageAccount(),
- testPath);
- Thread t = new Thread(new DeleteThread(fs, testPath));
- t.start();
-
- while (t.isAlive()) {
- inputStream = fs.open(testPath);
- inputStream.close();
- }
- inputStream = fs.open(testPath);
- inputStream.close();
- }
-
- @Test(expected=FileNotFoundException.class)
- /*
- * Tests basic multi threaded setOwner scenario
- */
- public void testMultiThreadedBlockBlobSetOwnerScenario() throws Throwable {
-
- ExceptionHandlingTestHelper.createEmptyFile(createTestAccount(), testPath);
- Thread t = new Thread(new DeleteThread(fs, testPath));
- t.start();
- while (t.isAlive()) {
- fs.setOwner(testPath, "testowner", "testgroup");
- }
- fs.setOwner(testPath, "testowner", "testgroup");
- }
-
- @Test(expected=FileNotFoundException.class)
- /*
- * Tests basic multi threaded setOwner scenario
- */
- public void testMultiThreadedPageBlobSetOwnerScenario() throws Throwable {
- ExceptionHandlingTestHelper.createEmptyFile(ExceptionHandlingTestHelper.getPageBlobTestStorageAccount(),
- testPath);
- Thread t = new Thread(new DeleteThread(fs, testPath));
- t.start();
- while (t.isAlive()) {
- fs.setOwner(testPath, "testowner", "testgroup");
- }
- fs.setOwner(testPath, "testowner", "testgroup");
- }
-
- @Test(expected=FileNotFoundException.class)
- /*
- * Tests basic multi threaded listStatus scenario
- */
- public void testMultiThreadedBlockBlobListStatusScenario() throws Throwable {
-
- ExceptionHandlingTestHelper.createTestFolder(createTestAccount(), testFolderPath);
- Thread t = new Thread(new DeleteThread(fs, testFolderPath));
- t.start();
- while (t.isAlive()) {
- fs.listStatus(testFolderPath);
- }
- fs.listStatus(testFolderPath);
- }
-
- @Test(expected=FileNotFoundException.class)
- /*
- * Tests basic multi threaded listStatus scenario
- */
- public void testMultiThreadedPageBlobListStatusScenario() throws Throwable {
-
- ExceptionHandlingTestHelper.createTestFolder(ExceptionHandlingTestHelper.getPageBlobTestStorageAccount(),
- testFolderPath);
- Thread t = new Thread(new DeleteThread(fs, testFolderPath));
- t.start();
- while (t.isAlive()) {
- fs.listStatus(testFolderPath);
- }
- fs.listStatus(testFolderPath);
- }
-
- /*
- * Test to validate correct exception is thrown for Multithreaded read
- * scenario for page blobs
- */
-
- @Test(expected=FileNotFoundException.class)
- public void testMultiThreadedPageBlobReadScenario() throws Throwable {
-
- AzureBlobStorageTestAccount testAccount = ExceptionHandlingTestHelper.getPageBlobTestStorageAccount();
- fs = testAccount.getFileSystem();
- Path testFilePath1 = new Path("test1.dat");
-
- getInputStreamToTest(fs, testFilePath1);
- Thread renameThread = new Thread(new RenameThread(fs, testFilePath1));
- renameThread.start();
-
- renameThread.join();
- byte[] readBuffer = new byte[512];
- inputStream.read(readBuffer);
- }
-
- /*
- * Test to validate correct exception is thrown for Multithreaded seek
- * scenario for page blobs
- */
-
- @Test(expected=FileNotFoundException.class)
- public void testMultiThreadedPageBlobSeekScenario() throws Throwable {
-
- AzureBlobStorageTestAccount testAccount = ExceptionHandlingTestHelper.getPageBlobTestStorageAccount();
- fs = testAccount.getFileSystem();
- Path testFilePath1 = new Path("test1.dat");
-
- getInputStreamToTest(fs, testFilePath1);
- Thread renameThread = new Thread(new RenameThread(fs, testFilePath1));
- renameThread.start();
-
- renameThread.join();
- inputStream.seek(5);
- }
-
- @Override
- protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
- return AzureBlobStorageTestAccount.create();
- }
-
- @After
- public void tearDown() throws Exception {
-
- if (inputStream != null) {
- inputStream.close();
- }
-
- if (fs != null && fs.exists(testPath)) {
- fs.delete(testPath, true);
- }
- }
-}
-
-/*
- * Helper thread that just renames the test file.
- */
-class RenameThread implements Runnable {
-
- private FileSystem fs;
- private Path testPath;
- private Path renamePath = new Path("test2.dat");
-
- public RenameThread(FileSystem fs, Path testPath) {
- this.fs = fs;
- this.testPath = testPath;
- }
-
- @Override
- public void run(){
- try {
- fs.rename(testPath, renamePath);
- }catch (Exception e) {
- // Swallowing the exception as the
- // correctness of the test is controlled
- // by the other thread
- }
- }
-}
-
-class DeleteThread implements Runnable {
- private FileSystem fs;
- private Path testPath;
-
- public DeleteThread(FileSystem fs, Path testPath) {
- this.fs = fs;
- this.testPath = testPath;
- }
-
- @Override
- public void run() {
- try {
- fs.delete(testPath, true);
- } catch (Exception e) {
- // Swallowing the exception as the
- // correctness of the test is controlled
- // by the other thread
- }
- }
-}
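Both helper threads deliberately swallow exceptions, since correctness is asserted from the test thread. When a background failure does need to surface, a common variant records it and rethrows after join(); CapturingThread below is a name invented for this sketch.

import java.util.concurrent.atomic.AtomicReference;

/** Sketch: a thread that records its failure for the test to rethrow. */
class CapturingThread extends Thread {
  private final Runnable task;
  private final AtomicReference<Throwable> failure =
      new AtomicReference<Throwable>();

  CapturingThread(Runnable task) {
    this.task = task;
  }

  @Override
  public void run() {
    try {
      task.run();
    } catch (Throwable t) {
      failure.set(t);            // remember instead of swallowing
    }
  }

  /** Join, then rethrow anything the task threw. */
  void joinAndRethrow() throws Throwable {
    join();
    Throwable t = failure.get();
    if (t != null) {
      throw t;
    }
  }
}

A DeleteThread wrapped this way would fail the test loudly if the delete itself broke, instead of letting the test fail later with a less informative assertion.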
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationsWithThreads.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationsWithThreads.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationsWithThreads.java
deleted file mode 100644
index fd3690c..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationsWithThreads.java
+++ /dev/null
@@ -1,821 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.io.IOException;
-import java.net.URI;
-import java.util.concurrent.RejectedExecutionException;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.commons.logging.impl.Log4JLogger;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.azure.NativeAzureFileSystem.FolderRenamePending;
-import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.mockito.Mockito;
-import org.mockito.invocation.InvocationOnMock;
-import org.mockito.stubbing.Answer;
-
-/**
- * Tests the Native Azure file system (WASB) using parallel threads for rename and delete operations.
- */
-public class TestFileSystemOperationsWithThreads extends AbstractWasbTestBase {
-
- private final int renameThreads = 10;
- private final int deleteThreads = 20;
- private int iterations = 1;
- private LogCapturer logs = null;
-
- @Rule
- public ExpectedException exception = ExpectedException.none();
-
- @Before
- public void setUp() throws Exception {
- super.setUp();
- Configuration conf = fs.getConf();
-
- // By default enable parallel threads for rename and delete operations.
- // Also enable flat listing of blobs for these operations.
- conf.setInt(NativeAzureFileSystem.AZURE_RENAME_THREADS, renameThreads);
- conf.setInt(NativeAzureFileSystem.AZURE_DELETE_THREADS, deleteThreads);
- conf.setBoolean(AzureNativeFileSystemStore.KEY_ENABLE_FLAT_LISTING, true);
-
- URI uri = fs.getUri();
- fs.initialize(uri, conf);
-
- // Capture logs
- logs = LogCapturer.captureLogs(new Log4JLogger(org.apache.log4j.Logger
- .getRootLogger()));
- }
-
- /*
- * Helper method to create sub directory and different types of files
- * for multiple iterations.
- */
- private void createFolder(FileSystem fs, String root) throws Exception {
- fs.mkdirs(new Path(root));
- for (int i = 0; i < this.iterations; i++) {
- fs.mkdirs(new Path(root + "/" + i));
- fs.createNewFile(new Path(root + "/" + i + "/fileToRename"));
- fs.createNewFile(new Path(root + "/" + i + "/file/to/rename"));
- fs.createNewFile(new Path(root + "/" + i + "/file+to%rename"));
- fs.createNewFile(new Path(root + "/fileToRename" + i));
- }
- }
-
- /*
- * Helper method to do rename operation and validate all files in source folder
- * doesn't exists and similar files exists in new folder.
- */
- private void validateRenameFolder(FileSystem fs, String source, String dest) throws Exception {
- // Create source folder with files.
- createFolder(fs, source);
- Path sourceFolder = new Path(source);
- Path destFolder = new Path(dest);
-
- // rename operation
- assertTrue(fs.rename(sourceFolder, destFolder));
- assertTrue(fs.exists(destFolder));
-
- for (int i = 0; i < this.iterations; i++) {
- // Check destination folder and files exists.
- assertTrue(fs.exists(new Path(dest + "/" + i)));
- assertTrue(fs.exists(new Path(dest + "/" + i + "/fileToRename")));
- assertTrue(fs.exists(new Path(dest + "/" + i + "/file/to/rename")));
- assertTrue(fs.exists(new Path(dest + "/" + i + "/file+to%rename")));
- assertTrue(fs.exists(new Path(dest + "/fileToRename" + i)));
-
- // Check source folder and files doesn't exists.
- assertFalse(fs.exists(new Path(source + "/" + i)));
- assertFalse(fs.exists(new Path(source + "/" + i + "/fileToRename")));
- assertFalse(fs.exists(new Path(source + "/" + i + "/file/to/rename")));
- assertFalse(fs.exists(new Path(source + "/" + i + "/file+to%rename")));
- assertFalse(fs.exists(new Path(source + "/fileToRename" + i)));
- }
- }
-
- /*
- * Test case for rename operation with multiple threads and flat listing enabled.
- */
- @Test
- public void testRenameSmallFolderWithThreads() throws Exception {
-
- validateRenameFolder(fs, "root", "rootnew");
-
- // With single iteration, we would have created 7 blobs.
- int expectedThreadsCreated = Math.min(7, renameThreads);
-
- // Validate from logs that threads are created.
- String content = logs.getOutput();
- assertInLog(content, "ms with threads: " + expectedThreadsCreated);
-
- // Validate thread executions
- for (int i = 0; i < expectedThreadsCreated; i++) {
- assertInLog(content,
- "AzureBlobRenameThread-" + Thread.currentThread().getName() + "-" + i);
- }
-
- // Also ensure that we haven't spawned extra threads.
- if (expectedThreadsCreated < renameThreads) {
- for (int i = expectedThreadsCreated; i < renameThreads; i++) {
- assertNotInLog(content,
- "AzureBlobRenameThread-" + Thread.currentThread().getName() + "-" + i);
- }
- }
- }
-
- /*
- * Test case for rename operation with multiple threads and flat listing enabled.
- */
- @Test
- public void testRenameLargeFolderWithThreads() throws Exception {
-
- // Populate source folder with large number of files and directories.
- this.iterations = 10;
- validateRenameFolder(fs, "root", "rootnew");
-
- // Validate from logs that threads are created.
- String content = logs.getOutput();
- assertInLog(content, "ms with threads: " + renameThreads);
-
- // Validate thread executions
- for (int i = 0; i < renameThreads; i++) {
- assertInLog(content,
- "AzureBlobRenameThread-" + Thread.currentThread().getName() + "-" + i);
- }
- }
-
- /*
- * Test case for rename operation with threads disabled and flat listing enabled.
- */
- @Test
- public void testRenameLargeFolderDisableThreads() throws Exception {
- Configuration conf = fs.getConf();
-
- // Number of threads set to 0 or 1 disables threads.
- conf.setInt(NativeAzureFileSystem.AZURE_RENAME_THREADS, 0);
- URI uri = fs.getUri();
- fs.initialize(uri, conf);
-
- // Populate source folder with large number of files and directories.
- this.iterations = 10;
- validateRenameFolder(fs, "root", "rootnew");
-
- // Validate from logs that threads are disabled.
- String content = logs.getOutput();
- assertInLog(content,
- "Disabling threads for Rename operation as thread count 0");
-
- // Validate no thread executions
- for (int i = 0; i < renameThreads; i++) {
- String term = "AzureBlobRenameThread-"
- + Thread.currentThread().getName()
- + "-" + i;
- assertNotInLog(content, term);
- }
- }
-
- /**
- * Assert that a log contains the given term.
- * @param content log output
- * @param term search term
- */
- protected void assertInLog(String content, String term) {
- assertTrue("Empty log", !content.isEmpty());
- if (!content.contains(term)) {
- String message = "No " + term + " found in logs";
- LOG.error(message);
- System.err.println(content);
- fail(message);
- }
- }
-
- /**
- * Assert that a log does not contain the given term.
- * @param content log output
- * @param term search term
- */
- protected void assertNotInLog(String content, String term) {
- assertTrue("Empty log", !content.isEmpty());
- if (content.contains(term)) {
- String message = term + " found in logs";
- LOG.error(message);
- System.err.println(content);
- fail(message);
- }
- }
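These two helpers scan output captured by the LogCapturer set up in setUp(). The full capture-scan-stop cycle, reduced to a sketch that reuses only calls appearing in this file plus LogCapturer.stopCapturing(), which is assumed present on this version of GenericTestUtils:

import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.test.GenericTestUtils.LogCapturer;

final class LogScanSketch {
  static void renameAndExpectThreadedLog(FileSystem fs) throws Exception {
    LogCapturer logs = LogCapturer.captureLogs(
        new Log4JLogger(org.apache.log4j.Logger.getRootLogger()));
    try {
      fs.rename(new Path("root"), new Path("rootnew"));
      String content = logs.getOutput();
      if (!content.contains("ms with threads: ")) {
        throw new AssertionError("no threaded-rename entry in:\n" + content);
      }
    } finally {
      logs.stopCapturing();
    }
  }
}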
-
- /*
- * Test case for rename operation with threads and flat listing disabled.
- */
- @Test
- public void testRenameSmallFolderDisableThreadsDisableFlatListing() throws Exception {
- Configuration conf = fs.getConf();
- conf = fs.getConf();
-
- // Number of threads set to 0 or 1 disables threads.
- conf.setInt(NativeAzureFileSystem.AZURE_RENAME_THREADS, 1);
- conf.setBoolean(AzureNativeFileSystemStore.KEY_ENABLE_FLAT_LISTING, false);
- URI uri = fs.getUri();
- fs.initialize(uri, conf);
-
- validateRenameFolder(fs, "root", "rootnew");
-
- // Validate from logs that threads are disabled.
- String content = logs.getOutput();
- assertInLog(content,
- "Disabling threads for Rename operation as thread count 1");
-
- // Validate no thread executions
- for (int i = 0; i < renameThreads; i++) {
- assertNotInLog(content,
- "AzureBlobRenameThread-" + Thread.currentThread().getName() + "-" + i);
- }
- }
-
- /*
- * Helper method to do delete operation and validate all files in source folder
- * doesn't exists after delete operation.
- */
- private void validateDeleteFolder(FileSystem fs, String source) throws Exception {
- // Create folder with files.
- createFolder(fs, "root");
- Path sourceFolder = new Path(source);
-
- // Delete operation
- assertTrue(fs.delete(sourceFolder, true));
- assertFalse(fs.exists(sourceFolder));
-
- for (int i = 0; i < this.iterations; i++) {
- // check that source folder and files doesn't exists
- assertFalse(fs.exists(new Path(source + "/" + i)));
- assertFalse(fs.exists(new Path(source + "/" + i + "/fileToRename")));
- assertFalse(fs.exists(new Path(source + "/" + i + "/file/to/rename")));
- assertFalse(fs.exists(new Path(source + "/" + i + "/file+to%rename")));
- assertFalse(fs.exists(new Path(source + "/fileToRename" + i)));
- }
- }
-
- /*
- * Test case for delete operation with multiple threads and flat listing enabled.
- */
- @Test
- public void testDeleteSmallFolderWithThreads() throws Exception {
-
- validateDeleteFolder(fs, "root");
-
- // With single iteration, we would have created 7 blobs.
- int expectedThreadsCreated = Math.min(7, deleteThreads);
-
- // Validate from logs that threads are enabled.
- String content = logs.getOutput();
- assertInLog(content, "ms with threads: " + expectedThreadsCreated);
-
- // Validate thread executions
- for (int i = 0; i < expectedThreadsCreated; i++) {
- assertInLog(content,
- "AzureBlobDeleteThread-" + Thread.currentThread().getName() + "-" + i);
- }
-
- // Also ensure that we haven't spawned extra threads.
- if (expectedThreadsCreated < deleteThreads) {
- for (int i = expectedThreadsCreated; i < deleteThreads; i++) {
- assertNotInLog(content,
- "AzureBlobDeleteThread-" + Thread.currentThread().getName() + "-" + i);
- }
- }
- }
-
- /*
- * Test case for delete operation on a large folder with multiple threads
- * and flat listing enabled.
- */
- @Test
- public void testDeleteLargeFolderWithThreads() throws Exception {
- // Populate the source folder with a large number of files and directories.
- this.iterations = 10;
- validateDeleteFolder(fs, "root");
-
- // Validate from logs that threads are enabled.
- String content = logs.getOutput();
- assertInLog(content, "ms with threads: " + deleteThreads);
-
- // Validate thread executions
- for (int i = 0; i < deleteThreads; i++) {
- assertInLog(content,
- "AzureBlobDeleteThread-" + Thread.currentThread().getName() + "-" + i);
- }
- }
-
- /*
- * Test case for delete operation with threads disabled and flat listing enabled.
- */
- @Test
- public void testDeleteLargeFolderDisableThreads() throws Exception {
- Configuration conf = fs.getConf();
- conf.setInt(NativeAzureFileSystem.AZURE_DELETE_THREADS, 0);
- URI uri = fs.getUri();
- fs.initialize(uri, conf);
-
- // Populate the source folder with a large number of files and directories.
- this.iterations = 10;
- validateDeleteFolder(fs, "root");
-
- // Validate from logs that threads are disabled.
- String content = logs.getOutput();
- assertInLog(content,
- "Disabling threads for Delete operation as thread count 0");
-
- // Validate no thread executions
- for (int i = 0; i < deleteThreads; i++) {
- assertNotInLog(content,
- "AzureBlobDeleteThread-" + Thread.currentThread().getName() + "-" + i);
- }
- }
-
- /*
- * Test case for delete operation with threads and flat listing disabled.
- */
- @Test
- public void testDeleteSmallFolderDisableThreadsDisableFlatListing() throws Exception {
- Configuration conf = fs.getConf();
-
- // Number of threads set to 0 or 1 disables threads.
- conf.setInt(NativeAzureFileSystem.AZURE_DELETE_THREADS, 1);
- conf.setBoolean(AzureNativeFileSystemStore.KEY_ENABLE_FLAT_LISTING, false);
- URI uri = fs.getUri();
- fs.initialize(uri, conf);
-
- validateDeleteFolder(fs, "root");
-
- // Validate from logs that threads are disabled.
- String content = logs.getOutput();
- assertInLog(content,
- "Disabling threads for Delete operation as thread count 1");
-
- // Validate no thread executions
- for (int i = 0; i < deleteThreads; i++) {
- assertNotInLog(content,
- "AzureBlobDeleteThread-" + Thread.currentThread().getName() + "-" + i);
- }
- }
-
- /*
- * Test case for delete operation when thread pool creation fails and the
- * operation falls back to serial execution.
- */
- @Test
- public void testDeleteThreadPoolExceptionFailure() throws Exception {
-
- // Spy azure file system object and raise exception for new thread pool
- NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs);
-
- String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root")));
-
- AzureFileSystemThreadPoolExecutor mockThreadPoolExecutor = Mockito.spy(
- mockFs.getThreadPoolExecutor(deleteThreads, "AzureBlobDeleteThread", "Delete",
- path, NativeAzureFileSystem.AZURE_DELETE_THREADS));
- Mockito.when(mockThreadPoolExecutor.getThreadPool(7)).thenThrow(new Exception());
-
- // With a single iteration, we would have created 7 blobs, resulting in 7 threads.
- Mockito.when(mockFs.getThreadPoolExecutor(deleteThreads, "AzureBlobDeleteThread", "Delete",
- path, NativeAzureFileSystem.AZURE_DELETE_THREADS)).thenReturn(mockThreadPoolExecutor);
-
- validateDeleteFolder(mockFs, "root");
-
- // Validate from logs that threads are disabled.
- String content = logs.getOutput();
- assertInLog(content, "Failed to create thread pool with threads");
- assertInLog(content, "Serializing the Delete operation");
- }
-
- /*
- * Test case for delete operation when the thread pool rejects all execute
- * requests and the operation falls back to serial execution.
- */
- @Test
- public void testDeleteThreadPoolExecuteFailure() throws Exception {
-
- // Mock thread pool executor to throw exception for all requests.
- ThreadPoolExecutor mockThreadExecutor = Mockito.mock(ThreadPoolExecutor.class);
- Mockito.doThrow(new RejectedExecutionException()).when(mockThreadExecutor).execute(Mockito.any(Runnable.class));
-
- // Spy azure file system object and return mocked thread pool
- NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs);
-
- String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root")));
-
- AzureFileSystemThreadPoolExecutor mockThreadPoolExecutor = Mockito.spy(
- mockFs.getThreadPoolExecutor(deleteThreads, "AzureBlobDeleteThread", "Delete",
- path, NativeAzureFileSystem.AZURE_DELETE_THREADS));
- Mockito.when(mockThreadPoolExecutor.getThreadPool(7)).thenReturn(mockThreadExecutor);
-
- // With a single iteration, we would have created 7 blobs, resulting in 7 threads.
- Mockito.when(mockFs.getThreadPoolExecutor(deleteThreads, "AzureBlobDeleteThread", "Delete",
- path, NativeAzureFileSystem.AZURE_DELETE_THREADS)).thenReturn(mockThreadPoolExecutor);
-
- validateDeleteFolder(mockFs, "root");
-
- // Validate from logs that threads are disabled.
- String content = logs.getOutput();
- assertInLog(content,
- "Rejected execution of thread for Delete operation on blob");
- assertInLog(content, "Serializing the Delete operation");
- }
-
- /*
- * Test case for delete operation when the thread pool rejects all but the
- * first execute request, leaving the remaining threads unused.
- */
- @Test
- public void testDeleteThreadPoolExecuteSingleThreadFailure() throws Exception {
-
- // Spy azure file system object and return mocked thread pool
- NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs);
-
- // Spy a thread pool executor and link it to azure file system object.
- String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root")));
- AzureFileSystemThreadPoolExecutor mockThreadPoolExecutor = Mockito.spy(
- mockFs.getThreadPoolExecutor(deleteThreads, "AzureBlobDeleteThread", "Delete",
- path, NativeAzureFileSystem.AZURE_DELETE_THREADS));
-
- // With a single iteration, we would have created 7 blobs, resulting in 7 threads.
- Mockito.when(mockFs.getThreadPoolExecutor(deleteThreads, "AzureBlobDeleteThread", "Delete",
- path, NativeAzureFileSystem.AZURE_DELETE_THREADS)).thenReturn(mockThreadPoolExecutor);
-
- // Create a thread executor and link it to mocked thread pool executor object.
- ThreadPoolExecutor mockThreadExecutor = Mockito.spy(mockThreadPoolExecutor.getThreadPool(7));
- Mockito.when(mockThreadPoolExecutor.getThreadPool(7)).thenReturn(mockThreadExecutor);
-
- // Mock the thread executor to throw an exception for every request after the first.
- Mockito.doCallRealMethod().doThrow(new RejectedExecutionException()).when(mockThreadExecutor).execute(Mockito.any(Runnable.class));
-
- validateDeleteFolder(mockFs, "root");
-
- // Validate from logs that threads are enabled and that some threads go unused.
- String content = logs.getOutput();
- assertInLog(content,
- "Using thread pool for Delete operation with threads 7");
- assertInLog(content,
- "6 threads not used for Delete operation on blob");
- }
-
- /*
- * Test case for delete operation when waiting for thread pool termination
- * is interrupted, causing the operation to fail.
- */
- @Test
- public void testDeleteThreadPoolTerminationFailure() throws Exception {
-
- // Spy azure file system object and return mocked thread pool
- NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs);
-
- // Spy a thread pool executor and link it to azure file system object.
- String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root")));
- AzureFileSystemThreadPoolExecutor mockThreadPoolExecutor = Mockito.spy(
- ((NativeAzureFileSystem) fs).getThreadPoolExecutor(deleteThreads, "AzureBlobDeleteThread", "Delete",
- path, NativeAzureFileSystem.AZURE_DELETE_THREADS));
-
- // Create a mock thread executor and link it to the mocked thread pool executor object.
- // Mock the thread executor to throw an exception when awaiting termination.
- ThreadPoolExecutor mockThreadExecutor = Mockito.mock(ThreadPoolExecutor.class);
- Mockito.doNothing().when(mockThreadExecutor).execute(Mockito.any(Runnable.class));
- Mockito.when(mockThreadExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS)).thenThrow(new InterruptedException());
-
- Mockito.when(mockThreadPoolExecutor.getThreadPool(7)).thenReturn(mockThreadExecutor);
-
- // With a single iteration, we would have created 7 blobs, resulting in 7 threads.
- Mockito.when(mockFs.getThreadPoolExecutor(deleteThreads, "AzureBlobDeleteThread", "Delete",
- path, NativeAzureFileSystem.AZURE_DELETE_THREADS)).thenReturn(mockThreadPoolExecutor);
-
- createFolder(mockFs, "root");
- Path sourceFolder = new Path("root");
- boolean exception = false;
- try {
- mockFs.delete(sourceFolder, true);
- } catch (IOException e){
- exception = true;
- }
-
- assertTrue(exception);
- assertTrue(mockFs.exists(sourceFolder));
-
- // Validate from logs that threads are enabled and the delete operation failed.
- String content = logs.getOutput();
- assertInLog(content,
- "Using thread pool for Delete operation with threads");
- assertInLog(content, "Threads got interrupted Delete blob operation");
- assertInLog(content,
- "Delete failed as operation on subfolders and files failed.");
- }
-
- /*
- * Validate that when a directory is deleted recursively, the operation succeeds
- * even if a child directory delete fails because the directory does not exist.
- * This can happen if a child directory is deleted by an external agent while
- * the parent is in the process of being deleted recursively.
- */
- @Test
- public void testRecursiveDirectoryDeleteWhenChildDirectoryDeleted()
- throws Exception {
- testRecursiveDirectoryDelete(true);
- }
-
- /*
- * Validate that when a directory is deleted recursively, the operation succeeds
- * even if a file delete fails because it does not exist.
- * This can happen if a file is deleted by an external agent while
- * the parent directory is in the process of being deleted.
- */
- @Test
- public void testRecursiveDirectoryDeleteWhenDeletingChildFileReturnsFalse()
- throws Exception {
- testRecursiveDirectoryDelete(false);
- }
-
- private void testRecursiveDirectoryDelete(boolean useDir) throws Exception {
- String childPathToBeDeletedByExternalAgent = (useDir)
- ? "root/0"
- : "root/0/fileToRename";
- // Spy azure file system object and return false for deleting one file
- NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs);
- String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path(
- childPathToBeDeletedByExternalAgent)));
-
- Answer<Boolean> answer = new Answer<Boolean>() {
- public Boolean answer(InvocationOnMock invocation) throws Throwable {
- String path = (String) invocation.getArguments()[0];
- boolean isDir = (boolean) invocation.getArguments()[1];
- boolean realResult = fs.deleteFile(path, isDir);
- assertTrue(realResult);
- boolean fakeResult = false;
- return fakeResult;
- }
- };
-
- Mockito.when(mockFs.deleteFile(path, useDir)).thenAnswer(answer);
-
- createFolder(mockFs, "root");
- Path sourceFolder = new Path("root");
-
- assertTrue(mockFs.delete(sourceFolder, true));
- assertFalse(mockFs.exists(sourceFolder));
-
- // Validate from logs that threads are enabled, that a child directory was
- // deleted by an external caller, and the parent delete operation still
- // succeeds.
- String content = logs.getOutput();
- assertInLog(content,
- "Using thread pool for Delete operation with threads");
- assertInLog(content, String.format("Attempt to delete non-existent %s %s",
- useDir ? "directory" : "file", path));
- }
-
- /*
- * Test case for delete operation when deleting a single file throws an
- * exception, causing the whole operation to fail.
- */
- @Test
- public void testDeleteSingleDeleteException() throws Exception {
-
- // Spy azure file system object and raise exception for deleting one file
- NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs);
- String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root/0")));
- Mockito.doThrow(new IOException()).when(mockFs).deleteFile(path, true);
-
- createFolder(mockFs, "root");
- Path sourceFolder = new Path("root");
-
- boolean exception = false;
- try {
- mockFs.delete(sourceFolder, true);
- } catch (IOException e){
- exception = true;
- }
-
- assertTrue(exception);
- assertTrue(mockFs.exists(sourceFolder));
-
- // Validate from logs that threads are enabled and delete operation failed.
- String content = logs.getOutput();
- assertInLog(content,
- "Using thread pool for Delete operation with threads");
- assertInLog(content,
- "Encountered Exception for Delete operation for file " + path);
- assertInLog(content,
- "Terminating execution of Delete operation now as some other thread already got exception or operation failed");
- }
-
- /*
- * Test case for rename operation when thread pool creation fails and the
- * operation falls back to serial execution.
- */
- @Test
- public void testRenameThreadPoolExceptionFailure() throws Exception {
-
- // Spy azure file system object and raise exception for new thread pool
- NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs);
-
- String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root")));
- AzureFileSystemThreadPoolExecutor mockThreadPoolExecutor = Mockito.spy(
- ((NativeAzureFileSystem) fs).getThreadPoolExecutor(renameThreads, "AzureBlobRenameThread", "Rename",
- path, NativeAzureFileSystem.AZURE_RENAME_THREADS));
- Mockito.when(mockThreadPoolExecutor.getThreadPool(7)).thenThrow(new Exception());
-
- // With a single iteration, we would have created 7 blobs, resulting in 7 threads.
- Mockito.doReturn(mockThreadPoolExecutor).when(mockFs).getThreadPoolExecutor(renameThreads, "AzureBlobRenameThread", "Rename",
- path, NativeAzureFileSystem.AZURE_RENAME_THREADS);
-
- validateRenameFolder(mockFs, "root", "rootnew");
-
- // Validate from logs that threads are disabled.
- String content = logs.getOutput();
- assertInLog(content, "Failed to create thread pool with threads");
- assertInLog(content, "Serializing the Rename operation");
- }
-
- /*
- * Test case for rename operation when the thread pool rejects all execute
- * requests and the operation falls back to serial execution.
- */
- @Test
- public void testRenameThreadPoolExecuteFailure() throws Exception {
-
- // Mock thread pool executor to throw exception for all requests.
- ThreadPoolExecutor mockThreadExecutor = Mockito.mock(ThreadPoolExecutor.class);
- Mockito.doThrow(new RejectedExecutionException()).when(mockThreadExecutor).execute(Mockito.any(Runnable.class));
-
- // Spy azure file system object and return mocked thread pool
- NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs);
-
- String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root")));
- AzureFileSystemThreadPoolExecutor mockThreadPoolExecutor = Mockito.spy(
- mockFs.getThreadPoolExecutor(renameThreads, "AzureBlobRenameThread", "Rename",
- path, NativeAzureFileSystem.AZURE_RENAME_THREADS));
- Mockito.when(mockThreadPoolExecutor.getThreadPool(7)).thenReturn(mockThreadExecutor);
-
- // With a single iteration, we would have created 7 blobs, resulting in 7 threads.
- Mockito.when(mockFs.getThreadPoolExecutor(renameThreads, "AzureBlobRenameThread", "Rename",
- path, NativeAzureFileSystem.AZURE_RENAME_THREADS)).thenReturn(mockThreadPoolExecutor);
-
- validateRenameFolder(mockFs, "root", "rootnew");
-
- // Validate from logs that threads are disabled.
- String content = logs.getOutput();
- assertInLog(content,
- "Rejected execution of thread for Rename operation on blob");
- assertInLog(content, "Serializing the Rename operation");
- }
-
- /*
- * Test case for rename operation when the thread pool rejects all but the
- * first execute request, leaving the remaining threads unused.
- */
- @Test
- public void testRenameThreadPoolExecuteSingleThreadFailure() throws Exception {
-
- // Spy azure file system object and return mocked thread pool
- NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs);
-
- // Spy a thread pool executor and link it to azure file system object.
- String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root")));
- AzureFileSystemThreadPoolExecutor mockThreadPoolExecutor = Mockito.spy(
- mockFs.getThreadPoolExecutor(renameThreads, "AzureBlobRenameThread", "Rename",
- path, NativeAzureFileSystem.AZURE_RENAME_THREADS));
-
- // With a single iteration, we would have created 7 blobs, resulting in 7 threads.
- Mockito.when(mockFs.getThreadPoolExecutor(renameThreads, "AzureBlobRenameThread", "Rename",
- path, NativeAzureFileSystem.AZURE_RENAME_THREADS)).thenReturn(mockThreadPoolExecutor);
-
- // Create a thread executor and link it to mocked thread pool executor object.
- ThreadPoolExecutor mockThreadExecutor = Mockito.spy(mockThreadPoolExecutor.getThreadPool(7));
- Mockito.when(mockThreadPoolExecutor.getThreadPool(7)).thenReturn(mockThreadExecutor);
-
- // Mock the thread executor to throw an exception for every request after the first.
- Mockito.doCallRealMethod().doThrow(new RejectedExecutionException()).when(mockThreadExecutor).execute(Mockito.any(Runnable.class));
-
- validateRenameFolder(mockFs, "root", "rootnew");
-
- // Validate from logs that threads are enabled and that some threads go unused.
- String content = logs.getOutput();
- assertInLog(content,
- "Using thread pool for Rename operation with threads 7");
- assertInLog(content,
- "6 threads not used for Rename operation on blob");
- }
-
- /*
- * Test case for rename operation when waiting for thread pool termination
- * is interrupted, causing the operation to fail.
- */
- @Test
- public void testRenameThreadPoolTerminationFailure() throws Exception {
-
- // Spy azure file system object and return mocked thread pool
- NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs);
-
- // Spy a thread pool executor and link it to azure file system object.
- String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root")));
- AzureFileSystemThreadPoolExecutor mockThreadPoolExecutor = Mockito.spy(
- mockFs.getThreadPoolExecutor(renameThreads, "AzureBlobRenameThread", "Rename",
- path, NativeAzureFileSystem.AZURE_RENAME_THREADS));
-
- // With a single iteration, we would have created 7 blobs, resulting in 7 threads.
- Mockito.when(mockFs.getThreadPoolExecutor(renameThreads, "AzureBlobRenameThread", "Rename",
- path, NativeAzureFileSystem.AZURE_RENAME_THREADS)).thenReturn(mockThreadPoolExecutor);
-
- // Mock the thread executor to accept requests but throw an exception when awaiting termination.
- ThreadPoolExecutor mockThreadExecutor = Mockito.mock(ThreadPoolExecutor.class);
- Mockito.doNothing().when(mockThreadExecutor).execute(Mockito.any(Runnable.class));
- Mockito.when(mockThreadExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS)).thenThrow(new InterruptedException());
- Mockito.when(mockThreadPoolExecutor.getThreadPool(7)).thenReturn(mockThreadExecutor);
-
-
- createFolder(mockFs, "root");
- Path sourceFolder = new Path("root");
- Path destFolder = new Path("rootnew");
- boolean exception = false;
- try {
- mockFs.rename(sourceFolder, destFolder);
- } catch (IOException e){
- exception = true;
- }
-
- assertTrue(exception);
- assertTrue(mockFs.exists(sourceFolder));
-
- // Validate from logs that threads are enabled and the rename operation failed.
- String content = logs.getOutput();
- assertInLog(content,
- "Using thread pool for Rename operation with threads");
- assertInLog(content, "Threads got interrupted Rename blob operation");
- assertInLog(content,
- "Rename failed as operation on subfolders and files failed.");
- }
-
- /*
- * Test case for rename operation when renaming a single file throws an
- * exception, causing the whole operation to fail.
- */
- @Test
- public void testRenameSingleRenameException() throws Exception {
-
- // Source and destination folders for the rename operation.
- Path sourceFolder = new Path("root");
- Path destFolder = new Path("rootnew");
-
- // Spy azure file system object and populate rename pending spy object.
- NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs);
-
- // Populate data now only such that rename pending spy object would see this data.
- createFolder(mockFs, "root");
-
- String srcKey = mockFs.pathToKey(mockFs.makeAbsolute(sourceFolder));
- String dstKey = mockFs.pathToKey(mockFs.makeAbsolute(destFolder));
-
- FolderRenamePending mockRenameFs = Mockito.spy(mockFs.prepareAtomicFolderRename(srcKey, dstKey));
- Mockito.when(mockFs.prepareAtomicFolderRename(srcKey, dstKey)).thenReturn(mockRenameFs);
- String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root/0")));
- Mockito.doThrow(new IOException()).when(mockRenameFs).renameFile(Mockito.any(FileMetadata.class));
-
- boolean exception = false;
- try {
- mockFs.rename(sourceFolder, destFolder);
- } catch (IOException e){
- exception = true;
- }
-
- assertTrue(exception);
- assertTrue(mockFs.exists(sourceFolder));
-
- // Validate from logs that threads are enabled and the rename operation failed.
- String content = logs.getOutput();
- assertInLog(content,
- "Using thread pool for Rename operation with threads");
- assertInLog(content,
- "Encountered Exception for Rename operation for file " + path);
- assertInLog(content,
- "Terminating execution of Rename operation now as some other thread already got exception or operation failed");
- }
-
- @Override
- protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
- return AzureBlobStorageTestAccount.create();
- }
-
-}
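
The assertInLog/assertNotInLog helpers in the file above read captured log
output from a "logs" member whose setup falls outside this excerpt. A minimal
sketch of one way such a capturer can be wired up with Hadoop's test
utilities follows; the choice of logger, the class name, and the
@Before/@After placement are assumptions for illustration, not part of this
patch:

    import org.apache.hadoop.fs.azure.NativeAzureFileSystem;
    import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
    import org.junit.After;
    import org.junit.Before;
    import org.slf4j.LoggerFactory;

    public abstract class LogCapturingTestBase {
      // Captures everything logged by the file system under test.
      protected LogCapturer logs;

      @Before
      public void startLogCapture() {
        // Assumes the slf4j overload of LogCapturer.captureLogs().
        logs = LogCapturer.captureLogs(
            LoggerFactory.getLogger(NativeAzureFileSystem.class));
      }

      @After
      public void stopLogCapture() {
        logs.stopCapturing(); // stop appending to the capture buffer
      }
    }
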
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFSAuthWithBlobSpecificKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFSAuthWithBlobSpecificKeys.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFSAuthWithBlobSpecificKeys.java
deleted file mode 100644
index 6149154..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFSAuthWithBlobSpecificKeys.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import org.apache.hadoop.conf.Configuration;
-
-import static org.apache.hadoop.fs.azure.SecureStorageInterfaceImpl.KEY_USE_CONTAINER_SASKEY_FOR_ALL_ACCESS;
-
-/**
- * Test class to hold all WASB authorization tests that use blob-specific keys
- * to access storage.
- */
-public class TestNativeAzureFSAuthWithBlobSpecificKeys
- extends TestNativeAzureFileSystemAuthorizationWithOwner {
-
- @Override
- public Configuration getConfiguration() {
- Configuration conf = super.getConfiguration();
- conf.set(KEY_USE_CONTAINER_SASKEY_FOR_ALL_ACCESS, "false");
- return conf;
- }
-
- @Override
- protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
- Configuration conf = getConfiguration();
- return AzureBlobStorageTestAccount.create(conf);
- }
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFSAuthorizationCaching.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFSAuthorizationCaching.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFSAuthorizationCaching.java
deleted file mode 100644
index 84558f8..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFSAuthorizationCaching.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.ContractTestUtils;
-import org.junit.Test;
-
-import static org.apache.hadoop.fs.azure.CachingAuthorizer.KEY_AUTH_SERVICE_CACHING_ENABLE;
-
-/**
- * Test class to hold all WASB authorization caching related tests.
- */
-public class TestNativeAzureFSAuthorizationCaching
- extends TestNativeAzureFileSystemAuthorizationWithOwner {
-
- private static final int DUMMY_TTL_VALUE = 5000;
-
- @Override
- public Configuration getConfiguration() {
- Configuration conf = super.getConfiguration();
- conf.set(KEY_AUTH_SERVICE_CACHING_ENABLE, "true");
- return conf;
- }
-
- @Override
- protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
- Configuration conf = getConfiguration();
- return AzureBlobStorageTestAccount.create(conf);
- }
-
- /**
- * Test to verify cache behavior -- assert that PUT overwrites value if present
- */
- @Test
- public void testCachePut() throws Throwable {
- CachingAuthorizer<String, Integer> cache = new CachingAuthorizer<>(DUMMY_TTL_VALUE, "TEST");
- cache.init(getConfiguration());
- cache.put("TEST", 1);
- cache.put("TEST", 3);
- int result = cache.get("TEST");
- ContractTestUtils.assertTrue("Cache returned unexpected result", result == 3);
- }
-}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFSPageBlobLive.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFSPageBlobLive.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFSPageBlobLive.java
deleted file mode 100644
index 208cff3..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFSPageBlobLive.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.azure;
-
-import org.apache.hadoop.conf.Configuration;
-
-/**
- * Run the base Azure file system tests strictly on page blobs to make sure fundamental
- * operations on page blob files and folders work as expected.
- * These operations include create, delete, rename, list, and so on.
- */
-public class TestNativeAzureFSPageBlobLive extends
- NativeAzureFileSystemBaseTest {
-
- @Override
- protected AzureBlobStorageTestAccount createTestAccount()
- throws Exception {
- Configuration conf = new Configuration();
-
- // Configure the page blob directories key so every file created is a page blob.
- conf.set(AzureNativeFileSystemStore.KEY_PAGE_BLOB_DIRECTORIES, "/");
-
- // Configure the atomic rename directories key so every folder will have
- // atomic rename applied.
- conf.set(AzureNativeFileSystemStore.KEY_ATOMIC_RENAME_DIRECTORIES, "/");
- return AzureBlobStorageTestAccount.create(conf);
- }
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAppend.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAppend.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAppend.java
deleted file mode 100644
index a2b35cb..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAppend.java
+++ /dev/null
@@ -1,362 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import java.io.IOException;
-import java.net.URI;
-import java.util.Arrays;
-
-import org.apache.commons.lang.RandomStringUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-public class TestNativeAzureFileSystemAppend extends AbstractWasbTestBase {
-
- private static final String TEST_FILE = "test.dat";
- private static final Path TEST_PATH = new Path(TEST_FILE);
-
- private AzureBlobStorageTestAccount testAccount = null;
-
- @Before
- public void setUp() throws Exception {
- super.setUp();
- testAccount = createTestAccount();
- fs = testAccount.getFileSystem();
- Configuration conf = fs.getConf();
- conf.setBoolean(NativeAzureFileSystem.APPEND_SUPPORT_ENABLE_PROPERTY_NAME, true);
- URI uri = fs.getUri();
- fs.initialize(uri, conf);
- }
-
- /*
- * Helper method that creates test data of size provided by the
- * "size" parameter.
- */
- private static byte[] getTestData(int size) {
- byte[] testData = new byte[size];
- System.arraycopy(RandomStringUtils.randomAlphabetic(size).getBytes(), 0, testData, 0, size);
- return testData;
- }
-
- // Helper method to create file and write fileSize bytes of data on it.
- private byte[] createBaseFileWithData(int fileSize, Path testPath) throws Throwable {
-
- FSDataOutputStream createStream = null;
- try {
- createStream = fs.create(testPath);
- byte[] fileData = null;
-
- if (fileSize != 0) {
- fileData = getTestData(fileSize);
- createStream.write(fileData);
- }
- return fileData;
- } finally {
- if (createStream != null) {
- createStream.close();
- }
- }
- }
-
- /*
- * Helper method to verify that "dataLength" bytes read from the stream match
- * the expected test data.
- */
- private boolean verifyFileData(int dataLength, byte[] testData, int testDataIndex,
- FSDataInputStream srcStream) {
-
- try {
-
- byte[] fileBuffer = new byte[dataLength];
- byte[] testDataBuffer = new byte[dataLength];
-
- int fileBytesRead = srcStream.read(fileBuffer);
-
- if (fileBytesRead < dataLength) {
- return false;
- }
-
- System.arraycopy(testData, testDataIndex, testDataBuffer, 0, dataLength);
-
- if (!Arrays.equals(fileBuffer, testDataBuffer)) {
- return false;
- }
-
- return true;
-
- } catch (Exception ex) {
- return false;
- }
-
- }
-
- /*
- * Helper method to verify Append on a testFile.
- */
- private boolean verifyAppend(byte[] testData, Path testFile) {
-
- FSDataInputStream srcStream = null;
- try {
-
- srcStream = fs.open(testFile);
- int baseBufferSize = 2048;
- int testDataSize = testData.length;
- int testDataIndex = 0;
-
- while (testDataSize > baseBufferSize) {
-
- if (!verifyFileData(baseBufferSize, testData, testDataIndex, srcStream)) {
- return false;
- }
- testDataIndex += baseBufferSize;
- testDataSize -= baseBufferSize;
- }
-
- if (!verifyFileData(testDataSize, testData, testDataIndex, srcStream)) {
- return false;
- }
-
- return true;
- } catch(Exception ex) {
- return false;
- } finally {
- if (srcStream != null) {
- try {
- srcStream.close();
- } catch(IOException ioe) {
- // Swallowing
- }
- }
- }
- }
-
- /*
- * Test case to verify that appending a small amount of data works. This
- * tests append end-to-end.
- */
- @Test
- public void testSingleAppend() throws Throwable{
-
- FSDataOutputStream appendStream = null;
- try {
- int baseDataSize = 50;
- byte[] baseDataBuffer = createBaseFileWithData(baseDataSize, TEST_PATH);
-
- int appendDataSize = 20;
- byte[] appendDataBuffer = getTestData(appendDataSize);
- appendStream = fs.append(TEST_PATH, 10);
- appendStream.write(appendDataBuffer);
- appendStream.close();
- byte[] testData = new byte[baseDataSize + appendDataSize];
- System.arraycopy(baseDataBuffer, 0, testData, 0, baseDataSize);
- System.arraycopy(appendDataBuffer, 0, testData, baseDataSize, appendDataSize);
-
- Assert.assertTrue(verifyAppend(testData, TEST_PATH));
- } finally {
- if (appendStream != null) {
- appendStream.close();
- }
- }
- }
-
- /*
- * Test case to verify append to an empty file.
- */
- @Test
- public void testSingleAppendOnEmptyFile() throws Throwable {
-
- FSDataOutputStream appendStream = null;
-
- try {
- createBaseFileWithData(0, TEST_PATH);
-
- int appendDataSize = 20;
- byte[] appendDataBuffer = getTestData(appendDataSize);
- appendStream = fs.append(TEST_PATH, 10);
- appendStream.write(appendDataBuffer);
- appendStream.close();
-
- Assert.assertTrue(verifyAppend(appendDataBuffer, TEST_PATH));
- } finally {
- if (appendStream != null) {
- appendStream.close();
- }
- }
- }
-
- /*
- * Test to verify that we can open only one Append stream on a File.
- */
- @Test
- public void testSingleAppenderScenario() throws Throwable {
-
- FSDataOutputStream appendStream1 = null;
- FSDataOutputStream appendStream2 = null;
- IOException ioe = null;
- try {
- createBaseFileWithData(0, TEST_PATH);
- appendStream1 = fs.append(TEST_PATH, 10);
- boolean encounteredException = false;
- try {
- appendStream2 = fs.append(TEST_PATH, 10);
- } catch(IOException ex) {
- encounteredException = true;
- ioe = ex;
- }
-
- appendStream1.close();
-
- Assert.assertTrue(encounteredException);
- GenericTestUtils.assertExceptionContains("Unable to set Append lease on the Blob", ioe);
- } finally {
- if (appendStream1 != null) {
- appendStream1.close();
- }
-
- if (appendStream2 != null) {
- appendStream2.close();
- }
- }
- }
-
- /*
- * Tests to verify multiple appends on a Blob.
- */
- @Test
- public void testMultipleAppends() throws Throwable {
-
- int baseDataSize = 50;
- byte[] baseDataBuffer = createBaseFileWithData(baseDataSize, TEST_PATH);
-
- int appendDataSize = 100;
- int targetAppendCount = 50;
- byte[] testData = new byte[baseDataSize + (appendDataSize*targetAppendCount)];
- int testDataIndex = 0;
- System.arraycopy(baseDataBuffer, 0, testData, testDataIndex, baseDataSize);
- testDataIndex += baseDataSize;
-
- int appendCount = 0;
-
- FSDataOutputStream appendStream = null;
-
- try {
- while (appendCount < targetAppendCount) {
-
- byte[] appendDataBuffer = getTestData(appendDataSize);
- appendStream = fs.append(TEST_PATH, 30);
- appendStream.write(appendDataBuffer);
- appendStream.close();
-
- System.arraycopy(appendDataBuffer, 0, testData, testDataIndex, appendDataSize);
- testDataIndex += appendDataSize;
- appendCount++;
- }
-
- Assert.assertTrue(verifyAppend(testData, TEST_PATH));
-
- } finally {
- if (appendStream != null) {
- appendStream.close();
- }
- }
- }
-
- /*
- * Test to verify multiple appends on the same stream.
- */
- @Test
- public void testMultipleAppendsOnSameStream() throws Throwable {
-
- int baseDataSize = 50;
- byte[] baseDataBuffer = createBaseFileWithData(baseDataSize, TEST_PATH);
- int appendDataSize = 100;
- int targetAppendCount = 50;
- byte[] testData = new byte[baseDataSize + (appendDataSize*targetAppendCount)];
- int testDataIndex = 0;
- System.arraycopy(baseDataBuffer, 0, testData, testDataIndex, baseDataSize);
- testDataIndex += baseDataSize;
- int appendCount = 0;
-
- FSDataOutputStream appendStream = null;
-
- try {
-
- while (appendCount < targetAppendCount) {
-
- appendStream = fs.append(TEST_PATH, 50);
-
- int singleAppendChunkSize = 20;
- int appendRunSize = 0;
- while (appendRunSize < appendDataSize) {
-
- byte[] appendDataBuffer = getTestData(singleAppendChunkSize);
- appendStream.write(appendDataBuffer);
- System.arraycopy(appendDataBuffer, 0, testData,
- testDataIndex + appendRunSize, singleAppendChunkSize);
-
- appendRunSize += singleAppendChunkSize;
- }
-
- appendStream.close();
- testDataIndex += appendDataSize;
- appendCount++;
- }
-
- Assert.assertTrue(verifyAppend(testData, TEST_PATH));
- } finally {
- if (appendStream != null) {
- appendStream.close();
- }
- }
- }
-
- /*
- * Test to verify the behavior when the append support configuration flag is set to false.
- */
- @Test(expected=UnsupportedOperationException.class)
- public void testFalseConfigurationFlagBehavior() throws Throwable {
-
- fs = testAccount.getFileSystem();
- Configuration conf = fs.getConf();
- conf.setBoolean(NativeAzureFileSystem.APPEND_SUPPORT_ENABLE_PROPERTY_NAME, false);
- URI uri = fs.getUri();
- fs.initialize(uri, conf);
-
- FSDataOutputStream appendStream = null;
-
- try {
- createBaseFileWithData(0, TEST_PATH);
- appendStream = fs.append(TEST_PATH, 10);
- } finally {
- if (appendStream != null) {
- appendStream.close();
- }
- }
- }
-
- @Override
- protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
- return AzureBlobStorageTestAccount.create();
- }
-}
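
The tests above exercise the append path end-to-end: create a base file,
reopen it with fs.append(), then read the whole blob back. A condensed sketch
of that round trip, assuming "fs" is a file system initialized with
APPEND_SUPPORT_ENABLE_PROPERTY_NAME set to true as in setUp() above; the
helper name and string payloads are illustrative only:

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    static void appendRoundTrip(FileSystem fs, Path path) throws Exception {
      try (FSDataOutputStream out = fs.create(path)) {
        out.write("base".getBytes(StandardCharsets.UTF_8));
      }
      // The second argument is the buffer size, as in the tests above.
      try (FSDataOutputStream out = fs.append(path, 10)) {
        out.write("+appended".getBytes(StandardCharsets.UTF_8));
      }
      try (FSDataInputStream in = fs.open(path)) {
        byte[] buf = new byte[13];
        in.readFully(0, buf); // expect "base+appended"
      }
    }
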
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAtomicRenameDirList.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAtomicRenameDirList.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAtomicRenameDirList.java
deleted file mode 100644
index 602c1f7..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAtomicRenameDirList.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import java.io.IOException;
-import java.net.URI;
-
-import org.apache.hadoop.conf.Configuration;
-import org.junit.Test;
-
-public class TestNativeAzureFileSystemAtomicRenameDirList
- extends AbstractWasbTestBase {
- private AzureBlobStorageTestAccount testAccount;
-
- // HBase-site config controlling HBase root dir
- private static final String HBASE_ROOT_DIR_CONF_STRING = "hbase.rootdir";
- private static final String HBASE_ROOT_DIR_VALUE_ON_DIFFERENT_FS = "wasb://somedifferentfilesystem.blob.core.windows.net/hbase";
- @Override
- protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
- testAccount = AzureBlobStorageTestAccount.create();
- return testAccount;
- }
-
- @Test
- public void testAzureNativeStoreIsAtomicRenameKeyDoesNotThrowNPEOnInitializingWithNonDefaultURI () throws IOException {
- NativeAzureFileSystem azureFs = (NativeAzureFileSystem)fs;
- AzureNativeFileSystemStore azureStore = azureFs.getStore();
- Configuration conf = fs.getConf();
- conf.set(HBASE_ROOT_DIR_CONF_STRING, HBASE_ROOT_DIR_VALUE_ON_DIFFERENT_FS);
- URI uri = fs.getUri();
- fs.initialize(uri, conf);
- azureStore.isAtomicRenameKey("anyrandomkey");
- }
-}
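
A pattern recurring throughout these tests, including the one above, is
reinitializing a live file system after mutating its Configuration so that
the changed keys take effect. Distilled into a helper for clarity; the helper
itself is illustrative, not part of the patch:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    static void setAndReinitialize(FileSystem fs, String key, String value)
        throws IOException {
      Configuration conf = fs.getConf();
      conf.set(key, value);
      // Re-run initialize() against the same URI to pick up the new value.
      fs.initialize(fs.getUri(), conf);
    }
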
[20/20] hadoop git commit: HADOOP-14553. Add (parallelized)
integration tests to hadoop-azure Contributed by Steve Loughran
Posted by st...@apache.org.
HADOOP-14553. Add (parallelized) integration tests to hadoop-azure
Contributed by Steve Loughran
(cherry picked from commit 2d2d97fa7d4224369b3c13bc4a45e8cc9e29afb1)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9f6b08f8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9f6b08f8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9f6b08f8
Branch: refs/heads/branch-3.0
Commit: 9f6b08f8404ea8576bd20bfeeb92bc2c206ae0d9
Parents: b5e9982
Author: Steve Loughran <st...@apache.org>
Authored: Fri Sep 15 17:04:43 2017 +0100
Committer: Steve Loughran <st...@apache.org>
Committed: Fri Sep 15 17:04:43 2017 +0100
----------------------------------------------------------------------
.../hadoop/fs/FileSystemContractBaseTest.java | 11 +-
.../fs/contract/AbstractContractOpenTest.java | 4 +-
.../fs/contract/AbstractContractSeekTest.java | 2 +-
hadoop-tools/hadoop-azure/pom.xml | 251 ++++++
.../fs/azure/AzureNativeFileSystemStore.java | 2 +-
.../hadoop-azure/src/site/markdown/index.md | 94 +-
.../src/site/markdown/testing_azure.md | 576 ++++++++++++
.../hadoop/fs/azure/AbstractWasbTestBase.java | 136 ++-
.../fs/azure/AbstractWasbTestWithTimeout.java | 73 ++
.../fs/azure/AzureBlobStorageTestAccount.java | 42 +-
.../azure/ITestAzureConcurrentOutOfBandIo.java | 179 ++++
...zureConcurrentOutOfBandIoWithSecureMode.java | 33 +
.../ITestAzureFileSystemErrorConditions.java | 243 +++++
.../fs/azure/ITestBlobDataValidation.java | 244 ++++++
.../fs/azure/ITestBlobTypeSpeedDifference.java | 163 ++++
.../fs/azure/ITestBlockBlobInputStream.java | 874 ++++++++++++++++++
.../hadoop/fs/azure/ITestContainerChecks.java | 194 ++++
...estFileSystemOperationExceptionHandling.java | 283 ++++++
...TestFileSystemOperationExceptionMessage.java | 79 ++
...perationsExceptionHandlingMultiThreaded.java | 366 ++++++++
.../ITestFileSystemOperationsWithThreads.java | 821 +++++++++++++++++
...stNativeAzureFSAuthWithBlobSpecificKeys.java | 40 +
.../ITestNativeAzureFSAuthorizationCaching.java | 53 ++
.../azure/ITestNativeAzureFSPageBlobLive.java | 43 +
.../azure/ITestNativeAzureFileSystemAppend.java | 350 ++++++++
...ativeAzureFileSystemAtomicRenameDirList.java | 55 ++
...veAzureFileSystemAuthorizationWithOwner.java | 122 +++
...ITestNativeAzureFileSystemClientLogging.java | 136 +++
...estNativeAzureFileSystemConcurrencyLive.java | 185 ++++
...stNativeAzureFileSystemContractEmulator.java | 65 ++
.../ITestNativeAzureFileSystemContractLive.java | 108 +++
...tiveAzureFileSystemContractPageBlobLive.java | 114 +++
.../azure/ITestNativeAzureFileSystemLive.java | 236 +++++
.../ITestOutOfBandAzureBlobOperationsLive.java | 185 ++++
.../ITestReadAndSeekPageBlobAfterWrite.java | 341 ++++++++
.../fs/azure/ITestWasbRemoteCallHelper.java | 568 ++++++++++++
.../fs/azure/ITestWasbUriAndConfiguration.java | 610 +++++++++++++
.../hadoop/fs/azure/MockWasbAuthorizerImpl.java | 103 ++-
.../fs/azure/NativeAzureFileSystemBaseTest.java | 115 ++-
.../hadoop/fs/azure/RunningLiveWasbTests.txt | 22 -
.../azure/TestAzureConcurrentOutOfBandIo.java | 195 -----
...zureConcurrentOutOfBandIoWithSecureMode.java | 50 --
.../TestAzureFileSystemErrorConditions.java | 244 ------
.../hadoop/fs/azure/TestBlobDataValidation.java | 237 -----
.../hadoop/fs/azure/TestBlobMetadata.java | 7 +-
.../fs/azure/TestBlobOperationDescriptor.java | 3 -
.../fs/azure/TestBlobTypeSpeedDifference.java | 160 ----
.../fs/azure/TestBlockBlobInputStream.java | 875 -------------------
.../fs/azure/TestClientThrottlingAnalyzer.java | 5 +-
.../hadoop/fs/azure/TestContainerChecks.java | 185 ----
...estFileSystemOperationExceptionHandling.java | 269 ------
...TestFileSystemOperationExceptionMessage.java | 79 --
...perationsExceptionHandlingMultiThreaded.java | 330 -------
.../TestFileSystemOperationsWithThreads.java | 821 -----------------
...stNativeAzureFSAuthWithBlobSpecificKeys.java | 44 -
.../TestNativeAzureFSAuthorizationCaching.java | 60 --
.../fs/azure/TestNativeAzureFSPageBlobLive.java | 43 -
.../azure/TestNativeAzureFileSystemAppend.java | 362 --------
...ativeAzureFileSystemAtomicRenameDirList.java | 50 --
.../TestNativeAzureFileSystemAuthorization.java | 53 +-
...veAzureFileSystemAuthorizationWithOwner.java | 122 ---
...TestNativeAzureFileSystemBlockLocations.java | 8 +-
.../TestNativeAzureFileSystemClientLogging.java | 140 ---
.../TestNativeAzureFileSystemConcurrency.java | 29 +-
...estNativeAzureFileSystemConcurrencyLive.java | 184 ----
...stNativeAzureFileSystemContractEmulator.java | 48 -
.../TestNativeAzureFileSystemContractLive.java | 80 --
...TestNativeAzureFileSystemContractMocked.java | 3 +
...tiveAzureFileSystemContractPageBlobLive.java | 93 --
.../TestNativeAzureFileSystemFileNameCheck.java | 28 +-
.../fs/azure/TestNativeAzureFileSystemLive.java | 242 -----
.../azure/TestNativeAzureFileSystemMocked.java | 4 +
.../TestNativeAzureFileSystemUploadLogic.java | 78 +-
.../azure/TestOutOfBandAzureBlobOperations.java | 8 +-
.../TestOutOfBandAzureBlobOperationsLive.java | 203 -----
.../TestReadAndSeekPageBlobAfterWrite.java | 355 --------
.../azure/TestShellDecryptionKeyProvider.java | 15 +-
.../apache/hadoop/fs/azure/TestWasbFsck.java | 9 +-
.../fs/azure/TestWasbRemoteCallHelper.java | 569 ------------
.../fs/azure/TestWasbUriAndConfiguration.java | 617 -------------
.../ITestAzureNativeContractAppend.java | 41 +
.../ITestAzureNativeContractCreate.java | 34 +
.../ITestAzureNativeContractDelete.java | 33 +
.../ITestAzureNativeContractDistCp.java | 47 +
.../ITestAzureNativeContractGetFileStatus.java | 35 +
.../contract/ITestAzureNativeContractMkdir.java | 33 +
.../contract/ITestAzureNativeContractOpen.java | 34 +
.../ITestAzureNativeContractRename.java | 34 +
.../contract/ITestAzureNativeContractSeek.java | 34 +
.../contract/NativeAzureFileSystemContract.java | 19 +-
.../contract/TestAzureNativeContractAppend.java | 37 -
.../contract/TestAzureNativeContractCreate.java | 30 -
.../contract/TestAzureNativeContractDelete.java | 30 -
.../contract/TestAzureNativeContractDistCp.java | 33 -
.../TestAzureNativeContractGetFileStatus.java | 30 -
.../contract/TestAzureNativeContractMkdir.java | 30 -
.../contract/TestAzureNativeContractOpen.java | 30 -
.../contract/TestAzureNativeContractRename.java | 30 -
.../contract/TestAzureNativeContractSeek.java | 30 -
.../integration/AbstractAzureScaleTest.java | 66 ++
.../azure/integration/AzureTestConstants.java | 180 ++++
.../fs/azure/integration/AzureTestUtils.java | 479 ++++++++++
.../integration/CleanupTestContainers.java | 87 ++
.../azure/integration/ITestAzureHugeFiles.java | 456 ++++++++++
.../hadoop/fs/azure/integration/Sizes.java | 43 +
.../ITestAzureFileSystemInstrumentation.java | 586 +++++++++++++
.../TestAzureFileSystemInstrumentation.java | 579 ------------
107 files changed, 10227 insertions(+), 7901 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
index b49dd53..a4ccee3 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
@@ -61,7 +61,16 @@ public abstract class FileSystemContractBaseTest {
protected byte[] data = dataset(getBlockSize() * 2, 0, 255);
@Rule
- public Timeout globalTimeout = new Timeout(30000);
+ public Timeout globalTimeout = new Timeout(getGlobalTimeout());
+
+ /**
+ * Get the timeout in milliseconds for each test case.
+ * @return a time in milliseconds.
+ */
+ protected int getGlobalTimeout() {
+ return 30 * 1000;
+ }
+
@Rule
public ExpectedException thrown = ExpectedException.none();
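
This hunk turns the previously hard-coded 30-second JUnit timeout into an
overridable method. A sketch of how a slow-store contract test might use it;
the class name and value are illustrative only:

    // Raise the per-test timeout for a store with high operation latency.
    public abstract class SlowStoreContractTest
        extends FileSystemContractBaseTest {
      @Override
      protected int getGlobalTimeout() {
        return 10 * 60 * 1000; // ten minutes, in milliseconds
      }
    }
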
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java
index f9b16f4..ccf188f 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java
@@ -122,7 +122,7 @@ public abstract class AbstractContractOpenTest extends AbstractFSContractTestBas
Path path = path("testopenfiletwice.txt");
byte[] block = dataset(TEST_FILE_LEN, 0, 255);
//this file now has a simple rule: offset => value
- createFile(getFileSystem(), path, false, block);
+ createFile(getFileSystem(), path, true, block);
//open first
FSDataInputStream instream1 = getFileSystem().open(path);
FSDataInputStream instream2 = null;
@@ -150,7 +150,7 @@ public abstract class AbstractContractOpenTest extends AbstractFSContractTestBas
int base = 0x40; // 64
byte[] block = dataset(len, base, base + len);
//this file now has a simple rule: offset => (value | 0x40)
- createFile(getFileSystem(), path, false, block);
+ createFile(getFileSystem(), path, true, block);
//open first
instream = getFileSystem().open(path);
assertEquals(base, instream.read());
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java
index 3e71682..7af3cb0 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java
@@ -341,7 +341,7 @@ public abstract class AbstractContractSeekTest extends AbstractFSContractTestBas
int filesize = 10 * 1024;
byte[] buf = dataset(filesize, 0, 255);
Path randomSeekFile = path("testrandomseeks.bin");
- createFile(getFileSystem(), randomSeekFile, false, buf);
+ createFile(getFileSystem(), randomSeekFile, true, buf);
Random r = new Random();
// Record the sequence of seeks and reads which trigger a failure.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/pom.xml b/hadoop-tools/hadoop-azure/pom.xml
index 03b531b..1cde471 100644
--- a/hadoop-tools/hadoop-azure/pom.xml
+++ b/hadoop-tools/hadoop-azure/pom.xml
@@ -34,6 +34,15 @@
<properties>
<file.encoding>UTF-8</file.encoding>
<downloadSources>true</downloadSources>
+ <hadoop.tmp.dir>${project.build.directory}/test</hadoop.tmp.dir>
+    <!-- Are scale tests enabled? -->
+ <fs.azure.scale.test.enabled>unset</fs.azure.scale.test.enabled>
+ <!-- Size in MB of huge files. -->
+ <fs.azure.scale.test.huge.filesize>unset</fs.azure.scale.test.huge.filesize>
+    <!-- Partition size in MB for huge file uploads. -->
+ <fs.azure.scale.test.huge.partitionsize>unset</fs.azure.scale.test.huge.partitionsize>
+ <!-- Timeout in seconds for scale tests.-->
+ <fs.azure.scale.test.timeout>7200</fs.azure.scale.test.timeout>
</properties>
<build>
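
The properties above reach the test JVMs as system properties via the
systemPropertyVariables blocks in the profile below. A sketch of how a test
might consume them; the helper methods are illustrative, and only the
property names come from this patch:

    // Scale tests are off unless -Dfs.azure.scale.test.enabled=true is set;
    // the POM default is the literal "unset", which parses as false.
    static boolean isScaleTestEnabled() {
      return Boolean.parseBoolean(
          System.getProperty("fs.azure.scale.test.enabled", "false"));
    }

    // Huge-file size in MB, falling back to a caller-supplied default.
    static int hugeFilesizeMB(int defaultMB) {
      String v = System.getProperty("fs.azure.scale.test.huge.filesize", "unset");
      return "unset".equals(v) ? defaultMB : Integer.parseInt(v);
    }
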
@@ -224,4 +233,246 @@
</dependency>
</dependencies>
+
+ <profiles>
+ <profile>
+ <id>parallel-tests</id>
+ <activation>
+ <property>
+ <name>parallel-tests</name>
+ </property>
+ </activation>
+ <build>
+ <plugins>
+ <plugin>
+ <artifactId>maven-antrun-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>create-parallel-tests-dirs</id>
+ <phase>test-compile</phase>
+ <configuration>
+ <target>
+ <script language="javascript"><![CDATA[
+ var baseDirs = [
+ project.getProperty("test.build.data"),
+ project.getProperty("test.build.dir"),
+ project.getProperty("hadoop.tmp.dir")
+ ];
+ for (var i in baseDirs) {
+ for (var j = 1; j <= ${testsThreadCount}; ++j) {
+ var mkdir = project.createTask("mkdir");
+ mkdir.setDir(new java.io.File(baseDirs[i], j));
+ mkdir.perform();
+ }
+ }
+ ]]></script>
+ </target>
+ </configuration>
+ <goals>
+ <goal>run</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-surefire-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>default-test</id>
+ <goals>
+ <goal>test</goal>
+ </goals>
+ <configuration>
+            <forkCount>${testsThreadCount}</forkCount>
+ <reuseForks>false</reuseForks>
+ <argLine>${maven-surefire-plugin.argLine} -DminiClusterDedicatedDirs=true</argLine>
+ <forkedProcessTimeoutInSeconds>${fs.azure.scale.test.timeout}</forkedProcessTimeoutInSeconds>
+ <systemPropertyVariables>
+ <test.build.data>${test.build.data}/${surefire.forkNumber}</test.build.data>
+ <test.build.dir>${test.build.dir}/${surefire.forkNumber}</test.build.dir>
+ <hadoop.tmp.dir>${hadoop.tmp.dir}/${surefire.forkNumber}</hadoop.tmp.dir>
+ <test.unique.fork.id>fork-${surefire.forkNumber}</test.unique.fork.id>
+ <fs.azure.scale.test.enabled>${fs.azure.scale.test.enabled}</fs.azure.scale.test.enabled>
+ <fs.azure.scale.test.huge.filesize>${fs.azure.scale.test.huge.filesize}</fs.azure.scale.test.huge.filesize>
+                <fs.azure.scale.test.huge.partitionsize>${fs.azure.scale.test.huge.partitionsize}</fs.azure.scale.test.huge.partitionsize>
+ <fs.azure.scale.test.timeout>${fs.azure.scale.test.timeout}</fs.azure.scale.test.timeout>
+ </systemPropertyVariables>
+ <includes>
+ <include>**/Test*.java</include>
+ </includes>
+ <excludes>
+ <exclude>**/TestRollingWindowAverage*.java</exclude>
+ </excludes>
+ </configuration>
+ </execution>
+ <execution>
+ <id>serialized-test</id>
+ <goals>
+ <goal>test</goal>
+ </goals>
+ <configuration>
+ <forkCount>1</forkCount>
+ <reuseForks>false</reuseForks>
+ <argLine>${maven-surefire-plugin.argLine} -DminiClusterDedicatedDirs=true</argLine>
+ <forkedProcessTimeoutInSeconds>${fs.azure.scale.test.timeout}</forkedProcessTimeoutInSeconds>
+ <systemPropertyVariables>
+ <test.build.data>${test.build.data}/${surefire.forkNumber}</test.build.data>
+ <test.build.dir>${test.build.dir}/${surefire.forkNumber}</test.build.dir>
+ <hadoop.tmp.dir>${hadoop.tmp.dir}/${surefire.forkNumber}</hadoop.tmp.dir>
+ <test.unique.fork.id>fork-${surefire.forkNumber}</test.unique.fork.id>
+ <fs.azure.scale.test.enabled>${fs.azure.scale.test.enabled}</fs.azure.scale.test.enabled>
+ <fs.azure.scale.test.huge.filesize>${fs.azure.scale.test.huge.filesize}</fs.azure.scale.test.huge.filesize>
+                <fs.azure.scale.test.huge.partitionsize>${fs.azure.scale.test.huge.partitionsize}</fs.azure.scale.test.huge.partitionsize>
+ <fs.azure.scale.test.timeout>${fs.azure.scale.test.timeout}</fs.azure.scale.test.timeout>
+ </systemPropertyVariables>
+ <includes>
+ <include>**/TestRollingWindowAverage*.java</include>
+ </includes>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-failsafe-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>default-integration-test</id>
+ <goals>
+ <goal>integration-test</goal>
+ <goal>verify</goal>
+ </goals>
+ <configuration>
+ <forkCount>${testsThreadCount}</forkCount>
+ <reuseForks>false</reuseForks>
+ <argLine>${maven-surefire-plugin.argLine} -DminiClusterDedicatedDirs=true</argLine>
+ <forkedProcessTimeoutInSeconds>${fs.azure.scale.test.timeout}</forkedProcessTimeoutInSeconds>
+ <systemPropertyVariables>
+ <!-- Tell tests that they are being executed in parallel -->
+ <test.parallel.execution>true</test.parallel.execution>
+ <test.build.data>${test.build.data}/${surefire.forkNumber}</test.build.data>
+ <test.build.dir>${test.build.dir}/${surefire.forkNumber}</test.build.dir>
+ <hadoop.tmp.dir>${hadoop.tmp.dir}/${surefire.forkNumber}</hadoop.tmp.dir>
+
+ <!-- Due to a Maven quirk, setting this to just -->
+ <!-- surefire.forkNumber won't do the parameter -->
+ <!-- substitution. Putting a prefix in front of it like -->
+ <!-- "fork-" makes it work. -->
+ <test.unique.fork.id>fork-${surefire.forkNumber}</test.unique.fork.id>
+ <!-- Propagate scale parameters -->
+ <fs.azure.scale.test.enabled>${fs.azure.scale.test.enabled}</fs.azure.scale.test.enabled>
+ <fs.azure.scale.test.huge.filesize>${fs.azure.scale.test.huge.filesize}</fs.azure.scale.test.huge.filesize>
+ <fs.azure.scale.test.huge.partitionsize>${fs.azure.scale.test.huge.partitionsize}</fs.azure.scale.test.huge.partitionsize>
+ <fs.azure.scale.test.timeout>${fs.azure.scale.test.timeout}</fs.azure.scale.test.timeout>
+ </systemPropertyVariables>
+ <!-- Some tests cannot run in parallel. Tests that cover -->
+ <!-- access to the root directory must run in isolation -->
+ <!-- from anything else that could modify the container. -->
+ <!-- Azure tests that cover multi-part upload must run in -->
+ <!-- isolation, because the file system is configured to -->
+ <!-- purge existing multi-part upload data on -->
+ <!-- initialization. MiniYARNCluster has not yet been -->
+ <!-- changed to handle parallel test execution gracefully. -->
+ <!-- Exclude all of these tests from parallel execution, -->
+ <!-- and instead run them sequentially in a separate -->
+ <!-- Failsafe execution step later. -->
+ <includes>
+ <include>**/ITest*.java</include>
+ </includes>
+ <excludes>
+ <exclude>**/ITestFileSystemOperationsExceptionHandlingMultiThreaded.java</exclude>
+ <exclude>**/ITestFileSystemOperationsWithThreads.java</exclude>
+ <exclude>**/ITestOutOfBandAzureBlobOperationsLive.java</exclude>
+ <exclude>**/ITestNativeAzureFileSystemAuthorizationWithOwner.java</exclude>
+ <exclude>**/ITestNativeAzureFileSystemConcurrencyLive.java</exclude>
+ <exclude>**/ITestNativeAzureFileSystemLive.java</exclude>
+ <exclude>**/ITestNativeAzureFSPageBlobLive.java</exclude>
+ <exclude>**/ITestWasbRemoteCallHelper.java</exclude>
+ <exclude>**/ITestBlockBlobInputStream.java</exclude>
+ </excludes>
+ </configuration>
+ </execution>
+ <!-- Do a sequential run for tests that cannot handle -->
+ <!-- parallel execution. -->
+ <execution>
+ <id>sequential-integration-tests</id>
+ <goals>
+ <goal>integration-test</goal>
+ <goal>verify</goal>
+ </goals>
+ <configuration>
+ <forkedProcessTimeoutInSeconds>${fs.azure.scale.test.timeout}</forkedProcessTimeoutInSeconds>
+ <systemPropertyVariables>
+ <test.parallel.execution>false</test.parallel.execution>
+ <fs.azure.scale.test.enabled>${fs.azure.scale.test.enabled}</fs.azure.scale.test.enabled>
+ <fs.azure.scale.test.huge.filesize>${fs.azure.scale.test.huge.filesize}</fs.azure.scale.test.huge.filesize>
+ <fs.azure.scale.test.huge.partitionsize>${fs.azure.scale.test.huge.partitionsize}</fs.azure.scale.test.huge.partitionsize>
+ <fs.azure.scale.test.timeout>${fs.azure.scale.test.timeout}</fs.azure.scale.test.timeout>
+ </systemPropertyVariables>
+ <includes>
+ <include>**/ITestFileSystemOperationsExceptionHandlingMultiThreaded.java</include>
+ <include>**/ITestFileSystemOperationsWithThreads.java</include>
+ <include>**/ITestOutOfBandAzureBlobOperationsLive.java</include>
+ <include>**/ITestNativeAzureFileSystemAuthorizationWithOwner.java</include>
+ <include>**/ITestNativeAzureFileSystemConcurrencyLive.java</include>
+ <include>**/ITestNativeAzureFileSystemLive.java</include>
+ <include>**/ITestWasbRemoteCallHelper.java</include>
+ <include>**/ITestBlockBlobInputStream.java</include>
+ </includes>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+ </profile>
+ <profile>
+ <id>sequential-tests</id>
+ <activation>
+ <property>
+ <name>!parallel-tests</name>
+ </property>
+ </activation>
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-failsafe-plugin</artifactId>
+ <executions>
+ <execution>
+ <goals>
+ <goal>integration-test</goal>
+ <goal>verify</goal>
+ </goals>
+ <configuration>
+ <systemPropertyVariables>
+ <!-- Propagate scale parameters -->
+ <fs.azure.scale.test.enabled>${fs.azure.scale.test.enabled}</fs.azure.scale.test.enabled>
+ <fs.azure.scale.test.huge.filesize>${fs.azure.scale.test.huge.filesize}</fs.azure.scale.test.huge.filesize>
+ <fs.azure.scale.test.timeout>${fs.azure.scale.test.timeout}</fs.azure.scale.test.timeout>
+ </systemPropertyVariables>
+ <forkedProcessTimeoutInSeconds>${fs.azure.scale.test.timeout}</forkedProcessTimeoutInSeconds>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+ </profile>
+
+ <!-- Turn on scale tests-->
+ <profile>
+ <id>scale</id>
+ <activation>
+ <property>
+ <name>scale</name>
+ </property>
+ </activation>
+ <properties>
+ <fs.azure.scale.test.enabled>true</fs.azure.scale.test.enabled>
+ </properties>
+ </profile>
+ </profiles>
</project>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
index 639862f..f1031b4 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
@@ -346,7 +346,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
private String delegationToken;
/** The error message template when container is not accessible. */
- static final String NO_ACCESS_TO_CONTAINER_MSG = "No credentials found for "
+ public static final String NO_ACCESS_TO_CONTAINER_MSG = "No credentials found for "
+ "account %s in the configuration, and its container %s is not "
+ "accessible using anonymous credentials. Please check if the container "
+ "exists first. If it is not publicly available, you have to provide "
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/site/markdown/index.md
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/site/markdown/index.md b/hadoop-tools/hadoop-azure/src/site/markdown/index.md
index 466bf0b..876d7cc 100644
--- a/hadoop-tools/hadoop-azure/src/site/markdown/index.md
+++ b/hadoop-tools/hadoop-azure/src/site/markdown/index.md
@@ -519,96 +519,8 @@ The maximum number of entries that that cache can hold can be customized using t
<value>true</value>
</property>
```
-## Testing the hadoop-azure Module
-The hadoop-azure module includes a full suite of unit tests. Most of the tests
-will run without additional configuration by running `mvn test`. This includes
-tests against mocked storage, which is an in-memory emulation of Azure Storage.
-
-A selection of tests can run against the
-[Azure Storage Emulator](http://msdn.microsoft.com/en-us/library/azure/hh403989.aspx)
-which is a high-fidelity emulation of live Azure Storage. The emulator is
-sufficient for high-confidence testing. The emulator is a Windows executable
-that runs on a local machine.
-
-To use the emulator, install Azure SDK 2.3 and start the storage emulator. Then,
-edit `src/test/resources/azure-test.xml` and add the following property:
-
-```xml
-<property>
- <name>fs.azure.test.emulator</name>
- <value>true</value>
-</property>
-```
-
-There is a known issue when running tests with the emulator. You may see the
-following failure message:
-
- com.microsoft.windowsazure.storage.StorageException: The value for one of the HTTP headers is not in the correct format.
-
-To resolve this, restart the Azure Emulator. Ensure it v3.2 or later.
-
-It's also possible to run tests against a live Azure Storage account by saving a
-file to `src/test/resources/azure-auth-keys.xml` and setting
-the name of the storage account and its access key.
-
-For example:
-
-```xml
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<configuration>
- <property>
- <name>fs.azure.test.account.name</name>
- <value>{ACCOUNTNAME}.blob.core.windows.net</value>
- </property>
- <property>
- <name>fs.azure.account.key.{ACCOUNTNAME}.blob.core.windows.net</name>
- <value>{ACCOUNT ACCESS KEY}</value>
- </property>
-</configuration>
-```
-
-To run contract tests, set the WASB file system URI in `src/test/resources/azure-auth-keys.xml`
-and the account access key. For example:
-
-```xml
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<configuration>
- <property>
- <name>fs.contract.test.fs.wasb</name>
- <value>wasb://{CONTAINERNAME}@{ACCOUNTNAME}.blob.core.windows.net</value>
- <description>The name of the azure file system for testing.</description>
- </property>
- <property>
- <name>fs.azure.account.key.{ACCOUNTNAME}.blob.core.windows.net</name>
- <value>{ACCOUNT ACCESS KEY}</value>
- </property>
-</configuration>
-```
-
-Overall, to run all the tests using `mvn test`, a sample `azure-auth-keys.xml` is like following:
-
-```xml
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<configuration>
- <property>
- <name>fs.azure.test.account.name</name>
- <value>{ACCOUNTNAME}.blob.core.windows.net</value>
- </property>
- <property>
- <name>fs.azure.account.key.{ACCOUNTNAME}.blob.core.windows.net</name>
- <value>{ACCOUNT ACCESS KEY}</value>
- </property>
- <property>
- <name>fs.contract.test.fs.wasb</name>
- <value>wasb://{CONTAINERNAME}@{ACCOUNTNAME}.blob.core.windows.net</value>
- </property>
-</configuration>
-```
-
-DO NOT ADD `azure-auth-keys.xml` TO REVISION CONTROL. The keys to your Azure
-Storage account are a secret and must not be shared.
+## Further Reading
+* [Testing the Azure WASB client](testing_azure.html).
+* MSDN article, [Understanding Block Blobs, Append Blobs, and Page Blobs](https://docs.microsoft.com/en-us/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs)
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/site/markdown/testing_azure.md
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/site/markdown/testing_azure.md b/hadoop-tools/hadoop-azure/src/site/markdown/testing_azure.md
new file mode 100644
index 0000000..b58e68b
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/site/markdown/testing_azure.md
@@ -0,0 +1,576 @@
+<!---
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+
+# Testing the Azure WASB client
+
+<!-- MACRO{toc|fromDepth=0|toDepth=5} -->
+
+This module includes both unit tests, which can run in isolation without
+connecting to the Azure Storage service, and integration tests, which require a working
+connection to interact with a container. Unit test suites follow the naming
+convention `Test*.java`. Integration tests follow the naming convention
+`ITest*.java`.
+
+## Policy for submitting patches which affect the `hadoop-azure` module.
+
+The Apache Jenkins infrastructure does not run any cloud integration tests,
+due to the need to keep credentials secure.
+
+### The submitter of any patch is required to run all the integration tests and declare which Azure region they used.
+
+This is important: **patches which do not include this declaration will be ignored**
+
+This policy has proven to be the only mechanism to guarantee full regression
+testing of code changes. Why the declaration of region? Two reasons:
+
+1. It helps us identify regressions which only surface against specific endpoints.
+1. It forces submitters to be more honest about their testing. It's easy
+to lie: "yes, I tested this". To say "yes, I tested this against Azure US-west"
+is a more specific lie and harder to make. And, if you get caught out, you
+lose all credibility with the project.
+
+You don't need to test from a VM within the Azure infrastructure, all you need
+are credentials.
+
+It's neither hard nor expensive to run the tests; if you can't,
+there's no guarantee your patch works. The reviewers have enough to do, and
+don't have the time to run these tests themselves, especially as every failure
+simply makes for slow iterative development.
+
+Please: run the tests. And if you don't, we are sorry for declining your
+patch, but we have to.
+
+
+### What if there's an intermittent failure of a test?
+
+Some of the tests do fail intermittently, especially in parallel runs.
+If this happens, try to run the test on its own to see if the test succeeds.
+
+If it still fails, include this fact in your declaration. We know some tests
+are intermittently unreliable.
+
+### What if the tests are timing out or failing over my network connection?
+
+The tests are designed to be configurable for different
+timeouts. If you are seeing problems and this configuration isn't working,
+that's a sign that the configuration mechanism isn't complete. If it's happening
+in the production code, that could be a sign of a problem which may surface
+over long-haul connections. Please help us identify and fix these problems,
+especially as you are the one best placed to verify the fixes work.
+
+## Setting up the tests
+
+### Testing the `hadoop-azure` Module
+
+The `hadoop-azure` module includes a full suite of unit tests. Many of the tests
+will run without additional configuration by running `mvn test`. This includes
+tests against mocked storage, which is an in-memory emulation of Azure Storage.
+
+The integration tests are designed to test directly against an Azure storage
+service, and require an account and credentials in order to run.
+
+This is done by creating the file `src/test/resources/azure-auth-keys.xml`
+and setting the name of the storage account and its access key.
+
+For example:
+
+```xml
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<configuration>
+ <property>
+ <name>fs.azure.test.account.name</name>
+ <value>{ACCOUNTNAME}.blob.core.windows.net</value>
+ </property>
+ <property>
+ <name>fs.azure.account.key.{ACCOUNTNAME}.blob.core.windows.net</name>
+ <value>{ACCOUNT ACCESS KEY}</value>
+ </property>
+</configuration>
+```
+
+To run contract tests, set the WASB file system URI in `src/test/resources/azure-auth-keys.xml`
+and the account access key. For example:
+
+```xml
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<configuration>
+ <property>
+ <name>fs.contract.test.fs.wasb</name>
+ <value>wasb://{CONTAINERNAME}@{ACCOUNTNAME}.blob.core.windows.net</value>
+ <description>The name of the azure file system for testing.</description>
+ </property>
+ <property>
+ <name>fs.azure.account.key.{ACCOUNTNAME}.blob.core.windows.net</name>
+ <value>{ACCOUNT ACCESS KEY}</value>
+ </property>
+</configuration>
+```
+
+Overall, to run all the tests using `mvn test`, a sample `azure-auth-keys.xml` looks like the following:
+
+```xml
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<configuration>
+ <property>
+ <name>fs.azure.test.account.name</name>
+ <value>{ACCOUNTNAME}.blob.core.windows.net</value>
+ </property>
+ <property>
+ <name>fs.azure.account.key.{ACCOUNTNAME}.blob.core.windows.net</name>
+ <value>{ACCOUNT ACCESS KEY}</value>
+ </property>
+ <property>
+ <name>fs.contract.test.fs.wasb</name>
+ <value>wasb://{CONTAINERNAME}@{ACCOUNTNAME}.blob.core.windows.net</value>
+ </property>
+</configuration>
+```
+
+DO NOT ADD `azure-auth-keys.xml` TO REVISION CONTROL. The keys to your Azure
+Storage account are a secret and must not be shared.
+
+
+## Running the Tests
+
+After completing the configuration, execute the test run through Maven.
+
+```bash
+mvn -T 1C clean verify
+```
+
+It's also possible to execute multiple test suites in parallel by passing the
+`parallel-tests` property on the command line. The tests spend most of their
+time blocked on network I/O, so running in parallel tends to
+complete full test runs faster.
+
+```bash
+mvn -T 1C -Dparallel-tests clean verify
+```
+
+Some tests must run with exclusive access to the storage container, so even with the
+`parallel-tests` property, several test suites will run in serial in a separate
+Maven execution step after the parallel tests.
+
+By default, `parallel-tests` runs 4 test suites concurrently. This can be tuned
+by passing the `testsThreadCount` property.
+
+```bash
+mvn -T 1C -Dparallel-tests -DtestsThreadCount=8 clean verify
+```
+
+To run just the unit tests, which do not require Azure connectivity or credentials,
+use any of the above invocations, but switch the goal to `test` instead of
+`verify`:
+
+```bash
+mvn -T 1C clean test
+
+mvn -T 1C -Dparallel-tests clean test
+
+mvn -T 1C -Dparallel-tests -DtestsThreadCount=8 clean test
+```
+
+To run only a specific named subset of tests, pass the `test` property for unit
+tests or the `it.test` property for integration tests.
+
+```bash
+mvn -T 1C clean test -Dtest=TestRollingWindowAverage
+
+mvn -T 1C clean verify -Dscale -Dit.test=ITestFileSystemOperationExceptionMessage -Dtest=none
+
+mvn -T 1C clean verify -Dtest=none -Dit.test=ITest*
+
+```
+
+Note
+
+1. When running a specific subset of tests, the patterns passed in `test`
+and `it.test` override the configuration of which tests need to run in isolation
+in a separate serial phase (mentioned above). This can cause unpredictable
+results, so the recommendation is to avoid passing `parallel-tests` in
+combination with `test` or `it.test`. If you know that you are specifying only
+tests that can run safely in parallel, then it will work. For wide patterns,
+like `ITest*` shown above, it may cause unpredictable test failures.
+
+2. The command line shell may try to expand the "*" and sometimes the "#" symbols
+in test patterns. In such situations, escape the character with a "\\" prefix.
+Example:
+
+ mvn -T 1C clean verify -Dtest=none -Dit.test=ITest\*
+
+
+## Viewing the results
+
+Integration test results and logs are stored in `target/failsafe-reports/`.
+An HTML report can be generated during site generation, or with the `surefire-report`
+plugin:
+
+```bash
+
+# for the unit tests
+mvn -T 1C surefire-report:report-only
+
+# for the integration tests
+mvn -T 1C surefire-report:failsafe-report-only
+
+# all reports for this module
+mvn -T 1C site:site
+```
+
+## Scale Tests
+
+There is a set of tests designed to measure the scalability and performance
+at scale of the filesystem client: the *Scale Tests*. Tests include creating
+and traversing directory trees, uploading large files, renaming them,
+deleting them, seeking through the files, performing random IO, and others.
+This makes them a foundational part of the benchmarking.
+
+By their very nature they are slow. And, as their execution time is often
+limited by bandwidth between the computer running the tests and the Azure endpoint,
+parallel execution does not speed these tests up.
+
+### Enabling the Scale Tests
+
+The tests are enabled if the `scale` property is set in the maven build;
+this can be done regardless of whether or not the parallel test profile
+is used:
+
+```bash
+mvn -T 1C verify -Dscale
+
+mvn -T 1C verify -Dparallel-tests -Dscale -DtestsThreadCount=8
+```
+
+The most bandwidth-intensive tests (those which upload data) always run
+sequentially; those which are slow due to HTTPS setup costs or server-side
+actions are included in the set of parallelized tests.
+
+
+### Scale test tuning options
+
+
+Some of the tests can be tuned from the maven build or from the
+configuration file used to run the tests.
+
+```bash
+mvn -T 1C verify -Dparallel-tests -Dscale -DtestsThreadCount=8 -Dfs.azure.scale.test.huge.filesize=128M
+```
+
+The algorithm is:
+
+1. The value is queried from the configuration file, using a default value if
+it is not set.
+1. The value is queried from the JVM System Properties, where it is passed
+down by maven.
+1. If the system property is null, an empty string, or it has the value `unset`,
+then the configuration value is used. The `unset` option is used to
+[work round a quirk in maven property propagation](http://stackoverflow.com/questions/7773134/null-versus-empty-arguments-in-maven).
+
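+As an illustration, the resolution order can be expressed in a few lines of
+test-side code (a minimal sketch; `TestPropertyResolution` and
+`getTestProperty` are hypothetical names, not the module's actual helpers):
+
+```java
+import org.apache.hadoop.conf.Configuration;
+
+public final class TestPropertyResolution {
+
+  /**
+   * Resolve a test property per the steps above: configuration first,
+   * then the JVM system property, with null/empty/"unset" system
+   * properties falling back to the configuration value.
+   */
+  public static String getTestProperty(Configuration conf,
+      String key, String defaultValue) {
+    String confValue = conf.get(key, defaultValue);
+    String sysprop = System.getProperty(key);
+    return (sysprop == null || sysprop.isEmpty() || "unset".equals(sysprop))
+        ? confValue
+        : sysprop;
+  }
+}
+```
+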
+Only a few properties can be set this way; more will be added.
+
+| Property | Meaning |
+|-----------|-------------|
+| `fs.azure.scale.test.huge.filesize`| Size for huge file uploads |
+| `fs.azure.scale.test.huge.partitionsize`| Size for partitions in huge file uploads |
+
+The file and partition sizes are numeric values with a k/m/g/t/p suffix depending
+on the desired size. For example: 128M, 128m, 2G, 2g, 4T or even 1P.
+
+#### Scale test configuration options
+
+Some scale tests perform multiple operations (such as creating many directories).
+
+The exact number of operations to perform is configurable in the option
+`scale.test.operation.count`:
+
+```xml
+<property>
+ <name>scale.test.operation.count</name>
+ <value>10</value>
+</property>
+```
+
+Larger values generate more load, and are recommended when testing locally
+or in batch runs.
+
+Smaller values result in faster test runs, especially when the object
+store is a long way away.
+
+Operations which work on directories have a separate option: this controls
+the width and depth of tests creating recursive directories. Larger
+values create exponentially more directories, with consequent performance
+impact.
+
+```xml
+<property>
+ <name>scale.test.directory.count</name>
+ <value>2</value>
+</property>
+```
+
+DistCp tests targeting Azure support a configurable file size. The default is
+10 MB, but the configuration value is expressed in KB so that it can be tuned
+smaller to achieve faster test runs.
+
+```xml
+<property>
+ <name>scale.test.distcp.file.size.kb</name>
+ <value>10240</value>
+</property>
+```
+
+Azure-specific scale test properties are:
+
+##### `fs.azure.scale.test.huge.filesize`: size in MB for "Huge file tests".
+
+The Huge File tests validate Azure storage's ability to handle large files; the
+property `fs.azure.scale.test.huge.filesize` declares the file size to use.
+
+```xml
+<property>
+ <name>fs.azure.scale.test.huge.filesize</name>
+ <value>200M</value>
+</property>
+```
+
+Tests at this scale are slow: they are best executed from hosts running in
+the cloud infrastructure where the storage endpoint is based.
+
+## Using the emulator
+
+A selection of tests can run against the
+[Azure Storage Emulator](http://msdn.microsoft.com/en-us/library/azure/hh403989.aspx)
+which is a high-fidelity emulation of live Azure Storage. The emulator is
+sufficient for high-confidence testing. The emulator is a Windows executable
+that runs on a local machine.
+
+To use the emulator, install Azure SDK 2.3 and start the storage emulator. Then,
+edit `src/test/resources/azure-test.xml` and add the following property:
+
+```xml
+<property>
+ <name>fs.azure.test.emulator</name>
+ <value>true</value>
+</property>
+```
+
+There is a known issue when running tests with the emulator. You may see the
+following failure message:
+
+ com.microsoft.windowsazure.storage.StorageException: The value for one of the HTTP headers is not in the correct format.
+
+To resolve this, restart the Azure Emulator. Ensure it is v3.2 or later.
+
+
+## Debugging Test failures
+
+Logging at debug level is the standard way to obtain more diagnostics output;
+after setting this, rerun the tests:
+
+```properties
+log4j.logger.org.apache.hadoop.fs.azure=DEBUG
+```
+
+## Adding new tests
+
+New tests are always welcome. Bear in mind that we need to keep costs
+and test time down, which is done by:
+
+* Not duplicating tests.
+* Being efficient in your use of Hadoop API calls.
+* Isolating large/slow tests into the "scale" test group.
+* Designing all tests to execute in parallel (where possible).
+* Adding new probes and predicates into existing tests, albeit carefully.
+
+*No duplication*: if an operation is tested elsewhere, don't repeat it. This
+applies as much for metadata operations as it does for bulk IO. If a new
+test case is added which completely obsoletes an existing test, it is OK
+to cut the previous one, after showing that coverage is not worsened.
+
+*Efficient*: prefer a single `getFileStatus()` call and examining the results,
+rather than separate calls to `exists()`, `isFile()`, etc., as shown below.
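+
+A single `getFileStatus()` call covers both the existence probe and the type
+check in one round trip (a sketch; the test body is illustrative):
+
+```java
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.junit.Test;
+
+// getFileStatus() throws FileNotFoundException if the path is absent,
+// so one remote call replaces exists() + isFile().
+@Test
+public void testIsAFile() throws Exception {
+  Path path = methodPath();
+  getFileSystem().create(path).close();
+  FileStatus status = getFileSystem().getFileStatus(path);
+  assertTrue("not a file: " + status, status.isFile());
+}
+```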
+
+*Fail with useful information:* provide as much diagnostics as possible
+on a failure. Using `org.apache.hadoop.fs.contract.ContractTestUtils` to make
+assertions about the state of a filesystem helps here.
+
+*Isolating Scale tests*. Any test doing large amounts of IO MUST extend the
+class `AbstractAzureScaleTest`, so only running if `scale` is defined on a build,
+supporting test timeouts configurable by the user. Scale tests should also
+support configurability as to the actual size of objects/number of operations,
+so that behavior at different scale can be verified.
+
+*Designed for parallel execution*. A key need here is for each test suite to work
+on isolated parts of the filesystem. Subclasses of `AbstractWasbTestBase`
+SHOULD use the `path()`, `methodPath()` and `blobPath()` methods
+to build isolated paths. Tests MUST NOT assume that they have exclusive access
+to a container.
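+
+A sketch of an isolated test case (the rename operation is illustrative):
+
+```java
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.junit.Test;
+
+// All paths derive from methodPath(), which is unique to (fork, method)
+// during parallel runs, so concurrent suites cannot collide.
+@Test
+public void testRenameFile() throws Exception {
+  Path source = new Path(methodPath(), "source");
+  Path dest = new Path(methodPath(), "dest");
+  ContractTestUtils.touch(getFileSystem(), source);
+  assertTrue("rename failed", getFileSystem().rename(source, dest));
+}
+```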
+
+*Extending existing tests where appropriate*. This recommendation goes
+against normal testing best practice of "test one thing per method".
+Because it is so slow to create directory trees or upload large files, we do
+not have that luxury. All the tests against real endpoints are integration
+tests where sharing test setup and teardown saves time and money.
+
+A standard way to do this is to extend existing tests with some extra predicates,
+rather than write new tests. When doing this, make sure that the new predicates
+fail with meaningful diagnostics, so any new problems can be easily debugged
+from test logs.
+
+
+### Requirements of new Tests
+
+
+This is what we expect from new tests; they're an extension of the normal
+Hadoop requirements, based on the need to work with remote servers whose
+use requires the presence of secret credentials, where tests may be slow,
+and where finding out why something failed from nothing but the test output
+is critical.
+
+#### Subclass the Existing Shared Base Classes
+
+There are a set of base classes which should be extended for Azure tests and
+integration tests.
+
+##### `org.apache.hadoop.fs.azure.AbstractWasbTestWithTimeout`
+
+This extends the junit `Assert` class with thread names and timeouts,
+the default timeout being set in `AzureTestConstants.AZURE_TEST_TIMEOUT` to
+ten minutes. The thread names are set to aid analyzing the stack trace of
+a test: a `jstack` call can be used to see which test case is active.
+
+##### `org.apache.hadoop.fs.azure.AbstractWasbTestBase`
+
+The base class for tests which use `AzureBlobStorageTestAccount` to create
+mock or live Azure clients; in test teardown it tries to clean up store state.
+
+1. This class requires subclasses to implement `createTestAccount()` to create
+a mock or real test account.
+
+1. The configuration used to create a test account *should* be that from
+`createConfiguration()`; this can be extended in subclasses to tune the settings.
+
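+A minimal subclass might look like this (a sketch; the mock account keeps the
+example runnable without credentials):
+
+```java
+import org.apache.hadoop.conf.Configuration;
+
+public class TestExampleWasbOperations extends AbstractWasbTestBase {
+
+  @Override
+  protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+    // Bind to the in-memory mock store; a live test would use a real account.
+    return AzureBlobStorageTestAccount.createMock();
+  }
+
+  @Override
+  public Configuration createConfiguration() {
+    Configuration conf = super.createConfiguration();
+    // Tune test-specific settings here before the account is created.
+    return conf;
+  }
+}
+```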
+
+##### `org.apache.hadoop.fs.azure.integration.AbstractAzureScaleTest`
+
+This extends `AbstractWasbTestBase` for scale tests; those test which
+only run when `-Dscale` is used to select the "scale" profile.
+These tests have a timeout of 30 minutes, so as to support slow test runs.
+
+Having shared base classes helps reduce future maintenance. Please
+use them.
+
+#### Secure
+
+Don't ever log credentials. The credential tests go out of their way to
+not provide meaningful logs or assertion messages precisely to avoid this.
+
+#### Efficient in Time and Money
+
+This means efficient in test setup/teardown, and, ideally, making use of
+existing public datasets to save setup time and tester cost.
+
+
+The reference example is `ITestAzureHugeFiles`: this marks the test suite as
+`@FixMethodOrder(MethodSorters.NAME_ASCENDING)`, then orders the test cases such
+that each test case expects the previous test to have completed (here: uploaded a file,
+renamed a file, ...). This provides for independent tests in the reports, yet still
+permits an ordered sequence of operations. Do note the use of JUnit `Assume` checks
+to detect when the preconditions for a single test case are not met; hence
+the tests become skipped, rather than failing with a trace which is really a false alarm.
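+
+A sketch of the pattern (the class name and operations here are illustrative,
+not part of the module):
+
+```java
+import org.junit.Assume;
+import org.junit.FixMethodOrder;
+import org.junit.Test;
+import org.junit.runners.MethodSorters;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+
+// Test cases execute in name order; each stage assumes its predecessor
+// completed, skipping (rather than failing) when it did not.
+@FixMethodOrder(MethodSorters.NAME_ASCENDING)
+public class ITestExampleOrderedSuite extends AbstractWasbTestBase {
+
+  private static Path uploaded;
+
+  @Override
+  protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+    return AzureBlobStorageTestAccount.create();
+  }
+
+  @Test
+  public void test_010_upload() throws Exception {
+    uploaded = methodPath();
+    ContractTestUtils.touch(getFileSystem(), uploaded);
+  }
+
+  @Test
+  public void test_020_rename() throws Exception {
+    // Skip, not fail, if the upload stage did not complete.
+    Assume.assumeNotNull(uploaded);
+    Path renamed = path("renamed");
+    assertTrue("rename failed", getFileSystem().rename(uploaded, renamed));
+  }
+}
+```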
+
+
+### Works Over Long-haul Links
+
+As well as making file size and operation counts scalable, this includes
+making test timeouts adequate. The Scale tests make this configurable; the
+default of ten minutes is set in `AbstractAzureIntegrationTest()`; subclasses can
+change this by overriding `getTestTimeoutMillis()`, as sketched below.
+
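+Subclasses can extend the budget for slow or long-haul runs (a sketch; the
+value is illustrative):
+
+```java
+@Override
+protected int getTestTimeoutMillis() {
+  // Thirty minutes rather than the default ten, for long-haul runs.
+  return 30 * 60 * 1000;
+}
+```
+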
+Equally importantly: support proxies, as some testers need them.
+
+
+### Provides Diagnostics and timing information
+
+1. Create logs, log things.
+1. You can use `AbstractWasbTestBase.describe(format-string, args)` here; it
+adds some newlines so as to be easier to spot.
+1. Use `ContractTestUtils.NanoTimer` to measure the duration of operations,
+and log the output, as in the fragment below.
+
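+For example (a fragment; the path and operation are illustrative):
+
+```java
+// describe() marks the phase in the logs; NanoTimer measures and logs
+// the duration of the operation under test.
+describe("creating %s", path);
+ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer();
+getFileSystem().create(path).close();
+timer.end("time to create %s", path);
+```
+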
+#### Fails Meaningfully
+
+The `ContractTestUtils` class contains a whole set of assertions for making
+statements about the expected state of a filesystem, e.g.
+`assertPathExists(FS, path)`, `assertPathDoesNotExist(FS, path)`, and others.
+These do their best to provide meaningful diagnostics on failures (e.g. directory
+listings, file status, ...), so help make failures easier to understand.
+
+At the very least, *do not use `assertTrue()` or `assertFalse()` without
+including error messages*.
+
+
+### Cleans Up Afterwards
+
+Keeps costs down.
+
+1. Do not clean up only when a test case completes successfully; test suite
+teardown must do it.
+1. That teardown code must check for the filesystem and other fields being
+null before the cleanup. Why? If test setup fails, the teardown methods still
+get called; see the sketch after this list.
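+
+A sketch of a safe teardown (the deleted path is illustrative):
+
+```java
+@Override
+public void tearDown() throws Exception {
+  // Teardown runs even when setUp() failed, so fields may still be null.
+  if (fs != null) {
+    fs.delete(methodPath(), true);
+  }
+  super.tearDown();
+}
```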
+
+### Works Reliably
+
+We really appreciate this — you will too.
+
+
+## Tips
+
+### How to keep your credentials really safe
+
+Although the `auth-keys.xml` file is marked as ignored in git and subversion,
+it is still in your source tree, and there's always the risk that it may
+creep out.
+
+You can avoid this by keeping your keys outside the source tree and
+using an absolute XInclude reference to it.
+
+```xml
+<configuration>
+
+ <include xmlns="http://www.w3.org/2001/XInclude"
+ href="file:///users/qe/.auth-keys.xml" />
+
+</configuration>
+```
+
+### Cleaning up Containers
+
+The Azure tests create containers with the prefix `"wasbtests-"` and delete
+them after the test runs. If a test run is interrupted, these containers
+may not get deleted. There is a special test case which can be manually invoked
+to list and delete these: `CleanupTestContainers`.
+
+```bash
+mvn test -Dtest=CleanupTestContainers
+```
+
+This will delete the containers; the output log of the test run will
+provide the details and summary of the operation.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AbstractWasbTestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AbstractWasbTestBase.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AbstractWasbTestBase.java
index d04a19c..0d3a06c 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AbstractWasbTestBase.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AbstractWasbTestBase.java
@@ -18,15 +18,21 @@
package org.apache.hadoop.fs.azure;
-import static org.junit.Assume.assumeNotNull;
+import java.io.IOException;
-import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.conf.Configuration;
import org.junit.After;
import org.junit.Before;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.azure.integration.AzureTestConstants;
+import org.apache.hadoop.io.IOUtils;
+
+import static org.junit.Assume.assumeNotNull;
+import static org.apache.hadoop.fs.azure.integration.AzureTestUtils.*;
+
/**
* Abstract test class that provides basic setup and teardown of testing Azure
* Storage account. Each subclass defines a different set of test cases to run
@@ -34,41 +40,137 @@ import org.slf4j.LoggerFactory;
* to run those tests. The returned account might integrate with Azure Storage
* directly or it might be a mock implementation.
*/
-public abstract class AbstractWasbTestBase {
+public abstract class AbstractWasbTestBase extends AbstractWasbTestWithTimeout
+ implements AzureTestConstants {
protected static final Logger LOG =
LoggerFactory.getLogger(AbstractWasbTestBase.class);
- @VisibleForTesting
protected NativeAzureFileSystem fs;
- private AzureBlobStorageTestAccount testAccount;
+ protected AzureBlobStorageTestAccount testAccount;
@Before
public void setUp() throws Exception {
- testAccount = createTestAccount();
- if (testAccount != null) {
- fs = testAccount.getFileSystem();
- }
- assumeNotNull(testAccount);
+ AzureBlobStorageTestAccount account = createTestAccount();
+ assumeNotNull(account);
+ bindToTestAccount(account);
}
@After
public void tearDown() throws Exception {
- if (testAccount != null) {
- testAccount.cleanup();
- testAccount = null;
- fs = null;
- }
+ describe("closing test account and filesystem");
+ testAccount = cleanupTestAccount(testAccount);
+ IOUtils.closeStream(fs);
+ fs = null;
}
- public Configuration getConfiguration() {
- return new Configuration();
+ /**
+ * Create the configuration to use when creating a test account.
+ * Subclasses can override this to tune the test account configuration.
+ * @return a configuration.
+ */
+ public Configuration createConfiguration() {
+ return AzureBlobStorageTestAccount.createTestConfiguration();
}
+ /**
+ * Create the test account.
+ * Subclasses must implement this.
+ * @return the test account.
+ * @throws Exception failure to create the account.
+ */
protected abstract AzureBlobStorageTestAccount createTestAccount()
throws Exception;
+ /**
+ * Get the test account.
+ * @return the current test account.
+ */
protected AzureBlobStorageTestAccount getTestAccount() {
return testAccount;
}
+
+ /**
+ * Get the filesystem
+ * @return the current filesystem.
+ */
+ protected NativeAzureFileSystem getFileSystem() {
+ return fs;
+ }
+
+ /**
+ * Get the configuration used to create the filesystem
+ * @return the configuration of the test FS
+ */
+ protected Configuration getConfiguration() {
+ return getFileSystem().getConf();
+ }
+
+ /**
+ * Bind to a new test account, closing any existing one.
+ * This updates the test account returned in {@link #getTestAccount()}
+ * and the filesystem in {@link #getFileSystem()}.
+ * @param account new test account
+ */
+ protected void bindToTestAccount(AzureBlobStorageTestAccount account) {
+ // clean any existing test account
+ cleanupTestAccount(testAccount);
+ IOUtils.closeStream(fs);
+ testAccount = account;
+ if (testAccount != null) {
+ fs = testAccount.getFileSystem();
+ }
+ }
+
+ /**
+ * Return a path to a blob which will be unique for this fork.
+ * @param filepath filepath
+ * @return a path under the default blob directory
+ * @throws IOException
+ */
+ protected Path blobPath(String filepath) throws IOException {
+ return blobPathForTests(getFileSystem(), filepath);
+ }
+
+ /**
+ * Create a path under the test path provided by
+ * the FS contract.
+ * @param filepath path string in
+ * @return a path qualified by the test filesystem
+ * @throws IOException IO problems
+ */
+ protected Path path(String filepath) throws IOException {
+ return pathForTests(getFileSystem(), filepath);
+ }
+
+ /**
+ * Return a path bound to this method name, unique to this fork during
+ * parallel execution.
+ * @return a path unique to this (fork, method) combination.
+ * @throws IOException IO problems
+ */
+ protected Path methodPath() throws IOException {
+ return path(methodName.getMethodName());
+ }
+
+ /**
+ * Return a blob path bound to this method name, unique to this fork during
+ * parallel execution.
+ * @return a blob path unique to this (fork, method) combination.
+ * @throws IOException IO problems
+ */
+ protected Path methodBlobPath() throws IOException {
+ return blobPath(methodName.getMethodName());
+ }
+
+ /**
+ * Describe a test in the logs.
+ * @param text text to print
+ * @param args arguments to format in the printing
+ */
+ protected void describe(String text, Object... args) {
+ LOG.info("\n\n{}: {}\n",
+ methodName.getMethodName(),
+ String.format(text, args));
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AbstractWasbTestWithTimeout.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AbstractWasbTestWithTimeout.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AbstractWasbTestWithTimeout.java
new file mode 100644
index 0000000..b7076a4
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AbstractWasbTestWithTimeout.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.rules.TestName;
+import org.junit.rules.Timeout;
+
+import org.apache.hadoop.fs.azure.integration.AzureTestConstants;
+
+/**
+ * Base class for any Wasb test with timeouts & named threads.
+ * This class does not attempt to bind to Azure.
+ */
+public class AbstractWasbTestWithTimeout extends Assert {
+
+ /**
+ * The name of the current method.
+ */
+ @Rule
+ public TestName methodName = new TestName();
+ /**
+ * Set the timeout for every test.
+ * This is driven by the value returned by {@link #getTestTimeoutMillis()}.
+ */
+ @Rule
+ public Timeout testTimeout = new Timeout(getTestTimeoutMillis());
+
+ /**
+ * Name the junit thread for the class. This will be overridden
+ * before the individual test methods are run.
+ */
+ @BeforeClass
+ public static void nameTestThread() {
+ Thread.currentThread().setName("JUnit");
+ }
+
+ /**
+ * Name the thread to the current test method.
+ */
+ @Before
+ public void nameThread() {
+ Thread.currentThread().setName("JUnit-" + methodName.getMethodName());
+ }
+
+ /**
+ * Override point: the test timeout in milliseconds.
+ * @return a timeout in milliseconds
+ */
+ protected int getTestTimeoutMillis() {
+ return AzureTestConstants.AZURE_TEST_TIMEOUT;
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AzureBlobStorageTestAccount.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AzureBlobStorageTestAccount.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AzureBlobStorageTestAccount.java
index 7fa59ce..5b36c87 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AzureBlobStorageTestAccount.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AzureBlobStorageTestAccount.java
@@ -21,12 +21,15 @@ package org.apache.hadoop.fs.azure;
import com.microsoft.azure.storage.*;
import com.microsoft.azure.storage.blob.*;
import com.microsoft.azure.storage.core.Base64;
-import org.apache.commons.configuration2.SubsetConfiguration;
+import org.junit.Assert;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import org.apache.commons.configuration2.SubsetConfiguration;
+
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.azure.integration.AzureTestConstants;
import org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation;
import org.apache.hadoop.fs.azure.metrics.AzureFileSystemMetricsSystem;
import org.apache.hadoop.metrics2.AbstractMetric;
@@ -35,6 +38,8 @@ import org.apache.hadoop.metrics2.MetricsSink;
import org.apache.hadoop.metrics2.MetricsTag;
import org.apache.hadoop.metrics2.impl.TestMetricsConfig;
+import java.io.File;
+import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.*;
@@ -46,10 +51,10 @@ import static org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.KEY_USE_SECU
/**
* Helper class to create WASB file systems backed by either a mock in-memory
- * implementation or a real Azure Storage account. See RunningLiveWasbTests.txt
- * for instructions on how to connect to a real Azure Storage account.
+ * implementation or a real Azure Storage account.
*/
-public final class AzureBlobStorageTestAccount {
+public final class AzureBlobStorageTestAccount implements AutoCloseable,
+ AzureTestConstants {
private static final Logger LOG = LoggerFactory.getLogger(
AzureBlobStorageTestAccount.class);
@@ -166,6 +171,7 @@ public final class AzureBlobStorageTestAccount {
return new Path("/" + DEFAULT_PAGE_BLOB_DIRECTORY);
}
+ @Deprecated
public static Path pageBlobPath(String fileName) {
return new Path(pageBlobPath(), fileName);
}
@@ -201,6 +207,9 @@ public final class AzureBlobStorageTestAccount {
* @return
*/
private boolean wasGeneratedByMe(MetricsRecord currentRecord) {
+ Assert.assertNotNull("null filesystem", fs);
+ Assert.assertNotNull("null filesystem instance ID",
+ fs.getInstrumentation().getFileSystemInstanceId());
String myFsId = fs.getInstrumentation().getFileSystemInstanceId().toString();
for (MetricsTag currentTag : currentRecord.tags()) {
if (currentTag.name().equalsIgnoreCase("wasbFileSystemId")) {
@@ -247,13 +256,16 @@ public final class AzureBlobStorageTestAccount {
getBlobReference(blobKey).releaseLease(accessCondition);
}
- private static void saveMetricsConfigFile() {
+ private static void saveMetricsConfigFile() throws IOException {
if (!metricsConfigSaved) {
+ String testFilename = TestMetricsConfig.getTestFilename(
+ "hadoop-metrics2-azure-file-system");
+ File dest = new File(testFilename).getCanonicalFile();
+ dest.getParentFile().mkdirs();
new org.apache.hadoop.metrics2.impl.ConfigBuilder()
.add("azure-file-system.sink.azuretestcollector.class",
StandardCollector.class.getName())
- .save(TestMetricsConfig.getTestFilename(
- "hadoop-metrics2-azure-file-system.properties"));
+ .save(testFilename);
metricsConfigSaved = true;
}
}
@@ -314,9 +326,8 @@ public final class AzureBlobStorageTestAccount {
Configuration conf = createTestConfiguration();
if (!conf.getBoolean(USE_EMULATOR_PROPERTY_NAME, false)) {
// Not configured to test against the storage emulator.
- LOG.warn("Skipping emulator Azure test because configuration doesn't "
- + "indicate that it's running. Please see RunningLiveWasbTests.txt "
- + "for guidance.");
+ LOG.warn("Skipping emulator Azure test because configuration "
+ + "doesn't indicate that it's running.");
return null;
}
CloudStorageAccount account =
@@ -482,8 +493,7 @@ public final class AzureBlobStorageTestAccount {
credentials = StorageCredentialsAnonymous.ANONYMOUS;
} else {
LOG.warn("Skipping live Azure test because of missing key for"
- + " account '" + accountName + "'. "
- + "Please see RunningLiveWasbTests.txt for guidance.");
+ + " account '" + accountName + "'.");
return null;
}
} else {
@@ -517,8 +527,7 @@ public final class AzureBlobStorageTestAccount {
throws URISyntaxException, KeyProviderException {
String testAccountName = conf.get(TEST_ACCOUNT_NAME_PROPERTY_NAME);
if (testAccountName == null) {
- LOG.warn("Skipping live Azure test because of missing test account. "
- + "Please see RunningLiveWasbTests.txt for guidance.");
+ LOG.warn("Skipping live Azure test because of missing test account");
return null;
}
return createStorageAccount(testAccountName, conf, false);
@@ -863,6 +872,11 @@ public final class AzureBlobStorageTestAccount {
}
}
+ @Override
+ public void close() throws Exception {
+ cleanup();
+ }
+
public NativeAzureFileSystem getFileSystem() {
return fs;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestAzureConcurrentOutOfBandIo.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestAzureConcurrentOutOfBandIo.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestAzureConcurrentOutOfBandIo.java
new file mode 100644
index 0000000..7e733dc
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestAzureConcurrentOutOfBandIo.java
@@ -0,0 +1,179 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.Arrays;
+
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.fs.azure.integration.AzureTestUtils;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.PermissionStatus;
+
+/**
+ * Handle OOB IO into a shared container.
+ */
+public class ITestAzureConcurrentOutOfBandIo extends AbstractWasbTestBase {
+
+ private static final Logger LOG =
+ LoggerFactory.getLogger(ITestAzureConcurrentOutOfBandIo.class);
+
+ // Class constants.
+ static final int DOWNLOAD_BLOCK_SIZE = 8 * 1024 * 1024;
+ static final int UPLOAD_BLOCK_SIZE = 4 * 1024 * 1024;
+ static final int BLOB_SIZE = 32 * 1024 * 1024;
+
+ // Number of blocks to be written before flush.
+ static final int NUMBER_OF_BLOCKS = 2;
+
+ @Override
+ protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+ return AzureBlobStorageTestAccount.createOutOfBandStore(
+ UPLOAD_BLOCK_SIZE, DOWNLOAD_BLOCK_SIZE);
+ }
+
+ class DataBlockWriter implements Runnable {
+
+ Thread runner;
+ AzureBlobStorageTestAccount writerStorageAccount;
+ String key;
+ boolean done = false;
+
+ /**
+ * Constructor captures the test account.
+ *
+ * @param testAccount
+ */
+ public DataBlockWriter(AzureBlobStorageTestAccount testAccount, String key) {
+ writerStorageAccount = testAccount;
+ this.key = key;
+ }
+
+ /**
+ * Start writing blocks to Azure storage.
+ */
+ public void startWriting() {
+ runner = new Thread(this); // Create the block writer thread.
+ runner.start(); // Start the block writer thread.
+ }
+
+ /**
+ * Stop writing blocks to Azure storage.
+ */
+ public void stopWriting() {
+ done = true;
+ }
+
+ /**
+ * Implementation of the runnable interface. The run method is a tight loop
+ * which repeatedly updates the blob with a 4 MB block.
+ */
+ public void run() {
+ byte[] dataBlockWrite = new byte[UPLOAD_BLOCK_SIZE];
+
+ OutputStream outputStream = null;
+
+ try {
+ for (int i = 0; !done; i++) {
+ // Write two 4 MB blocks to the blob.
+ //
+ outputStream = writerStorageAccount.getStore().storefile(
+ key,
+ new PermissionStatus("", "", FsPermission.getDefault()),
+ key);
+
+ Arrays.fill(dataBlockWrite, (byte) (i % 256));
+ for (int j = 0; j < NUMBER_OF_BLOCKS; j++) {
+ outputStream.write(dataBlockWrite);
+ }
+
+ outputStream.flush();
+ outputStream.close();
+ }
+ } catch (AzureException e) {
+ LOG.error("DatablockWriter thread encountered a storage exception. "
+ + e.getMessage(), e);
+ } catch (IOException e) {
+ LOG.error("DatablockWriter thread encountered an I/O exception. "
+ + e.getMessage(), e);
+ }
+ }
+ }
+
+ @Test
+ public void testReadOOBWrites() throws Exception {
+
+ byte[] dataBlockWrite = new byte[UPLOAD_BLOCK_SIZE];
+ byte[] dataBlockRead = new byte[UPLOAD_BLOCK_SIZE];
+
+ // Write to blob to make sure it exists.
+ //
+ // Write five 4 MB blocks to the blob. To ensure there is data in the blob before
+ // reading. This eliminates the race between the reader and writer threads.
+ String key = "WASB_String" + AzureTestUtils.getForkID() + ".txt";
+ OutputStream outputStream = testAccount.getStore().storefile(
+ key,
+ new PermissionStatus("", "", FsPermission.getDefault()),
+ key);
+ Arrays.fill(dataBlockWrite, (byte) 255);
+ for (int i = 0; i < NUMBER_OF_BLOCKS; i++) {
+ outputStream.write(dataBlockWrite);
+ }
+
+ outputStream.flush();
+ outputStream.close();
+
+ // Start writing blocks to Azure store using the DataBlockWriter thread.
+ DataBlockWriter writeBlockTask = new DataBlockWriter(testAccount, key);
+ writeBlockTask.startWriting();
+ int count = 0;
+
+ for (int i = 0; i < 5; i++) {
+ try(InputStream inputStream = testAccount.getStore().retrieve(key)) {
+ count = 0;
+ int c = 0;
+
+ while (c >= 0) {
+ c = inputStream.read(dataBlockRead, 0, UPLOAD_BLOCK_SIZE);
+ if (c < 0) {
+ break;
+ }
+
+ // Counting the number of bytes.
+ count += c;
+ }
+ } catch (IOException e) {
+ LOG.error("read failed", e);
+ fail("read failed: " + e);
+ }
+ }
+
+ // Stop writing blocks.
+ writeBlockTask.stopWriting();
+
+ // Validate that a block was read.
+ assertEquals(NUMBER_OF_BLOCKS * UPLOAD_BLOCK_SIZE, count);
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestAzureConcurrentOutOfBandIoWithSecureMode.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestAzureConcurrentOutOfBandIoWithSecureMode.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestAzureConcurrentOutOfBandIoWithSecureMode.java
new file mode 100644
index 0000000..2b0ea56
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestAzureConcurrentOutOfBandIoWithSecureMode.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+/**
+ * Extends ITestAzureConcurrentOutOfBandIo in order to run testReadOOBWrites with secure mode
+ * (fs.azure.secure.mode) both enabled and disabled.
+ */
+public class ITestAzureConcurrentOutOfBandIoWithSecureMode
+ extends ITestAzureConcurrentOutOfBandIo {
+
+ @Override
+ protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+ return AzureBlobStorageTestAccount.createOutOfBandStore(
+ UPLOAD_BLOCK_SIZE, DOWNLOAD_BLOCK_SIZE, true);
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f6b08f8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestAzureFileSystemErrorConditions.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestAzureFileSystemErrorConditions.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestAzureFileSystemErrorConditions.java
new file mode 100644
index 0000000..49e6730
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestAzureFileSystemErrorConditions.java
@@ -0,0 +1,243 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.HttpURLConnection;
+import java.net.URI;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.concurrent.Callable;
+
+import com.microsoft.azure.storage.OperationContext;
+import com.microsoft.azure.storage.SendingRequestEvent;
+import com.microsoft.azure.storage.StorageEvent;
+import org.junit.Test;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.TestHookOperationContext;
+import org.apache.hadoop.test.GenericTestUtils;
+
+import static org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.NO_ACCESS_TO_CONTAINER_MSG;
+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
+import static org.junit.Assume.assumeNotNull;
+
+/**
+ * Error handling tests for the WASB filesystem.
+ */
+public class ITestAzureFileSystemErrorConditions extends
+ AbstractWasbTestWithTimeout {
+ private static final int ALL_THREE_FILE_SIZE = 1024;
+
+ @Test
+ public void testNoInitialize() throws Exception {
+ intercept(AssertionError.class,
+ new Callable<FileMetadata>() {
+ @Override
+ public FileMetadata call() throws Exception {
+ return new AzureNativeFileSystemStore()
+ .retrieveMetadata("foo");
+ }
+ });
+ }
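For reference, LambdaTestUtils.intercept also accepts a Java 8 lambda in place of the anonymous Callable; a sketch of the same assertion in that form (not part of the patch):

// Equivalent lambda form of the check above; behaviour is identical.
intercept(AssertionError.class,
    () -> new AzureNativeFileSystemStore().retrieveMetadata("foo"));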
+
+ /**
+ * Try accessing an unauthorized or non-existent (treated the same) container
+ * from WASB.
+ */
+ @Test
+ public void testAccessUnauthorizedPublicContainer() throws Exception {
+ final String container = "nonExistentContainer";
+ final String account = "hopefullyNonExistentAccount";
+ Path noAccessPath = new Path(
+ "wasb://" + container + "@" + account + "/someFile");
+ NativeAzureFileSystem.suppressRetryPolicy();
+ try {
+ FileSystem.get(noAccessPath.toUri(), new Configuration())
+ .open(noAccessPath);
+ assertTrue("Should've thrown.", false);
+ } catch (AzureException ex) {
+ GenericTestUtils.assertExceptionContains(
+ String.format(NO_ACCESS_TO_CONTAINER_MSG, account, container), ex);
+ } finally {
+ NativeAzureFileSystem.resumeRetryPolicy();
+ }
+ }
+
+ @Test
+ public void testAccessContainerWithWrongVersion() throws Exception {
+ AzureNativeFileSystemStore store = new AzureNativeFileSystemStore();
+ MockStorageInterface mockStorage = new MockStorageInterface();
+ store.setAzureStorageInteractionLayer(mockStorage);
+ try (FileSystem fs = new NativeAzureFileSystem(store)) {
+ Configuration conf = new Configuration();
+ AzureBlobStorageTestAccount.setMockAccountKey(conf);
+ HashMap<String, String> metadata = new HashMap<String, String>();
+ metadata.put(AzureNativeFileSystemStore.VERSION_METADATA_KEY,
+ "2090-04-05"); // It's from the future!
+ mockStorage.addPreExistingContainer(
+ AzureBlobStorageTestAccount.getMockContainerUri(), metadata);
+
+ AzureException ex = intercept(AzureException.class,
+ new Callable<FileStatus[]>() {
+ @Override
+ public FileStatus[] call() throws Exception {
+ fs.initialize(new URI(AzureBlobStorageTestAccount.MOCK_WASB_URI),
+ conf);
+ return fs.listStatus(new Path("/"));
+ }
+ });
+ GenericTestUtils.assertExceptionContains(
+ "unsupported version: 2090-04-05.", ex);
+ }
+ }
+
+ private interface ConnectionRecognizer {
+ boolean isTargetConnection(HttpURLConnection connection);
+ }
+
+ private class TransientErrorInjector extends StorageEvent<SendingRequestEvent> {
+ private final ConnectionRecognizer connectionRecognizer;
+ private boolean injectedErrorOnce = false;
+
+ public TransientErrorInjector(ConnectionRecognizer connectionRecognizer) {
+ this.connectionRecognizer = connectionRecognizer;
+ }
+
+ @Override
+ public void eventOccurred(SendingRequestEvent eventArg) {
+ HttpURLConnection connection
+ = (HttpURLConnection) eventArg.getConnectionObject();
+ if (!connectionRecognizer.isTargetConnection(connection)) {
+ return;
+ }
+ if (!injectedErrorOnce) {
+ connection.setReadTimeout(1);
+ connection.disconnect();
+ injectedErrorOnce = true;
+ }
+ }
+ }
+
+ private void injectTransientError(NativeAzureFileSystem fs,
+ final ConnectionRecognizer connectionRecognizer) {
+ fs.getStore().addTestHookToOperationContext(new TestHookOperationContext() {
+ @Override
+ public OperationContext modifyOperationContext(OperationContext original) {
+ original.getSendingRequestEventHandler().addListener(
+ new TransientErrorInjector(connectionRecognizer));
+ return original;
+ }
+ });
+ }
+
+ @Test
+ public void testTransientErrorOnDelete() throws Exception {
+ // Need to do this test against a live storage account
+ AzureBlobStorageTestAccount testAccount =
+ AzureBlobStorageTestAccount.create();
+ assumeNotNull(testAccount);
+ try {
+ NativeAzureFileSystem fs = testAccount.getFileSystem();
+ injectTransientError(fs, new ConnectionRecognizer() {
+ @Override
+ public boolean isTargetConnection(HttpURLConnection connection) {
+ return connection.getRequestMethod().equals("DELETE");
+ }
+ });
+ Path testFile = new Path("/a/b");
+ assertTrue(fs.createNewFile(testFile));
+ assertTrue(fs.rename(testFile, new Path("/x")));
+ } finally {
+ testAccount.cleanup();
+ }
+ }
+
+ private void writeAllThreeFile(NativeAzureFileSystem fs, Path testFile)
+ throws IOException {
+ byte[] buffer = new byte[ALL_THREE_FILE_SIZE];
+ Arrays.fill(buffer, (byte) 3);
+ try (OutputStream stream = fs.create(testFile)) {
+ stream.write(buffer);
+ }
+ }
+
+ private void readAllThreeFile(NativeAzureFileSystem fs, Path testFile)
+ throws IOException {
+ byte[] buffer = new byte[ALL_THREE_FILE_SIZE];
+ InputStream inStream = fs.open(testFile);
+ assertEquals(buffer.length,
+ inStream.read(buffer, 0, buffer.length));
+ inStream.close();
+ for (int i = 0; i < buffer.length; i++) {
+ assertEquals(3, buffer[i]);
+ }
+ }
+
+ @Test
+ public void testTransientErrorOnCommitBlockList() throws Exception {
+ // Need to do this test against a live storage account
+ AzureBlobStorageTestAccount testAccount =
+ AzureBlobStorageTestAccount.create();
+ assumeNotNull(testAccount);
+ try {
+ NativeAzureFileSystem fs = testAccount.getFileSystem();
+ injectTransientError(fs, new ConnectionRecognizer() {
+ @Override
+ public boolean isTargetConnection(HttpURLConnection connection) {
+ return connection.getRequestMethod().equals("PUT")
+ && connection.getURL().getQuery() != null
+ && connection.getURL().getQuery().contains("blocklist");
+ }
+ });
+ Path testFile = new Path("/a/b");
+ writeAllThreeFile(fs, testFile);
+ readAllThreeFile(fs, testFile);
+ } finally {
+ testAccount.cleanup();
+ }
+ }
+
+ @Test
+ public void testTransientErrorOnRead() throws Exception {
+ // Need to do this test against a live storage account
+ AzureBlobStorageTestAccount testAccount =
+ AzureBlobStorageTestAccount.create();
+ assumeNotNull(testAccount);
+ try {
+ NativeAzureFileSystem fs = testAccount.getFileSystem();
+ Path testFile = new Path("/a/b");
+ writeAllThreeFile(fs, testFile);
+ injectTransientError(fs, new ConnectionRecognizer() {
+ @Override
+ public boolean isTargetConnection(HttpURLConnection connection) {
+ return connection.getRequestMethod().equals("GET");
+ }
+ });
+ readAllThreeFile(fs, testFile);
+ } finally {
+ testAccount.cleanup();
+ }
+ }
+}
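TransientErrorInjector above is one use of the Azure SDK's SendingRequestEvent hook; the same listener interface can observe traffic instead of breaking it. A passive sketch (the RequestCounter name and the AtomicInteger import are illustrative, not part of the patch):

// Counts DELETE requests rather than injecting a failure; install it via
// the same addTestHookToOperationContext() path used above.
// Assumes: import java.util.concurrent.atomic.AtomicInteger;
class RequestCounter extends StorageEvent<SendingRequestEvent> {
  private final AtomicInteger deletes = new AtomicInteger();

  @Override
  public void eventOccurred(SendingRequestEvent eventArg) {
    HttpURLConnection connection =
        (HttpURLConnection) eventArg.getConnectionObject();
    if ("DELETE".equals(connection.getRequestMethod())) {
      deletes.incrementAndGet();
    }
  }

  int deleteCount() {
    return deletes.get();
  }
}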
[04/20] hadoop git commit: HADOOP-14553. Add (parallelized) integration tests to hadoop-azure. Contributed by Steve Loughran.
Posted by st...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java
index a3f2843..4bf6f04 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java
@@ -29,7 +29,6 @@ import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.StringUtils;
import org.junit.Assume;
-import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
@@ -49,8 +48,8 @@ public class TestNativeAzureFileSystemAuthorization
protected MockWasbAuthorizerImpl authorizer;
@Override
- public Configuration getConfiguration() {
- Configuration conf = super.getConfiguration();
+ public Configuration createConfiguration() {
+ Configuration conf = super.createConfiguration();
conf.set(NativeAzureFileSystem.KEY_AZURE_AUTHORIZATION, "true");
conf.set(RemoteWasbAuthorizerImpl.KEY_REMOTE_AUTH_SERVICE_URLS, "http://localhost/");
conf.set(NativeAzureFileSystem.AZURE_CHOWN_USERLIST_PROPERTY_NAME, "user1 , user2");
@@ -59,13 +58,12 @@ public class TestNativeAzureFileSystemAuthorization
@Override
protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
- Configuration conf = getConfiguration();
- return AzureBlobStorageTestAccount.create(conf);
+ return AzureBlobStorageTestAccount.create(createConfiguration());
}
-
- @Before
- public void beforeMethod() {
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
boolean useSecureMode = fs.getConf().getBoolean(KEY_USE_SECURE_MODE, false);
boolean useAuthorization = fs.getConf().getBoolean(NativeAzureFileSystem.KEY_AZURE_AUTHORIZATION, false);
Assume.assumeTrue("Test valid when both SecureMode and Authorization are enabled .. skipping",
@@ -76,7 +74,6 @@ public class TestNativeAzureFileSystemAuthorization
fs.updateWasbAuthorizer(authorizer);
}
-
@Rule
public ExpectedException expectedEx = ExpectedException.none();
@@ -95,7 +92,7 @@ public class TestNativeAzureFileSystemAuthorization
}
/**
- * Setup the expected exception class, and exception message that the test is supposed to fail with
+ * Set up the expected exception class and the exception message that the test is supposed to fail with.
*/
protected void setExpectedFailureMessage(String operation, Path path) {
expectedEx.expect(WasbAuthorizationException.class);
@@ -104,7 +101,7 @@ public class TestNativeAzureFileSystemAuthorization
}
/**
- * Positive test to verify Create access check
+ * Positive test to verify Create access check.
* The file is created directly under an existing folder.
* No intermediate folders need to be created.
* @throws Throwable
@@ -128,7 +125,7 @@ public class TestNativeAzureFileSystemAuthorization
}
/**
- * Positive test to verify Create access check
+ * Positive test to verify Create access check.
* The test tries to create a file whose parent is non-existent to ensure that
* the intermediate folders between ancestor and direct parent are being created
* when proper ranger policies are configured.
@@ -155,7 +152,7 @@ public class TestNativeAzureFileSystemAuthorization
/**
- * Negative test to verify that create fails when trying to overwrite an existing file
+ * Negative test to verify that create fails when trying to overwrite an existing file.
* without proper write permissions on the file being overwritten.
* @throws Throwable
*/
@@ -181,7 +178,7 @@ public class TestNativeAzureFileSystemAuthorization
}
/**
- * Positive test to verify that create succeeds when trying to overwrite an existing file
+ * Positive test to verify that create succeeds when trying to overwrite an existing file.
* when proper write permissions on the file being overwritten are provided.
* @throws Throwable
*/
@@ -232,7 +229,7 @@ public class TestNativeAzureFileSystemAuthorization
}
/**
- * Positive test to verify listStatus access check
+ * Positive test to verify listStatus access check.
* @throws Throwable
*/
@Test
@@ -257,7 +254,7 @@ public class TestNativeAzureFileSystemAuthorization
}
/**
- * Negative test to verify listStatus access check
+ * Negative test to verify listStatus access check.
* @throws Throwable
*/
@@ -342,7 +339,7 @@ public class TestNativeAzureFileSystemAuthorization
}
/**
- * Negative test to verify rename access check - the dstFolder disallows rename
+ * Negative test to verify rename access check - the dstFolder disallows rename.
* @throws Throwable
*/
@Test //(expected=WasbAuthorizationException.class)
@@ -373,7 +370,7 @@ public class TestNativeAzureFileSystemAuthorization
}
/**
- * Positive test to verify rename access check - the dstFolder allows rename
+ * Positive test to verify rename access check - the dstFolder allows rename.
* @throws Throwable
*/
@Test
@@ -484,7 +481,7 @@ public class TestNativeAzureFileSystemAuthorization
}
/**
- * Positive test to verify file delete access check
+ * Positive test to verify file delete access check.
* @throws Throwable
*/
@Test
@@ -506,7 +503,7 @@ public class TestNativeAzureFileSystemAuthorization
}
/**
- * Negative test to verify file delete access check
+ * Negative test to verify file delete access check.
* @throws Throwable
*/
@Test //(expected=WasbAuthorizationException.class)
@@ -544,7 +541,7 @@ public class TestNativeAzureFileSystemAuthorization
/**
* Positive test to verify file delete access check, with intermediate folders
- * Uses wildcard recursive permissions
+ * Uses wildcard recursive permissions.
* @throws Throwable
*/
@Test
@@ -582,7 +579,7 @@ public class TestNativeAzureFileSystemAuthorization
}
/**
- * Positive test for mkdirs access check
+ * Positive test for mkdirs access check.
* @throws Throwable
*/
@Test
@@ -668,7 +665,7 @@ public class TestNativeAzureFileSystemAuthorization
}
}
/**
- * Negative test for mkdirs access check
+ * Negative test for mkdirs access check.
* @throws Throwable
*/
@Test //(expected=WasbAuthorizationException.class)
@@ -692,7 +689,7 @@ public class TestNativeAzureFileSystemAuthorization
}
/**
- * Positive test triple slash format (wasb:///) access check
+ * Positive test triple slash format (wasb:///) access check.
* @throws Throwable
*/
@Test
@@ -708,7 +705,7 @@ public class TestNativeAzureFileSystemAuthorization
}
/**
- * Negative test for setOwner when Authorization is enabled
+ * Negative test for setOwner when Authorization is enabled.
*/
@Test
public void testSetOwnerThrowsForUnauthorisedUsers() throws Throwable {
@@ -744,7 +741,7 @@ public class TestNativeAzureFileSystemAuthorization
/**
* Test for setOwner when Authorization is enabled and
- * the user is specified in chown allowed user list
+ * the user is specified in chown allowed user list.
* */
@Test
public void testSetOwnerSucceedsForAuthorisedUsers() throws Throwable {
@@ -785,7 +782,7 @@ public class TestNativeAzureFileSystemAuthorization
/**
* Test for setOwner when Authorization is enabled and
- * the userlist is specified as '*'
+ * the userlist is specified as '*'.
* */
@Test
public void testSetOwnerSucceedsForAnyUserWhenWildCardIsSpecified() throws Throwable {
@@ -829,7 +826,7 @@ public class TestNativeAzureFileSystemAuthorization
}
/** Test that setOwner throws for an illegal setup of the chown
- * allowed testSetOwnerSucceedsForAuthorisedUsers
+ * allowed user list.
*/
@Test
public void testSetOwnerFailsForIllegalSetup() throws Throwable {
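The two moves in this file -- getConfiguration() becoming createConfiguration(), and the free-standing @Before method becoming an @Override of setUp() -- follow one rule: subclass initialization must run only after the base class has bound the filesystem. The shape in isolation (BaseWasbTest and installMocks() are illustrative stand-ins, not classes in the patch):

// Sketch of the setup-override pattern adopted above.
public class DerivedWasbTest extends BaseWasbTest {

  @Override
  public void setUp() throws Exception {
    super.setUp();     // the base class creates the filesystem first
    installMocks();    // subclass wiring can now rely on the live fs field
  }
}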
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorizationWithOwner.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorizationWithOwner.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorizationWithOwner.java
deleted file mode 100644
index 4bd4633..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorizationWithOwner.java
+++ /dev/null
@@ -1,122 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import org.apache.hadoop.fs.contract.ContractTestUtils;
-import org.apache.hadoop.security.UserGroupInformation;
-import java.security.PrivilegedExceptionAction;
-
-import org.apache.hadoop.fs.Path;
-import org.junit.Test;
-import org.junit.Before;
-import static org.junit.Assert.assertEquals;
-
-/**
- * Test class that runs wasb authorization tests with owner check enabled.
- */
-public class TestNativeAzureFileSystemAuthorizationWithOwner
- extends TestNativeAzureFileSystemAuthorization {
-
- @Before
- public void beforeMethod() {
- super.beforeMethod();
- authorizer.init(fs.getConf(), true);
- }
-
- /**
- * Test case when owner matches current user
- */
- @Test
- public void testOwnerPermissionPositive() throws Throwable {
-
- Path parentDir = new Path("/testOwnerPermissionPositive");
- Path testPath = new Path(parentDir, "test.data");
-
- authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true);
- authorizer.addAuthRule(testPath.toString(), WasbAuthorizationOperations.READ.toString(), true);
- authorizer.addAuthRule(parentDir.toString(), WasbAuthorizationOperations.WRITE.toString(), true);
- // additional rule used for assertPathExists
- authorizer.addAuthRule(parentDir.toString(), WasbAuthorizationOperations.READ.toString(), true);
- fs.updateWasbAuthorizer(authorizer);
-
- try {
- // creates parentDir with owner as current user
- fs.mkdirs(parentDir);
- ContractTestUtils.assertPathExists(fs, "parentDir does not exist", parentDir);
-
- fs.create(testPath);
- fs.getFileStatus(testPath);
- ContractTestUtils.assertPathExists(fs, "testPath does not exist", testPath);
-
- } finally {
- allowRecursiveDelete(fs, parentDir.toString());
- fs.delete(parentDir, true);
- }
- }
-
- /**
- * Negative test case for owner does not match current user
- */
- @Test
- public void testOwnerPermissionNegative() throws Throwable {
- expectedEx.expect(WasbAuthorizationException.class);
-
- Path parentDir = new Path("/testOwnerPermissionNegative");
- Path childDir = new Path(parentDir, "childDir");
-
- setExpectedFailureMessage("mkdirs", childDir);
-
- authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true);
- authorizer.addAuthRule(parentDir.toString(), WasbAuthorizationOperations.WRITE.toString(), true);
-
- fs.updateWasbAuthorizer(authorizer);
-
- try{
- fs.mkdirs(parentDir);
- UserGroupInformation ugiSuperUser = UserGroupInformation.createUserForTesting(
- "testuser", new String[] {});
-
- ugiSuperUser.doAs(new PrivilegedExceptionAction<Void>() {
- @Override
- public Void run() throws Exception {
- fs.mkdirs(childDir);
- return null;
- }
- });
-
- } finally {
- allowRecursiveDelete(fs, parentDir.toString());
- fs.delete(parentDir, true);
- }
- }
-
- /**
- * Test to verify that retrieving owner information does not
- * throw when file/folder does not exist
- */
- @Test
- public void testRetrievingOwnerDoesNotFailWhenFileDoesNotExist() throws Throwable {
-
- Path testdirectory = new Path("/testDirectory123454565");
-
- String owner = fs.getOwnerForPath(testdirectory);
- assertEquals("", owner);
- }
-}
-
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemBlockLocations.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemBlockLocations.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemBlockLocations.java
index b2660bb..b280cac 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemBlockLocations.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemBlockLocations.java
@@ -18,8 +18,6 @@
package org.apache.hadoop.fs.azure;
-import static org.junit.Assert.assertEquals;
-
import java.io.OutputStream;
import org.apache.hadoop.conf.Configuration;
@@ -29,7 +27,11 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.Test;
-public class TestNativeAzureFileSystemBlockLocations {
+/**
+ * Test block location logic.
+ */
+public class TestNativeAzureFileSystemBlockLocations
+ extends AbstractWasbTestWithTimeout {
@Test
public void testNumberOfBlocks() throws Exception {
Configuration conf = new Configuration();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemClientLogging.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemClientLogging.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemClientLogging.java
deleted file mode 100644
index 4114e60..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemClientLogging.java
+++ /dev/null
@@ -1,140 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.net.URI;
-import java.util.StringTokenizer;
-
-import org.apache.commons.logging.impl.Log4JLogger;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
-import org.apache.log4j.Logger;
-import org.junit.Test;
-
-/**
- * Test to validate Azure storage client side logging. Tests works only when
- * testing with Live Azure storage because Emulator does not have support for
- * client-side logging.
- *
- */
-public class TestNativeAzureFileSystemClientLogging
- extends AbstractWasbTestBase {
-
- private AzureBlobStorageTestAccount testAccount;
-
- // Core-site config controlling Azure Storage Client logging
- private static final String KEY_LOGGING_CONF_STRING = "fs.azure.storage.client.logging";
-
- // Temporary directory created using WASB.
- private static final String TEMP_DIR = "tempDir";
-
- /*
- * Helper method to verify the client logging is working. This check primarily
- * checks to make sure we see a line in the logs corresponding to the entity
- * that is created during test run.
- */
- private boolean verifyStorageClientLogs(String capturedLogs, String entity)
- throws Exception {
-
- URI uri = testAccount.getRealAccount().getBlobEndpoint();
- String container = testAccount.getRealContainer().getName();
- String validateString = uri + Path.SEPARATOR + container + Path.SEPARATOR
- + entity;
- boolean entityFound = false;
-
- StringTokenizer tokenizer = new StringTokenizer(capturedLogs, "\n");
-
- while (tokenizer.hasMoreTokens()) {
- String token = tokenizer.nextToken();
- if (token.contains(validateString)) {
- entityFound = true;
- break;
- }
- }
- return entityFound;
- }
-
- /*
- * Helper method that updates the core-site config to enable/disable logging.
- */
- private void updateFileSystemConfiguration(Boolean loggingFlag)
- throws Exception {
-
- Configuration conf = fs.getConf();
- conf.set(KEY_LOGGING_CONF_STRING, loggingFlag.toString());
- URI uri = fs.getUri();
- fs.initialize(uri, conf);
- }
-
- // Using WASB code to communicate with Azure Storage.
- private void performWASBOperations() throws Exception {
-
- Path tempDir = new Path(Path.SEPARATOR + TEMP_DIR);
- fs.mkdirs(tempDir);
- fs.delete(tempDir, true);
- }
-
- @Test
- public void testLoggingEnabled() throws Exception {
-
- LogCapturer logs = LogCapturer.captureLogs(new Log4JLogger(Logger
- .getRootLogger()));
-
- // Update configuration based on the Test.
- updateFileSystemConfiguration(true);
-
- performWASBOperations();
-
- String output = getLogOutput(logs);
- assertTrue("Log entry " + TEMP_DIR + " not found in " + output,
- verifyStorageClientLogs(output, TEMP_DIR));
- }
-
- protected String getLogOutput(LogCapturer logs) {
- String output = logs.getOutput();
- assertTrue("No log created/captured", !output.isEmpty());
- return output;
- }
-
- @Test
- public void testLoggingDisabled() throws Exception {
-
- LogCapturer logs = LogCapturer.captureLogs(new Log4JLogger(Logger
- .getRootLogger()));
-
- // Update configuration based on the Test.
- updateFileSystemConfiguration(false);
-
- performWASBOperations();
- String output = getLogOutput(logs);
-
- assertFalse("Log entry " + TEMP_DIR + " found in " + output,
- verifyStorageClientLogs(output, TEMP_DIR));
- }
-
- @Override
- protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
- testAccount = AzureBlobStorageTestAccount.create();
- return testAccount;
- }
-}
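The deleted test's core mechanism is worth keeping in view: capture root-logger output with GenericTestUtils.LogCapturer, perform WASB operations, then assert on the captured text. A condensed sketch using the same calls as the code above:

// Condensed capture-and-assert pattern from the deleted test.
LogCapturer logs =
    LogCapturer.captureLogs(new Log4JLogger(Logger.getRootLogger()));
fs.mkdirs(new Path("/tempDir"));
fs.delete(new Path("/tempDir"), true);
String output = logs.getOutput();
assertTrue("no client-side log output captured", !output.isEmpty());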
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrency.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrency.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrency.java
index cbfc563..655ae90 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrency.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrency.java
@@ -18,11 +18,6 @@
package org.apache.hadoop.fs.azure;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-
import java.io.OutputStream;
import java.io.PrintWriter;
import java.io.StringWriter;
@@ -33,32 +28,30 @@ import java.util.concurrent.ConcurrentLinkedQueue;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.StringUtils;
-import org.junit.After;
-import org.junit.Before;
import org.junit.Test;
-public class TestNativeAzureFileSystemConcurrency {
- private AzureBlobStorageTestAccount testAccount;
- private FileSystem fs;
+public class TestNativeAzureFileSystemConcurrency extends AbstractWasbTestBase {
private InMemoryBlockBlobStore backingStore;
- @Before
+ @Override
public void setUp() throws Exception {
- testAccount = AzureBlobStorageTestAccount.createMock();
- fs = testAccount.getFileSystem();
- backingStore = testAccount.getMockStorage().getBackingStore();
+ super.setUp();
+ backingStore = getTestAccount().getMockStorage().getBackingStore();
}
- @After
+ @Override
public void tearDown() throws Exception {
- testAccount.cleanup();
- fs = null;
+ super.tearDown();
backingStore = null;
}
+ @Override
+ protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+ return AzureBlobStorageTestAccount.createMock();
+ }
+
@Test
public void testLinkBlobs() throws Exception {
Path filePath = new Path("/inProgress");
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrencyLive.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrencyLive.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrencyLive.java
deleted file mode 100644
index 7c5899d..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrencyLive.java
+++ /dev/null
@@ -1,184 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-
-/***
- * Test class to hold all Live Azure storage concurrency tests.
- */
-public class TestNativeAzureFileSystemConcurrencyLive
- extends AbstractWasbTestBase {
-
- private static final int THREAD_COUNT = 102;
- private static final int TEST_EXECUTION_TIMEOUT = 5000;
- @Override
- protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
- return AzureBlobStorageTestAccount.create();
- }
-
- /**
- * Validate contract for FileSystem.create when overwrite is true and there
- * are concurrent callers of FileSystem.delete. An existing file should be
- * overwritten, even if the original destination exists but is deleted by an
- * external agent during the create operation.
- */
- @Test(timeout = TEST_EXECUTION_TIMEOUT)
- public void testConcurrentCreateDeleteFile() throws Exception {
- Path testFile = new Path("test.dat");
-
- List<CreateFileTask> tasks = new ArrayList<>(THREAD_COUNT);
-
- for (int i = 0; i < THREAD_COUNT; i++) {
- tasks.add(new CreateFileTask(fs, testFile));
- }
-
- ExecutorService es = null;
-
- try {
- es = Executors.newFixedThreadPool(THREAD_COUNT);
-
- List<Future<Void>> futures = es.invokeAll(tasks);
-
- for (Future<Void> future : futures) {
- Assert.assertTrue(future.isDone());
-
- // we are using Callable<V>, so if an exception
- // occurred during the operation, it will be thrown
- // when we call get
- Assert.assertEquals(null, future.get());
- }
- } finally {
- if (es != null) {
- es.shutdownNow();
- }
- }
- }
-
- /**
- * Validate contract for FileSystem.delete when invoked concurrently.
- * One of the threads should successfully delete the file and return true;
- * all other threads should return false.
- */
- @Test(timeout = TEST_EXECUTION_TIMEOUT)
- public void testConcurrentDeleteFile() throws Exception {
- Path testFile = new Path("test.dat");
- fs.create(testFile).close();
-
- List<DeleteFileTask> tasks = new ArrayList<>(THREAD_COUNT);
-
- for (int i = 0; i < THREAD_COUNT; i++) {
- tasks.add(new DeleteFileTask(fs, testFile));
- }
-
- ExecutorService es = null;
- try {
- es = Executors.newFixedThreadPool(THREAD_COUNT);
-
- List<Future<Boolean>> futures = es.invokeAll(tasks);
-
- int successCount = 0;
- for (Future<Boolean> future : futures) {
- Assert.assertTrue(future.isDone());
-
- // we are using Callable<V>, so if an exception
- // occurred during the operation, it will be thrown
- // when we call get
- Boolean success = future.get();
- if (success) {
- successCount++;
- }
- }
-
- Assert.assertEquals(
- "Exactly one delete operation should return true.",
- 1,
- successCount);
- } finally {
- if (es != null) {
- es.shutdownNow();
- }
- }
- }
-}
-
-abstract class FileSystemTask<V> implements Callable<V> {
- private final FileSystem fileSystem;
- private final Path path;
-
- protected FileSystem getFileSystem() {
- return this.fileSystem;
- }
-
- protected Path getFilePath() {
- return this.path;
- }
-
- FileSystemTask(FileSystem fs, Path p) {
- this.fileSystem = fs;
- this.path = p;
- }
-
- public abstract V call() throws Exception;
-}
-
-class DeleteFileTask extends FileSystemTask<Boolean> {
-
- DeleteFileTask(FileSystem fs, Path p) {
- super(fs, p);
- }
-
- @Override
- public Boolean call() throws Exception {
- return this.getFileSystem().delete(this.getFilePath(), false);
- }
-}
-
-class CreateFileTask extends FileSystemTask<Void> {
- CreateFileTask(FileSystem fs, Path p) {
- super(fs, p);
- }
-
- public Void call() throws Exception {
- FileSystem fs = getFileSystem();
- Path p = getFilePath();
-
- // Create an empty file and close the stream.
- FSDataOutputStream stream = fs.create(p, true);
- stream.close();
-
- // Delete the file. We don't care if delete returns true or false.
- // We just want to ensure the file does not exist.
- this.getFileSystem().delete(this.getFilePath(), false);
-
- return null;
- }
-}
\ No newline at end of file
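The deleted concurrency tests hinge on a single idiom: invokeAll() a batch of identical Callables, then fold the Futures into one assertion, relying on Future.get() to rethrow any per-thread exception. The idiom in isolation (tasks and THREAD_COUNT as in the code above):

// Run THREAD_COUNT identical delete tasks; exactly one should win.
ExecutorService es = Executors.newFixedThreadPool(THREAD_COUNT);
try {
  List<Future<Boolean>> futures = es.invokeAll(tasks);
  int successCount = 0;
  for (Future<Boolean> future : futures) {
    if (future.get()) {       // get() rethrows any task exception
      successCount++;
    }
  }
  assertEquals("Exactly one delete operation should return true.",
      1, successCount);
} finally {
  es.shutdownNow();
}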
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractEmulator.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractEmulator.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractEmulator.java
deleted file mode 100644
index 217ca81..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractEmulator.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import static org.junit.Assume.assumeNotNull;
-
-import org.apache.hadoop.fs.FileSystemContractBaseTest;
-import org.junit.After;
-import org.junit.Before;
-
-public class TestNativeAzureFileSystemContractEmulator extends
- FileSystemContractBaseTest {
- private AzureBlobStorageTestAccount testAccount;
-
- @Before
- public void setUp() throws Exception {
- testAccount = AzureBlobStorageTestAccount.createForEmulator();
- if (testAccount != null) {
- fs = testAccount.getFileSystem();
- }
- assumeNotNull(fs);
- }
-
- @After
- public void tearDown() throws Exception {
- if (testAccount != null) {
- testAccount.cleanup();
- testAccount = null;
- fs = null;
- }
- }
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractLive.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractLive.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractLive.java
deleted file mode 100644
index b546009..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractLive.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import static org.junit.Assume.assumeNotNull;
-
-import org.apache.hadoop.fs.FileSystemContractBaseTest;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Ignore;
-import org.junit.Test;
-
-public class TestNativeAzureFileSystemContractLive extends
- FileSystemContractBaseTest {
- private AzureBlobStorageTestAccount testAccount;
-
- @Before
- public void setUp() throws Exception {
- testAccount = AzureBlobStorageTestAccount.create();
- if (testAccount != null) {
- fs = testAccount.getFileSystem();
- }
- assumeNotNull(fs);
- }
-
- @After
- public void tearDown() throws Exception {
- if (testAccount != null) {
- testAccount.cleanup();
- testAccount = null;
- fs = null;
- }
- }
-
- /**
- * The following tests are failing on Azure and the Azure
- * file system code needs to be modified to make them pass.
- * A separate work item has been opened for this.
- */
- @Ignore
- @Test
- public void testMoveFileUnderParent() throws Throwable {
- }
-
- @Ignore
- @Test
- public void testRenameFileToSelf() throws Throwable {
- }
-
- @Ignore
- @Test
- public void testRenameChildDirForbidden() throws Exception {
- }
-
- @Ignore
- @Test
- public void testMoveDirUnderParent() throws Throwable {
- }
-
- @Ignore
- @Test
- public void testRenameDirToSelf() throws Throwable {
- }
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractMocked.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractMocked.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractMocked.java
index f458bb3..2809260 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractMocked.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractMocked.java
@@ -23,6 +23,9 @@ import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
+/**
+ * Run the FileSystemContractBaseTest suite against a mocked store.
+ */
public class TestNativeAzureFileSystemContractMocked extends
FileSystemContractBaseTest {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractPageBlobLive.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractPageBlobLive.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractPageBlobLive.java
deleted file mode 100644
index 2a88ad2..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractPageBlobLive.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystemContractBaseTest;
-import org.junit.After;
-import static org.junit.Assume.assumeNotNull;
-import org.junit.Before;
-import org.junit.Ignore;
-import org.junit.Test;
-
-public class TestNativeAzureFileSystemContractPageBlobLive extends
- FileSystemContractBaseTest {
- private AzureBlobStorageTestAccount testAccount;
-
- private AzureBlobStorageTestAccount createTestAccount()
- throws Exception {
- Configuration conf = new Configuration();
-
- // Configure the page blob directories key so every file created is a page blob.
- conf.set(AzureNativeFileSystemStore.KEY_PAGE_BLOB_DIRECTORIES, "/");
-
- // Configure the atomic rename directories key so every folder will have
- // atomic rename applied.
- conf.set(AzureNativeFileSystemStore.KEY_ATOMIC_RENAME_DIRECTORIES, "/");
- return AzureBlobStorageTestAccount.create(conf);
- }
-
- @Before
- public void setUp() throws Exception {
- testAccount = createTestAccount();
- if (testAccount != null) {
- fs = testAccount.getFileSystem();
- }
- assumeNotNull(fs);
- }
-
- @After
- public void tearDown() throws Exception {
- if (testAccount != null) {
- testAccount.cleanup();
- testAccount = null;
- fs = null;
- }
- }
-
- /**
- * The following tests are failing on Azure and the Azure
- * file system code needs to be modified to make them pass.
- * A separate work item has been opened for this.
- */
- @Ignore
- @Test
- public void testMoveFileUnderParent() throws Throwable {
- }
-
- @Ignore
- @Test
- public void testRenameFileToSelf() throws Throwable {
- }
-
- @Ignore
- @Test
- public void testRenameChildDirForbidden() throws Exception {
- }
-
- @Ignore
- @Test
- public void testMoveDirUnderParent() throws Throwable {
- }
-
- @Ignore
- @Test
- public void testRenameDirToSelf() throws Throwable {
- }
-}
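The two conf.set() calls in the deleted setup are the entire switch between block-blob and page-blob behaviour: files under a directory listed in the page-blob key are stored as page blobs, and directories listed in the atomic-rename key get rename journaling, as the comments above state. Isolated, with the "/" value matching every path (sensible only in a test):

// Test-only configuration, exactly as the deleted setUp() built it.
Configuration conf = new Configuration();
conf.set(AzureNativeFileSystemStore.KEY_PAGE_BLOB_DIRECTORIES, "/");
conf.set(AzureNativeFileSystemStore.KEY_ATOMIC_RENAME_DIRECTORIES, "/");
AzureBlobStorageTestAccount account = AzureBlobStorageTestAccount.create(conf);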
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemFileNameCheck.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemFileNameCheck.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemFileNameCheck.java
index 82eabaa..0dfbb37 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemFileNameCheck.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemFileNameCheck.java
@@ -18,17 +18,11 @@
package org.apache.hadoop.fs.azure;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
import java.io.IOException;
import java.util.HashMap;
-import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.junit.After;
-import org.junit.Before;
+
import org.junit.Test;
/**
@@ -38,24 +32,18 @@ import org.junit.Test;
* creation/rename of files/directories through WASB that have colons in the
* names.
*/
-public class TestNativeAzureFileSystemFileNameCheck {
- private FileSystem fs = null;
- private AzureBlobStorageTestAccount testAccount = null;
+public class TestNativeAzureFileSystemFileNameCheck extends AbstractWasbTestBase {
private String root = null;
- @Before
+ @Override
public void setUp() throws Exception {
- testAccount = AzureBlobStorageTestAccount.createMock();
- fs = testAccount.getFileSystem();
+ super.setUp();
root = fs.getUri().toString();
}
- @After
- public void tearDown() throws Exception {
- testAccount.cleanup();
- root = null;
- fs = null;
- testAccount = null;
+ @Override
+ protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+ return AzureBlobStorageTestAccount.createMock();
}
@Test
@@ -138,4 +126,4 @@ public class TestNativeAzureFileSystemFileNameCheck {
fsck.run(new String[] { p.toString() });
return fsck.getPathNameWarning();
}
-}
\ No newline at end of file
+}
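As the class javadoc notes, WASB forbids colons in path names. A sketch of the basic negative check, assuming the rejection surfaces as an IOException (the class's own tests go through their own helpers):

// Creating a path with a colon in its name should fail on WASB.
Path invalid = new Path("/foo:bar");
try {
  fs.create(invalid);
  fail("expected create() to reject a colon in the file name");
} catch (IOException expected) {
  // expected: WASB does not permit colons in object names
}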
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemLive.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemLive.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemLive.java
deleted file mode 100644
index 6baba33..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemLive.java
+++ /dev/null
@@ -1,242 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.util.concurrent.CountDownLatch;
-
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-
-import org.junit.Test;
-
-import com.microsoft.azure.storage.StorageException;
-
-/*
- * Tests the Native Azure file system (WASB) against an actual blob store if
- * provided in the environment.
- */
-public class TestNativeAzureFileSystemLive extends
- NativeAzureFileSystemBaseTest {
-
- @Override
- protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
- return AzureBlobStorageTestAccount.create();
- }
-
- @Test
- public void testLazyRenamePendingCanOverwriteExistingFile()
- throws Exception {
- final String SRC_FILE_KEY = "srcFile";
- final String DST_FILE_KEY = "dstFile";
- Path srcPath = new Path(SRC_FILE_KEY);
- FSDataOutputStream srcStream = fs.create(srcPath);
- assertTrue(fs.exists(srcPath));
- Path dstPath = new Path(DST_FILE_KEY);
- FSDataOutputStream dstStream = fs.create(dstPath);
- assertTrue(fs.exists(dstPath));
- NativeAzureFileSystem nfs = (NativeAzureFileSystem)fs;
- final String fullSrcKey = nfs.pathToKey(nfs.makeAbsolute(srcPath));
- final String fullDstKey = nfs.pathToKey(nfs.makeAbsolute(dstPath));
- nfs.getStoreInterface().rename(fullSrcKey, fullDstKey, true, null);
- assertTrue(fs.exists(dstPath));
- assertFalse(fs.exists(srcPath));
- IOUtils.cleanup(null, srcStream);
- IOUtils.cleanup(null, dstStream);
- }
- /**
- * Tests fs.delete() function to delete a blob when another blob is holding a
- * lease on it. Delete if called without a lease should fail if another process
- * is holding a lease and throw appropriate exception
- * This is a scenario that would happen in HMaster startup when it tries to
- * clean up the temp dirs while the HMaster process which was killed earlier
- * held lease on the blob when doing some DDL operation
- */
- @Test
- public void testDeleteThrowsExceptionWithLeaseExistsErrorMessage()
- throws Exception {
- LOG.info("Starting test");
- final String FILE_KEY = "fileWithLease";
- // Create the file
- Path path = new Path(FILE_KEY);
- fs.create(path);
- assertTrue(fs.exists(path));
- NativeAzureFileSystem nfs = (NativeAzureFileSystem)fs;
- final String fullKey = nfs.pathToKey(nfs.makeAbsolute(path));
- final AzureNativeFileSystemStore store = nfs.getStore();
-
- // Acquire the lease on the file in a background thread
- final CountDownLatch leaseAttemptComplete = new CountDownLatch(1);
- final CountDownLatch beginningDeleteAttempt = new CountDownLatch(1);
- Thread t = new Thread() {
- @Override
- public void run() {
- // Acquire the lease and then signal the main test thread.
- SelfRenewingLease lease = null;
- try {
- lease = store.acquireLease(fullKey);
- LOG.info("Lease acquired: " + lease.getLeaseID());
- } catch (AzureException e) {
- LOG.warn("Lease acqusition thread unable to acquire lease", e);
- } finally {
- leaseAttemptComplete.countDown();
- }
-
- // Wait for the main test thread to signal it will attempt the delete.
- try {
- beginningDeleteAttempt.await();
- } catch (InterruptedException e) {
- Thread.currentThread().interrupt();
- }
-
- // Keep holding the lease past the lease acquisition retry interval, so
- // the test covers the case of delete retrying to acquire the lease.
- try {
- Thread.sleep(SelfRenewingLease.LEASE_ACQUIRE_RETRY_INTERVAL * 3);
- } catch (InterruptedException ex) {
- Thread.currentThread().interrupt();
- }
-
- try {
- if (lease != null){
- LOG.info("Freeing lease");
- lease.free();
- }
- } catch (StorageException se) {
- LOG.warn("Unable to free lease.", se);
- }
- }
- };
-
- // Start the background thread and wait for it to signal the lease is held.
- t.start();
- try {
- leaseAttemptComplete.await();
- } catch (InterruptedException ex) {
- Thread.currentThread().interrupt();
- }
-
- // Try to delete the same file
- beginningDeleteAttempt.countDown();
- store.delete(fullKey);
-
- // At this point file SHOULD BE DELETED
- assertFalse(fs.exists(path));
- }
-
- /**
- * Check that isPageBlobKey works as expected. This assumes that
- * in the test configuration, the list of supported page blob directories
- * only includes "pageBlobs". That's why this test is made specific
- * to this subclass.
- */
- @Test
- public void testIsPageBlobKey() {
- AzureNativeFileSystemStore store = ((NativeAzureFileSystem) fs).getStore();
-
- // Use literal strings so it's easier to understand the tests.
- // In case the constant changes, we want to know about it so we can update this test.
- assertEquals(AzureBlobStorageTestAccount.DEFAULT_PAGE_BLOB_DIRECTORY, "pageBlobs");
-
- // URI prefix for test environment.
- String uriPrefix = "file:///";
-
- // negative tests
- String[] negativeKeys = { "", "/", "bar", "bar/", "bar/pageBlobs", "bar/pageBlobs/foo",
- "bar/pageBlobs/foo/", "/pageBlobs/", "/pageBlobs", "pageBlobs", "pageBlobsxyz/" };
- for (String s : negativeKeys) {
- assertFalse(store.isPageBlobKey(s));
- assertFalse(store.isPageBlobKey(uriPrefix + s));
- }
-
- // positive tests
- String[] positiveKeys = { "pageBlobs/", "pageBlobs/foo/", "pageBlobs/foo/bar/" };
- for (String s : positiveKeys) {
- assertTrue(store.isPageBlobKey(s));
- assertTrue(store.isPageBlobKey(uriPrefix + s));
- }
- }
-
- /**
- * Test that isAtomicRenameKey() works as expected.
- */
- @Test
- public void testIsAtomicRenameKey() {
-
- AzureNativeFileSystemStore store = ((NativeAzureFileSystem) fs).getStore();
-
- // We want to know if the default configuration changes so we can fix
- // this test.
- assertEquals(AzureBlobStorageTestAccount.DEFAULT_ATOMIC_RENAME_DIRECTORIES,
- "/atomicRenameDir1,/atomicRenameDir2");
-
- // URI prefix for test environment.
- String uriPrefix = "file:///";
-
- // negative tests
- String[] negativeKeys = { "", "/", "bar", "bar/", "bar/hbase",
- "bar/hbase/foo", "bar/hbase/foo/", "/hbase/", "/hbase", "hbase",
- "hbasexyz/", "foo/atomicRenameDir1/"};
- for (String s : negativeKeys) {
- assertFalse(store.isAtomicRenameKey(s));
- assertFalse(store.isAtomicRenameKey(uriPrefix + s));
- }
-
- // Positive tests. The directories for atomic rename are /hbase
- // plus the ones in the configuration (DEFAULT_ATOMIC_RENAME_DIRECTORIES
- // for this test).
- String[] positiveKeys = { "hbase/", "hbase/foo/", "hbase/foo/bar/",
- "atomicRenameDir1/foo/", "atomicRenameDir2/bar/"};
- for (String s : positiveKeys) {
- assertTrue(store.isAtomicRenameKey(s));
- assertTrue(store.isAtomicRenameKey(uriPrefix + s));
- }
- }
-
- /**
- * Tests fs.mkdir() function to create a target blob while another thread
- * is holding the lease on the blob. mkdir should not fail since the blob
- * already exists.
- * This is a scenario that would happen in HBase distributed log splitting.
- * Multiple threads will try to create and update "recovered.edits" folder
- * under the same path.
- */
- @Test
- public void testMkdirOnExistingFolderWithLease() throws Exception {
- SelfRenewingLease lease;
- final String FILE_KEY = "folderWithLease";
- // Create the folder
- fs.mkdirs(new Path(FILE_KEY));
- NativeAzureFileSystem nfs = (NativeAzureFileSystem) fs;
- String fullKey = nfs.pathToKey(nfs.makeAbsolute(new Path(FILE_KEY)));
- AzureNativeFileSystemStore store = nfs.getStore();
- // Acquire the lease on the folder
- lease = store.acquireLease(fullKey);
- assertTrue(lease.getLeaseID() != null);
- // Try to create the same folder
- store.storeEmptyFolder(fullKey,
- nfs.createPermissionStatus(FsPermission.getDirDefault()));
- lease.free();
- }
-}
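The lease tests above share a skeleton worth seeing on its own: acquire a SelfRenewingLease on the blob's full key, run the operation under test, and always free the lease so cleanup can delete the blob. Condensed, with store and fullKey as in the tests above:

// Acquire/operate/free sequence from the lease tests.
SelfRenewingLease lease = store.acquireLease(fullKey);
try {
  assertNotNull("no lease ID", lease.getLeaseID());
  // operation under test goes here, e.g. storeEmptyFolder(...)
} finally {
  lease.free();    // release so the blob can be deleted afterwards
}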
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemMocked.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemMocked.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemMocked.java
index aa1e4f7..20d45b2 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemMocked.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemMocked.java
@@ -21,6 +21,10 @@ package org.apache.hadoop.fs.azure;
import java.io.IOException;
import org.junit.Ignore;
+/**
+ * Run {@link NativeAzureFileSystemBaseTest} tests against a mocked store,
+ * skipping tests of unsupported features.
+ */
public class TestNativeAzureFileSystemMocked extends
NativeAzureFileSystemBaseTest {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemUploadLogic.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemUploadLogic.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemUploadLogic.java
index 4c2df8d..7f63295 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemUploadLogic.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemUploadLogic.java
@@ -18,41 +18,27 @@
package org.apache.hadoop.fs.azure;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-
import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.io.OutputStream;
import org.apache.hadoop.fs.Path;
-import org.junit.After;
-import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
/**
* Tests for the upload, buffering and flush logic in WASB.
*/
-public class TestNativeAzureFileSystemUploadLogic {
- private AzureBlobStorageTestAccount testAccount;
+public class TestNativeAzureFileSystemUploadLogic extends AbstractWasbTestBase {
// Just an arbitrary number so that the values I write have a predictable
// pattern: 0, 1, 2, ..., 45, 46, 0, 1, 2, ...
static final int byteValuePeriod = 47;
- @Before
- public void setUp() throws Exception {
- testAccount = AzureBlobStorageTestAccount.createMock();
- }
-
- @After
- public void tearDown() throws Exception {
- if (testAccount != null) {
- testAccount.cleanup();
- testAccount = null;
- }
+ @Override
+ protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+ return AzureBlobStorageTestAccount.createMock();
}
/**
@@ -126,9 +112,9 @@ public class TestNativeAzureFileSystemUploadLogic {
* @param expectedSize The expected size of the data in there.
*/
private void assertDataInFile(Path file, int expectedSize) throws Exception {
- InputStream inStream = testAccount.getFileSystem().open(file);
- assertDataInStream(inStream, expectedSize);
- inStream.close();
+ try (InputStream inStream = getFileSystem().open(file)) {
+ assertDataInStream(inStream, expectedSize);
+ }
}
/**
@@ -139,7 +125,7 @@ public class TestNativeAzureFileSystemUploadLogic {
private void assertDataInTempBlob(int expectedSize) throws Exception {
// Look for the temporary upload blob in the backing store.
InMemoryBlockBlobStore backingStore =
- testAccount.getMockStorage().getBackingStore();
+ getTestAccount().getMockStorage().getBackingStore();
String tempKey = null;
for (String key : backingStore.getKeys()) {
if (key.contains(NativeAzureFileSystem.AZURE_TEMP_FOLDER)) {
@@ -149,9 +135,10 @@ public class TestNativeAzureFileSystemUploadLogic {
}
}
assertNotNull(tempKey);
- InputStream inStream = new ByteArrayInputStream(backingStore.getContent(tempKey));
- assertDataInStream(inStream, expectedSize);
- inStream.close();
+ try (InputStream inStream = new ByteArrayInputStream(
+ backingStore.getContent(tempKey))) {
+ assertDataInStream(inStream, expectedSize);
+ }
}
/**
@@ -162,25 +149,30 @@ public class TestNativeAzureFileSystemUploadLogic {
*/
private void testConsistencyAfterManyFlushes(FlushFrequencyVariation variation)
throws Exception {
- Path uploadedFile = new Path("/uploadedFile");
- OutputStream outStream = testAccount.getFileSystem().create(uploadedFile);
- final int totalSize = 9123;
- int flushPeriod;
- switch (variation) {
- case BeforeSingleBufferFull: flushPeriod = 300; break;
- case AfterSingleBufferFull: flushPeriod = 600; break;
- case AfterAllRingBufferFull: flushPeriod = 1600; break;
- default:
- throw new IllegalArgumentException("Unknown variation: " + variation);
- }
- for (int i = 0; i < totalSize; i++) {
- outStream.write(i % byteValuePeriod);
- if ((i + 1) % flushPeriod == 0) {
- outStream.flush();
- assertDataInTempBlob(i + 1);
+ Path uploadedFile = methodPath();
+ try {
+ OutputStream outStream = getFileSystem().create(uploadedFile);
+ final int totalSize = 9123;
+ int flushPeriod;
+ switch (variation) {
+ case BeforeSingleBufferFull: flushPeriod = 300; break;
+ case AfterSingleBufferFull: flushPeriod = 600; break;
+ case AfterAllRingBufferFull: flushPeriod = 1600; break;
+ default:
+ throw new IllegalArgumentException("Unknown variation: " + variation);
}
+ for (int i = 0; i < totalSize; i++) {
+ outStream.write(i % byteValuePeriod);
+ if ((i + 1) % flushPeriod == 0) {
+ outStream.flush();
+ assertDataInTempBlob(i + 1);
+ }
+ }
+ outStream.close();
+ assertDataInFile(uploadedFile, totalSize);
+ } finally {
+ getFileSystem().delete(uploadedFile, false);
}
- outStream.close();
- assertDataInFile(uploadedFile, totalSize);
}
}
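The upload-logic changes above condense the migration pattern this patch applies throughout: account setup moves from @Before/@After methods into a createTestAccount() override, streams are closed with try-with-resources, and each test writes under methodPath() and cleans up in a finally block. In general form — the class below is a hedged illustration of the pattern, not a file in this commit:

import java.io.InputStream;

import org.apache.hadoop.fs.Path;

public class ExampleUploadLogicTest extends AbstractWasbTestBase {

  @Override
  protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
    // mock backing store: no Azure credentials or network required
    return AzureBlobStorageTestAccount.createMock();
  }

  private void assertFileSize(Path file, int expected) throws Exception {
    // the stream is closed even if the assertion throws
    try (InputStream in = getFileSystem().open(file)) {
      byte[] buf = new byte[expected];
      // a single read() is enough for the small in-memory files used here
      assertEquals(expected, in.read(buf));
    }
  }
}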
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperations.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperations.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperations.java
index 544d6ab..303a89a 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperations.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperations.java
@@ -18,11 +18,6 @@
package org.apache.hadoop.fs.azure;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
import java.util.HashMap;
import org.apache.hadoop.fs.FileStatus;
@@ -37,7 +32,8 @@ import org.junit.Test;
* Tests that WASB handles things gracefully when users add blobs to the Azure
* Storage container from outside WASB's control.
*/
-public class TestOutOfBandAzureBlobOperations {
+public class TestOutOfBandAzureBlobOperations
+ extends AbstractWasbTestWithTimeout {
private AzureBlobStorageTestAccount testAccount;
private FileSystem fs;
private InMemoryBlockBlobStore backingStore;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperationsLive.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperationsLive.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperationsLive.java
deleted file mode 100644
index 60b01c6..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperationsLive.java
+++ /dev/null
@@ -1,203 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assume.assumeNotNull;
-
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-import com.microsoft.azure.storage.blob.BlobOutputStream;
-import com.microsoft.azure.storage.blob.CloudBlockBlob;
-
-public class TestOutOfBandAzureBlobOperationsLive {
- private FileSystem fs;
- private AzureBlobStorageTestAccount testAccount;
-
- @Before
- public void setUp() throws Exception {
- testAccount = AzureBlobStorageTestAccount.create();
- if (testAccount != null) {
- fs = testAccount.getFileSystem();
- }
- assumeNotNull(testAccount);
- }
-
- @After
- public void tearDown() throws Exception {
- if (testAccount != null) {
- testAccount.cleanup();
- testAccount = null;
- fs = null;
- }
- }
-
- // scenario for this particular test described at MONARCH-HADOOP-764
- // creating a file out-of-band would confuse mkdirs("<oobfilesUncleFolder>")
- // eg oob creation of "user/<name>/testFolder/a/input/file"
- // Then wasb creation of "user/<name>/testFolder/a/output" fails
- @Test
- public void outOfBandFolder_uncleMkdirs() throws Exception {
-
- // NOTE: manual use of CloudBlockBlob targets working directory explicitly.
- // WASB driver methods prepend working directory implicitly.
- String workingDir = "user/"
- + UserGroupInformation.getCurrentUser().getShortUserName() + "/";
-
- CloudBlockBlob blob = testAccount.getBlobReference(workingDir
- + "testFolder1/a/input/file");
- BlobOutputStream s = blob.openOutputStream();
- s.close();
- assertTrue(fs.exists(new Path("testFolder1/a/input/file")));
-
- Path targetFolder = new Path("testFolder1/a/output");
- assertTrue(fs.mkdirs(targetFolder));
- }
-
- // scenario for this particular test described at MONARCH-HADOOP-764
- @Test
- public void outOfBandFolder_parentDelete() throws Exception {
-
- // NOTE: manual use of CloudBlockBlob targets working directory explicitly.
- // WASB driver methods prepend working directory implicitly.
- String workingDir = "user/"
- + UserGroupInformation.getCurrentUser().getShortUserName() + "/";
- CloudBlockBlob blob = testAccount.getBlobReference(workingDir
- + "testFolder2/a/input/file");
- BlobOutputStream s = blob.openOutputStream();
- s.close();
- assertTrue(fs.exists(new Path("testFolder2/a/input/file")));
-
- Path targetFolder = new Path("testFolder2/a/input");
- assertTrue(fs.delete(targetFolder, true));
- }
-
- @Test
- public void outOfBandFolder_rootFileDelete() throws Exception {
-
- CloudBlockBlob blob = testAccount.getBlobReference("fileY");
- BlobOutputStream s = blob.openOutputStream();
- s.close();
- assertTrue(fs.exists(new Path("/fileY")));
- assertTrue(fs.delete(new Path("/fileY"), true));
- }
-
- @Test
- public void outOfBandFolder_firstLevelFolderDelete() throws Exception {
-
- CloudBlockBlob blob = testAccount.getBlobReference("folderW/file");
- BlobOutputStream s = blob.openOutputStream();
- s.close();
- assertTrue(fs.exists(new Path("/folderW")));
- assertTrue(fs.exists(new Path("/folderW/file")));
- assertTrue(fs.delete(new Path("/folderW"), true));
- }
-
- // scenario for this particular test described at MONARCH-HADOOP-764
- @Test
- public void outOfBandFolder_siblingCreate() throws Exception {
-
- // NOTE: manual use of CloudBlockBlob targets working directory explicitly.
- // WASB driver methods prepend working directory implicitly.
- String workingDir = "user/"
- + UserGroupInformation.getCurrentUser().getShortUserName() + "/";
- CloudBlockBlob blob = testAccount.getBlobReference(workingDir
- + "testFolder3/a/input/file");
- BlobOutputStream s = blob.openOutputStream();
- s.close();
- assertTrue(fs.exists(new Path("testFolder3/a/input/file")));
-
- Path targetFile = new Path("testFolder3/a/input/file2");
- FSDataOutputStream s2 = fs.create(targetFile);
- s2.close();
- }
-
- // scenario for this particular test described at MONARCH-HADOOP-764
- // creating a new file in the root folder
- @Test
- public void outOfBandFolder_create_rootDir() throws Exception {
- Path targetFile = new Path("/newInRoot");
- FSDataOutputStream s2 = fs.create(targetFile);
- s2.close();
- }
-
- // scenario for this particular test described at MONARCH-HADOOP-764
- @Test
- public void outOfBandFolder_rename() throws Exception {
-
- // NOTE: manual use of CloudBlockBlob targets working directory explicitly.
- // WASB driver methods prepend working directory implicitly.
- String workingDir = "user/"
- + UserGroupInformation.getCurrentUser().getShortUserName() + "/";
- CloudBlockBlob blob = testAccount.getBlobReference(workingDir
- + "testFolder4/a/input/file");
- BlobOutputStream s = blob.openOutputStream();
- s.close();
-
- Path srcFilePath = new Path("testFolder4/a/input/file");
- assertTrue(fs.exists(srcFilePath));
-
- Path destFilePath = new Path("testFolder4/a/input/file2");
- fs.rename(srcFilePath, destFilePath);
- }
-
- // Verify that you can rename a file which is the only file in an implicit folder in the
- // WASB file system.
- // scenario for this particular test described at MONARCH-HADOOP-892
- @Test
- public void outOfBandSingleFile_rename() throws Exception {
-
- // NOTE: manual use of CloudBlockBlob targets working directory explicitly.
- // WASB driver methods prepend working directory implicitly.
- String workingDir = "user/" + UserGroupInformation.getCurrentUser().getShortUserName() + "/";
- CloudBlockBlob blob = testAccount.getBlobReference(workingDir + "testFolder5/a/input/file");
- BlobOutputStream s = blob.openOutputStream();
- s.close();
-
- Path srcFilePath = new Path("testFolder5/a/input/file");
- assertTrue(fs.exists(srcFilePath));
-
- Path destFilePath = new Path("testFolder5/file2");
- fs.rename(srcFilePath, destFilePath);
- }
-
- // WASB must force explicit parent directories in create, delete, mkdirs, rename.
- // scenario for this particular test described at MONARCH-HADOOP-764
- @Test
- public void outOfBandFolder_rename_rootLevelFiles() throws Exception {
-
- // NOTE: manual use of CloudBlockBlob targets working directory explicitly.
- // WASB driver methods prepend working directory implicitly.
- CloudBlockBlob blob = testAccount.getBlobReference("fileX");
- BlobOutputStream s = blob.openOutputStream();
- s.close();
-
- Path srcFilePath = new Path("/fileX");
- assertTrue(fs.exists(srcFilePath));
-
- Path destFilePath = new Path("/fileXrename");
- fs.rename(srcFilePath, destFilePath);
- }
-}
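Every live test in the file deleted above exercises one core move, shown here in isolation: create a blob with the Azure storage SDK directly, bypassing WASB, then assert that the filesystem copes with the resulting implicit folders. All calls in this fragment come from the deleted code; only the surrounding test-class scaffolding is implied:

// create a zero-byte blob out of band, under the user's working directory
String workingDir = "user/"
    + UserGroupInformation.getCurrentUser().getShortUserName() + "/";
CloudBlockBlob blob =
    testAccount.getBlobReference(workingDir + "testFolder1/a/input/file");
blob.openOutputStream().close();

// WASB must see the implicit parent folders and still allow sibling mkdirs
assertTrue(fs.exists(new Path("testFolder1/a/input/file")));
assertTrue(fs.mkdirs(new Path("testFolder1/a/output")));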
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestReadAndSeekPageBlobAfterWrite.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestReadAndSeekPageBlobAfterWrite.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestReadAndSeekPageBlobAfterWrite.java
deleted file mode 100644
index 41b8386..0000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestReadAndSeekPageBlobAfterWrite.java
+++ /dev/null
@@ -1,355 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assume.assumeNotNull;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.Arrays;
-import java.util.Random;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.azure.AzureException;
-import org.apache.hadoop.util.Time;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-/**
- * Write data into a page blob and verify you can read back all of it
- * or just a part of it.
- */
-public class TestReadAndSeekPageBlobAfterWrite {
- private static final Log LOG = LogFactory.getLog(TestReadAndSeekPageBlobAfterWrite.class);
-
- private FileSystem fs;
- private AzureBlobStorageTestAccount testAccount;
- private byte[] randomData;
-
- // Page blob physical page size
- private static final int PAGE_SIZE = PageBlobFormatHelpers.PAGE_SIZE;
-
- // Size of data on page (excluding header)
- private static final int PAGE_DATA_SIZE = PAGE_SIZE - PageBlobFormatHelpers.PAGE_HEADER_SIZE;
- private static final int MAX_BYTES = 33554432; // maximum bytes in a file that we'll test
- private static final int MAX_PAGES = MAX_BYTES / PAGE_SIZE; // maximum number of pages we'll test
- private Random rand = new Random();
-
- // A key with a prefix under /pageBlobs, which for the test file system will
- // force use of a page blob.
- private static final String KEY = "/pageBlobs/file.dat";
- private static final Path PATH = new Path(KEY); // path of page blob file to read and write
-
- protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
- return AzureBlobStorageTestAccount.create();
- }
-
- @Before
- public void setUp() throws Exception {
- testAccount = createTestAccount();
- if (testAccount != null) {
- fs = testAccount.getFileSystem();
- }
- assumeNotNull(testAccount);
-
- // Make sure we are using an integral number of pages.
- assertEquals(0, MAX_BYTES % PAGE_SIZE);
-
- // load an in-memory array of random data
- randomData = new byte[PAGE_SIZE * MAX_PAGES];
- rand.nextBytes(randomData);
- }
-
- @After
- public void tearDown() throws Exception {
- if (testAccount != null) {
- testAccount.cleanup();
- testAccount = null;
- fs = null;
- }
- }
-
- /**
- * Make sure the file name (key) is a page blob file name. If anybody changes that,
- * we need to come back and update this test class.
- */
- @Test
- public void testIsPageBlobFileName() {
- AzureNativeFileSystemStore store = ((NativeAzureFileSystem) fs).getStore();
- String[] a = KEY.split("/");
- String key2 = a[1] + "/";
- assertTrue(store.isPageBlobKey(key2));
- }
-
- /**
- * For a set of different file sizes, write some random data to a page blob,
- * read it back, and compare that what was read is the same as what was written.
- */
- @Test
- public void testReadAfterWriteRandomData() throws IOException {
-
- // local shorthand
- final int PDS = PAGE_DATA_SIZE;
-
- // Test for sizes at and near page boundaries
- int[] dataSizes = {
-
- // on first page
- 0, 1, 2, 3,
-
- // Near first physical page boundary (because the implementation
- // stores PDS + the page header size bytes on each page).
- PDS - 1, PDS, PDS + 1, PDS + 2, PDS + 3,
-
- // near second physical page boundary
- (2 * PDS) - 1, (2 * PDS), (2 * PDS) + 1, (2 * PDS) + 2, (2 * PDS) + 3,
-
- // near tenth physical page boundary
- (10 * PDS) - 1, (10 * PDS), (10 * PDS) + 1, (10 * PDS) + 2, (10 * PDS) + 3,
-
- // test one big size, >> 4MB (an internal buffer size in the code)
- MAX_BYTES
- };
-
- for (int i : dataSizes) {
- testReadAfterWriteRandomData(i);
- }
- }
-
- private void testReadAfterWriteRandomData(int size) throws IOException {
- writeRandomData(size);
- readRandomDataAndVerify(size);
- }
-
- /**
- * Read "size" bytes of data and verify that what was read and what was written
- * are the same.
- */
- private void readRandomDataAndVerify(int size) throws AzureException, IOException {
- byte[] b = new byte[size];
- FSDataInputStream stream = fs.open(PATH);
- int bytesRead = stream.read(b);
- stream.close();
- assertEquals(bytesRead, size);
-
- // compare the data read to the data written
- assertTrue(comparePrefix(randomData, b, size));
- }
-
- // return true if the beginning "size" values of the arrays are the same
- private boolean comparePrefix(byte[] a, byte[] b, int size) {
- if (a.length < size || b.length < size) {
- return false;
- }
- for (int i = 0; i < size; i++) {
- if (a[i] != b[i]) {
- return false;
- }
- }
- return true;
- }
-
- // Write a specified amount of random data to the file path for this test class.
- private void writeRandomData(int size) throws IOException {
- OutputStream output = fs.create(PATH);
- output.write(randomData, 0, size);
- output.close();
- }
-
- /**
- * Write data to a page blob, open it, seek, and then read a range of data.
- * Then compare that the data read from that range is the same as the data originally written.
- */
- @Test
- public void testPageBlobSeekAndReadAfterWrite() throws IOException {
- writeRandomData(PAGE_SIZE * MAX_PAGES);
- int recordSize = 100;
- byte[] b = new byte[recordSize];
- FSDataInputStream stream = fs.open(PATH);
-
- // Seek to a boundary around the middle of the 6th page
- int seekPosition = 5 * PAGE_SIZE + 250;
- stream.seek(seekPosition);
-
- // Read a record's worth of bytes and verify results
- int bytesRead = stream.read(b);
- verifyReadRandomData(b, bytesRead, seekPosition, recordSize);
-
- // Seek to another spot and read a record greater than a page
- seekPosition = 10 * PAGE_SIZE + 250;
- stream.seek(seekPosition);
- recordSize = 1000;
- b = new byte[recordSize];
- bytesRead = stream.read(b);
- verifyReadRandomData(b, bytesRead, seekPosition, recordSize);
-
- // Read the last 100 bytes of the file
- recordSize = 100;
- seekPosition = PAGE_SIZE * MAX_PAGES - recordSize;
- stream.seek(seekPosition);
- b = new byte[recordSize];
- bytesRead = stream.read(b);
- verifyReadRandomData(b, bytesRead, seekPosition, recordSize);
-
- // Read past the end of the file and we should get only partial data.
- recordSize = 100;
- seekPosition = PAGE_SIZE * MAX_PAGES - recordSize + 50;
- stream.seek(seekPosition);
- b = new byte[recordSize];
- bytesRead = stream.read(b);
- assertEquals(50, bytesRead);
-
- // compare last 50 bytes written with those read
- byte[] tail = Arrays.copyOfRange(randomData, seekPosition, randomData.length);
- assertTrue(comparePrefix(tail, b, 50));
- }
-
- // Verify that reading a record of data after seeking gives the expected data.
- private void verifyReadRandomData(byte[] b, int bytesRead, int seekPosition, int recordSize) {
- byte[] originalRecordData =
- Arrays.copyOfRange(randomData, seekPosition, seekPosition + recordSize + 1);
- assertEquals(recordSize, bytesRead);
- assertTrue(comparePrefix(originalRecordData, b, recordSize));
- }
-
- // Test many small flushed writes interspersed with periodic hflush calls.
- // For manual testing, increase NUM_WRITES to a large number.
- // The goal for a long-running manual test is to make sure that it finishes
- // and the close() call does not time out. It also facilitates debugging into
- // hflush/hsync.
- @Test
- public void testManySmallWritesWithHFlush() throws IOException {
- writeAndReadOneFile(50, 100, 20);
- }
-
- /**
- * Write a total of numWrites * recordLength data to a file, read it back,
- * and check to make sure what was read is the same as what was written.
- * The syncInterval is the number of writes after which to call hflush to
- * force the data to storage.
- */
- private void writeAndReadOneFile(int numWrites, int recordLength, int syncInterval) throws IOException {
- final int NUM_WRITES = numWrites;
- final int RECORD_LENGTH = recordLength;
- final int SYNC_INTERVAL = syncInterval;
-
- // A lower bound on the minimum time we think it will take to do
- // a write to Azure storage.
- final long MINIMUM_EXPECTED_TIME = 20;
- LOG.info("Writing " + NUM_WRITES * RECORD_LENGTH + " bytes to " + PATH.getName());
- FSDataOutputStream output = fs.create(PATH);
- int writesSinceHFlush = 0;
- try {
-
- // Do a flush and hflush to exercise case for empty write queue in PageBlobOutputStream,
- // to test concurrent execution gates.
- output.flush();
- output.hflush();
- for (int i = 0; i < NUM_WRITES; i++) {
- output.write(randomData, i * RECORD_LENGTH, RECORD_LENGTH);
- writesSinceHFlush++;
- output.flush();
- if ((i % SYNC_INTERVAL) == 0) {
- output.hflush();
- writesSinceHFlush = 0;
- }
- }
- } finally {
- long start = Time.monotonicNow();
- output.close();
- long end = Time.monotonicNow();
- LOG.debug("close duration = " + (end - start) + " msec.");
- if (writesSinceHFlush > 0) {
- assertTrue(String.format(
- "close duration with >= 1 pending write is %d, less than minimum expected of %d",
- end - start, MINIMUM_EXPECTED_TIME),
- end - start >= MINIMUM_EXPECTED_TIME);
- }
- }
-
- // Read the data back and check it.
- FSDataInputStream stream = fs.open(PATH);
- int SIZE = NUM_WRITES * RECORD_LENGTH;
- byte[] b = new byte[SIZE];
- try {
- stream.seek(0);
- stream.read(b, 0, SIZE);
- verifyReadRandomData(b, SIZE, 0, SIZE);
- } finally {
- stream.close();
- }
-
- // delete the file
- fs.delete(PATH, false);
- }
-
- // Test writing to a large file repeatedly as a stress test.
- // Set the repetitions to a larger number for manual testing
- // for a longer stress run.
- @Test
- public void testLargeFileStress() throws IOException {
- int numWrites = 32;
- int recordSize = 1024 * 1024;
- int syncInterval = 10;
- int repetitions = 1;
- for (int i = 0; i < repetitions; i++) {
- writeAndReadOneFile(numWrites, recordSize, syncInterval);
- }
- }
-
- // Write to a file repeatedly to verify that it extends.
- // The page blob file should start out at 128MB and finish at 256MB.
- @Test(timeout=300000)
- public void testFileSizeExtension() throws IOException {
- final int writeSize = 1024 * 1024;
- final int numWrites = 129;
- final byte dataByte = 5;
- byte[] data = new byte[writeSize];
- Arrays.fill(data, dataByte);
- FSDataOutputStream output = fs.create(PATH);
- try {
- for (int i = 0; i < numWrites; i++) {
- output.write(data);
- output.hflush();
- LOG.debug("total writes = " + (i + 1));
- }
- } finally {
- output.close();
- }
-
- // Show that we wrote more than the default page blob file size.
- assertTrue(numWrites * writeSize > PageBlobOutputStream.PAGE_BLOB_MIN_SIZE);
-
- // Verify we can list the new size. That will prove we expanded the file.
- FileStatus[] status = fs.listStatus(PATH);
- assertTrue(status[0].getLen() == numWrites * writeSize);
- LOG.debug("Total bytes written to " + PATH + " = " + status[0].getLen());
- fs.delete(PATH, false);
- }
-
-}
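The page-blob tests deleted above rest on one piece of layout arithmetic: each physical page of PAGE_SIZE bytes starts with a PAGE_HEADER_SIZE-byte header, so only PAGE_DATA_SIZE = PAGE_SIZE - PAGE_HEADER_SIZE payload bytes land on each page, and the interesting read/write sizes sit at and just around multiples of PAGE_DATA_SIZE. A small helper expressing that enumeration (illustrative only; the real constants live in PageBlobFormatHelpers):

// sizes at and around the nth physical page boundary, matching the
// dataSizes table in testReadAfterWriteRandomData above
static int[] sizesAroundPageBoundary(int pageDataSize, int n) {
  int base = n * pageDataSize;
  return new int[] { base - 1, base, base + 1, base + 2, base + 3 };
}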
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestShellDecryptionKeyProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestShellDecryptionKeyProvider.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestShellDecryptionKeyProvider.java
index 0bf33d8..0334c39 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestShellDecryptionKeyProvider.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestShellDecryptionKeyProvider.java
@@ -19,20 +19,23 @@
package org.apache.hadoop.fs.azure;
import static org.apache.hadoop.test.PlatformAssumptions.assumeWindows;
-import static org.junit.Assert.assertEquals;
import java.io.File;
import org.apache.commons.io.FileUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.junit.Assert;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
-public class TestShellDecryptionKeyProvider {
- public static final Log LOG = LogFactory
- .getLog(TestShellDecryptionKeyProvider.class);
+/**
+ * Windows-only tests of shell scripts that provide decryption keys.
+ */
+public class TestShellDecryptionKeyProvider
+ extends AbstractWasbTestWithTimeout {
+ public static final Logger LOG = LoggerFactory
+ .getLogger(TestShellDecryptionKeyProvider.class);
private static File TEST_ROOT_DIR = new File(System.getProperty(
"test.build.data", "/tmp"), "TestShellDecryptionKeyProvider");
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbFsck.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbFsck.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbFsck.java
index 467424b..9d32fb2 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbFsck.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbFsck.java
@@ -18,10 +18,6 @@
package org.apache.hadoop.fs.azure;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
@@ -32,7 +28,10 @@ import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
-public class TestWasbFsck {
+/**
+ * Tests which look at fsck recovery.
+ */
+public class TestWasbFsck extends AbstractWasbTestWithTimeout {
private AzureBlobStorageTestAccount testAccount;
private FileSystem fs;
private InMemoryBlockBlobStore backingStore;
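TestWasbFsck, like the other suites touched here, now extends AbstractWasbTestWithTimeout instead of being a bare JUnit class. That base class is not shown in this hunk; assuming its main job is a shared per-test timeout, a minimal sketch of such a class might look like this (the rule and duration are guesses, not the real implementation):

import java.util.concurrent.TimeUnit;

import org.junit.Assert;
import org.junit.Rule;
import org.junit.rules.Timeout;

public abstract class ExampleTestWithTimeout extends Assert {
  // fail any single test case that runs past the limit
  @Rule
  public Timeout testTimeout = new Timeout(10, TimeUnit.MINUTES);
}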