Posted to common-commits@hadoop.apache.org by ae...@apache.org on 2016/06/19 00:33:00 UTC

[21/39] hadoop git commit: HADOOP-12875. [Azure Data Lake] Support for contract test and unit test cases. Contributed by Vishwajeet Dusane.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c9e71382/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/hdfs/web/TestAdlRead.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/hdfs/web/TestAdlRead.java b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/hdfs/web/TestAdlRead.java
new file mode 100644
index 0000000..60904d0
--- /dev/null
+++ b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/hdfs/web/TestAdlRead.java
@@ -0,0 +1,205 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.hdfs.web;
+
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.adl.TestADLResponseData;
+import org.apache.hadoop.fs.common.AdlMockWebServer;
+import org.apache.hadoop.fs.common.TestDataForRead;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Random;
+
+/**
+ * This class stress-tests positional reads against the number of network
+ * calls required to fetch the data. The tests verify that data integrity
+ * and ordering are maintained. They are meant to exercise the
+ * BufferManager.java and BatchByteArrayInputStream implementations.
+ */
+@RunWith(Parameterized.class)
+public class TestAdlRead extends AdlMockWebServer {
+
+  // Use a one-hour timeout so the test completes even under high backend
+  // latency rather than being terminated prematurely.
+  @Rule
+  public Timeout globalTimeout = new Timeout(60 * 60000);
+  private TestDataForRead testData;
+
+  public TestAdlRead(TestDataForRead testData) {
+    this.testData = testData;
+    getConf().set("adl.feature.override.readahead.max.buffersize", "8192");
+    getConf().set("adl.feature.override.readahead.max.concurrent.connection",
+        "1");
+  }
+
+  @Parameterized.Parameters(name = "{index}")
+  public static Collection testDataForReadOperation() {
+    return Arrays.asList(new Object[][] {
+
+        //--------------------------
+        // Test Data
+        //--------------------------
+        {new TestDataForRead("Hello World".getBytes(), 3, 1000, true)},
+        {new TestDataForRead(
+            ("the problem you appear to be wrestling with is that this doesn't "
+                + "display very well. ").getBytes(), 3, 1000, true)},
+        {new TestDataForRead(
+            ("Chinese Indonesians (Indonesian: Orang Tionghoa-Indonesia; "
+                + "Chinese: "
+                + "trad ???????, simp ???????, pin Y\ufffdnd\ufffdn\ufffdx?y\ufffd Hu\ufffdr\ufffdn), are "
+                + "Indonesians descended from various Chinese ethnic groups, "
+                + "particularly Han.").getBytes(), 3, 1000, true)},
+        {new TestDataForRead(
+            TestADLResponseData.getRandomByteArrayData(5 * 1024), 3, 1000, true)},
+        {new TestDataForRead(
+            TestADLResponseData.getRandomByteArrayData(1 * 1024), 3, 50, true)},
+        {new TestDataForRead(
+            TestADLResponseData.getRandomByteArrayData(8 * 1024), 3, 10, true)},
+        {new TestDataForRead(
+            TestADLResponseData.getRandomByteArrayData(32 * 1024), 6, 10, false)},
+        {new TestDataForRead(
+            TestADLResponseData.getRandomByteArrayData(48 * 1024), 8, 10, false)}});
+  }
+
+  @After
+  @Before
+  public void cleanReadBuffer() {
+    BufferManager.getInstance().clear();
+  }
+
+  @Test
+  public void testEntireBytes() throws IOException, InterruptedException {
+    getMockServer().setDispatcher(testData.getDispatcher());
+    FSDataInputStream in = getMockAdlFileSystem().open(new Path("/test"));
+    byte[] expectedData = new byte[testData.getActualData().length];
+    Assert.assertEquals(in.read(expectedData), expectedData.length);
+    Assert.assertArrayEquals(expectedData, testData.getActualData());
+    in.close();
+    if (testData.isCheckOfNoOfCalls()) {
+      Assert.assertEquals(testData.getExpectedNoNetworkCall(),
+          getMockServer().getRequestCount());
+    }
+  }
+
+  @Test
+  public void testSeekOperation() throws IOException, InterruptedException {
+    getMockServer().setDispatcher(testData.getDispatcher());
+    FSDataInputStream in = getMockAdlFileSystem().open(new Path("/test"));
+    Random random = new Random();
+    for (int i = 0; i < 1000; ++i) {
+      int position = random.nextInt(testData.getActualData().length);
+      in.seek(position);
+      Assert.assertEquals(in.getPos(), position);
+      Assert.assertEquals(in.read(), testData.getActualData()[position] & 0xFF);
+    }
+    in.close();
+    if (testData.isCheckOfNoOfCalls()) {
+      Assert.assertEquals(testData.getExpectedNoNetworkCall(),
+          getMockServer().getRequestCount());
+    }
+  }
+
+  @Test
+  public void testReadServerCalls() throws IOException, InterruptedException {
+    getMockServer().setDispatcher(testData.getDispatcher());
+    FSDataInputStream in = getMockAdlFileSystem().open(new Path("/test"));
+    byte[] expectedData = new byte[testData.getActualData().length];
+    in.readFully(expectedData);
+    Assert.assertArrayEquals(expectedData, testData.getActualData());
+    Assert.assertEquals(testData.getExpectedNoNetworkCall(),
+        getMockServer().getRequestCount());
+    in.close();
+  }
+
+  @Test
+  public void testReadFully() throws IOException, InterruptedException {
+    getMockServer().setDispatcher(testData.getDispatcher());
+    FSDataInputStream in = getMockAdlFileSystem().open(new Path("/test"));
+    byte[] expectedData = new byte[testData.getActualData().length];
+    in.readFully(expectedData);
+    Assert.assertArrayEquals(expectedData, testData.getActualData());
+
+    in.readFully(0, expectedData);
+    Assert.assertArrayEquals(expectedData, testData.getActualData());
+
+    in.seek(0);
+    in.readFully(expectedData, 0, expectedData.length);
+    Assert.assertArrayEquals(expectedData, testData.getActualData());
+    in.close();
+
+    if (testData.isCheckOfNoOfCalls()) {
+      Assert.assertEquals(testData.getExpectedNoNetworkCall(),
+          getMockServer().getRequestCount());
+    }
+  }
+
+  @Test
+  public void testRandomPositionalReadUsingReadFully()
+      throws IOException, InterruptedException {
+    getMockServer().setDispatcher(testData.getDispatcher());
+    FSDataInputStream in = getMockAdlFileSystem().open(new Path("/test"));
+    ByteArrayInputStream actualData = new ByteArrayInputStream(
+        testData.getActualData());
+    Random random = new Random();
+    for (int i = 0; i < testData.getIntensityOfTest(); ++i) {
+      int offset = random.nextInt(testData.getActualData().length);
+      int length = testData.getActualData().length - offset;
+      byte[] expectedData = new byte[length];
+      byte[] actualDataSubset = new byte[length];
+      actualData.reset();
+      actualData.skip(offset);
+      actualData.read(actualDataSubset, 0, length);
+
+      in.readFully(offset, expectedData, 0, length);
+      Assert.assertArrayEquals(expectedData, actualDataSubset);
+    }
+
+    for (int i = 0; i < testData.getIntensityOfTest(); ++i) {
+      int offset = random.nextInt(testData.getActualData().length);
+      int length = random.nextInt(testData.getActualData().length - offset);
+      byte[] expectedData = new byte[length];
+      byte[] actualDataSubset = new byte[length];
+      actualData.reset();
+      actualData.skip(offset);
+      actualData.read(actualDataSubset, 0, length);
+
+      in.readFully(offset, expectedData, 0, length);
+      Assert.assertArrayEquals(expectedData, actualDataSubset);
+    }
+
+    in.close();
+    if (testData.isCheckOfNoOfCalls()) {
+      Assert.assertEquals(testData.getExpectedNoNetworkCall(),
+          getMockServer().getRequestCount());
+    }
+  }
+}
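
The expected network-call counts in the test data above line up with the 8192-byte read-ahead buffer configured in the constructor: roughly one data read per 8 KB of content, plus one token-refresh call and one GETFILESTATUS call per stream. That is a reading of the numbers, not a statement about the implementation; a small sketch of the model (not part of this patch):

    // Rough model of the expected request counts used in the test data above,
    // assuming one token-refresh call, one GETFILESTATUS call, and
    // ceil(size / readAheadBufferSize) data reads per stream.
    static int expectedRequests(int dataSize, int readAheadBufferSize) {
      int dataReads = (dataSize + readAheadBufferSize - 1) / readAheadBufferSize;
      return dataReads + 2;
    }
    // e.g. expectedRequests(32 * 1024, 8192) == 6 and
    //      expectedRequests(48 * 1024, 8192) == 8, matching the entries above.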

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c9e71382/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/hdfs/web/TestConcurrentDataReadOperations.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/hdfs/web/TestConcurrentDataReadOperations.java b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/hdfs/web/TestConcurrentDataReadOperations.java
new file mode 100644
index 0000000..1194336
--- /dev/null
+++ b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/hdfs/web/TestConcurrentDataReadOperations.java
@@ -0,0 +1,306 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.hdfs.web;
+
+import com.squareup.okhttp.mockwebserver.Dispatcher;
+import com.squareup.okhttp.mockwebserver.MockResponse;
+import com.squareup.okhttp.mockwebserver.RecordedRequest;
+import okio.Buffer;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.adl.TestADLResponseData;
+import org.apache.hadoop.fs.common.AdlMockWebServer;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Random;
+import java.util.UUID;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+/**
+ * This class tests multiple threads concurrently reading the same file or
+ * different files at arbitrary offsets. These tests are meant to exercise
+ * the BufferManager.java and BatchByteArrayInputStream implementations.
+ */
+@RunWith(Parameterized.class)
+public class TestConcurrentDataReadOperations extends AdlMockWebServer {
+
+  private static FSDataInputStream commonHandle = null;
+  private static Object lock = new Object();
+  private int concurrencyLevel;
+
+  public TestConcurrentDataReadOperations(int concurrencyLevel) {
+    this.concurrencyLevel = concurrencyLevel;
+    getConf().set("adl.feature.override.readahead.max.buffersize", "102400");
+    getConf().set("adl.feature.override.readahead.max.concurrent.connection",
+        "1");
+  }
+
+  @Parameterized.Parameters(name = "{index}")
+  public static Collection testDataNumberOfConcurrentRun() {
+    return Arrays.asList(new Object[][] {{1}, {2}, {3}, {4}, {5}});
+  }
+
+  public static byte[] getRandomByteArrayData(int size) {
+    byte[] b = new byte[size];
+    Random rand = new Random();
+    rand.nextBytes(b);
+    return b;
+  }
+
+  private void setDispatcher(final ArrayList<CreateTestData> testData) {
+    getMockServer().setDispatcher(new Dispatcher() {
+      @Override
+      public MockResponse dispatch(RecordedRequest recordedRequest)
+          throws InterruptedException {
+        if (recordedRequest.getPath().equals("/refresh")) {
+          return AdlMockWebServer.getTokenResponse();
+        }
+
+        CreateTestData currentRequest = null;
+        for (CreateTestData local : testData) {
+          if (recordedRequest.getPath().contains(local.path.toString())) {
+            currentRequest = local;
+            break;
+          }
+        }
+
+        if (currentRequest == null) {
+          return new MockResponse().setBody("Request data not found")
+              .setResponseCode(501);
+        }
+
+        if (recordedRequest.getRequestLine().contains("op=GETFILESTATUS")) {
+          return new MockResponse().setResponseCode(200).setBody(
+              TestADLResponseData
+                  .getGetFileStatusJSONResponse(currentRequest.data.length));
+        }
+
+        if (recordedRequest.getRequestLine().contains("op=OPEN")) {
+          String request = recordedRequest.getRequestLine();
+          int offset = 0;
+          int byteCount = 0;
+
+          Pattern pattern = Pattern.compile("offset=([0-9]+)");
+          Matcher matcher = pattern.matcher(request);
+          if (matcher.find()) {
+            System.out.println(matcher.group(1));
+            offset = Integer.parseInt(matcher.group(1));
+          }
+
+          pattern = Pattern.compile("length=([0-9]+)");
+          matcher = pattern.matcher(request);
+          if (matcher.find()) {
+            System.out.println(matcher.group(1));
+            byteCount = Integer.parseInt(matcher.group(1));
+          }
+
+          Buffer buf = new Buffer();
+          buf.write(currentRequest.data, offset, byteCount);
+          return new MockResponse().setResponseCode(200)
+              .setChunkedBody(buf, 4 * 1024 * 1024);
+        }
+
+        return new MockResponse().setBody("NOT SUPPORTED").setResponseCode(501);
+      }
+    });
+  }
+
+  @Before
+  public void resetHandle() {
+    commonHandle = null;
+  }
+
+  @Test
+  public void testParallelReadOnDifferentStreams()
+      throws IOException, InterruptedException, ExecutionException {
+
+    ArrayList<CreateTestData> createTestData = new ArrayList<CreateTestData>();
+
+    Random random = new Random();
+
+    for (int i = 0; i < concurrencyLevel; i++) {
+      CreateTestData testData = new CreateTestData();
+      testData
+          .set(new Path("/test/concurrentRead/" + UUID.randomUUID().toString()),
+              getRandomByteArrayData(random.nextInt(1 * 1024 * 1024)));
+      createTestData.add(testData);
+    }
+
+    setDispatcher(createTestData);
+
+    ArrayList<ReadTestData> readTestData = new ArrayList<ReadTestData>();
+    for (CreateTestData local : createTestData) {
+      ReadTestData localReadData = new ReadTestData();
+      localReadData.set(local.path, local.data, 0);
+      readTestData.add(localReadData);
+    }
+
+    runReadTest(readTestData, false);
+  }
+
+  @Test
+  public void testParallelReadOnSameStreams()
+      throws IOException, InterruptedException, ExecutionException {
+    ArrayList<CreateTestData> createTestData = new ArrayList<CreateTestData>();
+
+    Random random = new Random();
+
+    for (int i = 0; i < 1; i++) {
+      CreateTestData testData = new CreateTestData();
+      testData
+          .set(new Path("/test/concurrentRead/" + UUID.randomUUID().toString()),
+              getRandomByteArrayData(1024 * 1024));
+      createTestData.add(testData);
+    }
+
+    setDispatcher(createTestData);
+
+    ArrayList<ReadTestData> readTestData = new ArrayList<ReadTestData>();
+    ByteArrayInputStream buffered = new ByteArrayInputStream(
+        createTestData.get(0).data);
+
+    ReadTestData readInitially = new ReadTestData();
+    byte[] initialData = new byte[1024 * 1024];
+    buffered.read(initialData);
+
+    readInitially.set(createTestData.get(0).path, initialData, 0);
+    readTestData.add(readInitially);
+    runReadTest(readTestData, false);
+
+    readTestData.clear();
+
+    for (int i = 0; i < concurrencyLevel * 5; i++) {
+      ReadTestData localReadData = new ReadTestData();
+      int offset = random.nextInt((1024 * 1024) - 1);
+      int length = 1024 * 1024 - offset;
+      byte[] expectedData = new byte[length];
+      buffered.reset();
+      buffered.skip(offset);
+      buffered.read(expectedData);
+      localReadData.set(createTestData.get(0).path, expectedData, offset);
+      readTestData.add(localReadData);
+    }
+
+    runReadTest(readTestData, true);
+  }
+
+  void runReadTest(ArrayList<ReadTestData> testData, boolean useSameStream)
+      throws InterruptedException, ExecutionException {
+
+    ExecutorService executor = Executors.newFixedThreadPool(testData.size());
+    Future[] subtasks = new Future[testData.size()];
+
+    for (int i = 0; i < testData.size(); i++) {
+      subtasks[i] = executor.submit(
+          new ReadConcurrentRunnable(testData.get(i).data, testData.get(i).path,
+              testData.get(i).offset, useSameStream));
+    }
+
+    executor.shutdown();
+
+    // wait until all tasks are finished
+    executor.awaitTermination(120, TimeUnit.SECONDS);
+
+    for (int i = 0; i < testData.size(); ++i) {
+      Assert.assertTrue((Boolean) subtasks[i].get());
+    }
+  }
+
+  class ReadTestData {
+    private Path path;
+    private byte[] data;
+    private int offset;
+
+    public void set(Path filePath, byte[] dataToBeRead, int fromOffset) {
+      this.path = filePath;
+      this.data = dataToBeRead;
+      this.offset = fromOffset;
+    }
+  }
+
+  class CreateTestData {
+    private Path path;
+    private byte[] data;
+
+    public void set(Path filePath, byte[] dataToBeWritten) {
+      this.path = filePath;
+      this.data = dataToBeWritten;
+    }
+  }
+
+  class ReadConcurrentRunnable implements Callable<Boolean> {
+    private Path path;
+    private int offset;
+    private byte[] expectedData;
+    private boolean useSameStream;
+
+    public ReadConcurrentRunnable(byte[] expectedData, Path path, int offset,
+        boolean useSameStream) {
+      this.path = path;
+      this.offset = offset;
+      this.expectedData = expectedData;
+      this.useSameStream = useSameStream;
+    }
+
+    public Boolean call() throws IOException {
+      try {
+        FSDataInputStream in;
+        if (useSameStream) {
+          synchronized (lock) {
+            if (commonHandle == null) {
+              commonHandle = getMockAdlFileSystem().open(path);
+            }
+            in = commonHandle;
+          }
+        } else {
+          in = getMockAdlFileSystem().open(path);
+        }
+
+        byte[] actualData = new byte[expectedData.length];
+        in.readFully(offset, actualData);
+        Assert.assertArrayEquals("Path :" + path.toString() + " did not match.",
+            expectedData, actualData);
+        if (!useSameStream) {
+          in.close();
+        }
+      } catch (IOException e) {
+        e.printStackTrace();
+        return false;
+      }
+      return true;
+    }
+  }
+}
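
The shared-handle case in this test works because each worker uses only the positional readFully(position, buffer) overload, which reads at an absolute offset without moving the stream's seek pointer; a seek()-then-read() sequence on one shared FSDataInputStream would race between threads. A minimal sketch of that distinction (not part of this patch):

    // Positional read: reads at an absolute offset and leaves the stream's
    // file pointer untouched, which is what lets the test share one handle.
    void readAt(FSDataInputStream in, long position, byte[] buffer)
        throws IOException {
      in.readFully(position, buffer);
      // By contrast, in.seek(position) followed by in.read(buffer) mutates
      // shared stream state and is not safe on a handle shared across threads.
    }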

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c9e71382/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/hdfs/web/TestConfigurationSetting.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/hdfs/web/TestConfigurationSetting.java b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/hdfs/web/TestConfigurationSetting.java
new file mode 100644
index 0000000..9a54ec2
--- /dev/null
+++ b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/hdfs/web/TestConfigurationSetting.java
@@ -0,0 +1,112 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.hdfs.web;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.adl.TestableAdlFileSystem;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.web.oauth2.ConfCredentialBasedAccessTokenProvider;
+import org.apache.hadoop.hdfs.web.oauth2.CredentialBasedAccessTokenProvider;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+
+/**
+ * This class tests the configuration and feature-set keys required by the
+ * ADL file system.
+ */
+public class TestConfigurationSetting {
+
+  @Test
+  public void testAllConfiguration() throws URISyntaxException, IOException {
+    TestableAdlFileSystem fs = new TestableAdlFileSystem();
+    Configuration conf = new Configuration();
+    conf.set(HdfsClientConfigKeys.OAUTH_REFRESH_URL_KEY,
+        "http://localhost:1111/refresh");
+    conf.set(CredentialBasedAccessTokenProvider.OAUTH_CREDENTIAL_KEY,
+        "credential");
+    conf.set(HdfsClientConfigKeys.OAUTH_CLIENT_ID_KEY, "MY_CLIENTID");
+    conf.set(HdfsClientConfigKeys.ACCESS_TOKEN_PROVIDER_KEY,
+        ConfCredentialBasedAccessTokenProvider.class.getName());
+    conf.set(HdfsClientConfigKeys.DFS_WEBHDFS_OAUTH_ENABLED_KEY, "true");
+
+    URI uri = new URI("adl://localhost:1234");
+    fs.initialize(uri, conf);
+
+    // Default setting check
+    Assert.assertEquals(true, fs.isFeatureRedirectOff());
+    Assert.assertEquals(true, fs.isFeatureGetBlockLocationLocallyBundled());
+    Assert.assertEquals(true, fs.isFeatureConcurrentReadWithReadAhead());
+    Assert.assertEquals(false, fs.isOverrideOwnerFeatureOn());
+    Assert.assertEquals(8 * 1024 * 1024, fs.getMaxBufferSize());
+    Assert.assertEquals(2, fs.getMaxConcurrentConnection());
+
+    fs.close();
+
+    // Configuration toggle check
+    conf.set("adl.feature.override.redirection.off", "false");
+    fs.initialize(uri, conf);
+    Assert.assertEquals(false, fs.isFeatureRedirectOff());
+    fs.close();
+    conf.set("adl.feature.override.redirection.off", "true");
+    fs.initialize(uri, conf);
+    Assert.assertEquals(true, fs.isFeatureRedirectOff());
+    fs.close();
+
+    conf.set("adl.feature.override.getblocklocation.locally.bundled", "false");
+    fs.initialize(uri, conf);
+    Assert.assertEquals(false, fs.isFeatureGetBlockLocationLocallyBundled());
+    fs.close();
+    conf.set("adl.feature.override.getblocklocation.locally.bundled", "true");
+    fs.initialize(uri, conf);
+    Assert.assertEquals(true, fs.isFeatureGetBlockLocationLocallyBundled());
+    fs.close();
+
+    conf.set("adl.feature.override.readahead", "false");
+    fs.initialize(uri, conf);
+    Assert.assertEquals(false, fs.isFeatureConcurrentReadWithReadAhead());
+    fs.close();
+    conf.set("adl.feature.override.readahead", "true");
+    fs.initialize(uri, conf);
+    Assert.assertEquals(true, fs.isFeatureConcurrentReadWithReadAhead());
+    fs.close();
+
+    conf.set("adl.feature.override.readahead.max.buffersize", "101");
+    fs.initialize(uri, conf);
+    Assert.assertEquals(101, fs.getMaxBufferSize());
+    fs.close();
+    conf.set("adl.feature.override.readahead.max.buffersize", "12134565");
+    fs.initialize(uri, conf);
+    Assert.assertEquals(12134565, fs.getMaxBufferSize());
+    fs.close();
+
+    conf.set("adl.debug.override.localuserasfileowner", "true");
+    fs.initialize(uri, conf);
+    Assert.assertEquals(true, fs.isOverrideOwnerFeatureOn());
+    fs.close();
+    conf.set("adl.debug.override.localuserasfileowner", "false");
+    fs.initialize(uri, conf);
+    Assert.assertEquals(false, fs.isOverrideOwnerFeatureOn());
+    fs.close();
+  }
+}
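
The same "adl.feature.override.*" keys exercised above can be set by client code before the file system is created. A minimal sketch (not part of this patch; the account URI is a placeholder, and the OAuth settings shown in contract-test-options.xml further below would also be needed for a real connection):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    public class AdlFeatureOverrideExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Feature keys exercised by TestConfigurationSetting above.
        conf.setBoolean("adl.feature.override.readahead", true);
        conf.setInt("adl.feature.override.readahead.max.buffersize",
            8 * 1024 * 1024);
        conf.setInt("adl.feature.override.readahead.max.concurrent.connection",
            2);
        // Placeholder account URI.
        FileSystem fs = FileSystem.get(
            new URI("adl://youraccount.azuredatalakestore.net:443/"), conf);
        System.out.println("Initialized " + fs.getUri());
        fs.close();
      }
    }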

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c9e71382/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/hdfs/web/TestSplitSizeCalculation.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/hdfs/web/TestSplitSizeCalculation.java b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/hdfs/web/TestSplitSizeCalculation.java
new file mode 100644
index 0000000..42a4701
--- /dev/null
+++ b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/hdfs/web/TestSplitSizeCalculation.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.hdfs.web;
+
+import com.squareup.okhttp.mockwebserver.MockResponse;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.adl.TestADLResponseData;
+import org.apache.hadoop.fs.common.AdlMockWebServer;
+import org.apache.hadoop.hdfs.web.PrivateAzureDataLakeFileSystem.BatchByteArrayInputStream;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.net.URISyntaxException;
+import java.net.URL;
+
+/**
+ * This class tests the split-size calculation performed when the read-ahead
+ * buffer is initialized, based on the data size and the configured buffer
+ * size and concurrency level.
+ */
+public class TestSplitSizeCalculation extends AdlMockWebServer {
+
+  @Test
+  public void testSplitSizeCalculations()
+      throws URISyntaxException, IOException {
+
+    getMockServer().enqueue(new MockResponse().setResponseCode(200).setBody(
+        TestADLResponseData.getGetFileStatusJSONResponse(128 * 1024 * 1024)));
+    getMockServer().enqueue(new MockResponse().setResponseCode(200).setBody(
+        TestADLResponseData.getGetFileStatusJSONResponse(128 * 1024 * 1024)));
+    getMockServer().enqueue(new MockResponse().setResponseCode(200).setBody(
+        TestADLResponseData.getGetFileStatusJSONResponse(128 * 1024 * 1024)));
+    getMockServer().enqueue(new MockResponse().setResponseCode(200).setBody(
+        TestADLResponseData.getGetFileStatusJSONResponse(128 * 1024 * 1024)));
+
+    URL url = getMockServer().getUrl("");
+
+    BatchByteArrayInputStream stream = getMockAdlFileSystem()
+        .new BatchByteArrayInputStream(url,
+        new Path("/test1/test2"), 16 * 1024 * 1024, 4);
+    Assert.assertEquals(1, stream.getSplitSize(1 * 1024 * 1024));
+    Assert.assertEquals(1, stream.getSplitSize(2 * 1024 * 1024));
+    Assert.assertEquals(1, stream.getSplitSize(3 * 1024 * 1024));
+    Assert.assertEquals(1, stream.getSplitSize(4 * 1024 * 1024));
+    Assert.assertEquals(1, stream.getSplitSize(5 * 1024 * 1024));
+    Assert.assertEquals(1, stream.getSplitSize(6 * 1024 * 1024));
+    Assert.assertEquals(1, stream.getSplitSize(7 * 1024 * 1024));
+    Assert.assertEquals(2, stream.getSplitSize(8 * 1024 * 1024));
+    Assert.assertEquals(4, stream.getSplitSize(16 * 1024 * 1024));
+    Assert.assertEquals(3, stream.getSplitSize(12 * 1024 * 1024));
+    Assert.assertEquals(4, stream.getSplitSize(102 * 1024 * 1024));
+    Assert.assertEquals(1, stream.getSplitSize(102));
+    stream.close();
+
+    stream = getMockAdlFileSystem().new BatchByteArrayInputStream(url,
+        new Path("/test1/test2"), 4 * 1024 * 1024, 4);
+    Assert.assertEquals(1, stream.getSplitSize(1 * 1024 * 1024));
+    Assert.assertEquals(1, stream.getSplitSize(2 * 1024 * 1024));
+    Assert.assertEquals(1, stream.getSplitSize(3 * 1024 * 1024));
+    Assert.assertEquals(1, stream.getSplitSize(4 * 1024 * 1024));
+    Assert.assertEquals(1, stream.getSplitSize(5 * 1024 * 1024));
+    Assert.assertEquals(1, stream.getSplitSize(8 * 1024 * 1024));
+    Assert.assertEquals(1, stream.getSplitSize(5 * 1024 * 1024));
+    Assert.assertEquals(1, stream.getSplitSize(6 * 1024 * 1024));
+    Assert.assertEquals(1, stream.getSplitSize(7 * 1024 * 1024));
+    Assert.assertEquals(1, stream.getSplitSize(16 * 1024 * 1024));
+    Assert.assertEquals(1, stream.getSplitSize(12 * 1024 * 1024));
+    Assert.assertEquals(1, stream.getSplitSize(102 * 1024 * 1024));
+    Assert.assertEquals(1, stream.getSplitSize(102));
+    stream.close();
+
+    stream = getMockAdlFileSystem().new BatchByteArrayInputStream(url,
+        new Path("/test1/test2"), 16 * 1024 * 1024, 2);
+    Assert.assertEquals(1, stream.getSplitSize(1 * 1024 * 1024));
+    Assert.assertEquals(1, stream.getSplitSize(2 * 1024 * 1024));
+    Assert.assertEquals(1, stream.getSplitSize(3 * 1024 * 1024));
+    Assert.assertEquals(1, stream.getSplitSize(4 * 1024 * 1024));
+    Assert.assertEquals(1, stream.getSplitSize(5 * 1024 * 1024));
+    Assert.assertEquals(1, stream.getSplitSize(5 * 1024 * 1024));
+    Assert.assertEquals(1, stream.getSplitSize(6 * 1024 * 1024));
+    Assert.assertEquals(1, stream.getSplitSize(7 * 1024 * 1024));
+    Assert.assertEquals(2, stream.getSplitSize(8 * 1024 * 1024));
+    Assert.assertEquals(2, stream.getSplitSize(16 * 1024 * 1024));
+    Assert.assertEquals(2, stream.getSplitSize(12 * 1024 * 1024));
+    Assert.assertEquals(2, stream.getSplitSize(102 * 1024 * 1024));
+    Assert.assertEquals(1, stream.getSplitSize(102));
+    stream.close();
+
+    stream = getMockAdlFileSystem().new BatchByteArrayInputStream(url,
+        new Path("/test1/test2"), 8 * 1024 * 1024, 2);
+    Assert.assertEquals(1, stream.getSplitSize(1 * 1024 * 1024));
+    Assert.assertEquals(1, stream.getSplitSize(2 * 1024 * 1024));
+    Assert.assertEquals(1, stream.getSplitSize(3 * 1024 * 1024));
+    Assert.assertEquals(1, stream.getSplitSize(4 * 1024 * 1024));
+    Assert.assertEquals(1, stream.getSplitSize(5 * 1024 * 1024));
+    Assert.assertEquals(1, stream.getSplitSize(6 * 1024 * 1024));
+    Assert.assertEquals(1, stream.getSplitSize(7 * 1024 * 1024));
+    Assert.assertEquals(2, stream.getSplitSize(8 * 1024 * 1024));
+    Assert.assertEquals(2, stream.getSplitSize(16 * 1024 * 1024));
+    Assert.assertEquals(2, stream.getSplitSize(12 * 1024 * 1024));
+    Assert.assertEquals(2, stream.getSplitSize(102 * 1024 * 1024));
+    Assert.assertEquals(1, stream.getSplitSize(102));
+    stream.close();
+  }
+}
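
Taken together, the asserted values are consistent with a split-size rule of roughly min(maxConcurrentConnection, bufferSize / 4 MB, max(1, contentLength / 4 MB)) using integer division; the 4 MB chunk unit is inferred from these numbers, not stated anywhere in the patch. A sketch of that reading (not part of this patch):

    // Model that reproduces the asserted split sizes, assuming a 4 MB chunk
    // unit inferred from the assertions above (not from the implementation).
    static int modelSplitSize(long contentLength, long bufferSize,
        int maxConcurrentConnection) {
      long chunk = 4L * 1024 * 1024;
      long bySize = Math.max(1, contentLength / chunk);
      long byBuffer = Math.max(1, bufferSize / chunk);
      return (int) Math.min(maxConcurrentConnection,
          Math.min(byBuffer, bySize));
    }
    // e.g. modelSplitSize(12 * 1024 * 1024, 16 * 1024 * 1024, 4) == 3 and
    //      modelSplitSize(8 * 1024 * 1024, 4 * 1024 * 1024, 4) == 1.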

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c9e71382/hadoop-tools/hadoop-azure-datalake/src/test/resources/adls.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/resources/adls.xml b/hadoop-tools/hadoop-azure-datalake/src/test/resources/adls.xml
new file mode 100644
index 0000000..f72de16
--- /dev/null
+++ b/hadoop-tools/hadoop-azure-datalake/src/test/resources/adls.xml
@@ -0,0 +1,139 @@
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<configuration>
+  <property>
+    <name>fs.contract.test.root-tests-enabled</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.contract.test.supports-concat</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.contract.rename-returns-false-if-source-missing</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.contract.test.random-seek-count</name>
+    <value>10</value>
+  </property>
+
+  <property>
+    <name>fs.contract.is-case-sensitive</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.contract.rename-returns-true-if-dest-exists</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.contract.rename-returns-true-if-source-missing</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.contract.rename-creates-dest-dirs</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.contract.rename-remove-dest-if-empty-dir</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.contract.supports-settimes</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.contract.supports-append</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.contract.supports-atomic-directory-delete</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.contract.supports-atomic-rename</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.contract.supports-block-locality</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.contract.supports-concat</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.contract.supports-seek</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.contract.supports-seek-on-closed-file</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.contract.rejects-seek-past-eof</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.contract.supports-available-on-closed-file</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.contract.supports-strict-exceptions</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <name>fs.contract.supports-unix-permissions</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.contract.rename-overwrites-dest</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.contract.supports-append</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.azure.enable.append.support</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.contract.supports-getfilestatus</name>
+    <value>true</value>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c9e71382/hadoop-tools/hadoop-azure-datalake/src/test/resources/contract-test-options.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/resources/contract-test-options.xml b/hadoop-tools/hadoop-azure-datalake/src/test/resources/contract-test-options.xml
new file mode 100644
index 0000000..160fba4
--- /dev/null
+++ b/hadoop-tools/hadoop-azure-datalake/src/test/resources/contract-test-options.xml
@@ -0,0 +1,57 @@
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<configuration>
+  <property>
+    <name>dfs.webhdfs.oauth2.refresh.token.expires.ms.since.epoch</name>
+    <value>0</value>
+  </property>
+  <property>
+    <name>dfs.webhdfs.oauth2.credential</name>
+    <value>bearer.and.refresh.token</value>
+  </property>
+  <property>
+    <name>dfs.webhdfs.oauth2.refresh.url</name>
+    <value>https://login.windows.net/common/oauth2/token/</value>
+  </property>
+  <property>
+    <name>dfs.webhdfs.oauth2.access.token.provider</name>
+    <value>
+      org.apache.hadoop.fs.adl.oauth2.CachedRefreshTokenBasedAccessTokenProvider
+    </value>
+  </property>
+  <property>
+    <name>dfs.webhdfs.oauth2.enabled</name>
+    <value>true</value>
+  </property>
+  <!--USER INPUT REQUIRED-->
+  <property>
+    <name>dfs.webhdfs.oauth2.client.id</name>
+    <value>ADD CLIENT ID</value>
+  </property>
+  <!--USER INPUT REQUIRED-->
+  <property>
+    <name>dfs.webhdfs.oauth2.refresh.token</name>
+    <value>ADD REFRESH TOKEN</value>
+  </property>
+  <!--USER INPUT REQUIRED-->
+  <property>
+    <name>fs.defaultFS</name>
+    <value>adl://urAdlAccountGoesHere.azuredatalakestore.net:443/</value>
+  </property>
+  <!--USER INPUT REQUIRED-->
+  <property>
+    <name>dfs.adl.test.contract.enable</name>
+    <value>false</value>
+  </property>
+</configuration>
\ No newline at end of file


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org