Posted to common-commits@hadoop.apache.org by as...@apache.org on 2016/12/03 11:18:32 UTC

[1/8] hadoop git commit: YARN-5915. ATS 1.5 FileSystemTimelineWriter causes flush() to be called after every event write. Contributed by Atul Sikaria

Repository: hadoop
Updated Branches:
  refs/heads/YARN-5085 c87b3a448 -> f885160f4


YARN-5915. ATS 1.5 FileSystemTimelineWriter causes flush() to be called after every event write. Contributed by Atul Sikaria


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f304ccae
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f304ccae
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f304ccae

Branch: refs/heads/YARN-5085
Commit: f304ccae3c2e0849b0b0b24c4bfe7a3a1ec2bb94
Parents: c87b3a4
Author: Jason Lowe <jl...@apache.org>
Authored: Fri Dec 2 16:54:15 2016 +0000
Committer: Jason Lowe <jl...@apache.org>
Committed: Fri Dec 2 16:54:15 2016 +0000

----------------------------------------------------------------------
 .../hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java   | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f304ccae/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java
index 54b4912..fc3385b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java
@@ -63,6 +63,7 @@ import com.fasterxml.jackson.core.JsonFactory;
 import com.fasterxml.jackson.core.JsonGenerator;
 import com.fasterxml.jackson.core.util.MinimalPrettyPrinter;
 import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.SerializationFeature;
 import com.fasterxml.jackson.databind.type.TypeFactory;
 import com.fasterxml.jackson.module.jaxb.JaxbAnnotationIntrospector;
 import com.sun.jersey.api.client.Client;
@@ -276,6 +277,7 @@ public class FileSystemTimelineWriter extends TimelineWriter{
     mapper.setAnnotationIntrospector(
         new JaxbAnnotationIntrospector(TypeFactory.defaultInstance()));
     mapper.setSerializationInclusion(JsonInclude.Include.NON_NULL);
+    mapper.configure(SerializationFeature.FLUSH_AFTER_WRITE_VALUE, false);
     return mapper;
   }
 
@@ -356,6 +358,7 @@ public class FileSystemTimelineWriter extends TimelineWriter{
 
     public void flush() throws IOException {
       if (stream != null) {
+        jsonGenerator.flush();
         stream.hflush();
       }
     }
@@ -368,8 +371,6 @@ public class FileSystemTimelineWriter extends TimelineWriter{
       this.stream = createLogFileStream(fs, logPath);
       this.jsonGenerator = new JsonFactory().createGenerator(stream);
       this.jsonGenerator.setPrettyPrinter(new MinimalPrettyPrinter("\n"));
-      this.jsonGenerator.configure(
-          JsonGenerator.Feature.FLUSH_PASSED_TO_STREAM, false);
       this.lastModifiedTime = Time.monotonicNow();
     }
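
The net effect of the two hunks above: Jackson no longer flushes after every entity write, and data reaches HDFS only when the writer's own flush() runs (stream.hflush() pushes data out to the datanodes, so a sync per event was expensive). A minimal standalone sketch of the same ObjectMapper/JsonGenerator wiring, illustrative only and not part of the commit:

    import java.io.StringWriter;
    import com.fasterxml.jackson.core.JsonFactory;
    import com.fasterxml.jackson.core.JsonGenerator;
    import com.fasterxml.jackson.core.util.MinimalPrettyPrinter;
    import com.fasterxml.jackson.databind.ObjectMapper;
    import com.fasterxml.jackson.databind.SerializationFeature;

    public class FlushDemo {
      public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        // As in the mapper configuration above: no flush per value written.
        mapper.configure(SerializationFeature.FLUSH_AFTER_WRITE_VALUE, false);

        StringWriter out = new StringWriter();
        JsonGenerator gen = new JsonFactory().createGenerator(out);
        gen.setPrettyPrinter(new MinimalPrettyPrinter("\n"));

        mapper.writeValue(gen, "event-1");  // stays in the generator's buffer
        mapper.writeValue(gen, "event-2");  // still nothing written to 'out'
        gen.flush();                        // one flush for the whole batch
        System.out.println(out);
      }
    }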
 




[3/8] hadoop git commit: HDFS-11156. Add new op GETFILEBLOCKLOCATIONS to WebHDFS REST API. Contributed by Weiwei Yang

Posted by as...@apache.org.
HDFS-11156. Add new op GETFILEBLOCKLOCATIONS to WebHDFS REST API. Contributed by Weiwei Yang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c7ff34f8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c7ff34f8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c7ff34f8

Branch: refs/heads/YARN-5085
Commit: c7ff34f8dcca3a2024230c5383abd9299daa1b20
Parents: 0cfd7ad
Author: Mingliang Liu <li...@apache.org>
Authored: Fri Dec 2 11:10:09 2016 -0800
Committer: Mingliang Liu <li...@apache.org>
Committed: Fri Dec 2 11:10:13 2016 -0800

----------------------------------------------------------------------
 .../apache/hadoop/hdfs/web/JsonUtilClient.java  | 32 ++++++++++++
 .../hadoop/hdfs/web/WebHdfsFileSystem.java      | 13 +++--
 .../hadoop/hdfs/web/resources/GetOpParam.java   | 12 ++++-
 .../web/resources/NamenodeWebHdfsMethods.java   | 17 +++++++
 .../org/apache/hadoop/hdfs/web/JsonUtil.java    | 30 ++++++++++++
 .../org/apache/hadoop/hdfs/web/TestWebHDFS.java | 51 ++++++++++++++++++++
 6 files changed, 151 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7ff34f8/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index a75f4f1..12899f4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -22,6 +22,7 @@ import com.fasterxml.jackson.databind.ObjectReader;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
+import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.ContentSummary.Builder;
 import org.apache.hadoop.fs.FileChecksum;
@@ -588,4 +589,35 @@ class JsonUtilClient {
         lastLocatedBlock, isLastBlockComplete, null, null);
   }
 
+  /** Convert a Json map to BlockLocation. **/
+  static BlockLocation toBlockLocation(Map<String, Object> m)
+      throws IOException{
+    long length = ((Number) m.get("length")).longValue();
+    long offset = ((Number) m.get("offset")).longValue();
+    boolean corrupt = Boolean.
+        getBoolean(m.get("corrupt").toString());
+    String[] storageIds = toStringArray(getList(m, "storageIds"));
+    String[] cachedHosts = toStringArray(getList(m, "cachedHosts"));
+    String[] hosts = toStringArray(getList(m, "hosts"));
+    String[] names = toStringArray(getList(m, "names"));
+    String[] topologyPaths = toStringArray(getList(m, "topologyPaths"));
+    StorageType[] storageTypes = toStorageTypeArray(
+        getList(m, "storageTypes"));
+    return new BlockLocation(names, hosts, cachedHosts,
+        topologyPaths, storageIds, storageTypes,
+        offset, length, corrupt);
+  }
+
+  static String[] toStringArray(List<?> list) {
+    if (list == null) {
+      return null;
+    } else {
+      final String[] array = new String[list.size()];
+      int i = 0;
+      for (Object object : list) {
+        array[i++] = object.toString();
+      }
+      return array;
+    }
+  }
 }
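
One caveat in toBlockLocation() above: Boolean.getBoolean(String) looks up a JVM system property of that name rather than parsing the text, so the "corrupt" field effectively always decodes to false. A decoder written from scratch would normally use Boolean.parseBoolean; a minimal hedged sketch:

    import java.util.Map;

    final class BlockLocationJsonSketch {
      private BlockLocationJsonSketch() {}

      // Parses the "corrupt" field of the JSON map shown above.
      static boolean readCorrupt(Map<String, Object> m) {
        return Boolean.parseBoolean(String.valueOf(m.get("corrupt")));
      }
    }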

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7ff34f8/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 23804b7..e82e9f6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -1610,13 +1610,20 @@ public class WebHdfsFileSystem extends FileSystem
     statistics.incrementReadOps(1);
     storageStatistics.incrementOpCounter(OpType.GET_FILE_BLOCK_LOCATIONS);
 
-    final HttpOpParam.Op op = GetOpParam.Op.GET_BLOCK_LOCATIONS;
+    final HttpOpParam.Op op = GetOpParam.Op.GETFILEBLOCKLOCATIONS;
     return new FsPathResponseRunner<BlockLocation[]>(op, p,
         new OffsetParam(offset), new LengthParam(length)) {
       @Override
+      @SuppressWarnings("unchecked")
       BlockLocation[] decodeResponse(Map<?,?> json) throws IOException {
-        return DFSUtilClient.locatedBlocks2Locations(
-            JsonUtilClient.toLocatedBlocks(json));
+        List<?> list = JsonUtilClient.getList(json, "BlockLocations");
+        BlockLocation[] locations = new BlockLocation[list.size()];
+        for(int i=0; i<locations.length; i++) {
+          BlockLocation bl = JsonUtilClient.
+              toBlockLocation((Map<String, Object>) list.get(i));
+          locations[i] = bl;
+        }
+        return locations;
       }
     }.run();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7ff34f8/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
index 635e6d7..ccb0bb3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
@@ -33,8 +33,18 @@ public class GetOpParam extends HttpOpParam<GetOpParam.Op> {
     GETHOMEDIRECTORY(false, HttpURLConnection.HTTP_OK),
     GETDELEGATIONTOKEN(false, HttpURLConnection.HTTP_OK, true),
 
-    /** GET_BLOCK_LOCATIONS is a private unstable op. */
+    /**
+     * GET_BLOCK_LOCATIONS is a private/stable API op. It returns a
+     * {@link org.apache.hadoop.hdfs.protocol.LocatedBlocks}
+     * json object.
+     */
     GET_BLOCK_LOCATIONS(false, HttpURLConnection.HTTP_OK),
+    /**
+     * GETFILEBLOCKLOCATIONS is the public op that complies with
+     * {@link org.apache.hadoop.fs.FileSystem#getFileBlockLocations}
+     * interface.
+     */
+    GETFILEBLOCKLOCATIONS(false, HttpURLConnection.HTTP_OK),
     GETACLSTATUS(false, HttpURLConnection.HTTP_OK),
     GETXATTRS(false, HttpURLConnection.HTTP_OK),
     GETTRASHROOT(false, HttpURLConnection.HTTP_OK),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7ff34f8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index 5d9b12a..107d4ed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -54,6 +54,7 @@ import javax.ws.rs.core.StreamingOutput;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -975,6 +976,22 @@ public class NamenodeWebHdfsMethods {
         return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
       }
     }
+    case GETFILEBLOCKLOCATIONS:
+    {
+      final long offsetValue = offset.getValue();
+      final Long lengthValue = length.getValue();
+
+      try (final FileSystem fs = FileSystem.get(conf != null ?
+          conf : new Configuration())) {
+        BlockLocation[] locations = fs.getFileBlockLocations(
+            new org.apache.hadoop.fs.Path(fullpath),
+            offsetValue,
+            lengthValue != null? lengthValue: Long.MAX_VALUE);
+        final String js = JsonUtil.toJsonString("BlockLocations",
+            JsonUtil.toJsonArray(locations));
+        return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
+      }
+    }
     case GET_BLOCK_LOCATIONS:
     {
       final long offsetValue = offset.getValue();
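
For orientation, the new case serves requests of this shape (hypothetical host; 50070 is the default NameNode HTTP port, and offset/length are optional):

    http://nn.example.com:50070/webhdfs/v1/user/alice/file?op=GETFILEBLOCKLOCATIONS&offset=0&length=1048576

The response body is the "BlockLocations" JSON document built by JsonUtil below.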

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7ff34f8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
index a0dadbd..affa861 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
@@ -436,4 +436,34 @@ public class JsonUtil {
     return MAPPER.writeValueAsString(obj);
   }
 
+  public static Object[] toJsonArray(BlockLocation[] locations)
+      throws IOException {
+    if(locations == null) {
+      return null;
+    }
+    Object[] blockLocations = new Object[locations.length];
+    for(int i=0; i<locations.length; i++) {
+      blockLocations[i] = toJsonMap(locations[i]);
+    }
+    return blockLocations;
+  }
+
+  public static Map<String, Object> toJsonMap(
+      final BlockLocation blockLocation) throws IOException {
+    if (blockLocation == null) {
+      return null;
+    }
+
+    final Map<String, Object> m = new TreeMap<String, Object>();
+    m.put("length", blockLocation.getLength());
+    m.put("offset", blockLocation.getOffset());
+    m.put("corrupt", blockLocation.isCorrupt());
+    m.put("storageTypes", toJsonArray(blockLocation.getStorageTypes()));
+    m.put("storageIds", blockLocation.getStorageIds());
+    m.put("cachedHosts", blockLocation.getCachedHosts());
+    m.put("hosts", blockLocation.getHosts());
+    m.put("names", blockLocation.getNames());
+    m.put("topologyPaths", blockLocation.getTopologyPaths());
+    return m;
+  }
 }
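
Since toJsonMap() fills a TreeMap, keys serialize alphabetically, so a GETFILEBLOCKLOCATIONS response looks roughly like this (illustrative values):

    {"BlockLocations": [
      {
        "cachedHosts": [],
        "corrupt": false,
        "hosts": ["dn1.example.com"],
        "length": 134217728,
        "names": ["192.168.1.101:50010"],
        "offset": 0,
        "storageIds": ["DS-b0a49f04-0002"],
        "storageTypes": ["DISK"],
        "topologyPaths": ["/default-rack/192.168.1.101:50010"]
      }
    ]}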

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7ff34f8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
index 5386a45..82b708a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
@@ -37,6 +37,7 @@ import java.net.URI;
 import java.net.URISyntaxException;
 import java.net.URL;
 import java.security.PrivilegedExceptionAction;
+import java.util.Map;
 import java.util.Random;
 
 import org.apache.commons.io.IOUtils;
@@ -88,6 +89,8 @@ import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.internal.util.reflection.Whitebox;
 
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.type.MapType;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyInt;
 import static org.mockito.Mockito.doReturn;
@@ -852,6 +855,46 @@ public class TestWebHDFS {
         Assert.assertTrue(storageTypes != null && storageTypes.length > 0 &&
             storageTypes[0] == StorageType.DISK);
       }
+
+      // Query webhdfs REST API to get block locations
+      InetSocketAddress addr = cluster.getNameNode().getHttpAddress();
+      URL url = new URL("http", addr.getHostString(), addr.getPort(),
+          WebHdfsFileSystem.PATH_PREFIX + "/foo?op=GETFILEBLOCKLOCATIONS");
+      LOG.info("Sending GETFILEBLOCKLOCATIONS request " + url);
+
+      String response = getResponse(url, "GET");
+      LOG.info("The output of GETFILEBLOCKLOCATIONS request " + response);
+      // Expected output from rest API
+      // { "BlockLocations" : [{Block_Location_Json}, ...] }
+      ObjectMapper mapper = new ObjectMapper();
+      MapType jsonType = mapper.getTypeFactory().constructMapType(
+          Map.class,
+          String.class,
+          BlockLocation[].class);
+      Map<String, BlockLocation[]> jsonMap = mapper.readValue(response,
+          jsonType);
+      BlockLocation[] array = jsonMap.get("BlockLocations");
+
+      for(int i=0; i<locations.length; i++) {
+        BlockLocation raw = locations[i];
+        BlockLocation rest = array[i];
+        Assert.assertEquals(raw.getLength(),
+            rest.getLength());
+        Assert.assertEquals(raw.getOffset(),
+            rest.getOffset());
+        Assert.assertArrayEquals(raw.getCachedHosts(),
+            rest.getCachedHosts());
+        Assert.assertArrayEquals(raw.getHosts(),
+            rest.getHosts());
+        Assert.assertArrayEquals(raw.getNames(),
+            rest.getNames());
+        Assert.assertArrayEquals(raw.getStorageIds(),
+            rest.getStorageIds());
+        Assert.assertArrayEquals(raw.getTopologyPaths(),
+            rest.getTopologyPaths());
+        Assert.assertArrayEquals(raw.getStorageTypes(),
+            rest.getStorageTypes());
+      }
     } finally {
       if (cluster != null) {
         cluster.shutdown();
@@ -859,6 +902,14 @@ public class TestWebHDFS {
     }
   }
 
+  private static String getResponse(URL url, String httpRequestType)
+      throws IOException {
+    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+    conn.setRequestMethod(httpRequestType);
+    conn.setInstanceFollowRedirects(false);
+    return IOUtils.toString(conn.getInputStream());
+  }
+
   private WebHdfsFileSystem createWebHDFSAsTestUser(final Configuration conf,
       final URI uri, final String userName) throws Exception {
 




[6/8] hadoop git commit: HADOOP-13855. Fix a couple of the s3a statistic names to be consistent with the rest. Contributed by Steve Loughran

Posted by as...@apache.org.
HADOOP-13855. Fix a couple of the s3a statistic names to be consistent with the rest. Contributed by Steve Loughran


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/51211a7d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/51211a7d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/51211a7d

Branch: refs/heads/YARN-5085
Commit: 51211a7d7aa342b93951fe61da3f624f0652e101
Parents: 5bd18c4
Author: Mingliang Liu <li...@apache.org>
Authored: Fri Dec 2 13:48:15 2016 -0800
Committer: Mingliang Liu <li...@apache.org>
Committed: Fri Dec 2 14:01:42 2016 -0800

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/fs/s3a/Statistic.java        | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/51211a7d/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java
index 36ec50b..789c6d7 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java
@@ -92,12 +92,12 @@ public enum Statistic {
       "Count of times the TCP stream was aborted"),
   STREAM_BACKWARD_SEEK_OPERATIONS("stream_backward_seek_operations",
       "Number of executed seek operations which went backwards in a stream"),
-  STREAM_CLOSED("streamClosed", "Count of times the TCP stream was closed"),
+  STREAM_CLOSED("stream_closed", "Count of times the TCP stream was closed"),
   STREAM_CLOSE_OPERATIONS("stream_close_operations",
       "Total count of times an attempt to close a data stream was made"),
   STREAM_FORWARD_SEEK_OPERATIONS("stream_forward_seek_operations",
       "Number of executed seek operations which went forward in a stream"),
-  STREAM_OPENED("streamOpened",
+  STREAM_OPENED("stream_opened",
       "Total count of times an input stream to object store was opened"),
   STREAM_READ_EXCEPTIONS("stream_read_exceptions",
       "Number of seek operations invoked on input streams"),




[2/8] hadoop git commit: MAPREDUCE-6815. Fix flaky TestKill.testKillTask(). Contributed by Haibo Chen

Posted by as...@apache.org.
MAPREDUCE-6815. Fix flaky TestKill.testKillTask(). Contributed by Haibo Chen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0cfd7ad2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0cfd7ad2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0cfd7ad2

Branch: refs/heads/YARN-5085
Commit: 0cfd7ad21f4457513ed3416e5d77f3123bfe9da0
Parents: f304cca
Author: Jason Lowe <jl...@apache.org>
Authored: Fri Dec 2 17:22:11 2016 +0000
Committer: Jason Lowe <jl...@apache.org>
Committed: Fri Dec 2 17:22:11 2016 +0000

----------------------------------------------------------------------
 .../java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java | 1 +
 .../src/test/java/org/apache/hadoop/mapreduce/v2/app/TestKill.java | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0cfd7ad2/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java
index 34d9f0e..8a6fa30 100755
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java
@@ -259,6 +259,7 @@ public abstract class TaskImpl implements Task, EventHandler<TaskEvent> {
     // d. TA processes TA_KILL event and sends T_ATTEMPT_KILLED to the task.
     .addTransition(TaskStateInternal.KILLED, TaskStateInternal.KILLED,
         EnumSet.of(TaskEventType.T_KILL,
+                   TaskEventType.T_SCHEDULE,
                    TaskEventType.T_ATTEMPT_KILLED,
                    TaskEventType.T_ADD_SPEC_ATTEMPT))
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0cfd7ad2/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestKill.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestKill.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestKill.java
index 0714647..f681cf8 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestKill.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestKill.java
@@ -105,7 +105,7 @@ public class TestKill {
     Job job = app.submit(new Configuration());
     
    //wait and validate for Job to become RUNNING
-    app.waitForState(job, JobState.RUNNING);
+    app.waitForInternalState((JobImpl) job, JobStateInternal.RUNNING);
     Map<TaskId,Task> tasks = job.getTasks();
     Assert.assertEquals("No of tasks is not correct", 2, 
         tasks.size());




[4/8] hadoop git commit: HADOOP-13857. S3AUtils.translateException to map (wrapped) InterruptedExceptions to InterruptedIOEs. Contributed by Steve Loughran

Posted by as...@apache.org.
HADOOP-13857. S3AUtils.translateException to map (wrapped) InterruptedExceptions to InterruptedIOEs. Contributed by Steve Loughran


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2ff84a00
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2ff84a00
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2ff84a00

Branch: refs/heads/YARN-5085
Commit: 2ff84a00405e977b1fd791cfb974244580dd5ae8
Parents: c7ff34f
Author: Mingliang Liu <li...@apache.org>
Authored: Fri Dec 2 13:36:04 2016 -0800
Committer: Mingliang Liu <li...@apache.org>
Committed: Fri Dec 2 13:36:04 2016 -0800

----------------------------------------------------------------------
 .../java/org/apache/hadoop/fs/s3a/S3AUtils.java | 23 ++++++++++++
 .../fs/s3a/TestS3AExceptionTranslation.java     | 38 ++++++++++++++++++++
 2 files changed, 61 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ff84a00/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
index 49f8862..dedbfd4 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
@@ -40,6 +40,7 @@ import org.slf4j.Logger;
 import java.io.EOFException;
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.io.InterruptedIOException;
 import java.lang.reflect.Constructor;
 import java.lang.reflect.Method;
 import java.lang.reflect.Modifier;
@@ -113,6 +114,10 @@ public final class S3AUtils {
         path != null ? (" on " + path) : "",
         exception);
     if (!(exception instanceof AmazonServiceException)) {
+      if (containsInterruptedException(exception)) {
+        return (IOException)new InterruptedIOException(message)
+            .initCause(exception);
+      }
       return new AWSClientIOException(message, exception);
     } else {
 
@@ -195,6 +200,24 @@ public final class S3AUtils {
   }
 
   /**
+   * Recurse down the exception loop looking for any inner details about
+   * an interrupted exception.
+   * @param thrown exception thrown
+   * @return true if down the execution chain the operation was an interrupt
+   */
+  static boolean containsInterruptedException(Throwable thrown) {
+    if (thrown == null) {
+      return false;
+    }
+    if (thrown instanceof InterruptedException ||
+        thrown instanceof InterruptedIOException) {
+      return true;
+    }
+    // tail recurse
+    return containsInterruptedException(thrown.getCause());
+  }
+
+  /**
    * Get low level details of an amazon exception for logging; multi-line.
    * @param e exception
    * @return string details

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ff84a00/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AExceptionTranslation.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AExceptionTranslation.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AExceptionTranslation.java
index a7dafa0..e548ac2 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AExceptionTranslation.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AExceptionTranslation.java
@@ -25,9 +25,12 @@ import static org.junit.Assert.*;
 
 import java.io.EOFException;
 import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InterruptedIOException;
 import java.nio.file.AccessDeniedException;
 import java.util.Collections;
 import java.util.Map;
+import java.util.concurrent.ExecutionException;
 
 import com.amazonaws.AmazonClientException;
 import com.amazonaws.AmazonServiceException;
@@ -124,4 +127,39 @@ public class TestS3AExceptionTranslation {
     return verifyExceptionClass(clazz,
         translateException("test", "/", exception));
   }
+
+  private void assertContainsInterrupted(boolean expected, Throwable thrown)
+      throws Throwable {
+    if (containsInterruptedException(thrown) != expected) {
+      throw thrown;
+    }
+  }
+
+  @Test
+  public void testInterruptExceptionDetecting() throws Throwable {
+    InterruptedException interrupted = new InterruptedException("irq");
+    assertContainsInterrupted(true, interrupted);
+    IOException ioe = new IOException("ioe");
+    assertContainsInterrupted(false, ioe);
+    assertContainsInterrupted(true, ioe.initCause(interrupted));
+    assertContainsInterrupted(true,
+        new InterruptedIOException("ioirq"));
+  }
+
+  @Test(expected = InterruptedIOException.class)
+  public void testExtractInterrupted() throws Throwable {
+    throw extractException("", "",
+        new ExecutionException(
+            new AmazonClientException(
+                new InterruptedException(""))));
+  }
+
+  @Test(expected = InterruptedIOException.class)
+  public void testExtractInterruptedIO() throws Throwable {
+    throw extractException("", "",
+        new ExecutionException(
+            new AmazonClientException(
+              new InterruptedIOException(""))));
+  }
+
 }
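
A hedged sketch of what the change buys callers (translateException as defined in S3AUtils above; the wrapped exception mirrors the test cases):

    import java.io.IOException;
    import java.io.InterruptedIOException;
    import com.amazonaws.AmazonClientException;
    import org.apache.hadoop.fs.s3a.S3AUtils;

    public class InterruptTranslationDemo {
      public static void main(String[] args) {
        AmazonClientException wrapped =
            new AmazonClientException(new InterruptedException("shutdown"));
        IOException ioe =
            S3AUtils.translateException("PUT", "s3a://bucket/key", wrapped);
        // Interruption anywhere in the cause chain now maps to
        // InterruptedIOException, so retry/shutdown logic can special-case it.
        System.out.println(ioe instanceof InterruptedIOException);  // true
      }
    }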




[5/8] hadoop git commit: YARN-5929. Missing scheduling policy in the FS queue metric. (Contributed by Yufei Gu via Daniel Templeton)

Posted by as...@apache.org.
YARN-5929. Missing scheduling policy in the FS queue metric. (Contributed by Yufei Gu via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5bd18c49
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5bd18c49
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5bd18c49

Branch: refs/heads/YARN-5085
Commit: 5bd18c49bd5075fa20d24363dceea7828e3fa266
Parents: 2ff84a0
Author: Daniel Templeton <te...@apache.org>
Authored: Fri Dec 2 13:35:09 2016 -0800
Committer: Daniel Templeton <te...@apache.org>
Committed: Fri Dec 2 13:55:42 2016 -0800

----------------------------------------------------------------------
 .../scheduler/fair/FSQueueMetrics.java          | 32 +++++++--
 .../scheduler/fair/TestFSQueueMetrics.java      | 69 ++++++++++++++++++++
 2 files changed, 97 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5bd18c49/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueueMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueueMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueueMetrics.java
index a970815..ca375f2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueueMetrics.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueueMetrics.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.metrics2.MetricsSystem;
 import org.apache.hadoop.metrics2.annotation.Metric;
@@ -169,6 +170,12 @@ public class FSQueueMetrics extends QueueMetrics {
     amResourceUsageVCores.set(resource.getVirtualCores());
   }
 
+  /**
+   * Get the scheduling policy.
+   *
+   * @return the scheduling policy
+   */
+  @Metric("Scheduling policy")
   public String getSchedulingPolicy() {
     return schedulingPolicy;
   }
@@ -181,21 +188,38 @@ public class FSQueueMetrics extends QueueMetrics {
   static FSQueueMetrics forQueue(String queueName, Queue parent,
       boolean enableUserMetrics, Configuration conf) {
     MetricsSystem ms = DefaultMetricsSystem.instance();
+    return forQueue(ms, queueName, parent, enableUserMetrics, conf);
+  }
+
+  /**
+   * Get the FS queue metric for the given queue. Create one and register it to
+   * metrics system if there isn't one for the queue.
+   *
+   * @param ms the metric system
+   * @param queueName queue name
+   * @param parent parent queue
+   * @param enableUserMetrics  if user metrics is needed
+   * @param conf configuration
+   * @return a FSQueueMetrics object
+   */
+  @VisibleForTesting
+  public synchronized
+  static FSQueueMetrics forQueue(MetricsSystem ms, String queueName,
+      Queue parent, boolean enableUserMetrics, Configuration conf) {
     QueueMetrics metrics = queueMetrics.get(queueName);
     if (metrics == null) {
       metrics = new FSQueueMetrics(ms, queueName, parent, enableUserMetrics, conf)
           .tag(QUEUE_INFO, queueName);
-      
+
       // Register with the MetricsSystems
       if (ms != null) {
         metrics = ms.register(
-                sourceName(queueName).toString(), 
-                "Metrics for queue: " + queueName, metrics);
+            sourceName(queueName).toString(),
+            "Metrics for queue: " + queueName, metrics);
       }
       queueMetrics.put(queueName, metrics);
     }
 
     return (FSQueueMetrics)metrics;
   }
-
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5bd18c49/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSQueueMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSQueueMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSQueueMetrics.java
new file mode 100644
index 0000000..7ccfbc3
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSQueueMetrics.java
@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.metrics2.MetricsSource;
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.impl.MetricsCollectorImpl;
+import org.apache.hadoop.metrics2.impl.MetricsRecords;
+import org.apache.hadoop.metrics2.impl.MetricsSystemImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.TestQueueMetrics;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * The test class for {@link FSQueueMetrics}.
+ */
+public class TestFSQueueMetrics {
+  private static final Configuration CONF = new Configuration();
+
+  private MetricsSystem ms;
+
+  @Before public void setUp() {
+    ms = new MetricsSystemImpl();
+    QueueMetrics.clearQueueMetrics();
+  }
+
+  /**
+   * Test if the metric scheduling policy is set correctly.
+   */
+  @Test
+  public void testSchedulingPolicy() {
+    String queueName = "single";
+
+    FSQueueMetrics metrics = FSQueueMetrics.forQueue(ms, queueName, null, false,
+        CONF);
+    metrics.setSchedulingPolicy("drf");
+    checkSchedulingPolicy(queueName, "drf");
+
+    // test resetting the scheduling policy
+    metrics.setSchedulingPolicy("fair");
+    checkSchedulingPolicy(queueName, "fair");
+  }
+
+  private void checkSchedulingPolicy(String queueName, String policy) {
+    MetricsSource queueSource = TestQueueMetrics.queueSource(ms, queueName);
+    MetricsCollectorImpl collector = new MetricsCollectorImpl();
+    queueSource.getMetrics(collector, true);
+    MetricsRecords.assertTag(collector.getRecords().get(0), "SchedulingPolicy",
+        policy);
+  }
+}




[7/8] hadoop git commit: HADOOP-13257. Improve Azure Data Lake contract tests. Contributed by Vishwajeet Dusane

Posted by as...@apache.org.
HADOOP-13257. Improve Azure Data Lake contract tests. Contributed by Vishwajeet Dusane


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4113ec5f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4113ec5f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4113ec5f

Branch: refs/heads/YARN-5085
Commit: 4113ec5fa5ca049ebaba039b1faf3911c6a34f7b
Parents: 51211a7
Author: Mingliang Liu <li...@apache.org>
Authored: Fri Dec 2 15:54:57 2016 -0800
Committer: Mingliang Liu <li...@apache.org>
Committed: Fri Dec 2 15:54:57 2016 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/fs/adl/AdlFileSystem.java |  24 +-
 .../org/apache/hadoop/fs/adl/TestAdlRead.java   |   6 +-
 .../apache/hadoop/fs/adl/TestListStatus.java    |   6 +-
 .../fs/adl/live/TestAdlContractAppendLive.java  |  11 +-
 .../fs/adl/live/TestAdlContractConcatLive.java  |  23 +-
 .../fs/adl/live/TestAdlContractCreateLive.java  |  19 +-
 .../fs/adl/live/TestAdlContractDeleteLive.java  |  11 +-
 .../live/TestAdlContractGetFileStatusLive.java  |  36 ++
 .../fs/adl/live/TestAdlContractMkdirLive.java   |  25 +-
 .../fs/adl/live/TestAdlContractOpenLive.java    |  11 +-
 .../fs/adl/live/TestAdlContractRenameLive.java  |  30 +-
 .../fs/adl/live/TestAdlContractRootDirLive.java |  19 +-
 .../fs/adl/live/TestAdlContractSeekLive.java    |  11 +-
 .../live/TestAdlDifferentSizeWritesLive.java    |  69 ++--
 .../live/TestAdlFileContextCreateMkdirLive.java |  67 ++++
 .../TestAdlFileContextMainOperationsLive.java   |  99 ++++++
 .../adl/live/TestAdlFileSystemContractLive.java |  57 +---
 .../live/TestAdlInternalCreateNonRecursive.java | 134 ++++++++
 .../fs/adl/live/TestAdlPermissionLive.java      | 116 +++++++
 .../adl/live/TestAdlSupportedCharsetInPath.java | 334 +++++++++++++++++++
 .../apache/hadoop/fs/adl/live/TestMetadata.java | 111 ++++++
 21 files changed, 995 insertions(+), 224 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4113ec5f/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
index 9083afc..bd43c52 100644
--- a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
+++ b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
@@ -346,7 +346,6 @@ public class AdlFileSystem extends FileSystem {
    * @see #setPermission(Path, FsPermission)
    * @deprecated API only for 0.20-append
    */
-  @Deprecated
   @Override
   public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
       EnumSet<CreateFlag> flags, int bufferSize, short replication,
@@ -471,6 +470,10 @@ public class AdlFileSystem extends FileSystem {
   @Override
   public boolean rename(final Path src, final Path dst) throws IOException {
     statistics.incrementWriteOps(1);
+    if (toRelativeFilePath(src).equals("/")) {
+      return false;
+    }
+
     return adlClient.rename(toRelativeFilePath(src), toRelativeFilePath(dst));
   }
 
@@ -522,9 +525,24 @@ public class AdlFileSystem extends FileSystem {
   public boolean delete(final Path path, final boolean recursive)
       throws IOException {
     statistics.incrementWriteOps(1);
+    String relativePath = toRelativeFilePath(path);
+    // Delete on root directory not supported.
+    if (relativePath.equals("/")) {
+      // This is important check after recent commit
+      // HADOOP-12977 and HADOOP-13716 validates on root for
+      // 1. if root is empty and non recursive delete then return false.
+      // 2. if root is non empty and non recursive delete then throw exception.
+      if (!recursive
+          && adlClient.enumerateDirectory(toRelativeFilePath(path), 1).size()
+          > 0) {
+        throw new IOException("Delete on root is not supported.");
+      }
+      return false;
+    }
+
     return recursive ?
-        adlClient.deleteRecursive(toRelativeFilePath(path)) :
-        adlClient.delete(toRelativeFilePath(path));
+        adlClient.deleteRecursive(relativePath) :
+        adlClient.delete(relativePath);
   }
 
   /**
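
The root-path behavior this adds to delete(), summarized from the code above:

    delete("/", recursive=true)                   -> false (root itself is never removed)
    delete("/", recursive=false), root empty      -> false
    delete("/", recursive=false), root non-empty  -> IOException("Delete on root is not supported.")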

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4113ec5f/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestAdlRead.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestAdlRead.java b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestAdlRead.java
index 734256a..172663c 100644
--- a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestAdlRead.java
+++ b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestAdlRead.java
@@ -102,7 +102,7 @@ public class TestAdlRead extends AdlMockWebServer {
       n += count;
     }
 
-    Assert.assertEquals(testData.getActualData().length, expectedData.length);
+    Assert.assertEquals(expectedData.length, testData.getActualData().length);
     Assert.assertArrayEquals(expectedData, testData.getActualData());
     in.close();
     if (testData.isCheckOfNoOfCalls()) {
@@ -119,8 +119,8 @@ public class TestAdlRead extends AdlMockWebServer {
     for (int i = 0; i < 1000; ++i) {
       int position = random.nextInt(testData.getActualData().length);
       in.seek(position);
-      Assert.assertEquals(in.getPos(), position);
-      Assert.assertEquals(in.read(), testData.getActualData()[position] & 0xFF);
+      Assert.assertEquals(position, in.getPos());
+      Assert.assertEquals(testData.getActualData()[position] & 0xFF, in.read());
     }
     in.close();
     if (testData.isCheckOfNoOfCalls()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4113ec5f/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestListStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestListStatus.java b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestListStatus.java
index dd27a10..c151e89 100644
--- a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestListStatus.java
+++ b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestListStatus.java
@@ -50,7 +50,7 @@ public class TestListStatus extends AdlMockWebServer {
         .listStatus(new Path("/test1/test2"));
     long endTime = Time.monotonicNow();
     LOG.debug("Time : " + (endTime - startTime));
-    Assert.assertEquals(ls.length, 10);
+    Assert.assertEquals(10, ls.length);
 
     getMockServer().enqueue(new MockResponse().setResponseCode(200)
         .setBody(TestADLResponseData.getListFileStatusJSONResponse(200)));
@@ -58,7 +58,7 @@ public class TestListStatus extends AdlMockWebServer {
     ls = getMockAdlFileSystem().listStatus(new Path("/test1/test2"));
     endTime = Time.monotonicNow();
     LOG.debug("Time : " + (endTime - startTime));
-    Assert.assertEquals(ls.length, 200);
+    Assert.assertEquals(200, ls.length);
 
     getMockServer().enqueue(new MockResponse().setResponseCode(200)
         .setBody(TestADLResponseData.getListFileStatusJSONResponse(2048)));
@@ -66,7 +66,7 @@ public class TestListStatus extends AdlMockWebServer {
     ls = getMockAdlFileSystem().listStatus(new Path("/test1/test2"));
     endTime = Time.monotonicNow();
     LOG.debug("Time : " + (endTime - startTime));
-    Assert.assertEquals(ls.length, 2048);
+    Assert.assertEquals(2048, ls.length);
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4113ec5f/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractAppendLive.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractAppendLive.java b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractAppendLive.java
index 83390af..ffe6dd3 100644
--- a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractAppendLive.java
+++ b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractAppendLive.java
@@ -23,11 +23,10 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractAppendTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
 import org.apache.hadoop.fs.contract.ContractTestUtils;
-import org.junit.Before;
 import org.junit.Test;
 
 /**
- * Verify Adls APPEND semantics compliance with Hadoop.
+ * Test Append on Adl file system.
  */
 public class TestAdlContractAppendLive extends AbstractContractAppendTest {
 
@@ -42,12 +41,4 @@ public class TestAdlContractAppendLive extends AbstractContractAppendTest {
     ContractTestUtils.unsupported("Skipping since renaming file in append "
         + "mode not supported in Adl");
   }
-
-  @Before
-  @Override
-  public void setup() throws Exception {
-    org.junit.Assume
-        .assumeTrue(AdlStorageConfiguration.isContractTestEnabled());
-    super.setup();
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4113ec5f/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractConcatLive.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractConcatLive.java b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractConcatLive.java
index 8474e9c..60d30ac 100644
--- a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractConcatLive.java
+++ b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractConcatLive.java
@@ -20,14 +20,15 @@
 package org.apache.hadoop.fs.adl.live;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.contract.AbstractContractConcatTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.apache.hadoop.fs.contract.ContractTestUtils;
-import org.junit.Before;
 import org.junit.Test;
 
+import static org.apache.hadoop.fs.contract.ContractTestUtils.touch;
+
 /**
- * Verify Adls CONCAT semantics compliance with Hadoop.
+ * Test concat on Adl file system.
  */
 public class TestAdlContractConcatLive extends AbstractContractConcatTest {
 
@@ -36,17 +37,13 @@ public class TestAdlContractConcatLive extends AbstractContractConcatTest {
     return new AdlStorageContract(configuration);
   }
 
-  @Before
-  @Override
-  public void setup() throws Exception {
-    org.junit.Assume
-        .assumeTrue(AdlStorageConfiguration.isContractTestEnabled());
-    super.setup();
-  }
-
   @Test
   public void testConcatMissingTarget() throws Throwable {
-    ContractTestUtils.unsupported("BUG : Adl to support expectation from "
-        + "concat on missing targets.");
+    Path testPath = path("test");
+    Path zeroByteFile = new Path(testPath, "zero.txt");
+    Path target = new Path(testPath, "target");
+    touch(getFileSystem(), zeroByteFile);
+    // Concat on missing target is allowed on Adls file system.
+    getFileSystem().concat(target, new Path[] {zeroByteFile});
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4113ec5f/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractCreateLive.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractCreateLive.java b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractCreateLive.java
index 907c50c..06347e9 100644
--- a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractCreateLive.java
+++ b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractCreateLive.java
@@ -22,12 +22,9 @@ package org.apache.hadoop.fs.adl.live;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractCreateTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.apache.hadoop.fs.contract.ContractTestUtils;
-import org.junit.Before;
-import org.junit.Test;
 
 /**
- * Verify Adls CREATE semantics compliance with Hadoop.
+ * Test creating files, overwrite options.
  */
 public class TestAdlContractCreateLive extends AbstractContractCreateTest {
 
@@ -35,18 +32,4 @@ public class TestAdlContractCreateLive extends AbstractContractCreateTest {
   protected AbstractFSContract createContract(Configuration configuration) {
     return new AdlStorageContract(configuration);
   }
-
-  @Before
-  @Override
-  public void setup() throws Exception {
-    org.junit.Assume
-        .assumeTrue(AdlStorageConfiguration.isContractTestEnabled());
-    super.setup();
-  }
-
-  @Test
-  public void testOverwriteEmptyDirectory() throws Throwable {
-    ContractTestUtils
-        .unsupported("BUG : Adl to support override empty " + "directory.");
-  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4113ec5f/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractDeleteLive.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractDeleteLive.java b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractDeleteLive.java
index 30eaec7..6961f15 100644
--- a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractDeleteLive.java
+++ b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractDeleteLive.java
@@ -22,10 +22,9 @@ package org.apache.hadoop.fs.adl.live;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractDeleteTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.Before;
 
 /**
- * Verify Adls DELETE semantics compliance with Hadoop.
+ * Test delete contract test.
  */
 public class TestAdlContractDeleteLive extends AbstractContractDeleteTest {
 
@@ -33,12 +32,4 @@ public class TestAdlContractDeleteLive extends AbstractContractDeleteTest {
   protected AbstractFSContract createContract(Configuration configuration) {
     return new AdlStorageContract(configuration);
   }
-
-  @Before
-  @Override
-  public void setup() throws Exception {
-    org.junit.Assume
-        .assumeTrue(AdlStorageConfiguration.isContractTestEnabled());
-    super.setup();
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4113ec5f/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractGetFileStatusLive.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractGetFileStatusLive.java b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractGetFileStatusLive.java
new file mode 100644
index 0000000..d50dd68
--- /dev/null
+++ b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractGetFileStatusLive.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.fs.adl.live;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.AbstractContractGetFileStatusTest;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+
+/**
+ * Test the getFileStatus contract on the Adl file system.
+ */
+public class TestAdlContractGetFileStatusLive extends
+    AbstractContractGetFileStatusTest {
+
+  @Override
+  protected AbstractFSContract createContract(Configuration configuration) {
+    return new AdlStorageContract(configuration);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4113ec5f/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractMkdirLive.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractMkdirLive.java b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractMkdirLive.java
index e498110..5e760c5 100644
--- a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractMkdirLive.java
+++ b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractMkdirLive.java
@@ -22,34 +22,13 @@ package org.apache.hadoop.fs.adl.live;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractMkdirTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.apache.hadoop.fs.contract.ContractTestUtils;
-import org.junit.Before;
-import org.junit.Test;
 
 /**
- * Verify Adls MKDIR semantics compliance with Hadoop.
+ * Test Mkdir contract on Adl storage file system.
  */
 public class TestAdlContractMkdirLive extends AbstractContractMkdirTest {
   @Override
   protected AbstractFSContract createContract(Configuration conf) {
     return new AdlStorageContract(conf);
   }
-
-  @Before
-  @Override
-  public void setup() throws Exception {
-    org.junit.Assume
-        .assumeTrue(AdlStorageConfiguration.isContractTestEnabled());
-    super.setup();
-  }
-
-  @Test
-  public void testMkdirOverParentFile() throws Throwable {
-    ContractTestUtils.unsupported("Not supported by Adl");
-  }
-
-  @Test
-  public void testNoMkdirOverFile() throws Throwable {
-    ContractTestUtils.unsupported("Not supported by Adl");
-  }
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4113ec5f/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractOpenLive.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractOpenLive.java b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractOpenLive.java
index 2bb2095..7a35d2c 100644
--- a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractOpenLive.java
+++ b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractOpenLive.java
@@ -22,10 +22,9 @@ package org.apache.hadoop.fs.adl.live;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractOpenTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.Before;
 
 /**
- * Verify Adls OPEN/READ semantics compliance with Hadoop.
+ * Test the open/read contract on the Adl file system.
  */
 public class TestAdlContractOpenLive extends AbstractContractOpenTest {
 
@@ -33,12 +32,4 @@ public class TestAdlContractOpenLive extends AbstractContractOpenTest {
   protected AbstractFSContract createContract(Configuration configuration) {
     return new AdlStorageContract(configuration);
   }
-
-  @Before
-  @Override
-  public void setup() throws Exception {
-    org.junit.Assume
-        .assumeTrue(AdlStorageConfiguration.isContractTestEnabled());
-    super.setup();
-  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4113ec5f/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractRenameLive.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractRenameLive.java b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractRenameLive.java
index 06063c5..d72d35e 100644
--- a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractRenameLive.java
+++ b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractRenameLive.java
@@ -22,12 +22,9 @@ package org.apache.hadoop.fs.adl.live;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractRenameTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.apache.hadoop.fs.contract.ContractTestUtils;
-import org.junit.Before;
-import org.junit.Test;
 
 /**
- * Verify Adls RENAME semantics compliance with Hadoop.
+ * Test the rename contract on the Adl file system.
  */
 public class TestAdlContractRenameLive extends AbstractContractRenameTest {
 
@@ -35,29 +32,4 @@ public class TestAdlContractRenameLive extends AbstractContractRenameTest {
   protected AbstractFSContract createContract(Configuration configuration) {
     return new AdlStorageContract(configuration);
   }
-
-  @Before
-  @Override
-  public void setup() throws Exception {
-    org.junit.Assume
-        .assumeTrue(AdlStorageConfiguration.isContractTestEnabled());
-    super.setup();
-  }
-
-  @Test
-  public void testRenameFileOverExistingFile() throws Throwable {
-    ContractTestUtils
-        .unsupported("BUG : Adl to support full complete POSIX" + "behaviour");
-  }
-
-  @Test
-  public void testRenameFileNonexistentDir() throws Throwable {
-    ContractTestUtils
-        .unsupported("BUG : Adl to support create dir is not " + "exist");
-  }
-
-  @Test
-  public void testRenameWithNonEmptySubDir() throws Throwable {
-    ContractTestUtils.unsupported("BUG : Adl to support non empty dir move.");
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4113ec5f/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractRootDirLive.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractRootDirLive.java b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractRootDirLive.java
index bf4e549..8ebc632 100644
--- a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractRootDirLive.java
+++ b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractRootDirLive.java
@@ -22,12 +22,9 @@ package org.apache.hadoop.fs.adl.live;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractRootDirectoryTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.apache.hadoop.fs.contract.ContractTestUtils;
-import org.junit.Before;
-import org.junit.Test;
 
 /**
- * Verify Adls root level operation support.
+ * Test root-level operations on the Adl file system.
  */
 public class TestAdlContractRootDirLive
     extends AbstractContractRootDirectoryTest {
@@ -35,18 +32,4 @@ public class TestAdlContractRootDirLive
   protected AbstractFSContract createContract(Configuration configuration) {
     return new AdlStorageContract(configuration);
   }
-
-  @Before
-  @Override
-  public void setup() throws Exception {
-    org.junit.Assume
-        .assumeTrue(AdlStorageConfiguration.isContractTestEnabled());
-    super.setup();
-  }
-
-  @Test
-  public void testRmNonEmptyRootDirNonRecursive() throws Throwable {
-    ContractTestUtils.unsupported(
-        "BUG : Adl should throw exception instred " + "of returning false.");
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4113ec5f/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractSeekLive.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractSeekLive.java b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractSeekLive.java
index 0976464..62423b6 100644
--- a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractSeekLive.java
+++ b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractSeekLive.java
@@ -22,10 +22,9 @@ package org.apache.hadoop.fs.adl.live;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractSeekTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.Before;
 
 /**
- * Verify Adls OPEN/READ seek operation support.
+ * Test seek operation on Adl file system.
  */
 public class TestAdlContractSeekLive extends AbstractContractSeekTest {
 
@@ -33,12 +32,4 @@ public class TestAdlContractSeekLive extends AbstractContractSeekTest {
   protected AbstractFSContract createContract(Configuration configuration) {
     return new AdlStorageContract(configuration);
   }
-
-  @Before
-  @Override
-  public void setup() throws Exception {
-    org.junit.Assume
-        .assumeTrue(AdlStorageConfiguration.isContractTestEnabled());
-    super.setup();
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4113ec5f/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlDifferentSizeWritesLive.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlDifferentSizeWritesLive.java b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlDifferentSizeWritesLive.java
index 8f53400..5421e0b 100644
--- a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlDifferentSizeWritesLive.java
+++ b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlDifferentSizeWritesLive.java
@@ -23,27 +23,63 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.adl.common.Parallelized;
 import org.junit.Assert;
 import org.junit.Before;
+import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 
 import java.io.IOException;
 import java.net.URISyntaxException;
+import java.util.Arrays;
+import java.util.Collection;
 import java.util.Random;
+import java.util.UUID;
+
+import static org.apache.hadoop.fs.adl.AdlConfKeys.WRITE_BUFFER_SIZE_KEY;
 
 /**
- * Verify different data segment size writes ensure the integrity and
- * order of the data.
+ * Verify data integrity for different data and write buffer sizes.
  */
+@RunWith(Parallelized.class)
 public class TestAdlDifferentSizeWritesLive {
+  private static Random rand = new Random();
+  private int totalSize;
+  private int chunkSize;
+
+  public TestAdlDifferentSizeWritesLive(int totalSize, int chunkSize) {
+    this.totalSize = totalSize;
+    this.chunkSize = chunkSize;
+  }
 
   public static byte[] getRandomByteArrayData(int size) {
     byte[] b = new byte[size];
-    Random rand = new Random();
     rand.nextBytes(b);
     return b;
   }
 
+  @Parameterized.Parameters(name = "{index}: Data Size [{0}] ; Chunk Size "
+      + "[{1}]")
+  public static Collection testDataForIntegrityTest() {
+    return Arrays.asList(
+        new Object[][] {{4 * 1024, 1 * 1024}, {4 * 1024, 7 * 1024},
+            {4 * 1024, 10}, {2 * 1024, 10}, {1 * 1024, 10}, {100, 1},
+            {4 * 1024, 1 * 1024}, {7 * 1024, 2 * 1024}, {9 * 1024, 2 * 1024},
+            {10 * 1024, 3 * 1024}, {10 * 1024, 1 * 1024},
+            {10 * 1024, 8 * 1024}});
+  }
+
+  @BeforeClass
+  public static void cleanUpParent() throws IOException, URISyntaxException {
+    if (AdlStorageConfiguration.isContractTestEnabled()) {
+      Path path = new Path("/test/dataIntegrityCheck/");
+      FileSystem fs = AdlStorageConfiguration.createStorageConnector();
+      fs.delete(path, true);
+    }
+  }
+
   @Before
   public void setup() throws Exception {
     org.junit.Assume
@@ -51,32 +87,17 @@ public class TestAdlDifferentSizeWritesLive {
   }
 
   @Test
-  public void testSmallDataWrites() throws IOException {
-    testDataIntegrity(4 * 1024 * 1024, 1 * 1024);
-    testDataIntegrity(4 * 1024 * 1024, 7 * 1024);
-    testDataIntegrity(4 * 1024 * 1024, 10);
-    testDataIntegrity(2 * 1024 * 1024, 10);
-    testDataIntegrity(1 * 1024 * 1024, 10);
-    testDataIntegrity(100, 1);
-  }
-
-  @Test
-  public void testMediumDataWrites() throws IOException {
-    testDataIntegrity(4 * 1024 * 1024, 1 * 1024 * 1024);
-    testDataIntegrity(7 * 1024 * 1024, 2 * 1024 * 1024);
-    testDataIntegrity(9 * 1024 * 1024, 2 * 1024 * 1024);
-    testDataIntegrity(10 * 1024 * 1024, 3 * 1024 * 1024);
-  }
-
-  private void testDataIntegrity(int totalSize, int chunkSize)
-      throws IOException {
-    Path path = new Path("/test/dataIntegrityCheck");
+  public void testDataIntegrity() throws IOException {
+    Path path = new Path(
+        "/test/dataIntegrityCheck/" + UUID.randomUUID().toString());
     FileSystem fs = null;
+    AdlStorageConfiguration.getConfiguration()
+        .setInt(WRITE_BUFFER_SIZE_KEY, 4 * 1024);
     try {
       fs = AdlStorageConfiguration.createStorageConnector();
     } catch (URISyntaxException e) {
       throw new IllegalStateException("Can not initialize ADL FileSystem. "
-          + "Please check fs.defaultFS property.", e);
+          + "Please check test.fs.adl.name property.", e);
     }
     byte[] expectedData = getRandomByteArrayData(totalSize);
 

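A hedged reading of the size changes above: the old tests pushed megabyte-scale
payloads, while the parameterized cases use kilobyte-scale totals but also pin
WRITE_BUFFER_SIZE_KEY to 4 KB, so even the small totals still cross client
buffer boundaries. A minimal sketch of that arithmetic, assuming simple
buffering and using one of the parameter pairs above:

    int bufferSize = 4 * 1024;   // WRITE_BUFFER_SIZE_KEY value set by the test
    int totalSize = 10 * 1024;   // one of the parameterized total sizes
    int chunkSize = 3 * 1024;    // its paired chunk size
    // write() calls issued by the test loop; the last chunk may be short.
    int writes = (totalSize + chunkSize - 1) / chunkSize;   // 4
    // Full client-side buffers flushed to the store before close(),
    // assuming the stream flushes each time the buffer fills.
    int fullBufferFlushes = totalSize / bufferSize;         // 2
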
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4113ec5f/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlFileContextCreateMkdirLive.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlFileContextCreateMkdirLive.java b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlFileContextCreateMkdirLive.java
new file mode 100644
index 0000000..5166de1
--- /dev/null
+++ b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlFileContextCreateMkdirLive.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.fs.adl.live;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.DelegateToFileSystem;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileContextCreateMkdirBaseTest;
+import org.apache.hadoop.fs.FileContextTestHelper;
+import org.apache.hadoop.fs.FileSystem;
+import org.junit.Assume;
+import org.junit.BeforeClass;
+
+import java.net.URI;
+import java.util.UUID;
+
+/**
+ * Test file context Create/Mkdir operation.
+ */
+public class TestAdlFileContextCreateMkdirLive
+    extends FileContextCreateMkdirBaseTest {
+  private static final String KEY_FILE_SYSTEM = "test.fs.adl.name";
+
+  @BeforeClass
+  public static void skipTestCheck() {
+    Assume.assumeTrue(AdlStorageConfiguration.isContractTestEnabled());
+  }
+
+  @Override
+  public void setUp() throws Exception {
+    Configuration conf = AdlStorageConfiguration.getConfiguration();
+    String fileSystem = conf.get(KEY_FILE_SYSTEM);
+    if (fileSystem == null || fileSystem.trim().length() == 0) {
+      throw new Exception("Default file system not configured.");
+    }
+    URI uri = new URI(fileSystem);
+    FileSystem fs = AdlStorageConfiguration.createStorageConnector();
+    fc = FileContext.getFileContext(
+        new DelegateToFileSystem(uri, fs, conf, fs.getScheme(), false) {
+        }, conf);
+    super.setUp();
+  }
+
+  @Override
+  protected FileContextTestHelper createFileContextHelper() {
+    // On Windows, root directory path is created from local running directory.
+    // Adl does not support ':' as part of the path which results in failure.
+    return new FileContextTestHelper(UUID.randomUUID().toString());
+  }
+}
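
The anonymous DelegateToFileSystem subclass above exists because
DelegateToFileSystem's constructor is protected: an empty anonymous subclass is
the lightest way to adapt an already-initialized FileSystem into the
AbstractFileSystem that FileContext expects. A minimal sketch, assuming uri, fs
and conf are prepared as in the test:

    // Bridge a FileSystem into a FileContext for the FileContext test suites.
    FileContext fc = FileContext.getFileContext(
        new DelegateToFileSystem(uri, fs, conf, fs.getScheme(), false) {
          // Empty body; exists only to reach the protected constructor.
        }, conf);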

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4113ec5f/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlFileContextMainOperationsLive.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlFileContextMainOperationsLive.java b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlFileContextMainOperationsLive.java
new file mode 100644
index 0000000..ee10da7
--- /dev/null
+++ b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlFileContextMainOperationsLive.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.fs.adl.live;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.*;
+import org.junit.Assume;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.net.URI;
+import java.util.UUID;
+
+import static org.apache.hadoop.util.Shell.WINDOWS;
+
+/**
+ * Run collection of tests for the {@link FileContext}.
+ */
+public class TestAdlFileContextMainOperationsLive
+    extends FileContextMainOperationsBaseTest {
+
+  private static final String KEY_FILE_SYSTEM = "test.fs.adl.name";
+
+  @BeforeClass
+  public static void skipTestCheck() {
+    Assume.assumeTrue(AdlStorageConfiguration.isContractTestEnabled());
+  }
+
+  @Override
+  public void setUp() throws Exception {
+    Configuration conf = AdlStorageConfiguration.getConfiguration();
+    String fileSystem = conf.get(KEY_FILE_SYSTEM);
+    if (fileSystem == null || fileSystem.trim().length() == 0) {
+      throw new Exception("Default file system not configured.");
+    }
+    URI uri = new URI(fileSystem);
+    FileSystem fs = AdlStorageConfiguration.createStorageConnector();
+    fc = FileContext.getFileContext(
+        new DelegateToFileSystem(uri, fs, conf, fs.getScheme(), false) {
+        }, conf);
+    super.setUp();
+  }
+
+  @Override
+  protected FileContextTestHelper createFileContextHelper() {
+    // On Windows, root directory path is created from local running directory.
+    // Adl does not support ':' as part of the path which results in failure.
+    //    return new FileContextTestHelper(GenericTestUtils
+    // .getRandomizedTestDir()
+    //        .getAbsolutePath().replaceAll(":",""));
+    return new FileContextTestHelper(UUID.randomUUID().toString());
+  }
+
+  @Override
+  protected boolean listCorruptedBlocksSupported() {
+    return false;
+  }
+
+  @Override
+  public void testWorkingDirectory() throws Exception {
+    if (WINDOWS) {
+      // TODO :Fix is required in Hadoop shell to support windows permission
+      // set.
+      // The test is failing with NPE on windows platform only, with Linux
+      // platform test passes.
+      Assume.assumeTrue(false);
+    } else {
+      super.testWorkingDirectory();
+    }
+  }
+
+  @Override
+  public void testUnsupportedSymlink() throws IOException {
+    Assume.assumeTrue(false);
+  }
+
+  @Test
+  public void testSetVerifyChecksum() throws IOException {
+    Assume.assumeTrue(false);
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4113ec5f/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlFileSystemContractLive.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlFileSystemContractLive.java b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlFileSystemContractLive.java
index 0df7d05..657947e 100644
--- a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlFileSystemContractLive.java
+++ b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlFileSystemContractLive.java
@@ -22,12 +22,13 @@ package org.apache.hadoop.fs.adl.live;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemContractBaseTest;
 import org.apache.hadoop.fs.Path;
-import org.junit.Test;
+import org.junit.Assume;
+import org.junit.Before;
 
 import java.io.IOException;
 
 /**
- * Verify Adls adhere to Hadoop file system semantics.
+ * Test the base file system contract on the Adl file system.
  */
 public class TestAdlFileSystemContractLive extends FileSystemContractBaseTest {
   private FileSystem adlStore;
@@ -60,52 +61,8 @@ public class TestAdlFileSystemContractLive extends FileSystemContractBaseTest {
     }
   }
 
-  public void testGetFileStatus() throws IOException {
-    if (!AdlStorageConfiguration.isContractTestEnabled()) {
-      return;
-    }
-
-    Path testPath = new Path("/test/adltest");
-    if (adlStore.exists(testPath)) {
-      adlStore.delete(testPath, false);
-    }
-
-    adlStore.create(testPath).close();
-    assertTrue(adlStore.delete(testPath, false));
-  }
-
-  /**
-   * The following tests are failing on Azure Data Lake and the Azure Data Lake
-   * file system code needs to be modified to make them pass.
-   * A separate work item has been opened for this.
-   */
-  @Test
-  @Override
-  public void testMkdirsFailsForSubdirectoryOfExistingFile() throws Exception {
-    // BUG : Adl should return exception instead of false.
-  }
-
-  @Test
-  @Override
-  public void testMkdirsWithUmask() throws Exception {
-    // Support under implementation in Adl
-  }
-
-  @Test
-  @Override
-  public void testMoveFileUnderParent() throws Exception {
-    // BUG: Adl server should return expected status code.
-  }
-
-  @Test
-  @Override
-  public void testRenameFileToSelf() throws Exception {
-    // BUG: Adl server should return expected status code.
-  }
-
-  @Test
-  @Override
-  public void testRenameToDirWithSamePrefixAllowed() throws Exception {
-    // BUG: Adl server should return expected status code.
+  @Before
+  public void skipTestCheck() {
+    Assume.assumeTrue(AdlStorageConfiguration.isContractTestEnabled());
   }
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4113ec5f/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlInternalCreateNonRecursive.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlInternalCreateNonRecursive.java b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlInternalCreateNonRecursive.java
new file mode 100644
index 0000000..7e11a54
--- /dev/null
+++ b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlInternalCreateNonRecursive.java
@@ -0,0 +1,134 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.fs.adl.live;
+
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.adl.common.Parallelized;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.junit.Assert;
+import org.junit.Assume;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.UUID;
+
+/**
+ * Test createNonRecursive API.
+ */
+@RunWith(Parallelized.class)
+public class TestAdlInternalCreateNonRecursive {
+  private Path inputFileName;
+  private FsPermission inputPermission;
+  private boolean inputOverride;
+  private boolean inputFileAlreadyExist;
+  private boolean inputParentAlreadyExist;
+  private Class<IOException> expectedExceptionType;
+  private FileSystem adlStore;
+
+  public TestAdlInternalCreateNonRecursive(String testScenario, String fileName,
+      FsPermission permission, boolean override, boolean fileAlreadyExist,
+      boolean parentAlreadyExist, Class<IOException> exceptionType) {
+
+    // Use a random parent path for each test so that parallel execution does
+    // not interfere with other running tests.
+    inputFileName = new Path(
+        "/test/createNonRecursive/" + UUID.randomUUID().toString(), fileName);
+    inputPermission = permission;
+    inputFileAlreadyExist = fileAlreadyExist;
+    inputOverride = override;
+    inputParentAlreadyExist = parentAlreadyExist;
+    expectedExceptionType = exceptionType;
+  }
+
+  @Parameterized.Parameters(name = "{0}")
+  public static Collection adlCreateNonRecursiveTestData()
+      throws UnsupportedEncodingException {
+    /*
+      Test Data
+      File name, Permission, Override flag, File already exist, Parent
+      already exist
+      shouldCreateSucceed, expectedExceptionIfFileCreateFails
+
+      "File already exists" and "Parent already exists" are mutually exclusive.
+    */
+    return Arrays.asList(new Object[][] {
+        {"CNR - When file do not exist.", UUID.randomUUID().toString(),
+            FsPermission.getFileDefault(), false, false, true, null},
+        {"CNR - When file exist. Override false", UUID.randomUUID().toString(),
+            FsPermission.getFileDefault(), false, true, true,
+            FileAlreadyExistsException.class},
+        {"CNR - When file exist. Override true", UUID.randomUUID().toString(),
+            FsPermission.getFileDefault(), true, true, true, null},
+
+        //TODO: This test is skipped until the fix makes it to prod.
+        /*{ "CNR - When parent do no exist.", UUID.randomUUID().toString(),
+            FsPermission.getFileDefault(), false, false, true, false,
+            IOException.class }*/});
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    Assume.assumeTrue(AdlStorageConfiguration.isContractTestEnabled());
+    adlStore = AdlStorageConfiguration.createStorageConnector();
+  }
+
+  @Test
+  public void testCreateNonRecursiveFunctionality() throws IOException {
+    if (inputFileAlreadyExist) {
+      FileSystem.create(adlStore, inputFileName, inputPermission);
+    }
+
+    // Mutually exclusive to inputFileAlreadyExist
+    if (inputParentAlreadyExist) {
+      adlStore.mkdirs(inputFileName.getParent());
+    } else {
+      adlStore.delete(inputFileName.getParent(), true);
+    }
+
+    try {
+      adlStore.createNonRecursive(inputFileName, inputPermission, inputOverride,
+          CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT,
+          adlStore.getDefaultReplication(inputFileName),
+          adlStore.getDefaultBlockSize(inputFileName), null);
+    } catch (IOException e) {
+
+      if (expectedExceptionType == null) {
+        throw e;
+      }
+
+      Assert.assertEquals(expectedExceptionType, e.getClass());
+      return;
+    }
+
+    if (expectedExceptionType != null) {
+      Assert.fail("CreateNonRecursive should have failed with exception "
+          + expectedExceptionType.getName());
+    }
+  }
+}
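
The test above exercises the createNonRecursive contract: unlike create(), it
must not materialize missing parent directories. A minimal sketch of that
contract, assuming a configured adl:// FileSystem named fs:

    Path file = new Path("/parent/child/data.txt");
    fs.delete(file.getParent(), true);  // ensure the parent is absent
    try {
      fs.createNonRecursive(file, FsPermission.getFileDefault(), true,
          4096, fs.getDefaultReplication(file),
          fs.getDefaultBlockSize(file), null);
      // Not reached: the parent does not exist.
    } catch (IOException expected) {
      // create(), by contrast, would have created /parent/child implicitly.
    }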

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4113ec5f/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlPermissionLive.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlPermissionLive.java b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlPermissionLive.java
new file mode 100644
index 0000000..dd7c10d
--- /dev/null
+++ b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlPermissionLive.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+
+package org.apache.hadoop.fs.adl.live;
+
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.adl.common.Parallelized;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.junit.*;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
+import java.net.URISyntaxException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.UUID;
+
+/**
+ * Test ACL permissions on files/folders on the Adl file system.
+ */
+@RunWith(Parallelized.class)
+public class TestAdlPermissionLive {
+
+  private static Path testRoot = new Path("/test");
+  private FsPermission permission;
+  private Path path;
+  private FileSystem adlStore;
+
+  public TestAdlPermissionLive(FsPermission testPermission) {
+    permission = testPermission;
+  }
+
+  @Parameterized.Parameters(name = "{0}")
+  public static Collection adlCreateNonRecursiveTestData()
+      throws UnsupportedEncodingException {
+    /*
+      Test Data
+      File/Folder name, User permission, Group permission, Other Permission,
+      Parent already exist
+      shouldCreateSucceed, expectedExceptionIfFileCreateFails
+    */
+    final Collection<Object[]> datas = new ArrayList<>();
+    for (FsAction g : FsAction.values()) {
+      for (FsAction o : FsAction.values()) {
+        datas.add(new Object[] {new FsPermission(FsAction.ALL, g, o)});
+      }
+    }
+    return datas;
+  }
+
+  @AfterClass
+  public static void cleanUp() throws IOException, URISyntaxException {
+    if (AdlStorageConfiguration.isContractTestEnabled()) {
+      Assert.assertTrue(AdlStorageConfiguration.createStorageConnector()
+          .delete(testRoot, true));
+    }
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    Assume.assumeTrue(AdlStorageConfiguration.isContractTestEnabled());
+    adlStore = AdlStorageConfiguration.createStorageConnector();
+  }
+
+  @Test
+  public void testFilePermission() throws IOException {
+    path = new Path(testRoot, UUID.randomUUID().toString());
+    adlStore.getConf()
+        .set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "000");
+
+    adlStore.mkdirs(path.getParent(),
+        new FsPermission(FsAction.ALL, FsAction.WRITE, FsAction.NONE));
+    adlStore.removeDefaultAcl(path.getParent());
+
+    adlStore.create(path, permission, true, 1024, (short) 1, 1023, null);
+    FileStatus status = adlStore.getFileStatus(path);
+    Assert.assertEquals(permission, status.getPermission());
+  }
+
+  @Test
+  public void testFolderPermission() throws IOException {
+    path = new Path(testRoot, UUID.randomUUID().toString());
+    adlStore.getConf()
+        .set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "000");
+    adlStore.mkdirs(path.getParent(),
+        new FsPermission(FsAction.ALL, FsAction.WRITE, FsAction.NONE));
+    adlStore.removeDefaultAcl(path.getParent());
+
+    adlStore.mkdirs(path, permission);
+    FileStatus status = adlStore.getFileStatus(path);
+    Assert.assertEquals(permission, status.getPermission());
+  }
+}
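
The umask override in the tests above matters: FileSystem applies the
configured umask to the permission passed to create() and mkdirs(), so without
pinning fs.permissions.umask-mode to "000" the default 022 umask would strip
bits from the parameterized permissions before they reach the store. A small
sketch of the masking rule, reusing the test's adlStore:

    // effective = requested & ~umask, e.g. 0777 & ~0022 = 0755.
    Configuration conf = adlStore.getConf();
    conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "022");
    FsPermission requested = new FsPermission((short) 0777);
    FsPermission effective = requested.applyUMask(FsPermission.getUMask(conf));
    // effective is now 0755; with umask "000" it stays 0777.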

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4113ec5f/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlSupportedCharsetInPath.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlSupportedCharsetInPath.java b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlSupportedCharsetInPath.java
new file mode 100644
index 0000000..d80b6bf
--- /dev/null
+++ b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlSupportedCharsetInPath.java
@@ -0,0 +1,334 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.fs.adl.live;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.adl.common.Parallelized;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
+import java.net.URISyntaxException;
+import java.util.*;
+
+/**
+ * Test the ASCII and UTF-8 character sets supported by the Adl storage file
+ * system in file/folder operations.
+ */
+@RunWith(Parallelized.class)
+public class TestAdlSupportedCharsetInPath {
+
+  private static final String TEST_ROOT = "/test/";
+  private static final Logger LOG = LoggerFactory
+      .getLogger(TestAdlSupportedCharsetInPath.class);
+  private String path;
+
+  public TestAdlSupportedCharsetInPath(String filePath) {
+    path = filePath;
+  }
+
+  @Parameterized.Parameters(name = "{0}")
+  public static Collection<Object[]> adlCharTestData()
+      throws UnsupportedEncodingException {
+
+    ArrayList<String> filePathList = new ArrayList<>();
+    for (int i = 32; i < 127; ++i) {
+      String specialChar = (char) i + "";
+      if (i >= 48 && i <= 57) {
+        continue;
+      }
+
+      if (i >= 65 && i <= 90) {
+        continue;
+      }
+
+      if (i >= 97 && i <= 122) {
+        continue;
+      }
+
+      // Special char at start of the path
+      if (i != 92 && i != 58 && i != 46 && i != 47) {
+        filePathList.add(specialChar + "");
+      }
+
+      // Special char at end of string
+      if (i != 92 && i != 47 && i != 58) {
+        filePathList.add("file " + i + " " + specialChar);
+      }
+
+      // Special char in between string
+      if (i != 47 && i != 58 && i != 92) {
+        filePathList.add("file " + i + " " + specialChar + "_name");
+      }
+    }
+
+    filePathList.add("a  ");
+    filePathList.add("a..b");
+    fillUnicodes(filePathList);
+    Collection<Object[]> result = new ArrayList<>();
+    for (String item : filePathList) {
+      result.add(new Object[] {item});
+    }
+    return result;
+  }
+
+  private static void fillUnicodes(ArrayList<String> filePathList) {
+    // Unicode characters
+    filePathList.add("\u0627\u0644\u0628\u064a\u0627\u0646\u0627\u062a \u0627\u0644\u0643\u0628\u064a\u0631\u0629"); // Arabic
+    filePathList.add("Të dhënat i madh"); // Albanian
+    filePathList.add("\u0574\u0565\u056e \u057f\u057e\u0575\u0561\u056c\u0576\u0565\u0580\u0568"); // Armenian
+    filePathList.add("böyük data"); // Azerbaijani
+    filePathList.add("\u0432\u044f\u043b\u0456\u043a\u0456\u044f \u0434\u0430\u0434\u0437\u0435\u043d\u044b\u044f"); // Belarusian,
+    filePathList.add("\u09ac\u09bf\u0997 \u09a1\u09c7\u099f\u09be"); // Bengali
+    filePathList.add("veliki podataka"); // Bosnian
+    filePathList.add("\u0433\u043e\u043b\u044f\u043c\u0430 \u0434\u0430\u043d\u043d\u0438"); // Bulgarian
+    filePathList.add("\u5927\u6570\u636e"); // Chinese - Simplified
+    filePathList.add("\u5927\u6578\u64da"); // Chinese - Traditional
+    filePathList.add("\u10d3\u10d8\u10d3\u10d8 \u10db\u10dd\u10dc\u10d0\u10ea\u10d4\u10db\u10d7\u10d0"); // Georgian,
+    filePathList.add("große Daten"); // German
+    filePathList.add("\u03bc\u03b5\u03b3\u03ac\u03bb\u03bf \u03b4\u03b5\u03b4\u03bf\u03bc\u03ad\u03bd\u03b1"); // Greek
+    filePathList.add("\u0aae\u0acb\u0a9f\u0abe \u0aae\u0abe\u0ab9\u0abf\u0aa4\u0ac0"); // Gujarati
+    filePathList.add("\u05e0\u05ea\u05d5\u05e0\u05d9\u05dd \u05d2\u05d3\u05d5\u05dc\u05d9\u05dd"); // Hebrew
+    filePathList.add("\u092c\u0921\u093c\u093e \u0921\u0947\u091f\u093e"); // Hindi
+    filePathList.add("stór gögn"); // Icelandic
+    filePathList.add("sonraí mór"); // Irish
+    filePathList.add("\u30d3\u30c3\u30b0\u30c7\u30fc\u30bf"); // Japanese
+    filePathList.add("\u04af\u043b\u043a\u0435\u043d \u0434\u0435\u0440\u0435\u043a\u0442\u0435\u0440"); // Kazakh
+    filePathList.add("\u1791\u17b7\u1793\u17d2\u1793\u1793\u17d0\u1799\u1792\u17c6"); // Khmer
+    filePathList.add("\ube45 \ub370\uc774\ud130"); // Korean
+    filePathList.add("\u0e82\u0ecd\u0ec9\u0ea1\u0eb9\u0e99 \u0e82\u0eb0\u0eab\u0e99\u0eb2\u0e94\u0ec3\u0eab\u0e8d\u0ec8"); // Lao
+    filePathList.add("\u0433\u043e\u043b\u0435\u043c\u0438 \u043f\u043e\u0434\u0430\u0442\u043e\u0446\u0438"); // Macedonian
+    filePathList.add("\u0920\u0942\u0932\u094b \u0921\u093e\u091f\u093e"); // Nepali
+    filePathList.add("\u0d35\u0d32\u0d3f\u0d2f \u0d21\u0d3e\u0d31\u0d4d\u0d31"); // Malayalam
+    filePathList.add("\u092e\u094b\u0920\u0947 \u0921\u0947\u091f\u093e"); // Marathi
+    filePathList.add("\u0442\u043e\u043c \u043c\u044d\u0434\u044d\u044d\u043b\u044d\u043b"); // Mongolian
+    filePathList.add("\u0627\u0637\u0644\u0627\u0639\u0627\u062a \u0628\u0632\u0631\u06af"); // Persian
+    filePathList.add("\u0a35\u0a71\u0a21\u0a47 \u0a21\u0a3e\u0a1f\u0a47 \u0a28\u0a42\u0a70"); // Punjabi
+    filePathList.add("\u0431\u043e\u043b\u044c\u0448\u0438\u0435 \u0434\u0430\u043d\u043d\u044b\u0435"); // Russian
+    filePathList.add("\u0412\u0435\u043b\u0438\u043a\u0438 \u043f\u043e\u0434\u0430\u0442\u0430\u043a\u0430"); // Serbian
+    filePathList.add("\u0dc0\u0dd2\u0dc1\u0dcf\u0dbd \u0daf\u0dad\u0dca\u0dad"); // Sinhala
+    filePathList.add("big dát"); // Slovak
+    filePathList.add("\u043c\u0430\u044a\u043b\u0443\u043c\u043e\u0442\u0438 \u043a\u0430\u043b\u043e\u043d"); // Tajik
+    filePathList.add("\u0baa\u0bc6\u0bb0\u0bbf\u0baf \u0ba4\u0bb0\u0bb5\u0bc1"); // Tamil
+    filePathList.add("\u0c2a\u0c46\u0c26\u0c4d\u0c26 \u0c21\u0c47\u0c1f\u0c3e"); // Telugu
+    filePathList.add("\u0e02\u0e49\u0e2d\u0e21\u0e39\u0e25\u0e43\u0e2b\u0e0d\u0e48"); // Thai
+    filePathList.add("büyük veri"); // Turkish
+    filePathList.add("\u0432\u0435\u043b\u0438\u043a\u0456 \u0434\u0430\u043d\u0456"); // Ukrainian
+    filePathList.add("\u0628\u0691\u06d2 \u0627\u0639\u062f\u0627\u062f \u0648 \u0634\u0645\u0627\u0631"); // Urdu
+    filePathList.add("katta ma'lumotlar"); // Uzbek
+    filePathList.add("d\u1eef li\u1ec7u l\u1edbn"); // Vietnamese
+    filePathList.add("\u05d2\u05e8\u05d5\u05d9\u05e1 \u05d3\u05d0\u05b7\u05d8\u05df"); // Yiddish
+    filePathList.add("big idatha"); // Zulu
+    filePathList.add("rachel\u03c7");
+    filePathList.add("jessica\u03bf");
+    filePathList.add("sarah\u03b4");
+    filePathList.add("katie\u03bd");
+    filePathList.add("wendy\u03be");
+    filePathList.add("david\u03bc");
+    filePathList.add("priscilla\u03c5");
+    filePathList.add("oscar\u03b8");
+    filePathList.add("xavier\u03c7");
+    filePathList.add("gabriella\u03b8");
+    filePathList.add("david\u03c5");
+    filePathList.add("irene\u03bc");
+    filePathList.add("fred\u03c1");
+    filePathList.add("david\u03c4");
+    filePathList.add("ulysses\u03bd");
+    filePathList.add("gabriella\u03bc");
+    filePathList.add("zach\u03b6");
+    filePathList.add("gabriella\u03bb");
+    filePathList.add("ulysses\u03c6");
+    filePathList.add("david\u03c7");
+    filePathList.add("sarah\u03c3");
+    filePathList.add("holly\u03c8");
+    filePathList.add("nick\u03b1");
+    filePathList.add("ulysses\u03b9");
+    filePathList.add("mike\u03b2");
+    filePathList.add("priscilla\u03ba");
+    filePathList.add("wendy\u03b8");
+    filePathList.add("jessica\u03c2");
+    filePathList.add("fred\u03c7");
+    filePathList.add("fred\u03b6");
+    filePathList.add("sarah\u03ba");
+    filePathList.add("calvin\u03b7");
+    filePathList.add("xavier\u03c7");
+    filePathList.add("yuri\u03c7");
+    filePathList.add("ethan\u03bb");
+    filePathList.add("holly\u03b5");
+    filePathList.add("xavier\u03c3");
+    filePathList.add("victor\u03c4");
+    filePathList.add("wendy\u03b2");
+    filePathList.add("jessica\u03c2");
+    filePathList.add("quinn\u03c6");
+    filePathList.add("xavier\u03c5");
+    filePathList.add("nick\u03b9");
+    filePathList.add("rachel\u03c6");
+    filePathList.add("oscar\u03be");
+    filePathList.add("zach\u03b4");
+    filePathList.add("zach\u03bb");
+    filePathList.add("rachel\u03b1");
+    filePathList.add("jessica\u03c6");
+    filePathList.add("luke\u03c6");
+    filePathList.add("tom\u03b6");
+    filePathList.add("nick\u03be");
+    filePathList.add("nick\u03ba");
+    filePathList.add("ethan\u03b4");
+    filePathList.add("fred\u03c7");
+    filePathList.add("priscilla\u03b8");
+    filePathList.add("zach\u03be");
+    filePathList.add("xavier\u03be");
+    filePathList.add("zach\u03c8");
+    filePathList.add("ethan\u03b1");
+    filePathList.add("oscar\u03b9");
+    filePathList.add("irene\u03b4");
+    filePathList.add("irene\u03b6");
+    filePathList.add("victor\u03bf");
+    filePathList.add("wendy\u03b2");
+    filePathList.add("mike\u03c3");
+    filePathList.add("fred\u03bf");
+    filePathList.add("mike\u03b7");
+    filePathList.add("sarah\u03c1");
+    filePathList.add("quinn\u03b2");
+    filePathList.add("mike\u03c5");
+    filePathList.add("nick\u03b6");
+    filePathList.add("nick\u03bf");
+    filePathList.add("tom\u03ba");
+    filePathList.add("bob\u03bb");
+    filePathList.add("yuri\u03c0");
+    filePathList.add("david\u03c4");
+    filePathList.add("quinn\u03c0");
+    filePathList.add("mike\u03bb");
+    filePathList.add("david\u03b7");
+    filePathList.add("ethan\u03c4");
+    filePathList.add("nick\u03c6");
+    filePathList.add("yuri\u03bf");
+    filePathList.add("ethan\u03c5");
+    filePathList.add("bob\u03b8");
+    filePathList.add("david\u03bb");
+    filePathList.add("priscilla\u03be");
+    filePathList.add("nick\u03b3");
+    filePathList.add("luke\u03c5");
+    filePathList.add("irene\u03bb");
+    filePathList.add("xavier\u03bf");
+    filePathList.add("fred\u03c5");
+    filePathList.add("ulysses\u03bc");
+    filePathList.add("wendy\u03b3");
+    filePathList.add("zach\u03bb");
+    filePathList.add("rachel\u03c2");
+    filePathList.add("sarah\u03c0");
+    filePathList.add("alice\u03c8");
+    filePathList.add("bob\u03c4");
+  }
+
+  @AfterClass
+  public static void testReport() throws IOException, URISyntaxException {
+    if (!AdlStorageConfiguration.isContractTestEnabled()) {
+      return;
+    }
+
+    FileSystem fs = AdlStorageConfiguration.createStorageConnector();
+    fs.delete(new Path(TEST_ROOT), true);
+  }
+
+  @Test
+  public void testAllowedSpecialCharactersMkdir()
+      throws IOException, URISyntaxException {
+    Path parentPath = new Path(TEST_ROOT, UUID.randomUUID().toString() + "/");
+    Path specialFile = new Path(parentPath, path);
+    FileSystem fs = AdlStorageConfiguration.createStorageConnector();
+
+    Assert.assertTrue("Mkdir failed : " + specialFile, fs.mkdirs(specialFile));
+    Assert.assertTrue("File not found after mkdir success: " + specialFile,
+        fs.exists(specialFile));
+    Assert.assertTrue("Not listed under parent " + parentPath,
+        contains(fs.listStatus(parentPath),
+            fs.makeQualified(specialFile).toString()));
+    Assert.assertTrue("Delete failed : " + specialFile,
+            fs.delete(specialFile, true));
+    Assert.assertFalse("File still exist after delete " + specialFile,
+        fs.exists(specialFile));
+  }
+
+  private boolean contains(FileStatus[] statuses, String remotePath) {
+    for (FileStatus status : statuses) {
+      if (status.getPath().toString().equals(remotePath)) {
+        return true;
+      }
+    }
+
+    Arrays.stream(statuses).forEach(s -> LOG.info(s.getPath().toString()));
+    return false;
+  }
+
+  @Before
+  public void setup() throws Exception {
+    org.junit.Assume
+        .assumeTrue(AdlStorageConfiguration.isContractTestEnabled());
+  }
+
+  @Test
+  public void testAllowedSpecialCharactersRename()
+      throws IOException, URISyntaxException {
+
+    String parentPath = TEST_ROOT + UUID.randomUUID().toString() + "/";
+    Path specialFile = new Path(parentPath + path);
+    Path anotherLocation = new Path(parentPath + UUID.randomUUID().toString());
+    FileSystem fs = AdlStorageConfiguration.createStorageConnector();
+
+    Assert.assertTrue("Could not create " + specialFile.toString(),
+        fs.createNewFile(specialFile));
+    Assert.assertTrue(
+        "Failed to rename " + specialFile.toString() + " --> " + anotherLocation
+            .toString(), fs.rename(specialFile, anotherLocation));
+    Assert.assertFalse("File should not be present after successful rename : "
+        + specialFile.toString(), fs.exists(specialFile));
+    Assert.assertTrue("File should be present after successful rename : "
+        + anotherLocation.toString(), fs.exists(anotherLocation));
+    Assert.assertFalse(
+        "Listed under parent whereas expected not listed : " + parentPath,
+        contains(fs.listStatus(new Path(parentPath)),
+            fs.makeQualified(specialFile).toString()));
+
+    Assert.assertTrue(
+        "Failed to rename " + anotherLocation.toString() + " --> " + specialFile
+            .toString(), fs.rename(anotherLocation, specialFile));
+    Assert.assertTrue(
+        "File should be present after successful rename : " + specialFile
+            .toString(), fs.exists(specialFile));
+    Assert.assertFalse("File should not be present after successful rename : "
+        + anotherLocation.toString(), fs.exists(anotherLocation));
+
+    Assert.assertTrue("Not listed under parent " + parentPath,
+        contains(fs.listStatus(new Path(parentPath)),
+            fs.makeQualified(specialFile).toString()));
+
+    Assert.assertTrue("Failed to delete " + parentPath,
+        fs.delete(new Path(parentPath), true));
+  }
+}
\ No newline at end of file
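
Decoding the magic numbers in the parameter generator above: 48-57, 65-90 and
97-122 are the plain alphanumerics (skipped as uninteresting), while 46 '.',
47 '/', 58 ':' and 92 '\' are path or URI metacharacters excluded from some or
all positions. A hypothetical helper mirroring the start-of-path case:

    // True when the generator emits this code point at the start of a path.
    // Assumes 32 <= i < 127, as in the generator's loop.
    static boolean isTestedAtPathStart(int i) {
      boolean alphanumeric = (i >= 48 && i <= 57)    // '0'-'9'
          || (i >= 65 && i <= 90)                    // 'A'-'Z'
          || (i >= 97 && i <= 122);                  // 'a'-'z'
      boolean excluded = i == 46 || i == 47 || i == 58 || i == 92; // . / : \
      return !alphanumeric && !excluded;
    }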

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4113ec5f/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestMetadata.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestMetadata.java b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestMetadata.java
new file mode 100644
index 0000000..3b9e7da
--- /dev/null
+++ b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestMetadata.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.adl.live;
+
+import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Assume;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.UUID;
+
+/**
+ * This class is responsible for testing ContentSummary, ListStatus on
+ * file/folder.
+ */
+public class TestMetadata {
+
+  private FileSystem adlStore;
+  private Path parent;
+
+  public TestMetadata() {
+    parent = new Path("test");
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    Assume.assumeTrue(AdlStorageConfiguration.isContractTestEnabled());
+    adlStore = AdlStorageConfiguration.createStorageConnector();
+  }
+
+  @After
+  public void cleanUp() throws Exception {
+    if (AdlStorageConfiguration.isContractTestEnabled()) {
+      adlStore.delete(parent, true);
+    }
+  }
+
+  @Test
+  public void testContentSummaryOnFile() throws IOException {
+    Path child = new Path(UUID.randomUUID().toString());
+    Path testFile = new Path(parent, child);
+    OutputStream out = adlStore.create(testFile);
+
+    for (int i = 0; i < 1024; ++i) {
+      out.write(97);
+    }
+    out.close();
+
+    Assert.assertTrue(adlStore.isFile(testFile));
+    ContentSummary summary = adlStore.getContentSummary(testFile);
+    Assert.assertEquals(1024, summary.getSpaceConsumed());
+    Assert.assertEquals(1, summary.getFileCount());
+    Assert.assertEquals(0, summary.getDirectoryCount());
+    Assert.assertEquals(1024, summary.getLength());
+  }
+
+  @Test
+  public void testContentSummaryOnFolder() throws IOException {
+    Path child = new Path(UUID.randomUUID().toString());
+    Path testFile = new Path(parent, child);
+    OutputStream out = adlStore.create(testFile);
+
+    for (int i = 0; i < 1024; ++i) {
+      out.write(97);
+    }
+    out.close();
+
+    Assert.assertTrue(adlStore.isFile(testFile));
+    ContentSummary summary = adlStore.getContentSummary(parent);
+    Assert.assertEquals(1024, summary.getSpaceConsumed());
+    Assert.assertEquals(1, summary.getFileCount());
+    Assert.assertEquals(1, summary.getDirectoryCount());
+    Assert.assertEquals(1024, summary.getLength());
+  }
+
+  @Test
+  public void listStatusOnFile() throws IOException {
+    Path path = new Path(parent, "a.txt");
+    FileSystem fs = adlStore;
+    fs.createNewFile(path);
+    Assert.assertTrue(fs.isFile(path));
+    FileStatus[] statuses = fs.listStatus(path);
+    Assert
+        .assertEquals(path.makeQualified(fs.getUri(), fs.getWorkingDirectory()),
+            statuses[0].getPath());
+  }
+}
+
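
A hedged reading of the folder assertions above: getContentSummary on a
directory counts the directory itself, so a single 1 KB file under parent
yields fileCount == 1, directoryCount == 1, and length == spaceConsumed == 1024
when no replication multiplier applies.

    // Sketch of the aggregation the tests rely on.
    ContentSummary summary = adlStore.getContentSummary(parent);
    long files = summary.getFileCount();      // files under parent
    long dirs = summary.getDirectoryCount();  // includes parent itself
    long bytes = summary.getLength();         // sum of file lengths in bytes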


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[8/8] hadoop git commit: YARN-5746. The state of the parentQueue and its childQueues should be synchronized. Contributed by Xuan Gong

Posted by as...@apache.org.
YARN-5746. The state of the parentQueue and its childQueues should be synchronized. Contributed by Xuan Gong


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f885160f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f885160f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f885160f

Branch: refs/heads/YARN-5085
Commit: f885160f4ac56a0999e3b051eb7bccce928c1c33
Parents: 4113ec5
Author: Jian He <ji...@apache.org>
Authored: Fri Dec 2 16:17:31 2016 -0800
Committer: Jian He <ji...@apache.org>
Committed: Fri Dec 2 16:17:31 2016 -0800

----------------------------------------------------------------------
 .../scheduler/capacity/AbstractCSQueue.java     | 26 +++++-
 .../CapacitySchedulerConfiguration.java         | 22 ++++-
 .../scheduler/capacity/TestQueueState.java      | 96 ++++++++++++++++++++
 3 files changed, 139 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f885160f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
index 3daabaf..dd2f0d9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
@@ -291,7 +291,8 @@ public abstract class AbstractCSQueue implements CSQueue {
 
       authorizer = YarnAuthorizationProvider.getInstance(csContext.getConf());
 
-      this.state = csContext.getConfiguration().getState(getQueuePath());
+      initializeQueueState();
+
       this.acls = csContext.getConfiguration().getAcls(getQueuePath());
 
       // Update metrics
@@ -330,6 +331,29 @@ public abstract class AbstractCSQueue implements CSQueue {
     }
   }
 
+  private void initializeQueueState() {
+    // Inherit the parent's state when none is configured; applies only to non-root queues
+    if (parent != null) {
+      QueueState configuredState = csContext.getConfiguration()
+          .getConfiguredState(getQueuePath());
+      QueueState parentState = parent.getState();
+      if (configuredState == null) {
+        this.state = parentState;
+      } else if (configuredState == QueueState.RUNNING
+          && parentState == QueueState.STOPPED) {
+        throw new IllegalArgumentException(
+            "The parent queue:" + parent.getQueueName() + " state is STOPPED, "
+            + "child queue:" + queueName + " state cannot be RUNNING.");
+      } else {
+        this.state = configuredState;
+      }
+    } else {
+      // if this is the root queue, get the state from the configuration.
+      // if the state is not set, use RUNNING as default state.
+      this.state = csContext.getConfiguration().getState(getQueuePath());
+    }
+  }
+
   protected QueueInfo getQueueInfo() {
     // Deliberately doesn't use lock here, because this method will be invoked
     // from schedulerApplicationAttempt, to avoid deadlock, sacrifice

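The decision table implemented by initializeQueueState() above can be read
as a small pure function. The sketch below mirrors that logic for
illustration only; the class name, method name, and exception text are
assumptions, not part of the patch.

import org.apache.hadoop.yarn.api.records.QueueState;

final class QueueStateRules {
  static QueueState resolveChildState(QueueState configured,
      QueueState parentState) {
    if (configured == null) {
      return parentState;           // unset: inherit the parent's state
    }
    if (configured == QueueState.RUNNING
        && parentState == QueueState.STOPPED) {
      // Illustrative message; the patch throws with the queue names instead.
      throw new IllegalArgumentException(
          "A RUNNING child under a STOPPED parent is invalid");
    }
    return configured;              // otherwise honor the configured value
  }
}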
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f885160f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
index f8335a8..bfaeba4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
@@ -448,12 +448,26 @@ public class CapacitySchedulerConfiguration extends ReservationSchedulerConfigur
     setFloat(getQueuePrefix(queue) + USER_LIMIT_FACTOR, userLimitFactor); 
   }
   
-  public QueueState getState(String queue) {
+  public QueueState getConfiguredState(String queue) {
     String state = get(getQueuePrefix(queue) + STATE);
-    return (state != null) ? 
-        QueueState.valueOf(StringUtils.toUpperCase(state)) : QueueState.RUNNING;
+    if (state == null) {
+      return null;
+    } else {
+      return QueueState.valueOf(StringUtils.toUpperCase(state));
+    }
   }
-  
+
+  public QueueState getState(String queue) {
+    QueueState state = getConfiguredState(queue);
+    return (state == null) ? QueueState.RUNNING : state;
+  }
+
+  @Private
+  @VisibleForTesting
+  public void setState(String queue, QueueState state) {
+    set(getQueuePrefix(queue) + STATE, state.name());
+  }
+
   public void setAccessibleNodeLabels(String queue, Set<String> labels) {
     if (labels == null) {
       return;

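The split between getConfiguredState() and getState() lets callers tell
"not configured" apart from the RUNNING default. A short usage sketch,
assuming the root.q1 queue path used elsewhere in this patch:

import org.apache.hadoop.yarn.api.records.QueueState;

CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
QueueState raw = csConf.getConfiguredState("root.q1");   // null when unset
QueueState effective = csConf.getState("root.q1");       // RUNNING when unset
csConf.setState("root.q1", QueueState.STOPPED);          // @VisibleForTesting setter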
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f885160f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueState.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueState.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueState.java
new file mode 100644
index 0000000..bd878b7
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueState.java
@@ -0,0 +1,96 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
+
+import java.io.IOException;
+import org.apache.hadoop.yarn.api.records.QueueState;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Test Queue States.
+ */
+public class TestQueueState {
+
+  private static final String Q1 = "q1";
+  private static final String Q2 = "q2";
+
+  private final static String Q1_PATH =
+      CapacitySchedulerConfiguration.ROOT + "." + Q1;
+  private final static String Q2_PATH =
+      Q1_PATH + "." + Q2;
+  private CapacityScheduler cs;
+  private YarnConfiguration conf;
+
+  @Test (timeout = 15000)
+  public void testQueueState() throws IOException {
+    CapacitySchedulerConfiguration csConf =
+        new CapacitySchedulerConfiguration();
+    csConf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[] {Q1});
+    csConf.setQueues(Q1_PATH, new String[] {Q2});
+
+    csConf.setCapacity(Q1_PATH, 100);
+    csConf.setCapacity(Q2_PATH, 100);
+
+    conf = new YarnConfiguration(csConf);
+    cs = new CapacityScheduler();
+
+    RMContext rmContext = TestUtils.getMockRMContext();
+    cs.setConf(conf);
+    cs.setRMContext(rmContext);
+    cs.init(conf);
+
+    // By default, the state of both queues should be RUNNING
+    Assert.assertEquals(QueueState.RUNNING, cs.getQueue(Q1).getState());
+    Assert.assertEquals(QueueState.RUNNING, cs.getQueue(Q2).getState());
+
+    // Change the state of Q1 to STOPPED, and re-initialize the CS
+    csConf.setState(Q1_PATH, QueueState.STOPPED);
+    conf = new YarnConfiguration(csConf);
+    cs.reinitialize(conf, rmContext);
+    // The state of Q1 and its child Q2 should both be STOPPED
+    Assert.assertEquals(QueueState.STOPPED, cs.getQueue(Q1).getState());
+    Assert.assertEquals(QueueState.STOPPED, cs.getQueue(Q2).getState());
+
+    // Change the state of Q1 to RUNNING, and change the state of Q2 to STOPPED
+    csConf.setState(Q1_PATH, QueueState.RUNNING);
+    csConf.setState(Q2_PATH, QueueState.STOPPED);
+    conf = new YarnConfiguration(csConf);
+    // Reinitialize the CS; the operation should succeed
+    cs.reinitialize(conf, rmContext);
+    Assert.assertEquals(QueueState.RUNNING, cs.getQueue(Q1).getState());
+    Assert.assertEquals(QueueState.STOPPED, cs.getQueue(Q2).getState());
+
+    // Change the state of Q1 to STOPPED, and change the state of Q2 to RUNNING
+    csConf.setState(Q1_PATH, QueueState.STOPPED);
+    csConf.setState(Q2_PATH, QueueState.RUNNING);
+    conf = new YarnConfiguration(csConf);
+    // Reinitialize the CS; the operation should fail.
+    try {
+      cs.reinitialize(conf, rmContext);
+      Assert.fail("Should throw an Exception.");
+    } catch (Exception ex) {
+      Assert.assertTrue(ex.getCause().getMessage().contains(
+          "The parent queue:q1 state is STOPPED, "
+          + "child queue:q2 state cannot be RUNNING."));
+    }
+  }
+}

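Outside of tests, the same state is set through configuration rather than
the setState() helper. A sketch of the equivalent raw property write,
assuming the root.q1 path from the test above (the key is the queue
prefix plus "state"):

import org.apache.hadoop.conf.Configuration;

Configuration conf = new Configuration();
conf.set("yarn.scheduler.capacity.root.q1.state", "STOPPED");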
