Posted to commits@ozone.apache.org by el...@apache.org on 2020/06/04 12:09:31 UTC

[hadoop-ozone] branch master updated: HDDS-3627. Remove FilteredClassloader and replace with maven based hadoop2/hadoop3 ozonefs generation (#992)

This is an automated email from the ASF dual-hosted git repository.

elek pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 072370b  HDDS-3627. Remove FilteredClassloader and replace with maven based hadoop2/hadoop3 ozonefs generation (#992)
072370b is described below

commit 072370b947416d89fae11d00a84a1d9a6b31beaa
Author: Elek, Márton <el...@users.noreply.github.com>
AuthorDate: Thu Jun 4 14:09:24 2020 +0200

    HDDS-3627. Remove FilteredClassloader and replace with maven based hadoop2/hadoop3 ozonefs generation (#992)
---
 .../java/org/apache/hadoop/hdds/utils/IOUtils.java |  54 +++
 .../org/apache/hadoop/ozone/OzoneConfigKeys.java   |  10 +-
 .../common/src/main/resources/ozone-default.xml    |  16 +-
 .../apache/hadoop/ozone/client/rpc/RpcClient.java  |   4 +-
 hadoop-ozone/dist/pom.xml                          |   4 +-
 .../compose/ozone-mr/hadoop27/docker-compose.yaml  |   4 +-
 .../main/compose/ozone-mr/hadoop27/docker-config   |   4 +-
 .../ozone-mr/hadoop27/{run-test.sh => test.sh}     |   0
 .../compose/ozone-mr/hadoop31/docker-compose.yaml  |   4 +-
 .../main/compose/ozone-mr/hadoop31/docker-config   |   2 +-
 .../compose/ozone-mr/hadoop32/docker-compose.yaml  |   4 +-
 .../main/compose/ozone-mr/hadoop32/docker-config   |   2 +-
 .../compose/ozonesecure-mr/docker-compose.yaml     |   6 +-
 .../src/main/compose/ozonesecure-mr/docker-config  |   2 +-
 .../dist/src/main/smoketest/mapreduce.robot        |  12 +-
 .../org/apache/hadoop/ozone/om/OzoneManager.java   | 210 +++++-----
 hadoop-ozone/{ozonefs => ozonefs-common}/pom.xml   |  72 +---
 .../org/apache/hadoop/fs/ozone/BasicKeyInfo.java   |   0
 .../java/org/apache/hadoop/fs/ozone/BasicOzFs.java |   0
 .../fs/ozone/BasicOzoneClientAdapterImpl.java      |   0
 .../hadoop/fs/ozone/BasicOzoneFileSystem.java      |  66 +--
 .../hadoop/fs/ozone/CapableOzoneFSInputStream.java |   0
 .../java/org/apache/hadoop/fs/ozone/Constants.java |   0
 .../apache/hadoop/fs/ozone/FileStatusAdapter.java  |   0
 .../org/apache/hadoop/fs/ozone/O3fsDtFetcher.java  |   0
 .../apache/hadoop/fs/ozone/OzoneClientAdapter.java |   0
 .../hadoop/fs/ozone/OzoneClientAdapterImpl.java    |   0
 .../apache/hadoop/fs/ozone/OzoneFSInputStream.java |   0
 .../hadoop/fs/ozone/OzoneFSOutputStream.java       |   0
 .../hadoop/fs/ozone/OzoneFSStorageStatistics.java  |   0
 .../org/apache/hadoop/fs/ozone/OzoneFsShell.java   |   0
 .../hadoop/fs/ozone/OzoneStreamCapabilities.java   |   0
 .../java/org/apache/hadoop/fs/ozone/Statistic.java |   0
 .../org/apache/hadoop/fs/ozone/package-info.java}  |  30 +-
 .../hadoop/fs/ozone/TestOzoneFSInputStream.java    |   0
 .../org/apache/hadoop/fs/ozone/package-info.java   |   0
 .../services/org.apache.hadoop.fs.FileSystem       |   0
 .../src/test/resources/log4j.properties            |   0
 hadoop-ozone/ozonefs-hadoop2/pom.xml               | 105 +++++
 .../fs/ozone/Hadoop27OmTransportFactory.java}      |  30 +-
 .../hadoop/fs/ozone/Hadoop27RpcTransport.java      |  82 ++++
 .../java/org/apache/hadoop/fs/ozone/OzFs.java}     |  18 +-
 .../apache/hadoop/fs/ozone/OzoneFileSystem.java}   |  17 +-
 .../org/apache/hadoop/fs/ozone/package-info.java}  |  30 +-
 .../services/org.apache.hadoop.fs.FileSystem       |   0
 ....hadoop.ozone.om.protocolPB.OmTransportFactory} |   2 +-
 hadoop-ozone/ozonefs-hadoop3/pom.xml               |  85 ++++
 .../java/org/apache/hadoop/fs/ozone/OzFs.java}     |  18 +-
 .../apache/hadoop/fs/ozone/OzoneFileSystem.java    |  33 +-
 .../org/apache/hadoop/fs/ozone/package-info.java}  |  30 +-
 .../services/org.apache.hadoop.fs.FileSystem       |   0
 hadoop-ozone/ozonefs-lib-legacy/pom.xml            | 138 ------
 .../src/main/resources/ozonefs.txt                 |  21 -
 .../pom.xml                                        | 122 ++----
 hadoop-ozone/ozonefs/pom.xml                       |  42 +-
 .../hadoop/fs/ozone/FilteredClassLoader.java       |  96 -----
 .../hadoop/fs/ozone/OzoneClientAdapterFactory.java | 169 --------
 .../apache/hadoop/fs/ozone/OzoneFileSystem.java    |  33 +-
 .../services/org.apache.hadoop.fs.FileSystem       |   0
 .../hadoop/fs/ozone/TestFilteredClassLoader.java   |  63 ---
 .../fs/ozone/TestOzoneFileSystemWithMocks.java     | 171 --------
 .../hadoop/fs/ozone/TestReadWriteStatistics.java   | 463 ---------------------
 hadoop-ozone/pom.xml                               |  20 +-
 63 files changed, 642 insertions(+), 1652 deletions(-)
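
The change replaces the runtime FilteredClassLoader isolation with build-time separation: a shared hadoop-ozone-filesystem-common module plus dedicated hadoop-ozone-filesystem-hadoop2 and hadoop-ozone-filesystem-hadoop3 artifacts, and a deployment puts the one matching its Hadoop version on the classpath. As a minimal sketch (not part of this commit), client code keeps using the standard Hadoop FileSystem API; the bucket, volume, and OM address below are illustrative placeholders, and the implementation is picked up through the jar's META-INF/services registration rather than any isolated-classloader setting:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class O3fsListExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // o3fs URI with a bucket.volume.om-host:port authority; the values are
    // illustrative, not taken from the commit.
    Path root = new Path("o3fs://bucket1.volume1.om:9862/");
    FileSystem fs = root.getFileSystem(conf);
    for (FileStatus status : fs.listStatus(root)) {
      System.out.println(status.getPath());
    }
    fs.close();
  }
}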

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/IOUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/IOUtils.java
new file mode 100644
index 0000000..9317675
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/IOUtils.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.utils;
+
+import org.slf4j.Logger;
+
+/**
+ * Static helper utilities for IO / Closable classes.
+ */
+public final class IOUtils {
+
+  private IOUtils() {
+  }
+
+  /**
+   * Close the Closeable objects and <b>ignore</b> any {@link Throwable} or
+   * null pointers. Must only be used for cleanup in exception handlers.
+   *
+   * @param logger     the log to record problems to at debug level. Can be
+   *                   null.
+   * @param closeables the objects to close
+   */
+  public static void cleanupWithLogger(Logger logger,
+      java.io.Closeable... closeables) {
+    for (java.io.Closeable c : closeables) {
+      if (c != null) {
+        try {
+          c.close();
+        } catch (Throwable e) {
+          if (logger != null) {
+            logger.debug("Exception in closing {}", c, e);
+          }
+        }
+      }
+    }
+  }
+
+}
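
The new helper mirrors the cleanupWithLogger pattern from Hadoop's own IOUtils and lets Ozone drop the commons-io closeQuietly call in RpcClient (see the RpcClient.java hunk below). An illustrative sketch of the intended usage, with hypothetical field names standing in for ozoneManagerClient and xceiverClientManager:

import java.io.Closeable;
import java.io.IOException;

import org.apache.hadoop.hdds.utils.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class ExampleClient implements Closeable {
  private static final Logger LOG =
      LoggerFactory.getLogger(ExampleClient.class);

  // Hypothetical resources standing in for the clients closed in
  // RpcClient#close().
  private final Closeable omClient;
  private final Closeable clientManager;

  ExampleClient(Closeable omClient, Closeable clientManager) {
    this.omClient = omClient;
    this.clientManager = clientManager;
  }

  @Override
  public void close() throws IOException {
    // Close everything; failures are logged at debug level, not rethrown.
    IOUtils.cleanupWithLogger(LOG, omClient, clientManager);
  }
}
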
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index 281f185..d3b61cd 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -18,18 +18,18 @@
 
 package org.apache.hadoop.ozone;
 
+import java.util.concurrent.TimeUnit;
+
 import org.apache.hadoop.hdds.annotation.InterfaceAudience;
 import org.apache.hadoop.hdds.annotation.InterfaceStability;
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-
 import org.apache.hadoop.http.HttpConfig;
+
 import org.apache.ratis.proto.RaftProtos.ReplicationLevel;
 import org.apache.ratis.util.TimeDuration;
 
-import java.util.concurrent.TimeUnit;
-
 /**
  * This class contains constants for configuration keys used in Ozone.
  */
@@ -400,10 +400,6 @@ public final class OzoneConfigKeys {
   public static final String OZONE_S3_AUTHINFO_MAX_LIFETIME_KEY =
       "ozone.s3.token.max.lifetime";
   public static final String OZONE_S3_AUTHINFO_MAX_LIFETIME_KEY_DEFAULT = "3m";
-  //For technical reasons this is unused and hardcoded to the
-  // OzoneFileSystem.initialize.
-  public static final String OZONE_FS_ISOLATED_CLASSLOADER =
-      "ozone.fs.isolated-classloader";
 
   // Ozone Client Retry and Failover configurations
   public static final String OZONE_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY =
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 93292d8..79f3bcb 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -1957,21 +1957,7 @@
       not be renewed.
     </description>
   </property>
-
-  <property>
-    <name>ozone.fs.isolated-classloader</name>
-    <value></value>
-    <tag>OZONE, OZONEFS</tag>
-    <description>
-      Enable it for older hadoops to separate the classloading of all the
-      Ozone classes. With 'true' value, ozonefs can be used with older
-      hadoop versions as the hadoop3/ozone related classes are loaded by
-      an isolated classloader.
-
-      Default depends from the used jar. true for ozone-filesystem-lib-legacy
-      jar and false for the ozone-filesystem-lib-current.jar
-    </description>
-  </property>
+  
   <property>
     <name>ozone.manager.db.checkpoint.transfer.bandwidthPerSec</name>
     <value>0</value>
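
With ozone.fs.isolated-classloader gone, the Hadoop version split is handled entirely by which filesystem jar is deployed; there is no runtime toggle left to document. A small, hedged sanity check (not part of this commit) that prints which implementation actually serves the o3fs scheme, as registered through META-INF/services/org.apache.hadoop.fs.FileSystem:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class WhichOzoneFs {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Resolves the FileSystem class for the o3fs scheme from the classpath
    // (ServiceLoader registration or fs.o3fs.impl, if explicitly set).
    Class<? extends FileSystem> impl =
        FileSystem.getFileSystemClass("o3fs", conf);
    System.out.println("o3fs is served by: " + impl.getName());
  }
}
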
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index 3c333bd..4a2d713 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
 import org.apache.hadoop.hdds.tracing.TracingUtil;
+import org.apache.hadoop.hdds.utils.IOUtils;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
@@ -108,7 +109,6 @@ import org.apache.hadoop.security.token.Token;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
-import org.apache.commons.io.IOUtils;
 import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS;
 import org.apache.logging.log4j.util.Strings;
 import org.apache.ratis.protocol.ClientId;
@@ -795,7 +795,7 @@ public class RpcClient implements ClientProtocol {
 
   @Override
   public void close() throws IOException {
-    IOUtils.closeQuietly(ozoneManagerClient, xceiverClientManager);
+    IOUtils.cleanupWithLogger(LOG, ozoneManagerClient, xceiverClientManager);
   }
 
   @Override
diff --git a/hadoop-ozone/dist/pom.xml b/hadoop-ozone/dist/pom.xml
index 896831b..0929b24 100644
--- a/hadoop-ozone/dist/pom.xml
+++ b/hadoop-ozone/dist/pom.xml
@@ -324,11 +324,11 @@
       <dependencies>
         <dependency>
           <groupId>org.apache.hadoop</groupId>
-          <artifactId>hadoop-ozone-filesystem-lib-current</artifactId>
+          <artifactId>hadoop-ozone-filesystem-hadoop2</artifactId>
         </dependency>
         <dependency>
           <groupId>org.apache.hadoop</groupId>
-          <artifactId>hadoop-ozone-filesystem-lib-legacy</artifactId>
+          <artifactId>hadoop-ozone-filesystem-hadoop3</artifactId>
         </dependency>
       </dependencies>
     </profile>
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-compose.yaml
index 37afe2f..effb4a7 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-compose.yaml
+++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-compose.yaml
@@ -77,7 +77,7 @@ services:
       - ./docker-config
       - ../common-config
     environment:
-      HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-legacy-@project.version@.jar
+      HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-hadoop2-@project.version@.jar
     command: ["yarn", "resourcemanager"]
   nm:
     image: ${HADOOP_IMAGE}:${HADOOP_VERSION}
@@ -88,7 +88,7 @@ services:
       - ./docker-config
       - ../common-config
     environment:
-      HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-legacy-@project.version@.jar
+      HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-hadoop2-@project.version@.jar
       WAIT_FOR: rm:8088
     command: ["yarn","nodemanager"]
 # Optional section: comment out this part to get DNS resolution for all the containers.
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-config
index 83694e1..0eef1f6 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-config
@@ -14,7 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-CORE-SITE.xml_fs.AbstractFileSystem.o3fs.impl=org.apache.hadoop.fs.ozone.BasicOzFs
-MAPRED-SITE.XML_mapreduce.application.classpath=/opt/hadoop/share/hadoop/mapreduce/*:/opt/hadoop/share/hadoop/mapreduce/lib/*:/opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-legacy-@project.version@.jar
+CORE-SITE.xml_fs.AbstractFileSystem.o3fs.impl=org.apache.hadoop.fs.ozone.OzFs
+MAPRED-SITE.XML_mapreduce.application.classpath=/opt/hadoop/share/hadoop/mapreduce/*:/opt/hadoop/share/hadoop/mapreduce/lib/*:/opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-hadoop2-@project.version@.jar
 
 no_proxy=om,scm,s3g,kdc,localhost,127.0.0.1
\ No newline at end of file
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/run-test.sh b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/test.sh
similarity index 100%
rename from hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/run-test.sh
rename to hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/test.sh
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-compose.yaml
index 05464d4..351e1e9 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-compose.yaml
+++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-compose.yaml
@@ -77,7 +77,7 @@ services:
       - ./docker-config
       - ../common-config
     environment:
-      HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar
+      HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-hadoop3-@project.version@.jar
     command: ["yarn", "resourcemanager"]
   nm:
     image: ${HADOOP_IMAGE}:${HADOOP_VERSION}
@@ -88,6 +88,6 @@ services:
       - ./docker-config
       - ../common-config
     environment:
-      HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar
+      HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-hadoop3-@project.version@.jar
       WAIT_FOR: rm:8088
     command: ["yarn","nodemanager"]
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-config
index 95bebcf..ff8f29c 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-config
@@ -15,6 +15,6 @@
 # limitations under the License.
 
 CORE-SITE.xml_fs.AbstractFileSystem.o3fs.impl=org.apache.hadoop.fs.ozone.OzFs
-MAPRED-SITE.XML_mapreduce.application.classpath=/opt/hadoop/share/hadoop/mapreduce/*:/opt/hadoop/share/hadoop/mapreduce/lib/*:/opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar
+MAPRED-SITE.XML_mapreduce.application.classpath=/opt/hadoop/share/hadoop/mapreduce/*:/opt/hadoop/share/hadoop/mapreduce/lib/*:/opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-hadoop3-@project.version@.jar
 
 no_proxy=om,scm,s3g,kdc,localhost,127.0.0.1
\ No newline at end of file
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-compose.yaml
index aee8910..35303cb 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-compose.yaml
+++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-compose.yaml
@@ -77,7 +77,7 @@ services:
       - ./docker-config
       - ../common-config
     environment:
-      HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar
+      HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-hadoop3-@project.version@.jar
     command: ["yarn", "resourcemanager"]
   nm:
     image: ${HADOOP_IMAGE}:${HADOOP_VERSION}
@@ -88,7 +88,7 @@ services:
       - ./docker-config
       - ../common-config
     environment:
-      HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar
+      HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-hadoop3-@project.version@.jar
       WAIT_FOR: rm:8088
     command: ["yarn","nodemanager"]
 # Optional section: comment out this part to get DNS resolution for all the containers.
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-config
index 95bebcf..ff8f29c 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-config
@@ -15,6 +15,6 @@
 # limitations under the License.
 
 CORE-SITE.xml_fs.AbstractFileSystem.o3fs.impl=org.apache.hadoop.fs.ozone.OzFs
-MAPRED-SITE.XML_mapreduce.application.classpath=/opt/hadoop/share/hadoop/mapreduce/*:/opt/hadoop/share/hadoop/mapreduce/lib/*:/opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar
+MAPRED-SITE.XML_mapreduce.application.classpath=/opt/hadoop/share/hadoop/mapreduce/*:/opt/hadoop/share/hadoop/mapreduce/lib/*:/opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-hadoop3-@project.version@.jar
 
 no_proxy=om,scm,s3g,kdc,localhost,127.0.0.1
\ No newline at end of file
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-compose.yaml
index 1079682..e6d5a77 100644
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-compose.yaml
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-compose.yaml
@@ -106,7 +106,7 @@ services:
     env_file:
       - ./docker-config
     environment:
-      HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar
+      HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-hadoop3-@project.version@.jar
       KERBEROS_KEYTABS: rm HTTP hadoop
     command: ["yarn", "resourcemanager"]
   nm:
@@ -119,7 +119,7 @@ services:
     env_file:
       - ./docker-config
     environment:
-      HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar
+      HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-hadoop3-@project.version@.jar
       WAIT_FOR: rm:8088
       KERBEROS_KEYTABS: nm HTTP
     command: ["yarn","nodemanager"]
@@ -137,7 +137,7 @@ services:
       - ./docker-config
     environment:
       KERBEROS_KEYTABS: jhs HTTP
-      HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar
+      HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-hadoop3-@project.version@.jar
       WAIT_FOR: rm:8088
     command: ["yarn","timelineserver"]
 networks:
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config
index 4497bb0..cee2a48 100644
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config
@@ -76,7 +76,7 @@ MAPRED-SITE.XML_mapreduce.reduce.env=HADOOP_MAPRED_HOME=$HADOOP_HOME
 MAPRED-SITE.XML_mapreduce.map.memory.mb=2048
 MAPRED-SITE.XML_mapreduce.reduce.memory.mb=2048
 #MAPRED-SITE.XML_mapred.child.java.opts=-Xmx2048
-MAPRED-SITE.XML_mapreduce.application.classpath=/opt/hadoop/share/hadoop/mapreduce/*:/opt/hadoop/share/hadoop/mapreduce/lib/*:/opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar
+MAPRED-SITE.XML_mapreduce.application.classpath=/opt/hadoop/share/hadoop/mapreduce/*:/opt/hadoop/share/hadoop/mapreduce/lib/*:/opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-hadoop3-@project.version@.jar
 
 YARN-SITE.XML_yarn.app.mapreduce.am.staging-dir=/user
 YARN-SITE.XML_yarn.timeline-service.enabled=true
diff --git a/hadoop-ozone/dist/src/main/smoketest/mapreduce.robot b/hadoop-ozone/dist/src/main/smoketest/mapreduce.robot
index c1e3285..654dd49 100644
--- a/hadoop-ozone/dist/src/main/smoketest/mapreduce.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/mapreduce.robot
@@ -23,15 +23,21 @@ Test Timeout        4 minute
 *** Variables ***
 ${volume}          volume1
 ${bucket}          bucket1
-${hadoop.version}  3.2.0
 
+*** Keywords ***
+Find example jar
+                    ${jar} =            Execute                 find /opt/hadoop/share/hadoop/mapreduce/ -name "*.jar" | grep mapreduce-examples | grep -v sources | grep -v test
+                    [return]            ${jar}
 
 *** Test cases ***
+
 Execute PI calculation
-                    ${output} =      Execute                 yarn jar ./share/hadoop/mapreduce/hadoop-mapreduce-examples-${hadoop.version}.jar pi 3 3
+                    ${exampleJar}    Find example jar
+                    ${output} =      Execute                 yarn jar ${exampleJar} pi 3 3
                     Should Contain   ${output}               completed successfully
 
 Execute WordCount
+                    ${exampleJar}    Find example jar
                     ${random}        Generate Random String  2   [NUMBERS]
-                    ${output} =      Execute                 yarn jar ./share/hadoop/mapreduce/hadoop-mapreduce-examples-${hadoop.version}.jar wordcount o3fs://bucket1.volume1/key1 o3fs://bucket1.volume1/key1-${random}.count
+                    ${output} =      Execute                 yarn jar ${exampleJar} wordcount o3fs://bucket1.volume1/key1 o3fs://bucket1.volume1/key1-${random}.count
                     Should Contain   ${output}               completed successfully
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index 5658b53..67eb34d 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -305,6 +305,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
     RUNNING,
     STOPPED
   }
+
   // Used in MiniOzoneCluster testing
   private State omState;
 
@@ -398,7 +399,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
     }
     if (secConfig.isSecurityEnabled()) {
       omComponent = OM_DAEMON + "-" + omId;
-      if(omStorage.getOmCertSerialId() == null) {
+      if (omStorage.getOmCertSerialId() == null) {
         throw new RuntimeException("OzoneManager started in secure mode but " +
             "doesn't have SCM signed certificate.");
       }
@@ -563,6 +564,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
    * Looks up the configuration to see if there is custom class specified.
    * Constructs the instance by passing the configuration directly to the
    * constructor to achieve thread safety using final fields.
+   *
    * @param conf
    * @return IAccessAuthorizer
    */
@@ -613,6 +615,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
 
   /**
    * Returns temporary metrics storage file.
+   *
    * @return File
    */
   private File getTempMetricsStorageFile() {
@@ -621,13 +624,13 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
 
   /**
    * Returns metrics storage file.
+   *
    * @return File
    */
   private File getMetricsStorageFile() {
     return new File(omMetaDir, OM_METRICS_FILE);
   }
 
-
   private OzoneDelegationTokenSecretManager createDelegationTokenSecretManager(
       OzoneConfiguration conf) throws IOException {
     long tokenRemoverScanInterval =
@@ -724,7 +727,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
 
   /**
    * For testing purpose only.
-   * */
+   */
   public void setCertClient(CertificateClient certClient) {
     // TODO: Initialize it in constructor with implementation for certClient.
     this.certClient = certClient;
@@ -751,7 +754,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
   /**
    * Login OM service user if security and Kerberos are enabled.
    *
-   * @param  conf
+   * @param conf
    * @throws IOException, AuthenticationException
    */
   private static void loginOMUser(OzoneConfiguration conf)
@@ -833,10 +836,10 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
   /**
    * Starts an RPC server, if configured.
    *
-   * @param conf configuration
-   * @param addr configured address of RPC server
-   * @param protocol RPC protocol provided by RPC server
-   * @param instance RPC protocol implementation instance
+   * @param conf         configuration
+   * @param addr         configured address of RPC server
+   * @param protocol     RPC protocol provided by RPC server
+   * @param instance     RPC protocol implementation instance
    * @param handlerCount RPC server handler count
    * @return RPC server
    * @throws IOException if there is an I/O error while creating RPC server
@@ -873,7 +876,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
    * @param conf OzoneConfiguration
    * @return OM instance
    * @throws IOException, AuthenticationException in case OM instance
-   *   creation fails.
+   *                      creation fails.
    */
   public static OzoneManager createOm(OzoneConfiguration conf)
       throws IOException, AuthenticationException {
@@ -886,7 +889,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
    * @param conf OzoneConfiguration
    * @throws IOException, AuthenticationException in case login fails.
    */
-  private static void loginOMUserIfSecurityEnabled(OzoneConfiguration  conf)
+  private static void loginOMUserIfSecurityEnabled(OzoneConfiguration conf)
       throws IOException, AuthenticationException {
     securityEnabled = OzoneSecurityUtil.isSecurityEnabled(conf);
     if (securityEnabled) {
@@ -937,7 +940,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
         return false;
       }
     } else {
-      if(OzoneSecurityUtil.isSecurityEnabled(conf) &&
+      if (OzoneSecurityUtil.isSecurityEnabled(conf) &&
           omStorage.getOmCertSerialId() == null) {
         LOG.info("OM storage is already initialized. Initializing security");
         initializeSecurity(conf, omStorage);
@@ -953,7 +956,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
 
   /**
    * Initializes secure OzoneManager.
-   * */
+   */
   @VisibleForTesting
   public static void initializeSecurity(OzoneConfiguration conf,
       OMStorage omStore)
@@ -1007,7 +1010,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
    * Builds a message for logging startup information about an RPC server.
    *
    * @param description RPC server description
-   * @param addr RPC server listening address
+   * @param addr        RPC server listening address
    * @return server startup message
    */
   private static String buildRpcServerStartMessage(String description,
@@ -1267,7 +1270,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
   public void stop() {
     try {
       // Cancel the metrics timer and set to null.
-      if (metricsTimer!= null) {
+      if (metricsTimer != null) {
         metricsTimer.cancel();
         metricsTimer = null;
         scheduleOMMetricsWriteTask = null;
@@ -1321,7 +1324,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
     if (shouldRun) {
       boolean running = delegationTokenMgr.isRunning()
           && blockTokenMgr.isRunning();
-      if(!running){
+      if (!running) {
         startSecretManager();
       }
     }
@@ -1329,7 +1332,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
 
   /**
    * Get SCM signed certificate and store it using certificate client.
-   * */
+   */
   private static void getSCMSignedCert(CertificateClient client,
       OzoneConfiguration config, OMStorage omStore) throws IOException {
     CertificateSignRequest.Builder builder = client.getCSRBuilder();
@@ -1357,7 +1360,6 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
         .setSubject(subject)
         .addIpAddress(ip);
 
-
     OMHANodeDetails haOMHANodeDetails = OMHANodeDetails.loadOMHAConfig(config);
     String serviceName =
         haOMHANodeDetails.getLocalNodeDetails().getOMServiceId();
@@ -1392,9 +1394,8 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
 
     try {
 
-
       // Store SCM CA certificate.
-      if(response.hasX509CACertificate()) {
+      if (response.hasX509CACertificate()) {
         String pemEncodedRootCert = response.getX509CACertificate();
         client.storeCertificate(pemEncodedRootCert, true, true);
         client.storeCertificate(pemEncodedCert, true);
@@ -1413,7 +1414,6 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
   }
 
   /**
-   *
    * @return true if delegation token operation is allowed
    */
   private boolean isAllowedDelegationTokenOp() throws IOException {
@@ -1429,6 +1429,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
 
   /**
    * Returns authentication method used to establish the connection.
+   *
    * @return AuthenticationMethod used to establish connection
    * @throws IOException
    */
@@ -1451,6 +1452,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
 
   /**
    * Get delegation token from OzoneManager.
+   *
    * @param renewer Renewer information
    * @return delegationToken DelegationToken signed by OzoneManager
    * @throws IOException on error
@@ -1490,10 +1492,11 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
 
   /**
    * Method to renew a delegationToken issued by OzoneManager.
+   *
    * @param token token to renew
    * @return new expiryTime of the token
    * @throws InvalidToken if {@code token} is invalid
-   * @throws IOException on other errors
+   * @throws IOException  on other errors
    */
   @Override
   public long renewDelegationToken(Token<OzoneTokenIdentifier> token)
@@ -1528,6 +1531,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
 
   /**
    * Cancels a delegation token.
+   *
    * @param token token to cancel
    * @throws IOException on error
    */
@@ -1548,6 +1552,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
           ex, TOKEN_ERROR_OTHER);
     }
   }
+
   /**
    * Creates a volume.
    *
@@ -1558,7 +1563,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
   public void createVolume(OmVolumeArgs args) throws IOException {
     try {
       metrics.incNumVolumeCreates();
-      if(isAclEnabled) {
+      if (isAclEnabled) {
         checkAcls(ResourceType.VOLUME, StoreType.OZONE, ACLType.CREATE,
             args.getVolume(), null, null);
       }
@@ -1599,6 +1604,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
 
   /**
    * A variant of checkAcls that doesn't throw exception if permission denied.
+   *
    * @return true if permission granted, false if permission denied.
    */
   private boolean hasAcls(ResourceType resType, StoreType store,
@@ -1615,9 +1621,9 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
     }
   }
 
-
   /**
    * CheckAcls for the ozone object.
+   *
    * @throws OMException ResultCodes.PERMISSION_DENIED if permission denied.
    */
   @SuppressWarnings("parameternumber")
@@ -1631,6 +1637,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
 
   /**
    * CheckAcls for the ozone object.
+   *
    * @return true if permission granted, false if permission denied.
    * @throws OMException ResultCodes.PERMISSION_DENIED if permission denied
    *                     and throwOnPermissionDenied set to true.
@@ -1669,8 +1676,8 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
   }
 
   /**
-   *
    * Return true if Ozone acl's are enabled, else false.
+   *
    * @return boolean
    */
   public boolean getAclsEnabled() {
@@ -1682,7 +1689,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
    */
   @Override
   public boolean setOwner(String volume, String owner) throws IOException {
-    if(isAclEnabled) {
+    if (isAclEnabled) {
       checkAcls(ResourceType.VOLUME, StoreType.OZONE, ACLType.WRITE_ACL, volume,
           null, null);
     }
@@ -1707,12 +1714,12 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
    * Changes the Quota on a volume.
    *
    * @param volume - Name of the volume.
-   * @param quota - Quota in bytes.
+   * @param quota  - Quota in bytes.
    * @throws IOException
    */
   @Override
   public void setQuota(String volume, long quota) throws IOException {
-    if(isAclEnabled) {
+    if (isAclEnabled) {
       checkAcls(ResourceType.VOLUME, StoreType.OZONE, ACLType.WRITE, volume,
           null, null);
     }
@@ -1735,7 +1742,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
   /**
    * Checks if the specified user can access this volume.
    *
-   * @param volume - volume
+   * @param volume  - volume
    * @param userAcl - user acls which needs to be checked for access
    * @return true if the user has required access for the volume, false
    * otherwise
@@ -1762,7 +1769,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
           OMAction.CHECK_VOLUME_ACCESS, auditMap, ex));
       throw ex;
     } finally {
-      if(auditSuccess){
+      if (auditSuccess) {
         AUDIT.logReadSuccess(buildAuditMessageForSuccess(
             OMAction.CHECK_VOLUME_ACCESS, auditMap));
       }
@@ -1778,7 +1785,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
    */
   @Override
   public OmVolumeArgs getVolumeInfo(String volume) throws IOException {
-    if(isAclEnabled) {
+    if (isAclEnabled) {
       checkAcls(ResourceType.VOLUME, StoreType.OZONE, ACLType.READ, volume,
           null, null);
     }
@@ -1795,7 +1802,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
           auditMap, ex));
       throw ex;
     } finally {
-      if(auditSuccess){
+      if (auditSuccess) {
         AUDIT.logReadSuccess(buildAuditMessageForSuccess(OMAction.READ_VOLUME,
             auditMap));
       }
@@ -1811,7 +1818,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
   @Override
   public void deleteVolume(String volume) throws IOException {
     try {
-      if(isAclEnabled) {
+      if (isAclEnabled) {
         checkAcls(ResourceType.VOLUME, StoreType.OZONE, ACLType.DELETE, volume,
             null, null);
       }
@@ -1832,10 +1839,10 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
    * Lists volumes accessible by a specific user.
    *
    * @param userName - user name
-   * @param prefix - Filter prefix -- Return only entries that match this.
-   * @param prevKey - Previous key -- List starts from the next from the
-   * prevkey
-   * @param maxKeys - Max number of keys to return.
+   * @param prefix   - Filter prefix -- Return only entries that match this.
+   * @param prevKey  - Previous key -- List starts from the next from the
+   *                 prevkey
+   * @param maxKeys  - Max number of keys to return.
    * @return List of Volumes.
    * @throws IOException
    */
@@ -1883,7 +1890,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
           auditMap, ex));
       throw ex;
     } finally {
-      if(auditSuccess){
+      if (auditSuccess) {
         AUDIT.logReadSuccess(buildAuditMessageForSuccess(OMAction.LIST_VOLUMES,
             auditMap));
       }
@@ -1893,9 +1900,9 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
   /**
    * Lists volume all volumes in the cluster.
    *
-   * @param prefix - Filter prefix -- Return only entries that match this.
+   * @param prefix  - Filter prefix -- Return only entries that match this.
    * @param prevKey - Previous key -- List starts from the next from the
-   * prevkey
+   *                prevkey
    * @param maxKeys - Max number of keys to return.
    * @return List of Volumes.
    * @throws IOException
@@ -1913,7 +1920,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
       metrics.incNumVolumeLists();
       if (!allowListAllVolumes) {
         // Only admin can list all volumes when disallowed in config
-        if(isAclEnabled) {
+        if (isAclEnabled) {
           checkAcls(ResourceType.VOLUME, StoreType.OZONE, ACLType.LIST,
               OzoneConsts.OZONE_ROOT, null, null);
         }
@@ -1926,7 +1933,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
           auditMap, ex));
       throw ex;
     } finally {
-      if(auditSuccess){
+      if (auditSuccess) {
         AUDIT.logReadSuccess(buildAuditMessageForSuccess(OMAction.LIST_VOLUMES,
             auditMap));
       }
@@ -1942,7 +1949,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
   @Override
   public void createBucket(OmBucketInfo bucketInfo) throws IOException {
     try {
-      if(isAclEnabled) {
+      if (isAclEnabled) {
         checkAcls(ResourceType.VOLUME, StoreType.OZONE, ACLType.CREATE,
             bucketInfo.getVolumeName(), bucketInfo.getBucketName(), null);
       }
@@ -1966,7 +1973,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
   public List<OmBucketInfo> listBuckets(String volumeName,
       String startKey, String prefix, int maxNumOfBuckets)
       throws IOException {
-    if(isAclEnabled) {
+    if (isAclEnabled) {
       checkAcls(ResourceType.VOLUME, StoreType.OZONE, ACLType.LIST, volumeName,
           null, null);
     }
@@ -1987,7 +1994,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
           auditMap, ex));
       throw ex;
     } finally {
-      if(auditSuccess){
+      if (auditSuccess) {
         AUDIT.logReadSuccess(buildAuditMessageForSuccess(OMAction.LIST_BUCKETS,
             auditMap));
       }
@@ -2005,7 +2012,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
   @Override
   public OmBucketInfo getBucketInfo(String volume, String bucket)
       throws IOException {
-    if(isAclEnabled) {
+    if (isAclEnabled) {
       checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.READ, volume,
           bucket, null);
     }
@@ -2022,7 +2029,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
           auditMap, ex));
       throw ex;
     } finally {
-      if(auditSuccess){
+      if (auditSuccess) {
         AUDIT.logReadSuccess(buildAuditMessageForSuccess(OMAction.READ_BUCKET,
             auditMap));
       }
@@ -2038,7 +2045,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
    */
   @Override
   public OpenKeySession openKey(OmKeyArgs args) throws IOException {
-    if(isAclEnabled) {
+    if (isAclEnabled) {
       try {
         checkAcls(ResourceType.KEY, StoreType.OZONE, ACLType.WRITE,
             args.getVolumeName(), args.getBucketName(), args.getKeyName());
@@ -2064,7 +2071,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
           (args == null) ? null : args.toAuditMap(), ex));
       throw ex;
     } finally {
-      if(auditSuccess){
+      if (auditSuccess) {
         AUDIT.logWriteSuccess(buildAuditMessageForSuccess(
             OMAction.ALLOCATE_KEY, (args == null) ? null : args.toAuditMap()));
       }
@@ -2074,7 +2081,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
   @Override
   public void commitKey(OmKeyArgs args, long clientID)
       throws IOException {
-    if(isAclEnabled) {
+    if (isAclEnabled) {
       try {
         checkAcls(ResourceType.KEY, StoreType.OZONE, ACLType.WRITE,
             args.getVolumeName(), args.getBucketName(), args.getKeyName());
@@ -2118,7 +2125,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
   @Override
   public OmKeyLocationInfo allocateBlock(OmKeyArgs args, long clientID,
       ExcludeList excludeList) throws IOException {
-    if(isAclEnabled) {
+    if (isAclEnabled) {
       try {
         checkAcls(ResourceType.KEY, StoreType.OZONE, ACLType.WRITE,
             args.getVolumeName(), args.getBucketName(), args.getKeyName());
@@ -2147,7 +2154,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
           auditMap, ex));
       throw ex;
     } finally {
-      if(auditSuccess){
+      if (auditSuccess) {
         AUDIT.logWriteSuccess(buildAuditMessageForSuccess(
             OMAction.ALLOCATE_BLOCK, auditMap));
       }
@@ -2163,7 +2170,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
    */
   @Override
   public OmKeyInfo lookupKey(OmKeyArgs args) throws IOException {
-    if(isAclEnabled) {
+    if (isAclEnabled) {
       checkAcls(ResourceType.KEY, StoreType.OZONE, ACLType.READ,
           args.getVolumeName(), args.getBucketName(), args.getKeyName());
     }
@@ -2178,7 +2185,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
           (args == null) ? null : args.toAuditMap(), ex));
       throw ex;
     } finally {
-      if(auditSuccess){
+      if (auditSuccess) {
         AUDIT.logReadSuccess(buildAuditMessageForSuccess(OMAction.READ_KEY,
             (args == null) ? null : args.toAuditMap()));
       }
@@ -2187,7 +2194,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
 
   @Override
   public void renameKey(OmKeyArgs args, String toKeyName) throws IOException {
-    if(isAclEnabled) {
+    if (isAclEnabled) {
       checkAcls(ResourceType.KEY, StoreType.OZONE, ACLType.WRITE,
           args.getVolumeName(), args.getBucketName(), args.getKeyName());
     }
@@ -2216,7 +2223,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
   @Override
   public void deleteKey(OmKeyArgs args) throws IOException {
     try {
-      if(isAclEnabled) {
+      if (isAclEnabled) {
         checkAcls(ResourceType.KEY, StoreType.OZONE, ACLType.DELETE,
             args.getVolumeName(), args.getBucketName(), args.getKeyName());
       }
@@ -2236,7 +2243,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
   @Override
   public List<OmKeyInfo> listKeys(String volumeName, String bucketName,
       String startKey, String keyPrefix, int maxKeys) throws IOException {
-    if(isAclEnabled) {
+    if (isAclEnabled) {
       checkAcls(ResourceType.BUCKET,
           StoreType.OZONE, ACLType.LIST, volumeName, bucketName, keyPrefix);
     }
@@ -2257,7 +2264,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
           auditMap, ex));
       throw ex;
     } finally {
-      if(auditSuccess){
+      if (auditSuccess) {
         AUDIT.logReadSuccess(buildAuditMessageForSuccess(OMAction.LIST_KEYS,
             auditMap));
       }
@@ -2308,7 +2315,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
   @Override
   public void setBucketProperty(OmBucketArgs args)
       throws IOException {
-    if(isAclEnabled) {
+    if (isAclEnabled) {
       checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.WRITE,
           args.getVolumeName(), args.getBucketName(), null);
     }
@@ -2354,7 +2361,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
     }
   }
 
-  private Map<String, String> buildAuditMap(String volume){
+  private Map<String, String> buildAuditMap(String volume) {
     Map<String, String> auditMap = new LinkedHashMap<>();
     auditMap.put(OzoneConsts.VOLUME, volume);
     return auditMap;
@@ -2507,7 +2514,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
           .setNodeType(HddsProtos.NodeType.DATANODE)
           .setHostname(datanode.getHostName());
 
-      if(DatanodeDetails.getFromProtoBuf(datanode)
+      if (DatanodeDetails.getFromProtoBuf(datanode)
           .getPort(DatanodeDetails.Port.Name.REST) != null) {
         dnServiceInfoBuilder.addServicePort(ServicePort.newBuilder()
             .setType(ServicePort.Type.HTTP)
@@ -2536,13 +2543,13 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
   /**
    * {@inheritDoc}
    */
-  public S3SecretValue getS3Secret(String kerberosID) throws IOException{
+  public S3SecretValue getS3Secret(String kerberosID) throws IOException {
     UserGroupInformation user = ProtobufRpcEngine.Server.getRemoteUser();
 
     // Check whether user name passed is matching with the current user or not.
     if (!user.getUserName().equals(kerberosID)) {
       throw new OMException("User mismatch. Requested user name is " +
-          "mismatched " + kerberosID +", with current user " +
+          "mismatched " + kerberosID + ", with current user " +
           user.getUserName(), OMException.ResultCodes.USER_MISMATCH);
     }
     return s3SecretManager.getS3Secret(kerberosID);
@@ -2580,12 +2587,12 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
       auditSuccess = true;
     } catch (IOException ex) {
       AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction
-              .INITIATE_MULTIPART_UPLOAD, (keyArgs == null) ? null : keyArgs
+          .INITIATE_MULTIPART_UPLOAD, (keyArgs == null) ? null : keyArgs
           .toAuditMap(), ex));
       metrics.incNumCommitMultipartUploadPartFails();
       throw ex;
     } finally {
-      if(auditSuccess) {
+      if (auditSuccess) {
         AUDIT.logWriteSuccess(buildAuditMessageForSuccess(
             OMAction.COMMIT_MULTIPART_UPLOAD_PARTKEY, (keyArgs == null) ? null :
                 keyArgs.toAuditMap()));
@@ -2641,7 +2648,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
   @Override
   public OmMultipartUploadListParts listParts(String volumeName,
       String bucketName, String keyName, String uploadID, int partNumberMarker,
-      int maxParts)  throws IOException {
+      int maxParts) throws IOException {
     Map<String, String> auditMap = new HashMap<>();
     auditMap.put(OzoneConsts.VOLUME, volumeName);
     auditMap.put(OzoneConsts.BUCKET, bucketName);
@@ -2690,7 +2697,6 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
       throw ex;
     }
 
-
   }
 
   @Override
@@ -2770,7 +2776,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
           (args == null) ? null : args.toAuditMap(), ex));
       throw ex;
     } finally {
-      if(auditSuccess){
+      if (auditSuccess) {
         AUDIT.logWriteSuccess(buildAuditMessageForSuccess(
             OMAction.CREATE_FILE, (args == null) ? null : args.toAuditMap()));
       }
@@ -2779,7 +2785,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
 
   @Override
   public OmKeyInfo lookupFile(OmKeyArgs args) throws IOException {
-    if(isAclEnabled) {
+    if (isAclEnabled) {
       checkAcls(ResourceType.KEY, StoreType.OZONE, ACLType.READ,
           args.getVolumeName(), args.getBucketName(), args.getKeyName());
     }
@@ -2794,7 +2800,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
           (args == null) ? null : args.toAuditMap(), ex));
       throw ex;
     } finally {
-      if(auditSuccess){
+      if (auditSuccess) {
         AUDIT.logWriteSuccess(buildAuditMessageForSuccess(
             OMAction.LOOKUP_FILE, (args == null) ? null : args.toAuditMap()));
       }
@@ -2804,7 +2810,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
   @Override
   public List<OzoneFileStatus> listStatus(OmKeyArgs args, boolean recursive,
       String startKey, long numEntries) throws IOException {
-    if(isAclEnabled) {
+    if (isAclEnabled) {
       checkAcls(getResourceType(args), StoreType.OZONE, ACLType.READ,
           args.getVolumeName(), args.getBucketName(), args.getKeyName());
     }
@@ -2819,7 +2825,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
           (args == null) ? null : args.toAuditMap(), ex));
       throw ex;
     } finally {
-      if(auditSuccess){
+      if (auditSuccess) {
         AUDIT.logReadSuccess(buildAuditMessageForSuccess(
             OMAction.LIST_STATUS, (args == null) ? null : args.toAuditMap()));
       }
@@ -2829,11 +2835,11 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
   private void auditAcl(OzoneObj ozoneObj, List<OzoneAcl> ozoneAcl,
       OMAction omAction, Exception ex) {
     Map<String, String> auditMap = ozoneObj.toAuditMap();
-    if(ozoneAcl != null) {
+    if (ozoneAcl != null) {
       auditMap.put(OzoneConsts.ACL, ozoneAcl.toString());
     }
 
-    if(ex == null) {
+    if (ex == null) {
       AUDIT.logWriteSuccess(
           buildAuditMessageForSuccess(omAction, auditMap));
     } else {
@@ -2854,8 +2860,8 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
   public boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException {
     boolean auditSuccess = true;
 
-    try{
-      if(isAclEnabled) {
+    try {
+      if (isAclEnabled) {
         checkAcls(obj.getResourceType(), obj.getStoreType(), ACLType.WRITE_ACL,
             obj.getVolumeName(), obj.getBucketName(), obj.getKeyName());
       }
@@ -2872,12 +2878,12 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
         throw new OMException("Unexpected resource type: " +
             obj.getResourceType(), INVALID_REQUEST);
       }
-    } catch(Exception ex) {
+    } catch (Exception ex) {
       auditSuccess = false;
       auditAcl(obj, Arrays.asList(acl), OMAction.ADD_ACL, ex);
       throw ex;
     } finally {
-      if(auditSuccess){
+      if (auditSuccess) {
         auditAcl(obj, Arrays.asList(acl), OMAction.ADD_ACL, null);
       }
     }
@@ -2895,8 +2901,8 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
   public boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException {
     boolean auditSuccess = true;
 
-    try{
-      if(isAclEnabled) {
+    try {
+      if (isAclEnabled) {
         checkAcls(obj.getResourceType(), obj.getStoreType(), ACLType.WRITE_ACL,
             obj.getVolumeName(), obj.getBucketName(), obj.getKeyName());
       }
@@ -2914,12 +2920,12 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
         throw new OMException("Unexpected resource type: " +
             obj.getResourceType(), INVALID_REQUEST);
       }
-    } catch(Exception ex) {
+    } catch (Exception ex) {
       auditSuccess = false;
       auditAcl(obj, Arrays.asList(acl), OMAction.REMOVE_ACL, ex);
       throw ex;
     } finally {
-      if(auditSuccess){
+      if (auditSuccess) {
         auditAcl(obj, Arrays.asList(acl), OMAction.REMOVE_ACL, null);
       }
     }
@@ -2929,7 +2935,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
    * Acls to be set for given Ozone object. This operations reset ACL for given
    * object to list of ACLs provided in argument.
    *
-   * @param obj Ozone object.
+   * @param obj  Ozone object.
    * @param acls List of acls.
    * @throws IOException if there is error.
    */
@@ -2937,8 +2943,8 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
   public boolean setAcl(OzoneObj obj, List<OzoneAcl> acls) throws IOException {
     boolean auditSuccess = true;
 
-    try{
-      if(isAclEnabled) {
+    try {
+      if (isAclEnabled) {
         checkAcls(obj.getResourceType(), obj.getStoreType(), ACLType.WRITE_ACL,
             obj.getVolumeName(), obj.getBucketName(), obj.getKeyName());
       }
@@ -2955,12 +2961,12 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
         throw new OMException("Unexpected resource type: " +
             obj.getResourceType(), INVALID_REQUEST);
       }
-    } catch(Exception ex) {
+    } catch (Exception ex) {
       auditSuccess = false;
       auditAcl(obj, acls, OMAction.SET_ACL, ex);
       throw ex;
     } finally {
-      if(auditSuccess){
+      if (auditSuccess) {
         auditAcl(obj, acls, OMAction.SET_ACL, null);
       }
     }
@@ -2976,8 +2982,8 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
   public List<OzoneAcl> getAcl(OzoneObj obj) throws IOException {
     boolean auditSuccess = true;
 
-    try{
-      if(isAclEnabled) {
+    try {
+      if (isAclEnabled) {
         checkAcls(obj.getResourceType(), obj.getStoreType(), ACLType.READ_ACL,
             obj.getVolumeName(), obj.getBucketName(), obj.getKeyName());
       }
@@ -2995,12 +3001,12 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
         throw new OMException("Unexpected resource type: " +
             obj.getResourceType(), INVALID_REQUEST);
       }
-    } catch(Exception ex) {
+    } catch (Exception ex) {
       auditSuccess = false;
       auditAcl(obj, null, OMAction.GET_ACL, ex);
       throw ex;
     } finally {
-      if(auditSuccess){
+      if (auditSuccess) {
         auditAcl(obj, null, OMAction.GET_ACL, null);
       }
     }
@@ -3012,6 +3018,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
    * last applied transaction index, then re-initialize the OM state via this
    * checkpoint. Before re-initializing OM state, the OM Ratis server should
    * be stopped so that no new transactions can be applied.
+   *
    * @param leaderId peerNodeID of the leader OM
    * @return If checkpoint is installed, return the corresponding termIndex.
    * Otherwise, return null.
@@ -3036,15 +3043,17 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
         omDBcheckpoint.getRatisSnapshotTerm();
     if (checkpointSnapshotIndex <= lastAppliedIndex) {
       LOG.error("Failed to install checkpoint from OM leader: {}. The last " +
-          "applied index: {} is greater than or equal to the checkpoint's " +
-          "snapshot index: {}. Deleting the downloaded checkpoint {}", leaderId,
+              "applied index: {} is greater than or equal to the checkpoint's"
+              + " " +
+              "snapshot index: {}. Deleting the downloaded checkpoint {}",
+          leaderId,
           lastAppliedIndex, checkpointSnapshotIndex,
           newDBlocation);
       try {
         FileUtils.deleteFully(newDBlocation);
       } catch (IOException e) {
         LOG.error("Failed to fully delete the downloaded DB checkpoint {} " +
-            "from OM leader {}.", newDBlocation,
+                "from OM leader {}.", newDBlocation,
             leaderId, e);
       }
       return null;
@@ -3097,6 +3106,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
 
   /**
    * Download the latest OM DB checkpoint from the leader OM.
+   *
    * @param leaderId OMNodeID of the leader OM node.
    * @return latest DB checkpoint from leader OM.
    */
@@ -3114,8 +3124,9 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
 
   /**
    * Replace the current OM DB with the new DB checkpoint.
+   *
    * @param lastAppliedIndex the last applied index in the current OM DB.
-   * @param checkpointPath path to the new DB checkpoint
+   * @param checkpointPath   path to the new DB checkpoint
    * @return location of the backup of the original DB
    * @throws Exception
    */
@@ -3144,7 +3155,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
       Files.move(checkpointPath, db.toPath());
     } catch (IOException e) {
       LOG.error("Failed to move downloaded DB checkpoint {} to metadata " +
-          "directory {}. Resetting to original DB.", checkpointPath,
+              "directory {}. Resetting to original DB.", checkpointPath,
           db.toPath());
       Files.move(dbBackup.toPath(), db.toPath());
       throw e;
@@ -3185,7 +3196,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
         TermIndex.newTermIndex(newSnapShotTermIndex, newSnapshotIndex));
   }
 
-  public static  Logger getLogger() {
+  public static Logger getLogger() {
     return LOG;
   }
 
@@ -3219,9 +3230,9 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
     return omComponent;
   }
 
-
   /**
    * Return maximum volumes count per user.
+   *
    * @return maxUserVolumeCount
    */
   public long getMaxUserVolumeCount() {
@@ -3235,7 +3246,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
    * If it is the leader, the role status is cached till Ratis server
    * notifies of leader change. If it is not leader, the role information is
   * retrieved by submitting a GroupInfoRequest to the Ratis server.
-   *
+   * <p>
   * If Ratis is not enabled, then it always returns true.
    *
   * @return true if this node is the leader, false otherwise.
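
A small sketch of the caching rule spelled out in this Javadoc: the leader flag is cached only while this node is the leader and is dropped when Ratis signals a leader change; otherwise each call falls back to a group-info query. The class, field and method names here are invented for the illustration:

import java.util.concurrent.atomic.AtomicReference;

/** Sketch of the cached leader lookup; names are invented for the example. */
abstract class LeaderStatusCacheSketch {
  // null means "unknown, ask Ratis"; TRUE is cached until a leader change.
  private final AtomicReference<Boolean> cachedIsLeader = new AtomicReference<>();

  boolean isLeader() {
    Boolean cached = cachedIsLeader.get();
    if (cached != null) {
      return cached;
    }
    boolean leader = queryRatisGroupInfo();   // stand-in for a GroupInfoRequest
    if (leader) {
      cachedIsLeader.set(Boolean.TRUE);       // only the leader role is cached
    }
    return leader;
  }

  /** Invoked when the Ratis server notifies this node of a leader change. */
  void onLeaderChanged() {
    cachedIsLeader.set(null);
  }

  abstract boolean queryRatisGroupInfo();
}
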
@@ -3246,6 +3257,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
 
   /**
    * Return if Ratis is enabled or not.
+   *
   * @return true if Ratis is enabled, false otherwise.
    */
   public boolean isRatisEnabled() {
@@ -3254,6 +3266,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
 
   /**
    * Get DB updates since a specific sequence number.
+   *
    * @param dbUpdatesRequest request that encapsulates a sequence number.
    * @return Wrapper containing the updates.
    * @throws SequenceNumberNotFoundException if db is unable to read the data.
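
For context, a consumer of such an API typically tracks the last sequence number it has applied and keeps asking for the delta. A generic, hedged sketch follows; the fetch call is abstracted behind a LongFunction because the real wrapper type is not shown in this diff:

import java.util.List;
import java.util.function.LongFunction;

final class DbUpdatesPollerSketch {
  private DbUpdatesPollerSketch() { }

  /** Pull update batches until the source reports no more data. */
  static long drain(long fromSequence, LongFunction<List<byte[]>> fetchUpdates) {
    long nextSequence = fromSequence;
    List<byte[]> batch;
    while (!(batch = fetchUpdates.apply(nextSequence)).isEmpty()) {
      // Apply each serialized write batch to the local copy here.
      // Assumption for the sketch: one write batch per sequence number.
      nextSequence += batch.size();
    }
    return nextSequence;   // the sequence number to ask for next time
  }
}
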
@@ -3289,6 +3302,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
 
   /**
   * Returns true if OzoneNativeAuthorizer is enabled and false otherwise.
+   *
   * @return true if the native authorizer is enabled, false otherwise.
    */
   public boolean isNativeAuthorizerEnabled() {
diff --git a/hadoop-ozone/ozonefs/pom.xml b/hadoop-ozone/ozonefs-common/pom.xml
similarity index 71%
copy from hadoop-ozone/ozonefs/pom.xml
copy to hadoop-ozone/ozonefs-common/pom.xml
index 8cc2434..d636b1f 100644
--- a/hadoop-ozone/ozonefs/pom.xml
+++ b/hadoop-ozone/ozonefs-common/pom.xml
@@ -21,8 +21,8 @@
     <artifactId>hadoop-ozone</artifactId>
     <version>0.6.0-SNAPSHOT</version>
   </parent>
-  <artifactId>hadoop-ozone-filesystem</artifactId>
-  <name>Apache Hadoop Ozone FileSystem</name>
+  <artifactId>hadoop-ozone-filesystem-common</artifactId>
+  <name>Apache Hadoop Ozone FileSystem Common</name>
   <packaging>jar</packaging>
   <version>0.6.0-SNAPSHOT</version>
   <properties>
@@ -32,46 +32,7 @@
 
   <build>
     <plugins>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-jar-plugin</artifactId>
-        <executions>
-          <execution>
-            <goals>
-              <goal>test-jar</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-dependency-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>deplist</id>
-            <phase>compile</phase>
-            <goals>
-              <goal>list</goal>
-            </goals>
-            <configuration>
-              <!-- build a shellprofile -->
-              <outputFile>
-                ${project.basedir}/target/1hadoop-tools-deps/${project.artifactId}.tools-optional.txt
-              </outputFile>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>animal-sniffer-maven-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>signature-check</id>
-            <phase></phase>
-          </execution>
-        </executions>
-      </plugin>
+
     </plugins>
   </build>
 
@@ -81,9 +42,13 @@
       <artifactId>hadoop-hdds-hadoop-dependency-client</artifactId>
     </dependency>
     <dependency>
+      <groupId>com.github.spotbugs</groupId>
+      <artifactId>spotbugs</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
       <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-test-utils</artifactId>
-      <scope>test</scope>
+      <artifactId>hadoop-ozone-client</artifactId>
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
@@ -105,24 +70,18 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-ozone-manager</artifactId>
+      <artifactId>hadoop-hdds-test-utils</artifactId>
       <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-container-service</artifactId>
+      <artifactId>hadoop-ozone-ozone-manager</artifactId>
       <scope>test</scope>
     </dependency>
-     <dependency>
-      <groupId>com.github.spotbugs</groupId>
-      <artifactId>spotbugs</artifactId>
-      <scope>provided</scope>
-    </dependency>
-
-
     <dependency>
       <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-client</artifactId>
+      <artifactId>hadoop-hdds-container-service</artifactId>
+      <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
@@ -134,7 +93,7 @@
     <dependency>
       <groupId>org.mockito</groupId>
       <artifactId>mockito-all</artifactId>
-      <version>${mockito1-powermock.version}</version>
+      <version>1.10.19</version>
       <scope>test</scope>
     </dependency>
     <dependency>
@@ -145,12 +104,13 @@
     <dependency>
       <groupId>org.powermock</groupId>
       <artifactId>powermock-module-junit4</artifactId>
-      <version>${powermock1.version}</version>
+      <version>1.6.5</version>
       <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.powermock</groupId>
       <artifactId>powermock-api-mockito</artifactId>
+      <version>1.6.5</version>
       <scope>test</scope>
     </dependency>
   </dependencies>
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicKeyInfo.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicKeyInfo.java
similarity index 100%
rename from hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicKeyInfo.java
rename to hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicKeyInfo.java
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzFs.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzFs.java
similarity index 100%
copy from hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzFs.java
copy to hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzFs.java
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
similarity index 100%
rename from hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
rename to hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
similarity index 96%
rename from hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
rename to hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
index b7323ac..1525dab 100644
--- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
@@ -18,20 +18,7 @@
 
 package org.apache.hadoop.fs.ozone;
 
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.InputStream;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.util.EnumSet;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Objects;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-import java.util.stream.Collectors;
-
+import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.CreateFlag;
@@ -53,16 +40,29 @@ import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Progressable;
+import org.apache.http.client.utils.URIBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.EnumSet;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Objects;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
 
-import com.google.common.base.Preconditions;
 import static org.apache.hadoop.fs.ozone.Constants.LISTING_PAGE_SIZE;
 import static org.apache.hadoop.fs.ozone.Constants.OZONE_DEFAULT_USER;
 import static org.apache.hadoop.fs.ozone.Constants.OZONE_USER_DIR;
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_SCHEME;
-import org.apache.http.client.utils.URIBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * The minimal Ozone Filesystem implementation.
@@ -145,18 +145,6 @@ public class BasicOzoneFileSystem extends FileSystem {
           .build();
       LOG.trace("Ozone URI for ozfs initialization is {}", uri);
 
-      //isolated is the default for ozonefs-lib-legacy which includes the
-      // /ozonefs.txt, otherwise the default is false. It could be overridden.
-      boolean defaultValue =
-          BasicOzoneFileSystem.class.getClassLoader()
-              .getResource("ozonefs.txt")
-              != null;
-
-      //Use string here instead of the constant as constant may not be available
-      //on the classpath of a hadoop 2.7
-      boolean isolatedClassloader =
-          conf.getBoolean("ozone.fs.isolated-classloader", defaultValue);
-
       ConfigurationSource source;
       if (conf instanceof OzoneConfiguration) {
         source = (ConfigurationSource) conf;
@@ -165,8 +153,7 @@ public class BasicOzoneFileSystem extends FileSystem {
       }
       this.adapter =
           createAdapter(source, bucketStr,
-              volumeStr, omHost, omPort,
-          isolatedClassloader);
+              volumeStr, omHost, omPort);
 
       try {
         this.userName =
@@ -186,19 +173,10 @@ public class BasicOzoneFileSystem extends FileSystem {
 
   protected OzoneClientAdapter createAdapter(ConfigurationSource conf,
       String bucketStr,
-      String volumeStr, String omHost, int omPort,
-      boolean isolatedClassloader) throws IOException {
+      String volumeStr, String omHost, int omPort) throws IOException {
 
-    if (isolatedClassloader) {
-
-      return OzoneClientAdapterFactory
-          .createAdapter(volumeStr, bucketStr);
-
-    } else {
-
-      return new BasicOzoneClientAdapterImpl(omHost, omPort, conf,
-          volumeStr, bucketStr);
-    }
+    return new BasicOzoneClientAdapterImpl(omHost, omPort, conf,
+        volumeStr, bucketStr);
   }
 
   @Override
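
With the isolated classloader gone, which jar serves the o3fs scheme is decided purely by what is on the classpath (the hadoop2/hadoop3 modules introduced below). A minimal client sketch using the stock Hadoop API; the bucket, volume, host and port in the URI are placeholders, and fs.o3fs.impl normally comes from the jar's META-INF service file rather than being set by hand:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class O3fsClientSketch {
  private O3fsClientSketch() { }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Normally resolved from the jar's META-INF/services entry; set here only
    // as an explicit fallback.
    conf.setIfUnset("fs.o3fs.impl", "org.apache.hadoop.fs.ozone.OzoneFileSystem");

    // o3fs://<bucket>.<volume>.<om-host>:<port>/ with placeholder values below.
    URI uri = URI.create("o3fs://bucket1.volume1.om-host:9862/");
    try (FileSystem fs = FileSystem.newInstance(uri, conf)) {
      for (FileStatus status : fs.listStatus(new Path("/"))) {
        System.out.println(status.getPath());
      }
    }
  }
}
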
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/CapableOzoneFSInputStream.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/CapableOzoneFSInputStream.java
similarity index 100%
copy from hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/CapableOzoneFSInputStream.java
copy to hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/CapableOzoneFSInputStream.java
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/Constants.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/Constants.java
similarity index 100%
copy from hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/Constants.java
copy to hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/Constants.java
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/FileStatusAdapter.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/FileStatusAdapter.java
similarity index 100%
rename from hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/FileStatusAdapter.java
rename to hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/FileStatusAdapter.java
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/O3fsDtFetcher.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/O3fsDtFetcher.java
similarity index 100%
rename from hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/O3fsDtFetcher.java
rename to hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/O3fsDtFetcher.java
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java
similarity index 100%
rename from hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java
rename to hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapterImpl.java
similarity index 100%
rename from hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapterImpl.java
rename to hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapterImpl.java
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java
similarity index 100%
rename from hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java
rename to hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSOutputStream.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSOutputStream.java
similarity index 100%
rename from hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSOutputStream.java
rename to hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSOutputStream.java
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSStorageStatistics.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSStorageStatistics.java
similarity index 100%
rename from hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSStorageStatistics.java
rename to hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSStorageStatistics.java
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFsShell.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFsShell.java
similarity index 100%
rename from hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFsShell.java
rename to hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFsShell.java
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneStreamCapabilities.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneStreamCapabilities.java
similarity index 100%
copy from hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneStreamCapabilities.java
copy to hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneStreamCapabilities.java
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/Statistic.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/Statistic.java
similarity index 100%
rename from hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/Statistic.java
rename to hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/Statistic.java
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/Constants.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/package-info.java
similarity index 61%
copy from hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/Constants.java
copy to hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/package-info.java
index 832a0cb..cb90e5f 100644
--- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/Constants.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/package-info.java
@@ -14,29 +14,17 @@
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
+ *
  */
 
-package org.apache.hadoop.fs.ozone;
-
 /**
- * Constants for Ozone FileSystem implementation.
+ * Ozone Filesystem.
+ *
+ * Apart from the exceptions, everything here is an implementation detail.
  */
-public final class Constants {
-
-  public static final String OZONE_DEFAULT_USER = "hdfs";
-
-  public static final String OZONE_USER_DIR = "/user";
-
-  /** Local buffer directory. */
-  public static final String BUFFER_DIR_KEY = "fs.ozone.buffer.dir";
-
-  /** Temporary directory. */
-  public static final String BUFFER_TMP_KEY = "hadoop.tmp.dir";
-
-  /** Page size for Ozone listing operation. */
-  public static final int LISTING_PAGE_SIZE = 1024;
-
-  private Constants() {
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+package org.apache.hadoop.fs.ozone;
 
-  }
-}
+import org.apache.hadoop.hdds.annotation.InterfaceAudience;
+import org.apache.hadoop.hdds.annotation.InterfaceStability;
\ No newline at end of file
diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java b/hadoop-ozone/ozonefs-common/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java
similarity index 100%
rename from hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java
rename to hadoop-ozone/ozonefs-common/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java
diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/package-info.java b/hadoop-ozone/ozonefs-common/src/test/java/org/apache/hadoop/fs/ozone/package-info.java
similarity index 100%
rename from hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/package-info.java
rename to hadoop-ozone/ozonefs-common/src/test/java/org/apache/hadoop/fs/ozone/package-info.java
diff --git a/hadoop-ozone/ozonefs/src/test/resources/META-INF/services/org.apache.hadoop.fs.FileSystem b/hadoop-ozone/ozonefs-common/src/test/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
similarity index 100%
copy from hadoop-ozone/ozonefs/src/test/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
copy to hadoop-ozone/ozonefs-common/src/test/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
diff --git a/hadoop-ozone/ozonefs/src/test/resources/log4j.properties b/hadoop-ozone/ozonefs-common/src/test/resources/log4j.properties
similarity index 100%
rename from hadoop-ozone/ozonefs/src/test/resources/log4j.properties
rename to hadoop-ozone/ozonefs-common/src/test/resources/log4j.properties
diff --git a/hadoop-ozone/ozonefs-hadoop2/pom.xml b/hadoop-ozone/ozonefs-hadoop2/pom.xml
new file mode 100644
index 0000000..06ea2e2
--- /dev/null
+++ b/hadoop-ozone/ozonefs-hadoop2/pom.xml
@@ -0,0 +1,105 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-ozone</artifactId>
+    <version>0.6.0-SNAPSHOT</version>
+  </parent>
+  <artifactId>hadoop-ozone-filesystem-hadoop2</artifactId>
+  <name>Apache Hadoop Ozone FileSystem Hadoop 2.x compatibility</name>
+  <packaging>jar</packaging>
+  <version>0.6.0-SNAPSHOT</version>
+  <properties>
+    <shaded.prefix>org.apache.hadoop.ozone.shaded</shaded.prefix>
+  </properties>
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-ozone-filesystem-shaded</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-common</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-hdfs-client</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-annotations</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <scope>provided</scope>
+      <version>2.7.7</version>
+    </dependency>
+    <dependency>
+      <groupId>log4j</groupId>
+      <artifactId>log4j</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-log4j12</artifactId>
+      <scope>provided</scope>
+    </dependency>
+  </dependencies>
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-dependency-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>include-dependencies</id>
+            <phase>prepare-package</phase>
+            <goals>
+              <goal>unpack</goal>
+            </goals>
+            <configuration>
+              <excludes>META-INF/versions/**/*.*</excludes>
+              <artifactItems>
+                <artifactItem>
+                  <groupId>org.apache.hadoop</groupId>
+                  <artifactId>hadoop-ozone-filesystem-shaded</artifactId>
+                  <version>${project.version}</version>
+                </artifactItem>
+              </artifactItems>
+              <outputDirectory>target/classes</outputDirectory>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>animal-sniffer-maven-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>signature-check</id>
+            <phase/>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+</project>
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/CapableOzoneFSInputStream.java b/hadoop-ozone/ozonefs-hadoop2/src/main/java/org/apache/hadoop/fs/ozone/Hadoop27OmTransportFactory.java
similarity index 59%
rename from hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/CapableOzoneFSInputStream.java
rename to hadoop-ozone/ozonefs-hadoop2/src/main/java/org/apache/hadoop/fs/ozone/Hadoop27OmTransportFactory.java
index cef6a58..66794b2 100644
--- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/CapableOzoneFSInputStream.java
+++ b/hadoop-ozone/ozonefs-hadoop2/src/main/java/org/apache/hadoop/fs/ozone/Hadoop27OmTransportFactory.java
@@ -17,26 +17,22 @@
  */
 package org.apache.hadoop.fs.ozone;
 
-import org.apache.hadoop.fs.FileSystem.Statistics;
-import org.apache.hadoop.fs.StreamCapabilities;
-import org.apache.hadoop.util.StringUtils;
+import java.io.IOException;
 
-import java.io.InputStream;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.ozone.om.protocolPB.OmTransport;
+import org.apache.hadoop.ozone.om.protocolPB.OmTransportFactory;
+import org.apache.hadoop.security.UserGroupInformation;
 
-final class CapableOzoneFSInputStream extends OzoneFSInputStream
-    implements StreamCapabilities {
-
-  CapableOzoneFSInputStream(InputStream inputStream, Statistics statistics) {
-    super(inputStream, statistics);
-  }
+/**
+ * OM transport factory to create a simple (non-HA) OM transport client.
+ */
+public class Hadoop27OmTransportFactory implements OmTransportFactory {
 
   @Override
-  public boolean hasCapability(String capability) {
-    switch (StringUtils.toLowerCase(capability)) {
-    case OzoneStreamCapabilities.READBYTEBUFFER:
-      return true;
-    default:
-      return false;
-    }
+  public OmTransport createOmTransport(ConfigurationSource source,
+      UserGroupInformation ugi, String omServiceId) throws IOException {
+    return new Hadoop27RpcTransport(source);
   }
+
 }
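
Together with the Hadoop27RpcTransport added below, the factory's only job is to hand out a Hadoop 2.7-compatible RPC client. A hedged wiring sketch using just the types visible in this change; building a valid OMRequest is out of scope here, so it is taken as a parameter:

import java.io.IOException;

import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.ozone.om.protocolPB.OmTransport;
import org.apache.hadoop.ozone.om.protocolPB.OmTransportFactory;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
import org.apache.hadoop.security.UserGroupInformation;

final class OmCallSketch {
  private OmCallSketch() { }

  static OMResponse callOm(ConfigurationSource conf, OMRequest request)
      throws IOException {
    OmTransportFactory factory = new Hadoop27OmTransportFactory();
    OmTransport transport = factory.createOmTransport(conf,
        UserGroupInformation.getCurrentUser(), null);  // omServiceId unused here
    try {
      return transport.submitRequest(request);
    } finally {
      transport.close();
    }
  }
}
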
diff --git a/hadoop-ozone/ozonefs-hadoop2/src/main/java/org/apache/hadoop/fs/ozone/Hadoop27RpcTransport.java b/hadoop-ozone/ozonefs-hadoop2/src/main/java/org/apache/hadoop/fs/ozone/Hadoop27RpcTransport.java
new file mode 100644
index 0000000..fc1c97f
--- /dev/null
+++ b/hadoop-ozone/ozonefs-hadoop2/src/main/java/org/apache/hadoop/fs/ozone/Hadoop27RpcTransport.java
@@ -0,0 +1,82 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.ozone;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.ozone.OmUtils;
+import org.apache.hadoop.ozone.om.protocolPB.OmTransport;
+import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.apache.hadoop.security.UserGroupInformation;
+
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+
+/**
+ * Hadoop RPC based transport (without HA support).
+ */
+public class Hadoop27RpcTransport implements OmTransport {
+
+  private static final RpcController NULL_RPC_CONTROLLER = null;
+
+  private final OzoneManagerProtocolPB proxy;
+
+  public Hadoop27RpcTransport(
+      ConfigurationSource conf) throws IOException {
+    InetSocketAddress socket = OmUtils.getOmAddressForClients(conf);
+    long version = RPC.getProtocolVersion(OzoneManagerProtocolPB.class);
+    OzoneConfiguration ozoneConfiguration = OzoneConfiguration.of(conf);
+
+    RPC.setProtocolEngine(ozoneConfiguration,
+        OzoneManagerProtocolPB.class,
+        ProtobufRpcEngine.class);
+    proxy = RPC.getProtocolProxy(OzoneManagerProtocolPB.class, version,
+        socket, UserGroupInformation.getCurrentUser(),
+        ozoneConfiguration,
+        NetUtils.getDefaultSocketFactory(ozoneConfiguration),
+        RPC.getRpcTimeout(ozoneConfiguration), null).getProxy();
+  }
+
+  @Override
+  public OMResponse submitRequest(OMRequest payload) throws IOException {
+    try {
+      return proxy.submitRequest(NULL_RPC_CONTROLLER, payload);
+    } catch (ServiceException e) {
+      throw new IOException("Service exception during the OM call", e);
+    }
+  }
+
+  @Override
+  public Text getDelegationTokenService() {
+    return null;
+  }
+
+  @Override
+  public void close() throws IOException {
+  }
+
+}
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzFs.java b/hadoop-ozone/ozonefs-hadoop2/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java
similarity index 83%
copy from hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzFs.java
copy to hadoop-ozone/ozonefs-hadoop2/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java
index acab6d1..914832e 100644
--- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzFs.java
+++ b/hadoop-ozone/ozonefs-hadoop2/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java
@@ -6,16 +6,15 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.fs.ozone;
 
 import java.io.IOException;
@@ -34,16 +33,11 @@ import org.apache.hadoop.ozone.OzoneConsts;
  */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
-public class BasicOzFs extends DelegateToFileSystem {
+public class OzFs extends DelegateToFileSystem {
 
-  public BasicOzFs(URI theUri, Configuration conf)
+  public OzFs(URI theUri, Configuration conf)
       throws IOException, URISyntaxException {
-    super(theUri, new BasicOzoneFileSystem(), conf,
+    super(theUri, new OzoneFileSystem(), conf,
         OzoneConsts.OZONE_URI_SCHEME, false);
   }
-
-  @Override
-  public int getUriDefaultPort() {
-    return -1;
-  }
 }
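
OzFs is the AbstractFileSystem side of the bridge (via DelegateToFileSystem), so it is consumed through FileContext rather than FileSystem. A sketch with placeholder names, assuming the usual fs.AbstractFileSystem.<scheme>.impl binding convention:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;

public final class O3fsFileContextSketch {
  private O3fsFileContextSketch() { }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Standard Hadoop binding convention for AbstractFileSystem implementations;
    // the value is the OzFs class added in this module.
    conf.set("fs.AbstractFileSystem.o3fs.impl", "org.apache.hadoop.fs.ozone.OzFs");

    // Same o3fs addressing as the FileSystem example; placeholder names.
    FileContext fc = FileContext.getFileContext(
        URI.create("o3fs://bucket1.volume1.om-host:9862/"), conf);
    System.out.println(fc.util().exists(new Path("/")));
  }
}
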
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneStreamCapabilities.java b/hadoop-ozone/ozonefs-hadoop2/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
similarity index 64%
rename from hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneStreamCapabilities.java
rename to hadoop-ozone/ozonefs-hadoop2/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
index db90cd9..751808d 100644
--- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneStreamCapabilities.java
+++ b/hadoop-ozone/ozonefs-hadoop2/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
@@ -17,22 +17,9 @@
  */
 package org.apache.hadoop.fs.ozone;
 
-import java.nio.ByteBuffer;
-
 /**
- * Utility class to query streams for supported capabilities of Ozone.
- * Capability strings must be in lower case.
+ * Minimal Ozone File System compatible with Hadoop 2.x.
  */
-final class OzoneStreamCapabilities {
-
-  private OzoneStreamCapabilities() {
-  }
+public class OzoneFileSystem extends BasicOzoneFileSystem {
 
-  /**
-   * Stream read(ByteBuffer) capability implemented by
-   * {@link OzoneFSInputStream#read(ByteBuffer)}.
-   *
-   * TODO: If Hadoop dependency is upgraded, this string can be removed.
-   */
-  static final String READBYTEBUFFER = "in:readbytebuffer";
 }
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/Constants.java b/hadoop-ozone/ozonefs-hadoop2/src/main/java/org/apache/hadoop/fs/ozone/package-info.java
similarity index 61%
copy from hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/Constants.java
copy to hadoop-ozone/ozonefs-hadoop2/src/main/java/org/apache/hadoop/fs/ozone/package-info.java
index 832a0cb..cb90e5f 100644
--- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/Constants.java
+++ b/hadoop-ozone/ozonefs-hadoop2/src/main/java/org/apache/hadoop/fs/ozone/package-info.java
@@ -14,29 +14,17 @@
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
+ *
  */
 
-package org.apache.hadoop.fs.ozone;
-
 /**
- * Constants for Ozone FileSystem implementation.
+ * Ozone Filesystem.
+ *
+ * Apart from the exceptions, everything here is an implementation detail.
  */
-public final class Constants {
-
-  public static final String OZONE_DEFAULT_USER = "hdfs";
-
-  public static final String OZONE_USER_DIR = "/user";
-
-  /** Local buffer directory. */
-  public static final String BUFFER_DIR_KEY = "fs.ozone.buffer.dir";
-
-  /** Temporary directory. */
-  public static final String BUFFER_TMP_KEY = "hadoop.tmp.dir";
-
-  /** Page size for Ozone listing operation. */
-  public static final int LISTING_PAGE_SIZE = 1024;
-
-  private Constants() {
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+package org.apache.hadoop.fs.ozone;
 
-  }
-}
+import org.apache.hadoop.hdds.annotation.InterfaceAudience;
+import org.apache.hadoop.hdds.annotation.InterfaceStability;
\ No newline at end of file
diff --git a/hadoop-ozone/ozonefs-lib-current/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem b/hadoop-ozone/ozonefs-hadoop2/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
similarity index 100%
rename from hadoop-ozone/ozonefs-lib-current/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
rename to hadoop-ozone/ozonefs-hadoop2/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
diff --git a/hadoop-ozone/ozonefs-lib-legacy/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem b/hadoop-ozone/ozonefs-hadoop2/src/main/resources/META-INF/services/org.apache.hadoop.ozone.om.protocolPB.OmTransportFactory
similarity index 93%
rename from hadoop-ozone/ozonefs-lib-legacy/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
rename to hadoop-ozone/ozonefs-hadoop2/src/main/resources/META-INF/services/org.apache.hadoop.ozone.om.protocolPB.OmTransportFactory
index 39ca348..b1000f7 100644
--- a/hadoop-ozone/ozonefs-lib-legacy/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
+++ b/hadoop-ozone/ozonefs-hadoop2/src/main/resources/META-INF/services/org.apache.hadoop.ozone.om.protocolPB.OmTransportFactory
@@ -13,4 +13,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-org.apache.hadoop.fs.ozone.BasicOzoneFileSystem
+org.apache.hadoop.fs.ozone.Hadoop27OmTransportFactory
\ No newline at end of file
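
The renamed service descriptor above is what makes the Hadoop 2 factory discoverable at runtime. A minimal sketch of the JDK mechanism behind it; the first-hit selection policy is an assumption for the example, only ServiceLoader itself is standard:

import java.util.ServiceLoader;

import org.apache.hadoop.ozone.om.protocolPB.OmTransportFactory;

final class TransportFactoryLookupSketch {
  private TransportFactoryLookupSketch() { }

  static OmTransportFactory firstFactoryOnClasspath() {
    // ServiceLoader reads every META-INF/services entry named after the
    // OmTransportFactory interface and instantiates the listed classes.
    for (OmTransportFactory factory
        : ServiceLoader.load(OmTransportFactory.class)) {
      return factory;   // sketch: simply take the first registered factory
    }
    throw new IllegalStateException("No OmTransportFactory registered");
  }
}
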
diff --git a/hadoop-ozone/ozonefs-hadoop3/pom.xml b/hadoop-ozone/ozonefs-hadoop3/pom.xml
new file mode 100644
index 0000000..1c0d774
--- /dev/null
+++ b/hadoop-ozone/ozonefs-hadoop3/pom.xml
@@ -0,0 +1,85 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-ozone</artifactId>
+    <version>0.6.0-SNAPSHOT</version>
+  </parent>
+  <artifactId>hadoop-ozone-filesystem-hadoop3</artifactId>
+  <name>Apache Hadoop Ozone FileSystem Hadoop 3.x compatibility</name>
+  <packaging>jar</packaging>
+  <version>0.6.0-SNAPSHOT</version>
+  <properties>
+    <shaded.prefix>org.apache.hadoop.ozone.shaded</shaded.prefix>
+  </properties>
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-ozone-filesystem</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>log4j</groupId>
+      <artifactId>log4j</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-log4j12</artifactId>
+      <scope>provided</scope>
+    </dependency>
+  </dependencies>
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-dependency-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>include-dependencies</id>
+            <phase>prepare-package</phase>
+            <goals>
+              <goal>unpack</goal>
+            </goals>
+            <configuration>
+              <excludes>META-INF/versions/**/*.*</excludes>
+              <artifactItems>
+                <artifactItem>
+                  <groupId>org.apache.hadoop</groupId>
+                  <artifactId>hadoop-ozone-filesystem-shaded</artifactId>
+                  <version>${project.version}</version>
+                </artifactItem>
+              </artifactItems>
+              <outputDirectory>target/classes</outputDirectory>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>animal-sniffer-maven-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>signature-check</id>
+            <phase/>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+</project>
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzFs.java b/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java
similarity index 83%
rename from hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzFs.java
rename to hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java
index acab6d1..914832e 100644
--- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzFs.java
+++ b/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java
@@ -6,16 +6,15 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.fs.ozone;
 
 import java.io.IOException;
@@ -34,16 +33,11 @@ import org.apache.hadoop.ozone.OzoneConsts;
  */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
-public class BasicOzFs extends DelegateToFileSystem {
+public class OzFs extends DelegateToFileSystem {
 
-  public BasicOzFs(URI theUri, Configuration conf)
+  public OzFs(URI theUri, Configuration conf)
       throws IOException, URISyntaxException {
-    super(theUri, new BasicOzoneFileSystem(), conf,
+    super(theUri, new OzoneFileSystem(), conf,
         OzoneConsts.OZONE_URI_SCHEME, false);
   }
-
-  @Override
-  public int getUriDefaultPort() {
-    return -1;
-  }
 }
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java b/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
similarity index 82%
copy from hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
copy to hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
index 20dd72f..c3e308b 100644
--- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
+++ b/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
@@ -22,14 +22,13 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.net.URI;
 
-import org.apache.hadoop.hdds.annotation.InterfaceAudience;
-import org.apache.hadoop.hdds.annotation.InterfaceStability;
-import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProviderTokenIssuer;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.GlobalStorageStatistics;
 import org.apache.hadoop.fs.StorageStatistics;
+import org.apache.hadoop.hdds.annotation.InterfaceAudience;
+import org.apache.hadoop.hdds.annotation.InterfaceStability;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.security.token.DelegationTokenIssuer;
 
 /**
@@ -47,6 +46,10 @@ public class OzoneFileSystem extends BasicOzoneFileSystem
 
   private OzoneFSStorageStatistics storageStatistics;
 
+  public OzoneFileSystem() {
+    this.storageStatistics = new OzoneFSStorageStatistics();
+  }
+
   @Override
   public KeyProvider getKeyProvider() throws IOException {
     return getAdapter().getKeyProvider();
@@ -86,23 +89,11 @@ public class OzoneFileSystem extends BasicOzoneFileSystem
 
   @Override
   protected OzoneClientAdapter createAdapter(ConfigurationSource conf,
-      String bucketStr,
-      String volumeStr, String omHost, int omPort,
-      boolean isolatedClassloader) throws IOException {
-
-    this.storageStatistics =
-        (OzoneFSStorageStatistics) GlobalStorageStatistics.INSTANCE
-            .put(OzoneFSStorageStatistics.NAME,
-                OzoneFSStorageStatistics::new);
-
-    if (isolatedClassloader) {
-      return OzoneClientAdapterFactory.createAdapter(volumeStr, bucketStr,
-          storageStatistics);
-
-    } else {
-      return new OzoneClientAdapterImpl(omHost, omPort, conf,
-          volumeStr, bucketStr, storageStatistics);
-    }
+      String bucketStr, String volumeStr, String omHost, int omPort)
+      throws IOException {
+    return new OzoneClientAdapterImpl(omHost, omPort, conf, volumeStr,
+        bucketStr,
+        storageStatistics);
   }
 
   @Override
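
Because the Hadoop 3 variant keeps an OzoneFSStorageStatistics instance per filesystem, callers can read the counters through the standard StorageStatistics API. A sketch that assumes OzoneFileSystem exposes them via FileSystem#getStorageStatistics(), which this hunk does not show:

import java.util.Iterator;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.StorageStatistics;
import org.apache.hadoop.fs.StorageStatistics.LongStatistic;

final class FsStatsDumpSketch {
  private FsStatsDumpSketch() { }

  /** Print every long-valued counter the filesystem instance tracks. */
  static void dump(FileSystem fs) {
    StorageStatistics stats = fs.getStorageStatistics();
    for (Iterator<LongStatistic> it = stats.getLongStatistics(); it.hasNext();) {
      LongStatistic statistic = it.next();
      System.out.println(statistic.getName() + " = " + statistic.getValue());
    }
  }
}
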
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/Constants.java b/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/package-info.java
similarity index 61%
rename from hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/Constants.java
rename to hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/package-info.java
index 832a0cb..cb90e5f 100644
--- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/Constants.java
+++ b/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/package-info.java
@@ -14,29 +14,17 @@
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
+ *
  */
 
-package org.apache.hadoop.fs.ozone;
-
 /**
- * Constants for Ozone FileSystem implementation.
+ * Ozone Filesystem.
+ *
+ * Apart from the exceptions, everything here is an implementation detail.
  */
-public final class Constants {
-
-  public static final String OZONE_DEFAULT_USER = "hdfs";
-
-  public static final String OZONE_USER_DIR = "/user";
-
-  /** Local buffer directory. */
-  public static final String BUFFER_DIR_KEY = "fs.ozone.buffer.dir";
-
-  /** Temporary directory. */
-  public static final String BUFFER_TMP_KEY = "hadoop.tmp.dir";
-
-  /** Page size for Ozone listing operation. */
-  public static final int LISTING_PAGE_SIZE = 1024;
-
-  private Constants() {
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+package org.apache.hadoop.fs.ozone;
 
-  }
-}
+import org.apache.hadoop.hdds.annotation.InterfaceAudience;
+import org.apache.hadoop.hdds.annotation.InterfaceStability;
\ No newline at end of file
diff --git a/hadoop-ozone/ozonefs/src/test/resources/META-INF/services/org.apache.hadoop.fs.FileSystem b/hadoop-ozone/ozonefs-hadoop3/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
similarity index 100%
copy from hadoop-ozone/ozonefs/src/test/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
copy to hadoop-ozone/ozonefs-hadoop3/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
diff --git a/hadoop-ozone/ozonefs-lib-legacy/pom.xml b/hadoop-ozone/ozonefs-lib-legacy/pom.xml
deleted file mode 100644
index 3fa03e4..0000000
--- a/hadoop-ozone/ozonefs-lib-legacy/pom.xml
+++ /dev/null
@@ -1,138 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.hadoop</groupId>
-    <artifactId>hadoop-ozone</artifactId>
-    <version>0.6.0-SNAPSHOT</version>
-  </parent>
-  <artifactId>hadoop-ozone-filesystem-lib-legacy</artifactId>
-  <name>Apache Hadoop Ozone FileSystem Legacy Jar Library</name>
-  <description>This projects creates an uberjar from ozonefs with all the
-    dependencies, but the dependencies are located in an isolated subdir
-    and loaded by a custom class loader. Can be used together with Hadoop 2.x
-  </description>
-  <packaging>jar</packaging>
-  <version>0.6.0-SNAPSHOT</version>
-  <properties>
-    <file.encoding>UTF-8</file.encoding>
-    <downloadSources>true</downloadSources>
-  </properties>
-
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-jar-plugin</artifactId>
-        <executions>
-          <execution>
-            <goals>
-              <goal>test-jar</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-dependency-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>include-dependencies</id>
-            <goals>
-              <goal>unpack-dependencies</goal>
-            </goals>
-            <phase>prepare-package</phase>
-            <configuration>
-              <outputDirectory>target/classes/libs</outputDirectory>
-              <includeScope>compile</includeScope>
-              <excludes>META-INF/*.SF,module-info.class</excludes>
-              <excludeArtifactIds>
-                slf4j-api,slf4j-log4j12,log4j-api,log4j-core,log4j,hadoop-ozone-filesystem
-              </excludeArtifactIds>
-              <markersDirectory>
-                ${project.build.directory}/dependency-maven-plugin-markers-lib
-              </markersDirectory>
-            </configuration>
-          </execution>
-
-          <execution>
-            <id>include-ozonefs</id>
-            <goals>
-              <goal>unpack-dependencies</goal>
-            </goals>
-            <phase>prepare-package</phase>
-            <configuration>
-              <outputDirectory>target/classes</outputDirectory>
-              <includeArtifactIds>hadoop-ozone-filesystem,hadoop-ozone-common</includeArtifactIds>
-              <includeScope>compile</includeScope>
-              <excludes>META-INF/*.SF</excludes>
-              <markersDirectory>
-                ${project.build.directory}/dependency-maven-plugin-markers-direct
-              </markersDirectory>
-            </configuration>
-          </execution>
-
-          <execution>
-            <id>include-token</id>
-            <goals>
-              <goal>unpack-dependencies</goal>
-            </goals>
-            <phase>prepare-package</phase>
-            <configuration>
-              <outputDirectory>target/classes</outputDirectory>
-              <includeArtifactIds>hadoop-ozone-common,hadoop-hdds-common</includeArtifactIds>
-              <includeScope>compile</includeScope>
-              <includes>
-                      org/apache/hadoop/ozone/security/OzoneTokenIdentifier.class,org/apache/hadoop/hdds/security/token/OzoneBlockTokenIdentifier.class,org/apache/hadoop/ozone/protocol/proto/OzoneManagerProtocolProtos*,org/apache/hadoop/hdds/protocol/proto/HddsProtos*
-              </includes>
-              <excludes>META-INF/*.SF</excludes>
-              <markersDirectory>
-                ${project.build.directory}/dependency-maven-plugin-markers-token
-              </markersDirectory>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>animal-sniffer-maven-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>signature-check</id>
-            <phase></phase>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>com.github.spotbugs</groupId>
-        <artifactId>spotbugs-maven-plugin</artifactId>
-        <configuration>
-          <skip>true</skip>
-        </configuration>
-      </plugin>
-    </plugins>
-  </build>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-filesystem</artifactId>
-      <scope>compile</scope>
-    </dependency>
-  </dependencies>
-</project>
diff --git a/hadoop-ozone/ozonefs-lib-legacy/src/main/resources/ozonefs.txt b/hadoop-ozone/ozonefs-lib-legacy/src/main/resources/ozonefs.txt
deleted file mode 100644
index 85c1307..0000000
--- a/hadoop-ozone/ozonefs-lib-legacy/src/main/resources/ozonefs.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-Apache Hadoop Ozone placeholder file.
-
-The usage of the legacy version of the uber jar can be detected based on
-the existence of this file.
diff --git a/hadoop-ozone/ozonefs-lib-current/pom.xml b/hadoop-ozone/ozonefs-shaded/pom.xml
similarity index 75%
rename from hadoop-ozone/ozonefs-lib-current/pom.xml
rename to hadoop-ozone/ozonefs-shaded/pom.xml
index 697596c..da8ffc8 100644
--- a/hadoop-ozone/ozonefs-lib-current/pom.xml
+++ b/hadoop-ozone/ozonefs-shaded/pom.xml
@@ -21,51 +21,51 @@
     <artifactId>hadoop-ozone</artifactId>
     <version>0.6.0-SNAPSHOT</version>
   </parent>
-  <artifactId>hadoop-ozone-filesystem-lib-current</artifactId>
-  <name>Apache Hadoop Ozone FileSystem Single Jar Library</name>
+  <artifactId>hadoop-ozone-filesystem-shaded</artifactId>
+  <name>Apache Hadoop Ozone FileSystem Shaded</name>
   <packaging>jar</packaging>
-  <description>This projects creates an uber jar from ozonefs with all the
-    dependencies.
-  </description>
   <version>0.6.0-SNAPSHOT</version>
+
   <properties>
-    <file.encoding>UTF-8</file.encoding>
-    <downloadSources>true</downloadSources>
     <shaded.prefix>org.apache.hadoop.ozone.shaded</shaded.prefix>
   </properties>
 
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-ozone-filesystem-common</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-common</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-hdfs-client</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-hdfs</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-annotations</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>log4j</groupId>
+          <artifactId>log4j</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.slf4j</groupId>
+          <artifactId>slf4j-api</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+  </dependencies>
   <build>
     <plugins>
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-jar-plugin</artifactId>
-        <executions>
-          <execution>
-            <goals>
-              <goal>test-jar</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>animal-sniffer-maven-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>signature-check</id>
-            <phase></phase>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>com.github.spotbugs</groupId>
-        <artifactId>spotbugs-maven-plugin</artifactId>
-        <configuration>
-          <skip>true</skip>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-shade-plugin</artifactId>
         <executions>
           <execution>
@@ -84,8 +84,10 @@
                     <resource>META-INF/BC2048KE.SF</resource>
                   </resources>
                 </transformer>
-                <transformer implementation="org.apache.maven.plugins.shade.resource.ServicesResourceTransformer"/>
-                <transformer implementation="org.apache.maven.plugins.shade.resource.XmlAppendingTransformer">
+                <transformer
+                        implementation="org.apache.maven.plugins.shade.resource.ServicesResourceTransformer"/>
+                <transformer
+                        implementation="org.apache.maven.plugins.shade.resource.XmlAppendingTransformer">
                   <resource>ozone-default-generated.xml</resource>
                 </transformer>
               </transformers>
@@ -168,50 +170,4 @@
       </plugin>
     </plugins>
   </build>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-filesystem</artifactId>
-      <scope>compile</scope>
-      <exclusions>
-        <exclusion>
-          <groupId>org.apache.hadoop</groupId>
-          <artifactId>hadoop-common</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.apache.hadoop</groupId>
-          <artifactId>hadoop-hdds-server-framework</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.apache.hadoop</groupId>
-          <artifactId>hadoop-hdds-container-service</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.apache.hadoop</groupId>
-          <artifactId>hadoop-hdfs</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.apache.hadoop</groupId>
-          <artifactId>hadoop-hdfs-client</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.slf4j</groupId>
-          <artifactId>slf4j-api</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.apache.logging.log4j</groupId>
-          <artifactId>log4j-api</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.apache.logging.log4j</groupId>
-          <artifactId>log4j-core</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>com.google.code.findbugs</groupId>
-          <artifactId>jsr305</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-  </dependencies>
 </project>
diff --git a/hadoop-ozone/ozonefs/pom.xml b/hadoop-ozone/ozonefs/pom.xml
index 8cc2434..b758b42 100644
--- a/hadoop-ozone/ozonefs/pom.xml
+++ b/hadoop-ozone/ozonefs/pom.xml
@@ -78,50 +78,10 @@
   <dependencies>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-hadoop-dependency-client</artifactId>
+      <artifactId>hadoop-ozone-filesystem-common</artifactId>
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-test-utils</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-common</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-common</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-server-scm</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-server-framework</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-ozone-manager</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-container-service</artifactId>
-      <scope>test</scope>
-    </dependency>
-     <dependency>
-      <groupId>com.github.spotbugs</groupId>
-      <artifactId>spotbugs</artifactId>
-      <scope>provided</scope>
-    </dependency>
-
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-ozone-client</artifactId>
     </dependency>
     <dependency>
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/FilteredClassLoader.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/FilteredClassLoader.java
deleted file mode 100644
index e115251..0000000
--- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/FilteredClassLoader.java
+++ /dev/null
@@ -1,96 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.ozone;
-
-import java.net.URL;
-import java.net.URLClassLoader;
-import java.util.HashSet;
-import java.util.Set;
-
-import org.apache.hadoop.util.StringUtils;
-
-/**
- * Class loader which delegates loading only for a selected set of classes.
- *
- * <p>
- * By default a Java class loader first delegates all class loading to its
- * parent, and loads a class itself only if the parent cannot find it.
- * <p>
- * This simple class loader does the opposite. Everything is loaded by this
- * class loader without delegation _except_ the few classes which are defined
- * in the constructor.
- * <p>
- * With this approach we can use two separate class loaders (the original main
- * class loader and an instance of this one, which loads the isolated
- * classes), while the few selected classes are shared between the two.
- * <p>
- * This makes it possible to use any older hadoop version (main class loader)
- * together with ozonefs (an instance of this class loader), as only the
- * selected classes are shared between the class loaders.
- */
-public class FilteredClassLoader extends URLClassLoader {
-
-  private final ClassLoader systemClassLoader;
-
-  private final ClassLoader delegate;
-  private Set<String> delegatedClasses = new HashSet<>();
-
-  public FilteredClassLoader(URL[] urls, ClassLoader parent) {
-    super(urls, null);
-    delegatedClasses.add("org.apache.hadoop.crypto.key.KeyProvider");
-    delegatedClasses.add("org.apache.hadoop.fs.ozone.OzoneClientAdapter");
-    delegatedClasses.add("org.apache.hadoop.fs.ozone.FileStatusAdapter");
-    delegatedClasses.add("org.apache.hadoop.security.token.Token");
-    delegatedClasses.add("org.apache.hadoop.fs.ozone.BasicKeyInfo");
-    delegatedClasses.add("org.apache.hadoop.fs.ozone.OzoneFSOutputStream");
-    delegatedClasses.add("org.apache.hadoop.fs.ozone.OzoneFSStorageStatistics");
-    delegatedClasses.add("org.apache.hadoop.fs.ozone.Statistic");
-    delegatedClasses.add("org.apache.hadoop.fs.Seekable");
-    delegatedClasses.add("org.apache.hadoop.io.Text");
-    delegatedClasses.add("org.apache.hadoop.fs.Path");
-    delegatedClasses.add("org.apache.hadoop.fs.BlockLocation");
-    delegatedClasses.addAll(StringUtils.getTrimmedStringCollection(
-        System.getenv("HADOOP_OZONE_DELEGATED_CLASSES")));
-    this.delegate = parent;
-    systemClassLoader = getSystemClassLoader();
-
-  }
-
-  @Override
-  public Class<?> loadClass(String name) throws ClassNotFoundException {
-    if (delegatedClasses.contains(name) ||
-        name.startsWith("org.apache.log4j") ||
-        name.startsWith("org.slf4j")) {
-      return delegate.loadClass(name);
-    }
-    return super.loadClass(name);
-  }
-
-  private Class<?> loadFromSystem(String name) {
-    if (systemClassLoader != null) {
-      try {
-        return systemClassLoader.loadClass(name);
-      } catch (ClassNotFoundException ex) {
-        //no problem
-        return null;
-      }
-    } else {
-      return null;
-    }
-  }
-}
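
The removed FilteredClassLoader runs to 96 lines, but the inverted delegation its javadoc describes fits in a few. The sketch below is illustrative only: the class and field names are made up, the allow-list is passed in rather than hard-coded, and it is not the removed implementation.

    import java.net.URL;
    import java.net.URLClassLoader;
    import java.util.Set;

    // Load everything locally; delegate to the application class loader only
    // for an allow-listed set of classes shared between the two loaders.
    class InvertedDelegationClassLoader extends URLClassLoader {

      private final ClassLoader appLoader;     // the parent in a normal setup
      private final Set<String> sharedClasses; // classes visible on both sides

      InvertedDelegationClassLoader(URL[] urls, ClassLoader appLoader,
          Set<String> sharedClasses) {
        super(urls, null);                     // null parent: no default delegation
        this.appLoader = appLoader;
        this.sharedClasses = sharedClasses;
      }

      @Override
      public Class<?> loadClass(String name) throws ClassNotFoundException {
        if (sharedClasses.contains(name)) {
          return appLoader.loadClass(name);    // shared API types come from the app
        }
        return super.loadClass(name);          // everything else stays isolated
      }
    }
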
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapterFactory.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapterFactory.java
deleted file mode 100644
index 4442c63..0000000
--- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapterFactory.java
+++ /dev/null
@@ -1,169 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.ozone;
-
-import java.io.IOException;
-import java.lang.reflect.InvocationTargetException;
-import java.net.MalformedURLException;
-import java.net.URL;
-import java.util.ArrayList;
-import java.util.Enumeration;
-import java.util.List;
-
-import org.apache.hadoop.fs.StorageStatistics;
-
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Creates OzoneClientAdapter with classloader separation.
- */
-public final class OzoneClientAdapterFactory {
-
-  static final Logger LOG =
-      LoggerFactory.getLogger(OzoneClientAdapterFactory.class);
-
-  private OzoneClientAdapterFactory() {
-  }
-
-  @SuppressFBWarnings("DP_CREATE_CLASSLOADER_INSIDE_DO_PRIVILEGED")
-  public static OzoneClientAdapter createAdapter(
-      String volumeStr,
-      String bucketStr) throws IOException {
-    return createAdapter(volumeStr, bucketStr, true,
-        (aClass) -> (OzoneClientAdapter) aClass
-            .getConstructor(String.class, String.class)
-            .newInstance(
-                volumeStr,
-                bucketStr));
-  }
-
-  @SuppressFBWarnings("DP_CREATE_CLASSLOADER_INSIDE_DO_PRIVILEGED")
-  public static OzoneClientAdapter createAdapter(
-      String volumeStr,
-      String bucketStr,
-      StorageStatistics storageStatistics) throws IOException {
-    return createAdapter(volumeStr, bucketStr, false,
-        (aClass) -> (OzoneClientAdapter) aClass
-            .getConstructor(String.class, String.class,
-                OzoneFSStorageStatistics.class)
-            .newInstance(
-                volumeStr,
-                bucketStr,
-                storageStatistics));
-  }
-
-  @SuppressFBWarnings("DP_CREATE_CLASSLOADER_INSIDE_DO_PRIVILEGED")
-  public static OzoneClientAdapter createAdapter(
-      String volumeStr,
-      String bucketStr,
-      boolean basic,
-      OzoneClientAdapterCreator creator) throws IOException {
-
-    ClassLoader currentClassLoader =
-        OzoneClientAdapterFactory.class.getClassLoader();
-    List<URL> urls = new ArrayList<>();
-
-    findEmbeddedLibsUrl(urls, currentClassLoader);
-
-    findConfigDirUrl(urls, currentClassLoader);
-
-    ClassLoader classLoader =
-        new FilteredClassLoader(urls.toArray(new URL[0]), currentClassLoader);
-
-    try {
-
-      ClassLoader contextClassLoader =
-          Thread.currentThread().getContextClassLoader();
-      Thread.currentThread().setContextClassLoader(classLoader);
-
-      //this class caches the context classloader during the first load
-      //call it here when the context class loader is set to the isolated
-      //loader to make sure the grpc class will be loaded by the right
-      //loader
-      Class<?> reflectionUtils =
-          classLoader.loadClass("org.apache.ratis.util.ReflectionUtils");
-      reflectionUtils.getMethod("getClassByName", String.class)
-          .invoke(null, "org.apache.ratis.grpc.GrpcFactory");
-
-      Class<?> adapterClass = null;
-      if (basic) {
-        adapterClass = classLoader
-            .loadClass(
-                "org.apache.hadoop.fs.ozone.BasicOzoneClientAdapterImpl");
-      } else {
-        adapterClass = classLoader
-            .loadClass(
-                "org.apache.hadoop.fs.ozone.OzoneClientAdapterImpl");
-      }
-      OzoneClientAdapter ozoneClientAdapter =
-          creator.createOzoneClientAdapter(adapterClass);
-
-      Thread.currentThread().setContextClassLoader(contextClassLoader);
-
-      return ozoneClientAdapter;
-    } catch (Exception e) {
-      LOG.error("Can't initialize the ozoneClientAdapter", e);
-      throw new IOException(
-          "Can't initialize the OzoneClientAdapter implementation", e);
-    }
-  }
-
-  private static void findConfigDirUrl(List<URL> urls,
-      ClassLoader currentClassLoader) throws IOException {
-    Enumeration<URL> conf =
-        currentClassLoader.getResources("ozone-site.xml");
-    while (conf.hasMoreElements()) {
-      urls.add(
-          new URL(
-              conf.nextElement().toString().replace("ozone-site.xml", "")));
-
-    }
-  }
-
-  private static void findEmbeddedLibsUrl(List<URL> urls,
-      ClassLoader currentClassloader)
-      throws MalformedURLException {
-
-    //marker file is added to the jar to make it easier to find the URL
-    // for the current jar.
-    String markerFile = "ozonefs.txt";
-    ClassLoader currentClassLoader =
-        OzoneClientAdapterFactory.class.getClassLoader();
-
-    URL ozFs = currentClassLoader
-        .getResource(markerFile);
-    String rootPath = ozFs.toString().replace(markerFile, "");
-    urls.add(new URL(rootPath));
-
-    urls.add(new URL(rootPath + "libs/"));
-
-  }
-
-  /**
-   * Interface to create OzoneClientAdapter implementation with reflection.
-   */
-  @FunctionalInterface
-  interface OzoneClientAdapterCreator {
-    OzoneClientAdapter createOzoneClientAdapter(Class<?> clientAdapter)
-        throws NoSuchMethodException, IllegalAccessException,
-        InvocationTargetException, InstantiationException;
-  }
-
-}
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
index 20dd72f..c3e308b 100644
--- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
+++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
@@ -22,14 +22,13 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.net.URI;
 
-import org.apache.hadoop.hdds.annotation.InterfaceAudience;
-import org.apache.hadoop.hdds.annotation.InterfaceStability;
-import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProviderTokenIssuer;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.GlobalStorageStatistics;
 import org.apache.hadoop.fs.StorageStatistics;
+import org.apache.hadoop.hdds.annotation.InterfaceAudience;
+import org.apache.hadoop.hdds.annotation.InterfaceStability;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.security.token.DelegationTokenIssuer;
 
 /**
@@ -47,6 +46,10 @@ public class OzoneFileSystem extends BasicOzoneFileSystem
 
   private OzoneFSStorageStatistics storageStatistics;
 
+  public OzoneFileSystem() {
+    this.storageStatistics = new OzoneFSStorageStatistics();
+  }
+
   @Override
   public KeyProvider getKeyProvider() throws IOException {
     return getAdapter().getKeyProvider();
@@ -86,23 +89,11 @@ public class OzoneFileSystem extends BasicOzoneFileSystem
 
   @Override
   protected OzoneClientAdapter createAdapter(ConfigurationSource conf,
-      String bucketStr,
-      String volumeStr, String omHost, int omPort,
-      boolean isolatedClassloader) throws IOException {
-
-    this.storageStatistics =
-        (OzoneFSStorageStatistics) GlobalStorageStatistics.INSTANCE
-            .put(OzoneFSStorageStatistics.NAME,
-                OzoneFSStorageStatistics::new);
-
-    if (isolatedClassloader) {
-      return OzoneClientAdapterFactory.createAdapter(volumeStr, bucketStr,
-          storageStatistics);
-
-    } else {
-      return new OzoneClientAdapterImpl(omHost, omPort, conf,
-          volumeStr, bucketStr, storageStatistics);
-    }
+      String bucketStr, String volumeStr, String omHost, int omPort)
+      throws IOException {
+    return new OzoneClientAdapterImpl(omHost, omPort, conf, volumeStr,
+        bucketStr,
+        storageStatistics);
   }
 
   @Override
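
With the isolated-classloader branch gone, o3fs is wired up like any other Hadoop file system and applications keep using the plain FileSystem API. A minimal sketch of that path follows, assuming the shaded (or hadoop2/hadoop3) jar is on the classpath and an Ozone Manager is reachable; the class name and the bucket/volume/host in the URI are placeholders, mirroring the authorities used by the removed mocked tests.

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class O3fsUsageSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // The o3fs scheme is resolved through the META-INF/services
        // registration that this change moves from test to main resources;
        // setting fs.o3fs.impl to org.apache.hadoop.fs.ozone.OzoneFileSystem
        // is only a manual fallback.
        URI uri = URI.create("o3fs://bucket1.volume1.om-host"); // placeholder authority
        try (FileSystem fs = FileSystem.newInstance(uri, conf);
             FSDataOutputStream out = fs.create(new Path("/example/key"))) {
          out.writeUTF("hello o3fs");
        }
      }
    }
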
diff --git a/hadoop-ozone/ozonefs/src/test/resources/META-INF/services/org.apache.hadoop.fs.FileSystem b/hadoop-ozone/ozonefs/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
similarity index 100%
rename from hadoop-ozone/ozonefs/src/test/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
rename to hadoop-ozone/ozonefs/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestFilteredClassLoader.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestFilteredClassLoader.java
deleted file mode 100644
index 26a77eb..0000000
--- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestFilteredClassLoader.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone;
-
-import java.net.URL;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.powermock.api.mockito.PowerMockito;
-import org.powermock.core.classloader.annotations.PrepareForTest;
-import org.powermock.modules.junit4.PowerMockRunner;
-
-import static org.junit.Assert.assertEquals;
-import static org.mockito.Mockito.when;
-
-/**
- * FilteredClassLoader test using mocks.
- */
-@RunWith(PowerMockRunner.class)
-@PrepareForTest({ FilteredClassLoader.class, OzoneFSInputStream.class})
-public class TestFilteredClassLoader {
-  @Test
-  public void testFilteredClassLoader() {
-    PowerMockito.mockStatic(System.class);
-    when(System.getenv("HADOOP_OZONE_DELEGATED_CLASSES"))
-        .thenReturn("org.apache.hadoop.fs.ozone.OzoneFSInputStream");
-
-    ClassLoader currentClassLoader =
-        TestFilteredClassLoader.class.getClassLoader();
-
-    List<URL> urls = new ArrayList<>();
-    ClassLoader classLoader = new FilteredClassLoader(
-        urls.toArray(new URL[0]), currentClassLoader);
-
-    try {
-      classLoader.loadClass(
-          "org.apache.hadoop.fs.ozone.OzoneFSInputStream");
-      ClassLoader expectedClassLoader =
-          OzoneFSInputStream.class.getClassLoader();
-      assertEquals(expectedClassLoader, currentClassLoader);
-    } catch (ClassNotFoundException e) {
-      e.printStackTrace();
-    }
-  }
-}
diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithMocks.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithMocks.java
deleted file mode 100644
index d204ad5..0000000
--- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithMocks.java
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.ozone;
-
-import java.io.IOException;
-import java.net.URI;
-import java.net.URISyntaxException;
-
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdds.conf.ConfigurationSource;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.OmUtils;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.security.UserGroupInformation;
-
-import static org.junit.Assert.assertEquals;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.eq;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-import org.powermock.api.mockito.PowerMockito;
-import org.powermock.core.classloader.annotations.PowerMockIgnore;
-import org.powermock.core.classloader.annotations.PrepareForTest;
-import org.powermock.modules.junit4.PowerMockRunner;
-
-/**
- * Ozone File system tests that are light weight and use mocks.
- */
-@RunWith(PowerMockRunner.class)
-@PrepareForTest({ OzoneClientFactory.class, UserGroupInformation.class })
-@PowerMockIgnore("javax.management.*")
-public class TestOzoneFileSystemWithMocks {
-
-  @Test
-  public void testFSUriWithHostPortOverrides() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    mockClientFactory(conf, 5899);
-    mockUser();
-
-    URI uri = new URI("o3fs://bucket1.volume1.local.host:5899");
-    FileSystem fileSystem = FileSystem.get(uri, conf);
-    OzoneFileSystem ozfs = (OzoneFileSystem) fileSystem;
-
-    assertEquals("bucket1.volume1.local.host:5899",
-        ozfs.getUri().getAuthority());
-    PowerMockito.verifyStatic();
-    OzoneClientFactory.getRpcClient("local.host", 5899, conf);
-  }
-
-  @Test
-  public void testFSUriWithHostPortUnspecified() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    final int omPort = OmUtils.getOmRpcPort(conf);
-    mockClientFactory(conf, omPort);
-    mockUser();
-
-    URI uri = new URI("o3fs://bucket1.volume1.local.host");
-    FileSystem fileSystem = FileSystem.get(uri, conf);
-    OzoneFileSystem ozfs = (OzoneFileSystem) fileSystem;
-
-    assertEquals("bucket1.volume1.local.host", ozfs.getUri().getHost());
-    // The URI doesn't contain a port number, expect -1 from getPort()
-    assertEquals(ozfs.getUri().getPort(), -1);
-    PowerMockito.verifyStatic();
-    // Check the actual port number in use
-    OzoneClientFactory.getRpcClient("local.host", omPort, conf);
-  }
-
-  @Test
-  public void testFSUriHostVersionDefault() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    mockClientFactory(conf);
-    mockUser();
-
-    URI uri = new URI("o3fs://bucket1.volume1/key");
-    FileSystem fileSystem = FileSystem.get(uri, conf);
-    OzoneFileSystem ozfs = (OzoneFileSystem) fileSystem;
-
-    assertEquals("bucket1.volume1", ozfs.getUri().getAuthority());
-    PowerMockito.verifyStatic();
-    OzoneClientFactory.getRpcClient(conf);
-  }
-
-  @Test
-  public void testReplicationDefaultValue()
-      throws IOException, URISyntaxException {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    int defaultValue = conf.getInt(OzoneConfigKeys.OZONE_REPLICATION, 3);
-    mockClientFactory(conf);
-    mockUser();
-
-    URI uri = new URI("o3fs://bucket1.volume1/key");
-    FileSystem fs = FileSystem.get(uri, conf);
-
-    assertEquals(defaultValue, fs.getDefaultReplication(new Path("/any")));
-  }
-
-  @Test
-  public void testReplicationCustomValue()
-      throws IOException, URISyntaxException {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    short configured = 1;
-    conf.setInt(OzoneConfigKeys.OZONE_REPLICATION, configured);
-    mockClientFactory(conf);
-    mockUser();
-
-    URI uri = new URI("o3fs://bucket1.volume1/key");
-    FileSystem fs = FileSystem.get(uri, conf);
-
-    assertEquals(configured, fs.getDefaultReplication(new Path("/any")));
-  }
-
-  private OzoneClient mockClient() throws IOException {
-    OzoneClient ozoneClient = mock(OzoneClient.class);
-    ObjectStore objectStore = mock(ObjectStore.class);
-    OzoneVolume volume = mock(OzoneVolume.class);
-    OzoneBucket bucket = mock(OzoneBucket.class);
-
-    when(ozoneClient.getObjectStore()).thenReturn(objectStore);
-    when(objectStore.getVolume(eq("volume1"))).thenReturn(volume);
-    when(volume.getBucket("bucket1")).thenReturn(bucket);
-    return ozoneClient;
-  }
-
-  private void mockClientFactory(ConfigurationSource conf, int omPort)
-      throws IOException {
-    OzoneClient ozoneClient = mockClient();
-
-    PowerMockito.mockStatic(OzoneClientFactory.class);
-    PowerMockito.when(OzoneClientFactory.getRpcClient(eq("local.host"),
-        eq(omPort), any())).thenReturn(ozoneClient);
-  }
-
-  private void mockClientFactory(ConfigurationSource conf) throws IOException {
-    OzoneClient ozoneClient = mockClient();
-
-    PowerMockito.mockStatic(OzoneClientFactory.class);
-    PowerMockito.when(OzoneClientFactory.getRpcClient(any()))
-        .thenReturn(ozoneClient);
-  }
-
-  private void mockUser() throws IOException {
-    UserGroupInformation ugi = mock(UserGroupInformation.class);
-    PowerMockito.mockStatic(UserGroupInformation.class);
-    PowerMockito.when(UserGroupInformation.getCurrentUser()).thenReturn(ugi);
-    when(ugi.getShortUserName()).thenReturn("user1");
-  }
-}
diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestReadWriteStatistics.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestReadWriteStatistics.java
deleted file mode 100644
index 3659cdc..0000000
--- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestReadWriteStatistics.java
+++ /dev/null
@@ -1,463 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.nio.ByteBuffer;
-import java.nio.ReadOnlyBufferException;
-import java.util.Arrays;
-import java.util.EnumSet;
-
-import org.apache.hadoop.fs.CreateFlag;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.GlobalStorageStatistics;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.StorageStatistics;
-import org.apache.hadoop.hdds.conf.ConfigurationSource;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
-import org.junit.Before;
-import org.junit.Test;
-import static org.mockito.Mockito.any;
-import static org.mockito.Mockito.anyBoolean;
-import static org.mockito.Mockito.anyInt;
-import static org.mockito.Mockito.anyShort;
-import static org.mockito.Mockito.anyString;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.doThrow;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.spy;
-import static org.mockito.Mockito.when;
-
-/**
- * Tests to check if bytes read and written and corresponding read and write
- * operation counts are accounted properly in FileSystem statistics, when the
- * FileSystem API is used to read data from Ozone.
- */
-public class TestReadWriteStatistics {
-
-  private Path aPath = new Path("/afile");
-  private byte[] buff = new byte[512];
-
-  @Test
-  public void testZeroBytesReadWhenExceptionWasThrown() throws Exception {
-    setupFakeInputStreamToThrowIOExceptionOnRead();
-    FSDataInputStream stream = fs.open(aPath);
-
-    try {
-      stream.read();
-    } catch (IOException e){
-      // Expected
-    }
-
-    assertBytesReadAndReadNumOps(0, 1);
-  }
-
-  @Test
-  public void testZeroBytesReadWhenEOFReached() throws Exception {
-    setupFakeInputStreamToReadByte(-1);
-    FSDataInputStream stream = fs.open(aPath);
-
-    stream.read();
-
-    assertBytesReadAndReadNumOps(0, 1);
-  }
-
-  @Test
-  public void testOneByteReadOnSingleReadCall() throws Exception {
-    setupFakeInputStreamToReadByte(20);
-    FSDataInputStream stream = fs.open(aPath);
-
-    stream.read();
-
-    assertBytesReadAndReadNumOps(1, 1);
-  }
-
-  @Test
-  public void testConsecutiveReadsIncreaseStats() throws Exception {
-    setupFakeInputStreamToReadByte(20);
-    FSDataInputStream stream = fs.open(aPath);
-
-    for (int i = 1; i <= 5; i++) {
-      stream.read();
-
-      assertBytesReadAndReadNumOps(i, 1);
-    }
-  }
-
-  @Test
-  public void testConsecutiveOpensAndReadsIncreaseStats() throws Exception {
-    setupFakeInputStreamToReadByte(20);
-
-    for (int i = 0; i < 5; i++) {
-      FSDataInputStream stream = fs.open(aPath);
-      stream.read();
-      stream.close();
-
-      assertBytesReadAndReadNumOps(i+1, i+1);
-    }
-  }
-
-  @Test
-  public void testConsecutiveOpensIncreaseStats() throws Exception {
-    setupFakeInputStreamToReadByte(20);
-
-    for (int i = 1; i <= 5; i++) {
-      FSDataInputStream stream = fs.open(aPath);
-      stream.close();
-
-      assertBytesReadAndReadNumOps(0, i);
-    }
-  }
-
-  @Test
-  public void testZeroBytesReadOnMultiByteReadWhenExceptionWasThrown()
-      throws Exception {
-    setupFakeInputStreamToThrowExceptionOnMultiByteRead();
-    FSDataInputStream stream = fs.open(aPath);
-
-    try {
-      stream.read(buff, 0, buff.length);
-    } catch (IOException e) {
-      // Expected
-    }
-
-    assertBytesReadAndReadNumOps(0, 1);
-  }
-
-  @Test
-  public void testZeroBytesReadOnMultiByteReadWhenEOFReachedAtStart()
-      throws Exception {
-    setupFakeInputStreamToReadNumBytesOnMultiByteRead(-1);
-    FSDataInputStream stream = fs.open(aPath);
-
-    stream.read(buff, 0, buff.length);
-
-    assertBytesReadAndReadNumOps(0, 1);
-  }
-
-  @Test
-  public void testEOFBeforeLengthOnMultiByteRead() throws Exception {
-    setupFakeInputStreamToReadNumBytesOnMultiByteRead(256);
-    FSDataInputStream stream = fs.open(aPath);
-
-    stream.read(buff, 0, buff.length);
-
-    assertBytesReadAndReadNumOps(256, 1);
-  }
-
-  @Test
-  public void testFullyReadBufferOnMultiByteRead() throws Exception {
-    setupFakeInputStreamToReadNumBytesOnMultiByteRead(buff.length);
-    FSDataInputStream stream = fs.open(aPath);
-
-    stream.read(buff, 0, buff.length);
-
-    assertBytesReadAndReadNumOps(buff.length, 1);
-  }
-
-  @Test
-  public void testConsecutiveReadsToBufferOnMultiByteRead() throws Exception {
-    setupFakeInputStreamToReadNumBytesOnMultiByteRead(buff.length);
-    FSDataInputStream stream = fs.open(aPath);
-
-    stream.read(buff, 0, buff.length);
-    stream.read(buff, 0, buff.length);
-    assertBytesReadAndReadNumOps(2*buff.length, 1);
-
-    setupFakeInputStreamToReadNumBytesOnMultiByteRead(256);
-    stream.read(buff, 0, 256);
-
-    assertBytesReadAndReadNumOps(2*buff.length + 256, 1);
-  }
-
-  @Test
-  public void testZeroBytesWrittenWhenExceptionWasThrown() throws Exception {
-    setupFakeOutputStreamToThrowIOExceptionOnWrite();
-    FSDataOutputStream stream = fs.create(aPath);
-
-    try {
-      stream.write(20);
-    } catch (IOException e) {
-      //Expected
-    }
-
-    assertBytesWrittenAndWriteNumOps(0, 1);
-  }
-
-  @Test
-  public void testOneByteWrittenOnSingleWriteCall() throws Exception {
-    FSDataOutputStream stream = fs.create(aPath);
-
-    stream.write(20);
-
-    assertBytesWrittenAndWriteNumOps(1, 1);
-  }
-
-  @Test
-  public void testConsecutiveWritesIncreaseStats() throws Exception {
-    FSDataOutputStream stream = fs.create(aPath);
-
-    for(int i = 1; i <= 5; i++){
-      stream.write(20);
-
-      assertBytesWrittenAndWriteNumOps(i, 1);
-    }
-  }
-
-  @Test
-  public void testConsecutiveCreatesAndWritesIncreaseStats() throws Exception {
-    for(int i = 1; i <= 5; i++){
-      FSDataOutputStream stream = fs.create(aPath);
-
-      stream.write(20);
-
-      assertBytesWrittenAndWriteNumOps(i, i);
-    }
-  }
-
-  @Test
-  public void testConsecutiveCreatesIncreaseStats() throws Exception {
-    for(int i = 1; i <= 5; i++){
-      fs.create(aPath);
-
-      assertBytesWrittenAndWriteNumOps(0, i);
-    }
-  }
-
-  @Test
-  public void testBufferReadCallsIncreaseStatistics()
-      throws Exception {
-    setupFakeInputStreamToReadNumBytesOnMultiByteRead(128);
-    ByteBuffer buffer = ByteBuffer.wrap(buff);
-    FSDataInputStream stream = fs.open(aPath);
-
-    stream.read(buffer);
-
-    assertBytesReadAndReadNumOps(128, 1);
-  }
-
-  @Test
-  public void testReadToReadOnlyBufferDoesNotChangeStats() throws Exception {
-    setupFakeInputStreamToReadNumBytesOnMultiByteRead(128);
-    ByteBuffer buffer = ByteBuffer.wrap(buff).asReadOnlyBuffer();
-    FSDataInputStream stream = fs.open(aPath);
-
-    try {
-      stream.read(buffer);
-    } catch (ReadOnlyBufferException e) {
-      // Expected
-    }
-
-    assertBytesReadAndReadNumOps(0, 1);
-  }
-
-  @Test
-  public void testZeroBytesWrittenOnMultiByteWriteWhenExceptionWasThrown()
-      throws Exception {
-    setupFakeOutputStreamToThrowIOExceptionOnMultiByteWrite();
-    FSDataOutputStream stream = fs.create(aPath);
-
-    try {
-      stream.write(buff, 0, buff.length);
-    } catch (IOException e) {
-      // Expected
-    }
-
-    assertBytesWrittenAndWriteNumOps(0, 1);
-  }
-
-  @Test
-  public void testBufferFullyWrittenOnMultiByteWrite() throws Exception {
-    FSDataOutputStream stream = fs.create(aPath);
-
-    stream.write(buff, 0, buff.length);
-
-    assertBytesWrittenAndWriteNumOps(buff.length, 1);
-  }
-
-  @Test
-  public void testBufferPartiallyWrittenOnMultiByteWrite() throws Exception {
-    FSDataOutputStream stream = fs.create(aPath);
-
-    stream.write(buff, buff.length/2, buff.length/4);
-
-    assertBytesWrittenAndWriteNumOps(buff.length/4, 1);
-  }
-
-  @Test
-  public void testConsecutiveMultiByteWritesIncreaseStats() throws Exception {
-    FSDataOutputStream stream = fs.create(aPath);
-
-    for(int i = 1; i <=5; i++) {
-      stream.write(buff, 0, buff.length);
-      assertBytesWrittenAndWriteNumOps((i * buff.length), 1);
-    }
-
-    stream.write(buff, 0, 128);
-    assertBytesWrittenAndWriteNumOps((128 + (5 * buff.length)), 1);
-  }
-
-  @Test
-  public void testNonRecursiveCreateIncreaseStats() throws Exception {
-    EnumSet<CreateFlag> flags = EnumSet.of(CreateFlag.OVERWRITE);
-    for(int i = 1; i <=5; i++){
-      FSDataOutputStream stream =
-          fs.createNonRecursive(aPath, null, flags, 512, (short) 3, 512, null);
-
-      assertBytesWrittenAndWriteNumOps(0, i);
-    }
-  }
-
-  @Test(expected = UnsupportedOperationException.class)
-  public void testsIfAppendGetsSupported() throws Exception {
-    fs.append(aPath, 512, null);
-    fail("Add tests to cover metrics changes on append!");
-  }
-
-  // INTERNALS
-  //TODO: check why this is not equal to OzoneFSStorageStatistics.NAME,
-  // as I believe that should be the value used here instead of the one seen.
-
-  private static final String O3FS_STORAGE_STAT_NAME = "o3fs";
-  // See Hadoop main project's FileSystemStorageStatistics class for the KEYS
-  // there are these defined as well.
-  private static final String STAT_NAME_BYTES_READ = "bytesRead";
-
-  private static final String STAT_NAME_BYTES_WRITTEN = "bytesWritten";
-  private static final String STAT_NAME_READ_OPS = "readOps";
-  private static final String STAT_NAME_LARGE_READ_OPS = "largeReadOps";
-  private static final String STAT_NAME_WRITE_OPS = "writeOps";
-  // These are out of scope at this time, included here in comment to have the
-  // full list of possible keys.
-  // private static final String STAT_NAME_BYTES_READ_LOCALHOST = "";
-  // private static final String STAT_NAME_BYTES_READ_DISTANCE_1_OR_2 = "";
-  // private static final String STAT_NAME_BYTES_READ_DISTANCE_3_OR_4 = "";
-  // private static final String STAT_NAME_BYTES_READ_DISTANCE_5_OR_LARGER = "";
-  // private static final String STAT_NAME_BYTES_READ_ERASURE_CODED = "";
-
-  private long readValueFromFSStatistics(String valueName) {
-    GlobalStorageStatistics stats = FileSystem.getGlobalStorageStatistics();
-    StorageStatistics fsStats = stats.get(O3FS_STORAGE_STAT_NAME);
-    return fsStats.getLong(valueName);
-  }
-
-  private void assertBytesReadAndReadNumOps(
-      long expectedBytesRead, long expectedNumReadOps) {
-
-    long bytesRead = readValueFromFSStatistics(STAT_NAME_BYTES_READ);
-    long numReadOps = readValueFromFSStatistics(STAT_NAME_READ_OPS);
-    assertEquals("Bytes read.", expectedBytesRead, bytesRead);
-    assertEquals("Read op count.", expectedNumReadOps, numReadOps);
-  }
-
-  private void assertBytesWrittenAndWriteNumOps(
-      long expectedBytesWritten, long expectedNumWriteOps) {
-
-    long bytesWritten = readValueFromFSStatistics(STAT_NAME_BYTES_WRITTEN);
-    long numWriteOps = readValueFromFSStatistics(STAT_NAME_WRITE_OPS);
-    assertEquals("Bytes written.", expectedBytesWritten, bytesWritten);
-    assertEquals("Write op count.", expectedNumWriteOps, numWriteOps);
-  }
-
-
-  // TEST SETUP
-  private OzoneFileSystem fs = spy(new OzoneFileSystem());
-
-  private OzoneClientAdapter fakeAdapter = mock(OzoneClientAdapter.class);
-
-  //we need a Seekable here to check readFully comfortably
-  private InputStream fakeInputStream = mock(InputStream.class);
-
-  private OutputStream fakeOutputStream = mock(OutputStream.class);
-
-  @Before
-  public void setupMocks() throws Exception {
-    setupAdapterToReturnFakeInputStreamOnReadFile();
-    setupAdapterToReturnFakeOutputStreamOnCreate();
-    setupFileSystemToUseFakeClientAdapter();
-    initializeFS();
-    Arrays.fill(buff, (byte) 20);
-  }
-
-  private void setupAdapterToReturnFakeInputStreamOnReadFile()
-      throws IOException {
-    when(fakeAdapter.readFile(anyString())).thenReturn(fakeInputStream);
-  }
-
-  private void setupAdapterToReturnFakeOutputStreamOnCreate() throws Exception {
-    when(fakeAdapter.createFile(anyString(), anyShort(), anyBoolean(),
-        anyBoolean())).thenReturn(new OzoneFSOutputStream(fakeOutputStream));
-  }
-
-  private void setupFileSystemToUseFakeClientAdapter() throws IOException {
-    doReturn(fakeAdapter).when(fs).createAdapter(any(ConfigurationSource.class),
-        anyString(), anyString(), anyString(), anyInt(), anyBoolean());
-  }
-
-  private void initializeFS() throws IOException, URISyntaxException {
-    FileSystem.getGlobalStorageStatistics().reset();
-    URI fsUri = new URI("o3fs://volume.bucket.localhost");
-    OzoneConfiguration conf = new OzoneConfiguration();
-    fs.initialize(fsUri, conf);
-  }
-
-  private void setupFakeInputStreamToThrowIOExceptionOnRead()
-      throws IOException {
-    when(fakeInputStream.read()).thenThrow(new IOException("Simulated IOE"));
-  }
-
-  private void setupFakeInputStreamToReadByte(int byteToReturn)
-      throws IOException {
-    when(fakeInputStream.read()).thenReturn(byteToReturn);
-  }
-
-  private void setupFakeInputStreamToThrowExceptionOnMultiByteRead()
-      throws Exception {
-    when(fakeInputStream.read(any(byte[].class), anyInt(), anyInt()))
-        .thenThrow(new IOException("Simulated IOE"));
-  }
-
-  private void setupFakeInputStreamToReadNumBytesOnMultiByteRead(
-      int numOfBytesToReturn) throws Exception {
-    when(fakeInputStream.read(any(byte[].class), anyInt(), anyInt()))
-        .thenReturn(numOfBytesToReturn);
-  }
-
-  private void setupFakeOutputStreamToThrowIOExceptionOnWrite()
-      throws Exception {
-    doThrow(new IOException("Simulated IOE"))
-        .when(fakeOutputStream).write(anyInt());
-  }
-
-  private void setupFakeOutputStreamToThrowIOExceptionOnMultiByteWrite()
-      throws Exception {
-    doThrow(new IOException("Simulated IOE"))
-        .when(fakeOutputStream).write(any(byte[].class), anyInt(), anyInt());
-  }
-
-}
diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml
index 69c7e0e..6cedbce 100644
--- a/hadoop-ozone/pom.xml
+++ b/hadoop-ozone/pom.xml
@@ -35,6 +35,7 @@
     <module>ozone-manager</module>
     <module>tools</module>
     <module>integration-test</module>
+    <module>ozonefs-common</module>
     <module>ozonefs</module>
     <module>datanode</module>
     <module>s3gateway</module>
@@ -97,12 +98,22 @@
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-ozone-filesystem-lib-current</artifactId>
+        <artifactId>hadoop-ozone-filesystem-shaded</artifactId>
         <version>${ozone.version}</version>
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-ozone-filesystem-lib-legacy</artifactId>
+        <artifactId>hadoop-ozone-filesystem-common</artifactId>
+        <version>${ozone.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-ozone-filesystem-hadoop3</artifactId>
+        <version>${ozone.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-ozone-filesystem-hadoop2</artifactId>
         <version>${ozone.version}</version>
       </dependency>
       <dependency>
@@ -360,8 +371,9 @@
         </property>
       </activation>
       <modules>
-        <module>ozonefs-lib-current</module>
-        <module>ozonefs-lib-legacy</module>
+        <module>ozonefs-shaded</module>
+        <module>ozonefs-hadoop2</module>
+        <module>ozonefs-hadoop3</module>
       </modules>
     </profile>
     <profile>

