Posted to commits@ozone.apache.org by ad...@apache.org on 2023/03/07 06:59:54 UTC

[ozone] 01/02: Revert "HDDS-5447. HttpFS support in Ozone (#4356)"

This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 584d68bd51276b57479350dc773cbefb53f67461
Author: Doroszlai, Attila <ad...@apache.org>
AuthorDate: Tue Mar 7 07:56:35 2023 +0100

    Revert "HDDS-5447. HttpFS support in Ozone (#4356)"
    
    This reverts commit 183600b0631e9a97c0708007298135f55fe32438.
---
 hadoop-hdds/docs/content/design/httpfs.md          |   31 -
 hadoop-hdds/docs/content/interface/HttpFS.md       |  119 --
 hadoop-hdds/docs/content/tools/_index.md           |    1 -
 .../dist/dev-support/bin/dist-layout-stitching     |    2 -
 hadoop-ozone/dist/pom.xml                          |   12 +-
 .../src/main/compose/ozone-ha/docker-compose.yaml  |    8 -
 .../dist/src/main/compose/ozone-ha/docker-config   |    4 -
 .../dist/src/main/compose/ozone-ha/test.sh         |    1 -
 .../src/main/compose/ozone/docker-compose.yaml     |    8 -
 .../dist/src/main/compose/ozone/docker-config      |    3 -
 hadoop-ozone/dist/src/main/compose/ozone/test.sh   |    1 -
 .../compose/ozonesecure-ha/docker-compose.yaml     |   18 -
 .../src/main/compose/ozonesecure-ha/docker-config  |   19 +-
 .../dist/src/main/compose/ozonesecure-ha/test.sh   |    2 -
 .../main/compose/ozonesecure/docker-compose.yaml   |   16 +-
 .../src/main/compose/ozonesecure/docker-config     |   17 +-
 .../dist/src/main/compose/ozonesecure/test.sh      |    2 -
 hadoop-ozone/dist/src/main/keytabs/HTTP.keytab     |  Bin 580 -> 432 bytes
 hadoop-ozone/dist/src/main/keytabs/dn.keytab       |  Bin 278 -> 278 bytes
 hadoop-ozone/dist/src/main/keytabs/hadoop.keytab   |  Bin 146 -> 146 bytes
 hadoop-ozone/dist/src/main/keytabs/httpfs.keytab   |  Bin 458 -> 0 bytes
 hadoop-ozone/dist/src/main/keytabs/jhs.keytab      |  Bin 142 -> 142 bytes
 hadoop-ozone/dist/src/main/keytabs/nm.keytab       |  Bin 138 -> 138 bytes
 hadoop-ozone/dist/src/main/keytabs/om.keytab       |  Bin 278 -> 278 bytes
 hadoop-ozone/dist/src/main/keytabs/recon.keytab    |  Bin 296 -> 296 bytes
 hadoop-ozone/dist/src/main/keytabs/rm.keytab       |  Bin 138 -> 138 bytes
 hadoop-ozone/dist/src/main/keytabs/s3g.keytab      |  Bin 434 -> 434 bytes
 hadoop-ozone/dist/src/main/keytabs/scm.keytab      |  Bin 586 -> 586 bytes
 hadoop-ozone/dist/src/main/keytabs/testuser.keytab |  Bin 606 -> 450 bytes
 .../dist/src/main/keytabs/testuser2.keytab         |  Bin 154 -> 154 bytes
 .../dist/src/main/keytabs/update-keytabs.sh        |    6 -
 hadoop-ozone/dist/src/main/license/bin/LICENSE.txt |    5 -
 hadoop-ozone/dist/src/main/license/jar-report.txt  |    6 -
 .../src/main/smoketest/httpfs/operations.robot     |   49 -
 .../main/smoketest/httpfs/operations_tests.robot   |  164 --
 hadoop-ozone/dist/src/shell/ozone/ozone            |    7 -
 hadoop-ozone/httpfsgateway/README.txt              |   17 -
 .../dev-support/findbugsExcludeFile.xml            |   41 -
 hadoop-ozone/httpfsgateway/pom.xml                 |  333 ----
 .../httpfsgateway/src/main/conf/httpfs-env.sh      |   59 -
 .../src/main/conf/httpfs-log4j.properties          |   35 -
 .../httpfsgateway/src/main/conf/httpfs-site.xml    |   17 -
 .../org/apache/ozone/fs/http/HttpFSConstants.java  |  199 --
 .../org/apache/ozone/fs/http/package-info.java     |   21 -
 .../http/server/CheckUploadContentTypeFilter.java  |  115 --
 .../apache/ozone/fs/http/server/FSOperations.java  | 2105 --------------------
 .../fs/http/server/HttpFSAuthenticationFilter.java |  144 --
 .../fs/http/server/HttpFSExceptionProvider.java    |  113 --
 .../fs/http/server/HttpFSParametersProvider.java   |  724 -------
 .../ozone/fs/http/server/HttpFSReleaseFilter.java  |   43 -
 .../apache/ozone/fs/http/server/HttpFSServer.java  | 1394 -------------
 .../ozone/fs/http/server/HttpFSServerWebApp.java   |  162 --
 .../fs/http/server/HttpFSServerWebServer.java      |  188 --
 .../org/apache/ozone/fs/http/server/JsonUtil.java  |  512 -----
 .../http/server/metrics/HttpFSServerMetrics.java   |  164 --
 .../ozone/fs/http/server/metrics/package-info.java |   26 -
 .../apache/ozone/fs/http/server/package-info.java  |   21 -
 .../apache/ozone/hdfs/web/WebHdfsConstants.java    |   52 -
 .../org/apache/ozone/hdfs/web/package-info.java    |   21 -
 .../apache/ozone/lib/lang/RunnableCallable.java    |   99 -
 .../java/org/apache/ozone/lib/lang/XException.java |  140 --
 .../org/apache/ozone/lib/lang/package-info.java    |   21 -
 .../org/apache/ozone/lib/server/BaseService.java   |  185 --
 .../java/org/apache/ozone/lib/server/Server.java   |  841 --------
 .../apache/ozone/lib/server/ServerException.java   |   98 -
 .../java/org/apache/ozone/lib/server/Service.java  |   84 -
 .../apache/ozone/lib/server/ServiceException.java  |   43 -
 .../org/apache/ozone/lib/server/package-info.java  |   21 -
 .../apache/ozone/lib/service/FileSystemAccess.java |   55 -
 .../lib/service/FileSystemAccessException.java     |   63 -
 .../java/org/apache/ozone/lib/service/Groups.java  |   34 -
 .../apache/ozone/lib/service/Instrumentation.java  |   67 -
 .../org/apache/ozone/lib/service/Scheduler.java    |   42 -
 .../service/hadoop/FileSystemAccessService.java    |  465 -----
 .../ozone/lib/service/hadoop/package-info.java     |   21 -
 .../instrumentation/InstrumentationService.java    |  446 -----
 .../lib/service/instrumentation/package-info.java  |   21 -
 .../org/apache/ozone/lib/service/package-info.java |   21 -
 .../lib/service/scheduler/SchedulerService.java    |  150 --
 .../ozone/lib/service/scheduler/package-info.java  |   21 -
 .../ozone/lib/service/security/GroupsService.java  |   61 -
 .../ozone/lib/service/security/package-info.java   |   21 -
 .../ozone/lib/servlet/FileSystemReleaseFilter.java |  115 --
 .../apache/ozone/lib/servlet/HostnameFilter.java   |  115 --
 .../org/apache/ozone/lib/servlet/MDCFilter.java    |  108 -
 .../org/apache/ozone/lib/servlet/ServerWebApp.java |  245 ---
 .../org/apache/ozone/lib/servlet/package-info.java |   21 -
 .../main/java/org/apache/ozone/lib/util/Check.java |  221 --
 .../apache/ozone/lib/util/ConfigurationUtils.java  |   98 -
 .../org/apache/ozone/lib/util/package-info.java    |   21 -
 .../org/apache/ozone/lib/wsrs/BooleanParam.java    |   50 -
 .../java/org/apache/ozone/lib/wsrs/ByteParam.java  |   42 -
 .../java/org/apache/ozone/lib/wsrs/EnumParam.java  |   50 -
 .../org/apache/ozone/lib/wsrs/EnumSetParam.java    |   77 -
 .../apache/ozone/lib/wsrs/ExceptionProvider.java   |   64 -
 .../apache/ozone/lib/wsrs/InputStreamEntity.java   |   67 -
 .../org/apache/ozone/lib/wsrs/IntegerParam.java    |   42 -
 .../org/apache/ozone/lib/wsrs/JSONMapProvider.java |   82 -
 .../org/apache/ozone/lib/wsrs/JSONProvider.java    |   81 -
 .../java/org/apache/ozone/lib/wsrs/LongParam.java  |   42 -
 .../main/java/org/apache/ozone/lib/wsrs/Param.java |   69 -
 .../java/org/apache/ozone/lib/wsrs/Parameters.java |   82 -
 .../apache/ozone/lib/wsrs/ParametersProvider.java  |  128 --
 .../java/org/apache/ozone/lib/wsrs/ShortParam.java |   49 -
 .../org/apache/ozone/lib/wsrs/StringParam.java     |   73 -
 .../org/apache/ozone/lib/wsrs/package-info.java    |   21 -
 .../main/libexec/shellprofile.d/hadoop-httpfs.sh   |   54 -
 .../src/main/resources/default-log4j.properties    |   20 -
 .../src/main/resources/httpfs-default.xml          |  337 ----
 .../src/main/resources/httpfs.properties           |   21 -
 .../src/main/resources/webapps/static/index.html   |   36 -
 .../main/resources/webapps/webhdfs/WEB-INF/web.xml |   98 -
 hadoop-ozone/httpfsgateway/src/main/sbin/httpfs.sh |   64 -
 .../httpfsgateway/src/main/webapp/WEB-INF/web.xml  |   98 -
 .../httpfsgateway/src/site/configuration.xsl       |   49 -
 .../httpfsgateway/src/site/resources/css/site.css  |   30 -
 hadoop-ozone/httpfsgateway/src/site/site.xml       |   29 -
 hadoop-ozone/pom.xml                               |    6 -
 pom.xml                                            |   12 -
 119 files changed, 7 insertions(+), 12837 deletions(-)

diff --git a/hadoop-hdds/docs/content/design/httpfs.md b/hadoop-hdds/docs/content/design/httpfs.md
deleted file mode 100644
index ad174199aa..0000000000
--- a/hadoop-hdds/docs/content/design/httpfs.md
+++ /dev/null
@@ -1,31 +0,0 @@
----
-title: HttpFS support for Ozone
-summary: HttpFS is a WebHDFS compatible interface that is added as a separate role to Ozone.
-date: 2023-02-03
-jira: HDDS-5447
-status: implemented
-author: Zita Dombi, Istvan Fajth
----
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-
-# Abstract
-
-Ozone HttpFS provides an HttpFS-compatible REST API interface to enable applications
-that are designed to use [HttpFS](https://hadoop.apache.org/docs/stable/hadoop-hdfs-httpfs/index.html)
-to interact and integrate with Ozone.
-
-# Link
-
-https://issues.apache.org/jira/secure/attachment/13031822/HTTPFS%20interface%20for%20Ozone.pdf
diff --git a/hadoop-hdds/docs/content/interface/HttpFS.md b/hadoop-hdds/docs/content/interface/HttpFS.md
deleted file mode 100644
index e413faf03c..0000000000
--- a/hadoop-hdds/docs/content/interface/HttpFS.md
+++ /dev/null
@@ -1,119 +0,0 @@
----
-title: HttpFS Gateway
-weight: 7
-menu:
-    main:
-        parent: "Client Interfaces"
-summary: Ozone HttpFS is a WebHDFS compatible interface implementation; as a separate role, it provides easy integration with Ozone.
----
-
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-Ozone HttpFS can be used to integrate Ozone with other tools via REST API.
-
-## Introduction
-
-Ozone HttpFS is forked from the HDFS HttpFS endpoint implementation ([HDDS-5448](https://issues.apache.org/jira/browse/HDDS-5448)). Ozone HttpFS is intended to be added optionally as a role in an Ozone cluster, similar to [S3 Gateway]({{< ref "design/s3gateway.md" >}}).
-
-HttpFS is a service that provides a REST HTTP gateway supporting File System operations (read and write). It is interoperable with the **webhdfs** REST HTTP API.
-
-HttpFS can be used to access data on an Ozone cluster behind a firewall: the HttpFS service acts as a gateway and is the only system that is allowed to cross the firewall into the cluster.
-
-HttpFS can be used to access data in Ozone with HTTP utilities (such as curl and wget) and with HTTP libraries from languages other than Java (such as Perl).
-
-The **webhdfs** client FileSystem implementation can be used to access HttpFS using the Ozone filesystem command line tool (`ozone fs`) as well as from Java applications using the Hadoop FileSystem Java API.
-
-HttpFS has built-in security supporting Hadoop pseudo authentication, Kerberos SPNEGO, and other pluggable authentication mechanisms. It also provides Hadoop proxy user support.
-
-
-## Getting started
-
-The HttpFS service itself is a Jetty-based web application that uses the Hadoop FileSystem API to talk to the cluster. It is a separate service providing access to Ozone via a REST API, and it should be started in addition to the regular Ozone components.
-
-To try it out, you can start a Docker Compose dev cluster that has an HttpFS gateway.
-
-Extract the release tarball, go to the `compose/ozone` directory and start the cluster:
-
-```bash
-docker-compose up -d --scale datanode=3
-```
-
-The HttpFS gateway should now be running in Docker under the name `ozone_httpfs`.
-HttpFS web-service API calls are HTTP REST calls that map to an Ozone file system operation and can be issued with HTTP clients such as the `curl` Unix command.
-
-For example, in the Docker cluster you can execute commands like these:
-
-* `curl -i -X PUT "http://httpfs:14000/webhdfs/v1/vol1?op=MKDIRS&user.name=hdfs"` creates a volume called `vol1`.
-
-
-* `curl 'http://httpfs-host:14000/webhdfs/v1/user/foo/README.txt?op=OPEN&user.name=foo'` returns the content of the key `/user/foo/README.txt`.
-
-
-## Supported operations
-
-The following tables list the WebHDFS REST APIs and their state of support in Ozone.
-
-### File and Directory Operations
-
-Operation                       |      Support
---------------------------------|---------------------
-Create and Write to a File      | supported
-Append to a File                | not implemented in Ozone
-Concat File(s)                  | not implemented in Ozone
-Open and Read a File            | supported
-Make a Directory                | supported
-Create a Symbolic Link          | not implemented in Ozone
-Rename a File/Directory         | supported (with limitations)
-Delete a File/Directory         | supported
-Truncate a File                 | not implemented in Ozone
-Status of a File/Directory      | supported
-List a Directory                | supported
-List a File                     | supported
-Iteratively List a Directory    | supported
-
-
-### Other File System Operations
-
-Operation                             |      Support
---------------------------------------|---------------------
-Get Content Summary of a Directory    | supported
-Get Quota Usage of a Directory        | supported
-Set Quota                             | not implemented in Ozone FileSystem API
-Set Quota By Storage Type             | not implemented in Ozone
-Get File Checksum                     | unsupported (to be fixed)
-Get Home Directory                    | unsupported (to be fixed)
-Get Trash Root                        | unsupported
-Set Permission                        | not implemented in Ozone FileSystem API
-Set Owner                             | not implemented in Ozone FileSystem API
-Set Replication Factor                | not implemented in Ozone FileSystem API
-Set Access or Modification Time       | not implemented in Ozone FileSystem API
-Modify ACL Entries                    | not implemented in Ozone FileSystem API
-Remove ACL Entries                    | not implemented in Ozone FileSystem API
-Remove Default ACL                    | not implemented in Ozone FileSystem API
-Remove ACL                            | not implemented in Ozone FileSystem API
-Set ACL                               | not implemented in Ozone FileSystem API
-Get ACL Status                        | not implemented in Ozone FileSystem API
-Check access                          | not implemented in Ozone FileSystem API
-
-
-
-## Hadoop user and developer documentation about HttpFS
-
-* [HttpFS Server Setup](https://hadoop.apache.org/docs/stable/hadoop-hdfs-httpfs/ServerSetup.html)
-
-* [Using HTTP Tools](https://hadoop.apache.org/docs/stable/hadoop-hdfs-httpfs/UsingHttpTools.html)
\ No newline at end of file
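The operations documented in the removed page map directly onto WebHDFS query parameters. A minimal sketch against the compose cluster it describes (host `httpfs`, port 14000 and `user.name=hdfs` are taken from the deleted examples; the volume and bucket names are illustrative):

```bash
# Create a volume and a bucket, then inspect them.
curl -i -X PUT "http://httpfs:14000/webhdfs/v1/vol1?op=MKDIRS&user.name=hdfs"
curl -i -X PUT "http://httpfs:14000/webhdfs/v1/vol1/buck1?op=MKDIRS&user.name=hdfs"
curl "http://httpfs:14000/webhdfs/v1/vol1/buck1?op=GETFILESTATUS&user.name=hdfs"
curl "http://httpfs:14000/webhdfs/v1/vol1?op=LISTSTATUS&user.name=hdfs"
```

Each call returns a JSON document such as `{"boolean":true}` or a `FileStatus` object, which is what the smoketests further below assert on.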
diff --git a/hadoop-hdds/docs/content/tools/_index.md b/hadoop-hdds/docs/content/tools/_index.md
index c44be17eff..ca5d5140dd 100644
--- a/hadoop-hdds/docs/content/tools/_index.md
+++ b/hadoop-hdds/docs/content/tools/_index.md
@@ -37,7 +37,6 @@ Daemon commands:
    stopped.
    * **s3g** - Start the S3 compatible REST gateway
    * **recon** - The Web UI service of Ozone can be started with this command.
-   * **httpfs** - Start the HttpFS gateway
    
 Client commands:
 
diff --git a/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching b/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching
index 2ad8c7419a..857147d248 100755
--- a/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching
+++ b/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching
@@ -76,8 +76,6 @@ run mkdir -p ./bin
 run mkdir -p ./sbin
 run mkdir -p ./etc
 run mkdir -p ./libexec
-run mkdir -p ./log
-run mkdir -p ./temp
 run mkdir -p ./tests
 
 run cp -r "${ROOT}/hadoop-hdds/common/src/main/conf/" "etc/hadoop"
diff --git a/hadoop-ozone/dist/pom.xml b/hadoop-ozone/dist/pom.xml
index 40a55033f9..659c39c3d1 100644
--- a/hadoop-ozone/dist/pom.xml
+++ b/hadoop-ozone/dist/pom.xml
@@ -28,8 +28,8 @@
   <properties>
     <file.encoding>UTF-8</file.encoding>
     <downloadSources>true</downloadSources>
-    <docker.ozone-runner.version>20230104-1</docker.ozone-runner.version>
-    <docker.ozone-testkr5b.image>apache/ozone-testkrb5:20211102-1</docker.ozone-testkr5b.image>
+    <docker.ozone-runner.version>20220623-1</docker.ozone-runner.version>
+    <docker.ozone-testkr5b.image>apache/ozone-testkrb5:20210419-1</docker.ozone-testkr5b.image>
   </properties>
 
   <build>
@@ -74,8 +74,8 @@
               </outputDirectory>
               <includes>*.classpath</includes>
               <includeArtifactIds>
-                hdds-server-scm,ozone-common,ozone-csi,ozone-datanode,ozone-httpfsgateway,
-                ozone-insight,ozone-manager,ozone-recon,ozone-s3gateway,ozone-tools
+                hdds-server-scm,ozone-common,ozone-csi,ozone-datanode,ozone-insight,
+                ozone-manager,ozone-recon,ozone-s3gateway,ozone-tools
               </includeArtifactIds>
             </configuration>
           </execution>
@@ -223,10 +223,6 @@
       <groupId>org.apache.ozone</groupId>
       <artifactId>ozone-insight</artifactId>
     </dependency>
-    <dependency>
-      <groupId>org.apache.ozone</groupId>
-      <artifactId>ozone-httpfsgateway</artifactId>
-    </dependency>
   </dependencies>
   <profiles>
     <profile>
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-compose.yaml
index 1e1d48d133..7332d7c7f3 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-compose.yaml
+++ b/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-compose.yaml
@@ -100,14 +100,6 @@ services:
       OZONE-SITE.XML_hdds.scm.safemode.min.datanode: ${OZONE_SAFEMODE_MIN_DATANODES:-1}
       <<: *replication
     command: ["ozone","scm"]
-  httpfs:
-    <<: *common-config
-    environment:
-      OZONE-SITE.XML_hdds.scm.safemode.min.datanode: ${OZONE_SAFEMODE_MIN_DATANODES:-1}
-      <<: *replication
-    ports:
-      - 14000:14000
-    command: [ "ozone","httpfs" ]
   s3g:
     <<: *common-config
     environment:
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config
index 6fb4003b6b..c22505ff9a 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config
@@ -14,10 +14,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# For HttpFS service it is required to enable proxying users.
-CORE-SITE.XML_hadoop.proxyuser.hadoop.hosts=*
-CORE-SITE.XML_hadoop.proxyuser.hadoop.groups=*
-
 CORE-SITE.XML_fs.defaultFS=ofs://omservice/
 
 OZONE-SITE.XML_ozone.om.service.ids=omservice
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh b/hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh
index 867c6923f5..3a21ef475d 100755
--- a/hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh
+++ b/hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh
@@ -34,7 +34,6 @@ execute_robot_test ${SCM} basic/ozone-shell-single.robot
 execute_robot_test ${SCM} basic/links.robot
 execute_robot_test ${SCM} s3
 execute_robot_test ${SCM} freon
-execute_robot_test ${SCM} -v USERNAME:httpfs httpfs
 
 stop_docker_env
 
diff --git a/hadoop-ozone/dist/src/main/compose/ozone/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone/docker-compose.yaml
index 15dea75d83..72303abaf6 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone/docker-compose.yaml
+++ b/hadoop-ozone/dist/src/main/compose/ozone/docker-compose.yaml
@@ -60,14 +60,6 @@ services:
       OZONE_OPTS:
       <<: *replication
     command: ["ozone","scm"]
-  httpfs:
-    <<: *common-config
-    environment:
-      OZONE-SITE.XML_hdds.scm.safemode.min.datanode: ${OZONE_SAFEMODE_MIN_DATANODES:-1}
-      <<: *replication
-    ports:
-      - 14000:14000
-    command: [ "ozone","httpfs" ]
   s3g:
     <<: *common-config
     environment:
diff --git a/hadoop-ozone/dist/src/main/compose/ozone/docker-config b/hadoop-ozone/dist/src/main/compose/ozone/docker-config
index ecd29a76f9..195bad6872 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozone/docker-config
@@ -16,9 +16,6 @@
 
 CORE-SITE.XML_fs.defaultFS=ofs://om
 CORE-SITE.XML_fs.trash.interval=1
-# For HttpFS service it is required to enable proxying users.
-CORE-SITE.XML_hadoop.proxyuser.hadoop.hosts=*
-CORE-SITE.XML_hadoop.proxyuser.hadoop.groups=*
 
 OZONE-SITE.XML_ozone.om.address=om
 OZONE-SITE.XML_ozone.om.http-address=om:9874
diff --git a/hadoop-ozone/dist/src/main/compose/ozone/test.sh b/hadoop-ozone/dist/src/main/compose/ozone/test.sh
index b61c6277c9..5234c36423 100755
--- a/hadoop-ozone/dist/src/main/compose/ozone/test.sh
+++ b/hadoop-ozone/dist/src/main/compose/ozone/test.sh
@@ -55,7 +55,6 @@ execute_robot_test scm freon
 execute_robot_test scm cli
 execute_robot_test scm admincli
 
-execute_robot_test scm -v USERNAME:httpfs httpfs
 execute_debug_tests
 
 execute_robot_test scm -v SCHEME:ofs -v BUCKET_TYPE:link -N ozonefs-ofs-link ozonefs/ozonefs.robot
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-compose.yaml
index 03c010e9a2..914156d923 100644
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-compose.yaml
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-compose.yaml
@@ -183,24 +183,6 @@ services:
     networks:
       ozone_net:
         ipv4_address: 172.25.0.113
-  httpfs:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-    hostname: httpfs
-    volumes:
-      - ../..:/opt/hadoop
-      - ../_keytabs:/etc/security/keytabs
-      - ./krb5.conf:/etc/krb5.conf
-    ports:
-      - 14000:14000
-    env_file:
-      - ./docker-config
-    command: [ "/opt/hadoop/bin/ozone","httpfs" ]
-    environment:
-      OZONE-SITE.XML_hdds.scm.safemode.min.datanode: ${OZONE_SAFEMODE_MIN_DATANODES:-1}
-      OZONE_OPTS:
-    networks:
-      ozone_net:
-        ipv4_address: 172.25.0.119
   s3g:
     image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
     hostname: s3g
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config
index ed82642df8..c9577874aa 100644
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config
@@ -14,10 +14,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# For HttpFS service it is required to enable proxying users.
-CORE-SITE.XML_hadoop.proxyuser.httpfs.hosts=*
-CORE-SITE.XML_hadoop.proxyuser.httpfs.groups=*
-
 CORE-SITE.XML_fs.defaultFS=ofs://id1
 
 OZONE-SITE.XML_ozone.om.service.ids=id1
@@ -65,7 +61,7 @@ OZONE-SITE.XML_ozone.recon.address=recon:9891
 OZONE-SITE.XML_ozone.security.enabled=true
 OZONE-SITE.XML_ozone.acl.enabled=true
 OZONE-SITE.XML_ozone.acl.authorizer.class=org.apache.hadoop.ozone.security.acl.OzoneNativeAuthorizer
-OZONE-SITE.XML_ozone.administrators="testuser/scm@EXAMPLE.COM,testuser/s3g@EXAMPLE.COM,testuser/httpfs@EXAMPLE.COM,recon/recon@EXAMPLE.COM,om/om1@EXAMPLE.COM,om/om2@EXAMPLE.COM,om/om3@EXAMPLE.COM"
+OZONE-SITE.XML_ozone.administrators="testuser/scm@EXAMPLE.COM,testuser/s3g@EXAMPLE.COM,recon/recon@EXAMPLE.COM,om/om1@EXAMPLE.COM,om/om2@EXAMPLE.COM,om/om3@EXAMPLE.COM"
 
 OZONE-SITE.XML_hdds.datanode.dir=/data/hdds
 HDFS-SITE.XML_dfs.datanode.address=0.0.0.0:1019
@@ -86,9 +82,6 @@ OZONE-SITE.XML_ozone.recon.kerberos.principal=recon/recon@EXAMPLE.COM
 OZONE-SITE.XML_ozone.s3g.kerberos.keytab.file=/etc/security/keytabs/s3g.keytab
 OZONE-SITE.XML_ozone.s3g.kerberos.principal=s3g/s3g@EXAMPLE.COM
 
-OZONE-SITE.XML_ozone.httpfs.kerberos.keytab.file=/etc/security/keytabs/httpfs.keytab
-OZONE-SITE.XML_ozone.httpfs.kerberos.principal=httpfs/httpfs@EXAMPLE.COM
-
 HDFS-SITE.XML_dfs.datanode.kerberos.principal=dn/dn@EXAMPLE.COM
 HDFS-SITE.XML_dfs.datanode.kerberos.keytab.file=/etc/security/keytabs/dn.keytab
 HDFS-SITE.XML_dfs.web.authentication.kerberos.principal=HTTP/ozone@EXAMPLE.COM
@@ -102,7 +95,6 @@ OZONE-SITE.XML_ozone.om.http.auth.type=kerberos
 OZONE-SITE.XML_hdds.scm.http.auth.type=kerberos
 OZONE-SITE.XML_hdds.datanode.http.auth.type=kerberos
 OZONE-SITE.XML_ozone.s3g.http.auth.type=kerberos
-OZONE-SITE.XML_ozone.httpfs.http.auth.type=kerberos
 OZONE-SITE.XML_ozone.recon.http.auth.type=kerberos
 
 OZONE-SITE.XML_hdds.scm.http.auth.kerberos.principal=HTTP/scm@EXAMPLE.COM
@@ -113,8 +105,6 @@ OZONE-SITE.XML_hdds.datanode.http.auth.kerberos.principal=HTTP/db@EXAMPLE.COM
 OZONE-SITE.XML_hdds.datanode.http.auth.kerberos.keytab=/etc/security/keytabs/HTTP.keytab
 OZONE-SITE.XML_ozone.s3g.http.auth.kerberos.keytab=/etc/security/keytabs/HTTP.keytab
 OZONE-SITE.XML_ozone.s3g.http.auth.kerberos.principal=HTTP/s3g@EXAMPLE.COM
-OZONE-SITE.XML_ozone.httpfs.http.auth.kerberos.keytab=/etc/security/keytabs/httpfs.keytab
-OZONE-SITE.XML_ozone.httpfs.http.auth.kerberos.principal=HTTP/httpfs@EXAMPLE.COM
 OZONE-SITE.XML_ozone.recon.http.auth.kerberos.principal=HTTP/recon@EXAMPLE.COM
 OZONE-SITE.XML_ozone.recon.http.auth.kerberos.keytab=/etc/security/keytabs/recon.keytab
 OZONE-SITE.XML_ozone.recon.http.auth.kerberos.keytab=/etc/security/keytabs/recon.keytab
@@ -136,13 +126,6 @@ HADOOP-POLICY.XML_hdds.security.client.scm.certificate.protocol.acl=*
 HDFS-SITE.XML_rpc.metrics.quantile.enable=true
 HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
 
-HTTPFS-SITE.XML_hadoop.http.authentication.type=kerberos
-HTTPFS-SITE.XML_hadoop.http.authentication.kerberos.keytab=/etc/security/keytabs/httpfs.keytab
-HTTPFS-SITE.XML_hadoop.http.authentication.kerberos.principal=HTTP/httpfs@EXAMPLE.COM
-HTTPFS-SITE.XML_httpfs.hadoop.authentication.type=kerberos
-HTTPFS-SITE.XML_httpfs.hadoop.authentication.kerberos.keytab=/etc/security/keytabs/httpfs.keytab
-HTTPFS-SITE.XML_httpfs.hadoop.authentication.kerberos.principal=httpfs/httpfs@EXAMPLE.COM
-
 #Enable this variable to print out all hadoop rpc traffic to the stdout. See http://byteman.jboss.org/ to define your own instrumentation.
 #BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm
 
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test.sh
index dcd16a802b..252f953163 100755
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test.sh
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test.sh
@@ -39,8 +39,6 @@ execute_robot_test ${SCM} s3
 
 execute_robot_test ${SCM} admincli
 
-execute_robot_test ${SCM} httpfs
-
 export SCM=scm2.org
 execute_robot_test ${SCM} admincli
 stop_docker_env
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml
index 4e32e9339f..5e3c0c4be3 100644
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml
@@ -63,21 +63,7 @@ services:
     env_file:
       - docker-config
     command: ["/opt/hadoop/bin/ozone","om"]
-  httpfs:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
-    hostname: httpfs
-    volumes:
-      - ../..:/opt/hadoop
-      - ../_keytabs:/etc/security/keytabs
-      - ./krb5.conf:/etc/krb5.conf
-    ports:
-      - 14000:14000
-    env_file:
-      - ./docker-config
-    command: [ "/opt/hadoop/bin/ozone","httpfs" ]
-    environment:
-      OZONE-SITE.XML_hdds.scm.safemode.min.datanode: ${OZONE_SAFEMODE_MIN_DATANODES:-1}
-      OZONE_OPTS:
+
   s3g:
     image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
     hostname: s3g
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config
index 0e8f746f6d..c9df04e922 100644
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config
@@ -16,9 +16,6 @@
 
 CORE-SITE.XML_fs.defaultFS=ofs://om
 CORE-SITE.XML_fs.trash.interval=1
-# For HttpFS service it is required to enable proxying users.
-CORE-SITE.XML_hadoop.proxyuser.httpfs.hosts=*
-CORE-SITE.XML_hadoop.proxyuser.httpfs.groups=*
 
 OZONE-SITE.XML_ozone.om.address=om
 OZONE-SITE.XML_ozone.om.http-address=om:9874
@@ -53,7 +50,7 @@ OZONE-SITE.XML_ozone.recon.address=recon:9891
 OZONE-SITE.XML_ozone.security.enabled=true
 OZONE-SITE.XML_ozone.acl.enabled=true
 OZONE-SITE.XML_ozone.acl.authorizer.class=org.apache.hadoop.ozone.security.acl.OzoneNativeAuthorizer
-OZONE-SITE.XML_ozone.administrators="testuser/scm@EXAMPLE.COM,testuser/s3g@EXAMPLE.COM,testuser/httpfs@EXAMPLE.COM,recon/recon@EXAMPLE.COM"
+OZONE-SITE.XML_ozone.administrators="testuser/scm@EXAMPLE.COM,testuser/s3g@EXAMPLE.COM,recon/recon@EXAMPLE.COM"
 OZONE-SITE.XML_ozone.recon.administrators="testuser2/scm@EXAMPLE.COM"
 
 OZONE-SITE.XML_hdds.datanode.dir=/data/hdds
@@ -75,9 +72,6 @@ OZONE-SITE.XML_ozone.recon.kerberos.principal=recon/recon@EXAMPLE.COM
 OZONE-SITE.XML_ozone.s3g.kerberos.keytab.file=/etc/security/keytabs/s3g.keytab
 OZONE-SITE.XML_ozone.s3g.kerberos.principal=s3g/s3g@EXAMPLE.COM
 
-OZONE-SITE.XML_ozone.httpfs.kerberos.keytab.file=/etc/security/keytabs/httpfs.keytab
-OZONE-SITE.XML_ozone.httpfs.kerberos.principal=httpfs/httpfs@EXAMPLE.COM
-
 OZONE-SITE.XML_hdds.scm.replication.thread.interval=5s
 OZONE-SITE.XML_hdds.scm.replication.event.timeout=10s
 OZONE-SITE.XML_hdds.scm.replication.push=true
@@ -101,7 +95,6 @@ OZONE-SITE.XML_ozone.om.http.auth.type=kerberos
 OZONE-SITE.XML_hdds.scm.http.auth.type=kerberos
 OZONE-SITE.XML_hdds.datanode.http.auth.type=kerberos
 OZONE-SITE.XML_ozone.s3g.http.auth.type=kerberos
-OZONE-SITE.XML_ozone.httpfs.http.auth.type=kerberos
 OZONE-SITE.XML_ozone.recon.http.auth.type=kerberos
 
 OZONE-SITE.XML_hdds.scm.http.auth.kerberos.principal=HTTP/scm@EXAMPLE.COM
@@ -112,8 +105,6 @@ OZONE-SITE.XML_hdds.datanode.http.auth.kerberos.principal=HTTP/dn@EXAMPLE.COM
 OZONE-SITE.XML_hdds.datanode.http.auth.kerberos.keytab=/etc/security/keytabs/dn.keytab
 OZONE-SITE.XML_ozone.s3g.http.auth.kerberos.keytab=/etc/security/keytabs/s3g.keytab
 OZONE-SITE.XML_ozone.s3g.http.auth.kerberos.principal=HTTP/s3g@EXAMPLE.COM
-OZONE-SITE.XML_ozone.httpfs.http.auth.kerberos.keytab=/etc/security/keytabs/httpfs.keytab
-OZONE-SITE.XML_ozone.httpfs.http.auth.kerberos.principal=HTTP/httpfs@EXAMPLE.COM
 OZONE-SITE.XML_ozone.recon.http.auth.kerberos.principal=HTTP/recon@EXAMPLE.COM
 OZONE-SITE.XML_ozone.recon.http.auth.kerberos.keytab=/etc/security/keytabs/recon.keytab
 
@@ -134,12 +125,6 @@ HADOOP-POLICY.XML_hdds.security.client.scm.certificate.protocol.acl=*
 HDFS-SITE.XML_rpc.metrics.quantile.enable=true
 HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
 
-HTTPFS-SITE.XML_hadoop.http.authentication.type=kerberos
-HTTPFS-SITE.XML_hadoop.http.authentication.kerberos.keytab=/etc/security/keytabs/httpfs.keytab
-HTTPFS-SITE.XML_hadoop.http.authentication.kerberos.principal=HTTP/httpfs@EXAMPLE.COM
-HTTPFS-SITE.XML_httpfs.hadoop.authentication.type=kerberos
-HTTPFS-SITE.XML_httpfs.hadoop.authentication.kerberos.keytab=/etc/security/keytabs/httpfs.keytab
-HTTPFS-SITE.XML_httpfs.hadoop.authentication.kerberos.principal=httpfs/httpfs@EXAMPLE.COM
 KMS-SITE.XML_hadoop.kms.proxyuser.s3g.users=*
 KMS-SITE.XML_hadoop.kms.proxyuser.s3g.groups=*
 KMS-SITE.XML_hadoop.kms.proxyuser.s3g.hosts=*
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh
index 7e579fbd2f..44da97484b 100755
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh
@@ -54,8 +54,6 @@ execute_robot_test scm recon
 execute_robot_test scm admincli
 execute_robot_test scm spnego
 
-execute_robot_test scm httpfs
-
 # test replication
 docker-compose up -d --scale datanode=2
 execute_robot_test scm -v container:1 -v count:2 replication/wait.robot
diff --git a/hadoop-ozone/dist/src/main/keytabs/HTTP.keytab b/hadoop-ozone/dist/src/main/keytabs/HTTP.keytab
index dec34bdad7..9e7a369199 100755
Binary files a/hadoop-ozone/dist/src/main/keytabs/HTTP.keytab and b/hadoop-ozone/dist/src/main/keytabs/HTTP.keytab differ
diff --git a/hadoop-ozone/dist/src/main/keytabs/dn.keytab b/hadoop-ozone/dist/src/main/keytabs/dn.keytab
index 657da108d0..ee982cb52c 100755
Binary files a/hadoop-ozone/dist/src/main/keytabs/dn.keytab and b/hadoop-ozone/dist/src/main/keytabs/dn.keytab differ
diff --git a/hadoop-ozone/dist/src/main/keytabs/hadoop.keytab b/hadoop-ozone/dist/src/main/keytabs/hadoop.keytab
index 40be760098..68f52383f8 100755
Binary files a/hadoop-ozone/dist/src/main/keytabs/hadoop.keytab and b/hadoop-ozone/dist/src/main/keytabs/hadoop.keytab differ
diff --git a/hadoop-ozone/dist/src/main/keytabs/httpfs.keytab b/hadoop-ozone/dist/src/main/keytabs/httpfs.keytab
deleted file mode 100755
index cfbbe68635..0000000000
Binary files a/hadoop-ozone/dist/src/main/keytabs/httpfs.keytab and /dev/null differ
diff --git a/hadoop-ozone/dist/src/main/keytabs/jhs.keytab b/hadoop-ozone/dist/src/main/keytabs/jhs.keytab
index e570dc6de8..51eeebff80 100755
Binary files a/hadoop-ozone/dist/src/main/keytabs/jhs.keytab and b/hadoop-ozone/dist/src/main/keytabs/jhs.keytab differ
diff --git a/hadoop-ozone/dist/src/main/keytabs/nm.keytab b/hadoop-ozone/dist/src/main/keytabs/nm.keytab
index b582fe95ec..5cfcf9a7ed 100755
Binary files a/hadoop-ozone/dist/src/main/keytabs/nm.keytab and b/hadoop-ozone/dist/src/main/keytabs/nm.keytab differ
diff --git a/hadoop-ozone/dist/src/main/keytabs/om.keytab b/hadoop-ozone/dist/src/main/keytabs/om.keytab
index adffa63c36..ec571b309f 100755
Binary files a/hadoop-ozone/dist/src/main/keytabs/om.keytab and b/hadoop-ozone/dist/src/main/keytabs/om.keytab differ
diff --git a/hadoop-ozone/dist/src/main/keytabs/recon.keytab b/hadoop-ozone/dist/src/main/keytabs/recon.keytab
index d994028eea..653e0751f3 100755
Binary files a/hadoop-ozone/dist/src/main/keytabs/recon.keytab and b/hadoop-ozone/dist/src/main/keytabs/recon.keytab differ
diff --git a/hadoop-ozone/dist/src/main/keytabs/rm.keytab b/hadoop-ozone/dist/src/main/keytabs/rm.keytab
index 956094499a..d9a55f0c2e 100755
Binary files a/hadoop-ozone/dist/src/main/keytabs/rm.keytab and b/hadoop-ozone/dist/src/main/keytabs/rm.keytab differ
diff --git a/hadoop-ozone/dist/src/main/keytabs/s3g.keytab b/hadoop-ozone/dist/src/main/keytabs/s3g.keytab
index a89b337e97..be6684639d 100755
Binary files a/hadoop-ozone/dist/src/main/keytabs/s3g.keytab and b/hadoop-ozone/dist/src/main/keytabs/s3g.keytab differ
diff --git a/hadoop-ozone/dist/src/main/keytabs/scm.keytab b/hadoop-ozone/dist/src/main/keytabs/scm.keytab
index eb7d81f39f..dbd316694a 100755
Binary files a/hadoop-ozone/dist/src/main/keytabs/scm.keytab and b/hadoop-ozone/dist/src/main/keytabs/scm.keytab differ
diff --git a/hadoop-ozone/dist/src/main/keytabs/testuser.keytab b/hadoop-ozone/dist/src/main/keytabs/testuser.keytab
index c9aa8b84b9..c9c455056b 100755
Binary files a/hadoop-ozone/dist/src/main/keytabs/testuser.keytab and b/hadoop-ozone/dist/src/main/keytabs/testuser.keytab differ
diff --git a/hadoop-ozone/dist/src/main/keytabs/testuser2.keytab b/hadoop-ozone/dist/src/main/keytabs/testuser2.keytab
index 91cfb3e271..57c77ca876 100755
Binary files a/hadoop-ozone/dist/src/main/keytabs/testuser2.keytab and b/hadoop-ozone/dist/src/main/keytabs/testuser2.keytab differ
diff --git a/hadoop-ozone/dist/src/main/keytabs/update-keytabs.sh b/hadoop-ozone/dist/src/main/keytabs/update-keytabs.sh
index b55923ec64..be60da64b9 100755
--- a/hadoop-ozone/dist/src/main/keytabs/update-keytabs.sh
+++ b/hadoop-ozone/dist/src/main/keytabs/update-keytabs.sh
@@ -36,7 +36,6 @@ if [ "$1" == "internal" ]; then
 
    export_keytab testuser/scm testuser
    export_keytab testuser/s3g testuser
-   export_keytab testuser/httpfs testuser
    export_keytab testuser/om testuser
 
    export_keytab testuser2/scm testuser2
@@ -48,10 +47,6 @@ if [ "$1" == "internal" ]; then
    export_keytab HTTP/s3g s3g
    export_keytab testuser/s3g s3g
 
-   export_keytab httpfs/httpfs httpfs
-   export_keytab HTTP/httpfs httpfs
-   export_keytab testuser/httpfs httpfs
-
    export_keytab recon/recon recon
    export_keytab HTTP/recon recon
 
@@ -60,7 +55,6 @@ if [ "$1" == "internal" ]; then
 
    export_keytab HTTP/scm HTTP
    export_keytab HTTP/s3g HTTP
-   export_keytab HTTP/httpfs HTTP
    export_keytab HTTP/ozone HTTP
 
    export_keytab hadoop/rm hadoop
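The `export_keytab` helper is defined outside this hunk; with the MIT KDC used by the ozone-testkrb5 image it presumably wraps something like the following (an assumption about the helper, which this diff does not show):

```bash
# Hypothetical expansion of "export_keytab httpfs/httpfs httpfs":
# add the httpfs/httpfs@EXAMPLE.COM key to httpfs.keytab without
# regenerating the principal's keys (-norandkey).
kadmin.local -q "ktadd -norandkey -k httpfs.keytab httpfs/httpfs@EXAMPLE.COM"
```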
diff --git a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt b/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt
index bc0277fa3c..98bef1a7dc 100644
--- a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt
+++ b/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt
@@ -296,7 +296,6 @@ Apache License 2.0
    com.google.inject.extensions:guice-servlet
    com.google.inject:guice
    com.google.j2objc:j2objc-annotations
-   com.googlecode.json-simple:json-simple
    com.jolbox:bonecp
    com.lmax:disruptor
    com.nimbusds:nimbus-jose-jwt
@@ -367,8 +366,6 @@ Apache License 2.0
    org.apache.commons:commons-lang3
    org.apache.commons:commons-pool2
    org.apache.commons:commons-text
-   org.apache.curator:curator-client
-   org.apache.curator:curator-framework
    org.apache.derby:derby
    org.apache.hadoop:hadoop-annotations
    org.apache.hadoop:hadoop-auth
@@ -414,8 +411,6 @@ Apache License 2.0
    org.apache.ratis:ratis-thirdparty-misc
    org.apache.ratis:ratis-tools
    org.apache.thrift:libthrift
-   org.apache.zookeeper:zookeeper
-   org.apache.zookeeper:zookeeper-jute
    org.codehaus.jackson:jackson-core-asl
    org.codehaus.jackson:jackson-jaxrs
    org.codehaus.jackson:jackson-mapper-asl
diff --git a/hadoop-ozone/dist/src/main/license/jar-report.txt b/hadoop-ozone/dist/src/main/license/jar-report.txt
index f83427959e..cebdb1579d 100644
--- a/hadoop-ozone/dist/src/main/license/jar-report.txt
+++ b/hadoop-ozone/dist/src/main/license/jar-report.txt
@@ -33,8 +33,6 @@ share/ozone/lib/commons-net.jar
 share/ozone/lib/commons-pool2.jar
 share/ozone/lib/commons-text.jar
 share/ozone/lib/commons-validator.jar
-share/ozone/lib/curator-client.jar
-share/ozone/lib/curator-framework.jar
 share/ozone/lib/derby.jar
 share/ozone/lib/disruptor.jar
 share/ozone/lib/dnsjava.jar
@@ -151,7 +149,6 @@ share/ozone/lib/jooq-codegen.jar
 share/ozone/lib/jooq.jar
 share/ozone/lib/jooq-meta.jar
 share/ozone/lib/jsch.jar
-share/ozone/lib/json-simple.jar
 share/ozone/lib/json-smart.jar
 share/ozone/lib/jsp-api.jar
 share/ozone/lib/jsr305.jar
@@ -205,7 +202,6 @@ share/ozone/lib/ozone-filesystem-common.jar
 share/ozone/lib/ozone-filesystem-hadoop2.jar
 share/ozone/lib/ozone-filesystem-hadoop3.jar
 share/ozone/lib/ozone-filesystem.jar
-share/ozone/lib/ozone-httpfsgateway.jar
 share/ozone/lib/ozone-filesystem-hadoop3-client.jar
 share/ozone/lib/ozone-insight.jar
 share/ozone/lib/ozone-interface-client.jar
@@ -259,6 +255,4 @@ share/ozone/lib/stax-ex.jar
 share/ozone/lib/txw2.jar
 share/ozone/lib/weld-servlet-shaded.Final.jar
 share/ozone/lib/woodstox-core.jar
-share/ozone/lib/zookeeper.jar
-share/ozone/lib/zookeeper-jute.jar
 share/ozone/lib/zstd-jni.jar
diff --git a/hadoop-ozone/dist/src/main/smoketest/httpfs/operations.robot b/hadoop-ozone/dist/src/main/smoketest/httpfs/operations.robot
deleted file mode 100644
index ac9b56f2ff..0000000000
--- a/hadoop-ozone/dist/src/main/smoketest/httpfs/operations.robot
+++ /dev/null
@@ -1,49 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-*** Settings ***
-Library             Process
-Library             BuiltIn
-Library             String
-
-*** Variables ***
-${URL}                  http://httpfs:14000/webhdfs/v1/
-
-*** Keywords ***
-Execute curl command
-    [Arguments]       ${path}           ${operation}    ${extra_commands}
-    ${user.name} =    Set Variable If   '${SECURITY_ENABLED}'=='false'   &user.name=${USERNAME}      ${EMPTY}
-    ${final_url} =    Catenate          SEPARATOR=      ${URL}  ${path}  ?op=  ${operation}     ${user.name}
-    ${curl_extra_commands} =            Set Variable If     '${SECURITY_ENABLED}'=='true'       --negotiate -u :    ${EMPTY}
-    ${output}         Run process       curl ${extra_commands} ${curl_extra_commands} "${final_url}"    shell=True
-    Should Be Equal As Integers         ${output.rc}    0
-    [return]          ${output}
-
-Execute create file command
-    [Arguments]       ${path}           ${file_name}
-    ${user.name} =    Set Variable If   '${SECURITY_ENABLED}'=='false'   &user.name=${USERNAME}      ${EMPTY}
-    ${curl_extra_commands} =            Set Variable If     '${SECURITY_ENABLED}'=='true'       --negotiate -u :    ${EMPTY}
-    ${final_url} =    Catenate          SEPARATOR=      ${URL}  ${path}  ?op=CREATE     ${user.name}
-    ${output}         Run process       curl -X PUT ${curl_extra_commands} "${final_url}"   shell=True
-    Should Be Equal As Integers         ${output.rc}    0
-    ${final_url2} =   Catenate          SEPARATOR=      ${URL}  ${path}  ?op=CREATE&data=true       ${user.name}
-    ${output2}        Run process       curl -X PUT -T ${file_name} ${curl_extra_commands} "${final_url2}" -H"Content-Type: application/octet-stream"   shell=True
-    Should Be Equal As Integers         ${output2.rc}    0
-    [return]          ${output2}
-
-Create file
-    [Arguments]     ${file_name}
-    Run process     touch ${file_name}  shell=True
-    Run process     echo "Hello world!">${file_name}     shell=True
\ No newline at end of file
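Outside Robot Framework, the `Execute create file command` keyword above boils down to the standard two-step WebHDFS upload. The same calls as a bash sketch (unsecured case; `USERNAME:httpfs` comes from the test.sh invocations above, path names are illustrative):

```bash
URL="http://httpfs:14000/webhdfs/v1"
USERNAME=httpfs
echo 'Hello world!' > testfile
# Step 1: announce the create; step 2 (data=true) sends the actual bytes.
curl -X PUT "${URL}/vol1/buck1/testfile?op=CREATE&user.name=${USERNAME}"
curl -X PUT -T testfile -H "Content-Type: application/octet-stream" \
     "${URL}/vol1/buck1/testfile?op=CREATE&data=true&user.name=${USERNAME}"
```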
diff --git a/hadoop-ozone/dist/src/main/smoketest/httpfs/operations_tests.robot b/hadoop-ozone/dist/src/main/smoketest/httpfs/operations_tests.robot
deleted file mode 100644
index a4b19c89fa..0000000000
--- a/hadoop-ozone/dist/src/main/smoketest/httpfs/operations_tests.robot
+++ /dev/null
@@ -1,164 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-*** Settings ***
-Documentation       HttpFS gateway test with curl commands
-Library             Process
-Library             String
-Library             BuiltIn
-Resource            operations.robot
-Resource            ../lib/os.robot
-Resource            ../commonlib.robot
-Suite Setup         Generate volume
-
-*** Variables ***
-${volume}                      generated
-
-*** Keywords ***
-Generate volume
-   ${random} =         Generate Random String  5  [LOWER]
-   Set Suite Variable  ${volume}  ${random}
-
-Kinit admin
-    Wait Until Keyword Succeeds      2min       10sec      Execute      kinit -k om/om@EXAMPLE.COM -t /etc/security/keytabs/om.keytab
-
-*** Test Cases ***
-Kinit admin user
-    Pass Execution If       '${SECURITY_ENABLED}'=='false'       This is for secured environment
-    Kinit admin
-
-Create volume
-    ${vol} =     Execute curl command    ${volume}    MKDIRS      -X PUT
-    Should contain  ${vol.stdout}   true
-
-Set owner of volume
-    Pass Execution If       '${SECURITY_ENABLED}'=='false'       This is for secured environment
-    ${rc} =                             Run And Return Rc       ozone sh volume update --user=testuser /${volume}
-    Should Be Equal As Integers         ${rc}       0
-
-Kinit testuser
-    Pass Execution If       '${SECURITY_ENABLED}'=='false'       This is for secured environment
-    Kinit test user     testuser     testuser.keytab
-
-Create first bucket
-    ${bucket} =     Execute curl command    ${volume}/buck1          MKDIRS      -X PUT
-    Should contain  ${bucket.stdout}   true
-
-Create second bucket
-    ${bucket} =     Execute curl command    ${volume}/buck2          MKDIRS      -X PUT
-    Should contain  ${bucket.stdout}   true
-
-Create local testfile
-    Create file       testfile
-
-Create testfile
-    ${file} =       Execute create file command     ${volume}/buck1/testfile     testfile
-    Should contain     ${file.stdout}     http://httpfs:14000/webhdfs/v1/${volume}/buck1/testfile
-
-Read file
-    ${file} =       Execute curl command    ${volume}/buck1/testfile     OPEN    -L
-    Should contain     ${file.stdout}     Hello world!
-
-# Missing functionality, not working properly yet.
-# List directory iteratively
-    # ${list} =       Execute curl command    vol1          LISTSTATUS_BATCH&startAfter=buck1      ${EMPTY}
-    # Should contain  ${list.stdout}     DirectoryListing    buck2
-    # Should not contain          ${list.stdout}             buck1
-
-Delete bucket
-    ${bucket} =     Execute curl command    ${volume}/buck2          DELETE      -X DELETE
-    Should contain  ${bucket.stdout}   true
-
-Get status of bucket
-    ${status} =     Execute curl command    ${volume}/buck1          GETFILESTATUS      ${EMPTY}
-    Should contain  ${status.stdout}   FileStatus  DIRECTORY
-
-Get status of file
-    ${status} =     Execute curl command    ${volume}/buck1/testfile          GETFILESTATUS      ${EMPTY}
-    Should contain  ${status.stdout}   FileStatus  FILE    13
-
-List bucket
-    ${list} =       Execute curl command    ${volume}/buck1          LISTSTATUS      ${EMPTY}
-    Should contain  ${list.stdout}     FileStatus  testfile    FILE    13
-
-List file
-    ${list} =       Execute curl command    ${volume}/buck1/testfile          LISTSTATUS      ${EMPTY}
-    Should contain  ${list.stdout}     FileStatus  FILE    13
-
-Get content summary of directory
-    ${summary} =    Execute curl command    ${volume}          GETCONTENTSUMMARY      ${EMPTY}
-    Should contain  ${summary.stdout}  ContentSummary      "directoryCount":2      "fileCount":1
-
-Get quota usage of directory
-    ${usage} =      Execute curl command    ${volume}          GETQUOTAUSAGE      ${EMPTY}
-    Should contain  ${usage.stdout}    QuotaUsage          "fileAndDirectoryCount":3
-
-Get home directory
-    ${home} =       Execute curl command    ${EMPTY}          GETHOMEDIRECTORY      ${EMPTY}
-    ${user} =       Set Variable If     '${SECURITY_ENABLED}'=='true'   testuser    ${USERNAME}
-    Should contain  ${home.stdout}     "Path":"\\/user\\/${user}"
-
-Get trash root
-    ${trash} =      Execute curl command    ${volume}/buck1/testfile          GETTRASHROOT      ${EMPTY}
-    ${user} =       Set Variable If     '${SECURITY_ENABLED}'=='true'   testuser    ${USERNAME}
-    Should contain  ${trash.stdout}    "Path":"\\/${volume}\\/buck1\\/.Trash\\/${user}"
-
-# Missing functionality, not working yet.
-# Set permission of bucket
-    # ${status} =     Execute curl command    vol1/buck1          GETFILESTATUS      ${EMPTY}
-    # ${json} =       Evaluate                json.loads('''${status.stdout}''')          json
-    # ${permission} =     Set Variable     ${json["FileStatus"]["permission"]}
-    # Execute curl command    vol1/buck1          SETPERMISSION&permission=666      -X PUT
-    # ${status_after} =     Execute curl command    vol1/buck1          GETFILESTATUS      ${EMPTY}
-    # ${json_after} =       evaluate                json.loads('''${status_after.stdout}''')    json
-    # ${permission_after} =   Set Variable     ${json_after["FileStatus"]["permission"]}
-    # Should be equal As Integers     666   ${permission_after}
-
-# Missing functionality, not working properly yet.
-# Set replication factor of bucket
-    # ${status} =     Execute curl command    vol1/buck1          GETFILESTATUS      ${EMPTY}
-    # ${json} =       Evaluate                json.loads('''${status.stdout}''')          json
-    # ${factor} =     Set Variable     ${json["FileStatus"]["replication"]}
-    # ${cmd} =        Execute curl command    vol1/buck1          SETREPLICATION&replication=1      -X PUT
-    # Should contain  ${cmd.stdout}      true
-    # ${status_after} =     Execute curl command    vol1/buck1          GETFILESTATUS      ${EMPTY}
-    # ${json_after} =       evaluate                json.loads('''${status_after.stdout}''')    json
-    # ${factor_after} =   Set Variable     ${json_after["FileStatus"]["replication"]}
-    # Should be equal As Integers     1   ${factor_after}
-
-# Missing functionality, not working properly yet.
-#Set access and modification time of bucket
-    # ${status} =     Execute curl command    vol1/buck1          GETFILESTATUS      ${EMPTY}
-    # ${json} =       Evaluate                json.loads('''${status.stdout}''')          json
-    # ${access} =     Set Variable     ${json["FileStatus"]["accessTime"]}
-    # ${mod} =     Set Variable     ${json["FileStatus"]["modificationTime"]}
-    # Execute curl command    vol1/buck1          SETTIMES&modificationtime=10&accesstime=10      -X PUT
-    # ${status_after} =     Execute curl command    vol1/buck1          GETFILESTATUS      ${EMPTY}
-    # ${json_after} =       evaluate                json.loads('''${status_after.stdout}''')    json
-    # ${access_after} =   Set Variable     ${json_after["FileStatus"]["accessTime"]}
-    # ${mod_after} =   Set Variable     ${json_after["FileStatus"]["modificationTime"]}
-    # Should be equal As Integers     10   ${access_after}
-    # Should be equal As Integers     10   ${mod_after}
-
-# Missing functionality, not working properly yet.
-# Set owner of bucket
-    # ${status} =     Execute curl command    vol1/buck1          GETFILESTATUS      ${EMPTY}
-    # ${json} =       Evaluate                json.loads('''${status.stdout}''')          json
-    # ${owner} =     Set Variable     ${json["FileStatus"]["owner"]}
-    # Execute curl command    vol1/buck1          SETOWNER&owner=hadoop      -X PUT
-    # ${status_after} =     Execute curl command    vol1/buck1          GETFILESTATUS      ${EMPTY}
-    # ${json_after} =       evaluate                json.loads('''${status_after.stdout}''')    json
-    # ${owner_after} =   Set Variable     ${json_after["FileStatus"]["owner"]}
-    # Should be equal     hadoop   ${owner_after}
\ No newline at end of file
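When `SECURITY_ENABLED` is true, the suite drops `user.name` and authenticates via Kerberos SPNEGO instead. A sketch of that flow, using the `testuser/httpfs@EXAMPLE.COM` keytab entry that this commit removes from the test keytabs:

```bash
# Authenticate from the keytab, then let curl negotiate SPNEGO
# (-u : supplies an empty user/password pair for the negotiation).
kinit -k -t /etc/security/keytabs/testuser.keytab testuser/httpfs@EXAMPLE.COM
curl --negotiate -u : -X PUT "http://httpfs:14000/webhdfs/v1/vol1?op=MKDIRS"
```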
diff --git a/hadoop-ozone/dist/src/shell/ozone/ozone b/hadoop-ozone/dist/src/shell/ozone/ozone
index bb86f56d40..b6ff261e33 100755
--- a/hadoop-ozone/dist/src/shell/ozone/ozone
+++ b/hadoop-ozone/dist/src/shell/ozone/ozone
@@ -48,7 +48,6 @@ function ozone_usage
   ozone_add_subcommand "om" daemon "Ozone Manager"
   ozone_add_subcommand "scm" daemon "run the Storage Container Manager service"
   ozone_add_subcommand "s3g" daemon "run the S3 compatible REST gateway"
-  ozone_add_subcommand "httpfs" daemon "run the HTTPFS compatible REST gateway"
   ozone_add_subcommand "csi" daemon "run the standalone CSI daemon"
   ozone_add_subcommand "recon" daemon "run the Recon service"
   ozone_add_subcommand "sh" client "command line interface for object store operations"
@@ -166,12 +165,6 @@ function ozonecmd_case
       OZONE_S3G_OPTS="${OZONE_S3G_OPTS} -Dlog4j.configurationFile=${OZONE_CONF_DIR}/s3g-audit-log4j2.properties"
       OZONE_RUN_ARTIFACT_NAME="ozone-s3gateway"
     ;;
-    httpfs)
-      OZONE_SUBCMD_SUPPORTDAEMONIZATION="true"
-      OZONE_OPTS="${OZONE_OPTS} -Dhttpfs.home.dir=${OZONE_HOME} -Dhttpfs.config.dir=${OZONE_CONF_DIR} -Dhttpfs.log.dir=${OZONE_HOME}/log -Dhttpfs.temp.dir=${OZONE_HOME}/temp"
-      OZONE_CLASSNAME='org.apache.ozone.fs.http.server.HttpFSServerWebServer'
-      OZONE_RUN_ARTIFACT_NAME="ozone-httpfsgateway"
-    ;;
     tenant)
       OZONE_CLASSNAME=org.apache.hadoop.ozone.shell.tenant.TenantShell
       OZONE_RUN_ARTIFACT_NAME="ozone-tools"
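For reference, the removed `httpfs)` case wired the subcommand into the stock Hadoop-style launcher, so the gateway could be run like any other Ozone daemon (a sketch, not the literal generated command line):

```bash
# Foreground:
ozone httpfs
# Or daemonized, since OZONE_SUBCMD_SUPPORTDAEMONIZATION was set; the -D flags
# above pointed logs and temp files at ${OZONE_HOME}/log and ${OZONE_HOME}/temp.
ozone --daemon start httpfs
```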
diff --git a/hadoop-ozone/httpfsgateway/README.txt b/hadoop-ozone/httpfsgateway/README.txt
deleted file mode 100644
index c2f4d64e20..0000000000
--- a/hadoop-ozone/httpfsgateway/README.txt
+++ /dev/null
@@ -1,17 +0,0 @@
------------------------------------------------------------------------------
-HttpFS - Hadoop HDFS over HTTP
-
-HttpFS is a server that provides a REST HTTP gateway to HDFS with full
-filesystem read & write capabilities.
-
-HttpFS can be used to transfer data between clusters running different
-versions of Hadoop (overcoming RPC versioning issues), for example using
-Hadoop DistCP.
-
-HttpFS can be used to access data in HDFS on a cluster behind a firewall
-(the HttpFS server acts as a gateway and is the only system that is allowed
-to cross the firewall into the cluster).
-
-HttpFS can be used to access data in HDFS using HTTP utilities (such as curl
-and wget) and HTTP libraries from languages other than Java (such as Perl).
------------------------------------------------------------------------------
diff --git a/hadoop-ozone/httpfsgateway/dev-support/findbugsExcludeFile.xml b/hadoop-ozone/httpfsgateway/dev-support/findbugsExcludeFile.xml
deleted file mode 100644
index c55b1d94e1..0000000000
--- a/hadoop-ozone/httpfsgateway/dev-support/findbugsExcludeFile.xml
+++ /dev/null
@@ -1,41 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<FindBugsFilter>
-  <Match>
-    <Class name="org.apache.ozone.lib.service.instrumentation.InstrumentationService" />
-    <Method name="getToAdd" />
-    <Bug pattern="UL_UNRELEASED_LOCK" />
-  </Match>
-  <Match>
-    <Class name="org.apache.ozone.fs.http.server.HttpFSServerWebApp" />
-    <Method name="destroy" />
-    <Bug pattern="ST_WRITE_TO_STATIC_FROM_INSTANCE_METHOD" />
-  </Match>
-  <Match>
-    <Class name="org.apache.ozone.lib.servlet.ServerWebApp" />
-    <Field name="authority" />
-    <Bug pattern="IS2_INCONSISTENT_SYNC" />
-  </Match>
-  <Match>
-    <Class name="org.apache.ozone.lib.service.hadoop.FileSystemAccessService" />
-    <Method name="closeFileSystem" />
-    <Bug pattern="NP_NULL_ON_SOME_PATH_FROM_RETURN_VALUE" />
-  </Match>
-  <Match>
-    <Source name="~.*Test.*\.java" />
-  </Match>
-</FindBugsFilter>
diff --git a/hadoop-ozone/httpfsgateway/pom.xml b/hadoop-ozone/httpfsgateway/pom.xml
deleted file mode 100644
index f7ff6e3eb4..0000000000
--- a/hadoop-ozone/httpfsgateway/pom.xml
+++ /dev/null
@@ -1,333 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-  http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-
-
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
-                      https://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.ozone</groupId>
-    <artifactId>ozone</artifactId>
-    <version>1.4.0-SNAPSHOT</version>
-  </parent>
-  <artifactId>ozone-httpfsgateway</artifactId>
-  <version>1.4.0-SNAPSHOT</version>
-  <packaging>jar</packaging>
-
-  <name>Apache Ozone HttpFS</name>
-  <description>Apache Ozone HttpFS</description>
-
-  <properties>
-    <httpfs.source.repository>REPO NOT AVAIL</httpfs.source.repository>
-    <httpfs.source.revision>REVISION NOT AVAIL</httpfs.source.revision>
-    <maven.build.timestamp.format>yyyy-MM-dd'T'HH:mm:ssZ</maven.build.timestamp.format>
-    <httpfs.build.timestamp>${maven.build.timestamp}</httpfs.build.timestamp>
-  </properties>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.ozone</groupId>
-      <artifactId>ozone-filesystem-common</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.ozone</groupId>
-      <artifactId>ozone-filesystem</artifactId>
-      <scope>runtime</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdfs-client</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>com.googlecode.json-simple</groupId>
-      <artifactId>json-simple</artifactId>
-      <version>1.1.1</version>
-      <scope>compile</scope>
-      <exclusions>
-        <exclusion>
-          <groupId>junit</groupId>
-          <artifactId>junit</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <groupId>com.sun.jersey</groupId>
-      <artifactId>jersey-core</artifactId>
-      <scope>compile</scope>
-    </dependency>
-    <dependency>
-      <groupId>com.sun.jersey</groupId>
-      <artifactId>jersey-server</artifactId>
-      <scope>compile</scope>
-    </dependency>
-    <dependency>
-      <groupId>com.sun.jersey</groupId>
-      <artifactId>jersey-servlet</artifactId>
-      <scope>compile</scope>
-    </dependency>
-    <dependency>
-      <groupId>javax.servlet</groupId>
-      <artifactId>javax.servlet-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.eclipse.jetty</groupId>
-      <artifactId>jetty-server</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.eclipse.jetty</groupId>
-      <artifactId>jetty-webapp</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>commons-codec</groupId>
-      <artifactId>commons-codec</artifactId>
-      <scope>runtime</scope>
-    </dependency>
-    <dependency>
-      <groupId>ch.qos.reload4j</groupId>
-      <artifactId>reload4j</artifactId>
-      <scope>compile</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.slf4j</groupId>
-      <artifactId>slf4j-api</artifactId>
-      <scope>compile</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.slf4j</groupId>
-      <artifactId>slf4j-reload4j</artifactId>
-      <scope>runtime</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.curator</groupId>
-      <artifactId>curator-framework</artifactId>
-      <scope>runtime</scope>
-      <version>4.2.0</version>
-      <!-- These are excluded as none of them is used in the HttpFS module; each would be a new, unnecessary
-       dependency for the Ozone project, and we would also need to update jar-report.txt for them. -->
-      <exclusions>
-        <exclusion>
-          <groupId>org.apache.commons</groupId>
-          <artifactId>commons-math</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.apache.yetus</groupId>
-          <artifactId>audience-annotations</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.slf4j</groupId>
-          <artifactId>slf4j-log4j12</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>com.sun.mail</groupId>
-          <artifactId>javax.mail</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>jline</groupId>
-          <artifactId>jline</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>log4j</groupId>
-          <artifactId>log4j</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>io.netty</groupId>
-          <artifactId>*</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <groupId>javax.xml.bind</groupId>
-      <artifactId>jaxb-api</artifactId>
-      <version>2.3.0</version>
-    </dependency>
-  </dependencies>
-
-  <build>
-    <resources>
-      <resource>
-        <directory>src/main/resources</directory>
-        <filtering>true</filtering>
-        <includes>
-          <include>httpfs.properties</include>
-        </includes>
-      </resource>
-      <resource>
-        <directory>src/main/resources</directory>
-        <filtering>false</filtering>
-        <excludes>
-          <exclude>httpfs.properties</exclude>
-        </excludes>
-      </resource>
-    </resources>
-    <testResources>
-      <testResource>
-        <directory>${basedir}/src/test/resources</directory>
-        <filtering>false</filtering>
-      </testResource>
-      <testResource>
-        <directory>${basedir}/src/test/resources</directory>
-        <filtering>true</filtering>
-      </testResource>
-    </testResources>
-
-    <plugins>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-checkstyle-plugin</artifactId>
-        <version>${maven-checkstyle-plugin.version}</version>
-        <configuration>
-          <includeTestSourceDirectory>false</includeTestSourceDirectory>
-        </configuration>
-      </plugin>
-      <plugin>
-        <!-- workaround for filtered/unfiltered resources in same directory -->
-        <!-- remove when maven-eclipse-plugin 2.9 is available -->
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-eclipse-plugin</artifactId>
-        <version>2.6</version>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-surefire-plugin</artifactId>
-        <configuration>
-          <testFailureIgnore>${ignoreTestFailure}</testFailureIgnore>
-          <threadCount>1</threadCount>
-          <forkedProcessTimeoutInSeconds>600</forkedProcessTimeoutInSeconds>
-          <properties>
-            <property>
-              <name>listener</name>
-              <value>org.apache.hadoop.test.TimedOutTestsListener</value>
-            </property>
-          </properties>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-javadoc-plugin</artifactId>
-        <executions>
-          <execution>
-            <goals>
-              <goal>javadoc-no-fork</goal>
-            </goals>
-            <phase>site</phase>
-            <configuration>
-              <quiet>true</quiet>
-              <verbose>false</verbose>
-              <source>${maven.compile.source}</source>
-              <charset>${maven.compile.encoding}</charset>
-              <groups>
-                <group>
-                  <title>HttpFs API</title>
-                  <packages>*</packages>
-                </group>
-              </groups>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.rat</groupId>
-        <artifactId>apache-rat-plugin</artifactId>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-antrun-plugin</artifactId>
-        <version>1.8</version>
-        <executions>
-          <execution>
-            <id>create-web-xmls</id>
-            <phase>generate-test-resources</phase>
-            <goals>
-              <goal>run</goal>
-            </goals>
-            <configuration>
-              <target>
-                <mkdir dir="${project.build.directory}/test-classes/webapp"/>
-
-                <copy todir="${project.build.directory}/test-classes/webapp">
-                  <fileset dir="${basedir}/src/main/webapp"/>
-                </copy>
-              </target>
-            </configuration>
-          </execution>
-          <execution>
-            <id>site</id>
-            <phase>site</phase>
-            <goals>
-              <goal>run</goal>
-            </goals>
-            <configuration>
-              <target>
-                <xslt in="${basedir}/src/main/resources/httpfs-default.xml"
-                      out="${project.build.directory}/site/httpfs-default.html"
-                      style="${basedir}/src/site/configuration.xsl"/>
-              </target>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>com.github.spotbugs</groupId>
-        <artifactId>spotbugs-maven-plugin</artifactId>
-        <configuration>
-          <excludeFilterFile>${basedir}/dev-support/findbugsExcludeFile.xml</excludeFilterFile>
-        </configuration>
-      </plugin>
-    </plugins>
-  </build>
-
-  <profiles>
-    <profile>
-      <id>dist</id>
-      <activation>
-        <activeByDefault>false</activeByDefault>
-      </activation>
-      <build>
-        <plugins>
-          <plugin>
-            <groupId>org.apache.maven.plugins</groupId>
-            <artifactId>maven-assembly-plugin</artifactId>
-            <dependencies>
-              <dependency>
-                <groupId>org.apache.hadoop</groupId>
-                <artifactId>hadoop-assemblies</artifactId>
-                <version>${hadoop.version}</version>
-              </dependency>
-            </dependencies>
-            <executions>
-              <execution>
-                <id>dist</id>
-                <phase>package</phase>
-                <goals>
-                  <goal>single</goal>
-                </goals>
-                <configuration>
-                  <finalName>${project.artifactId}-${project.version}</finalName>
-                  <appendAssemblyId>false</appendAssemblyId>
-                  <attach>false</attach>
-                  <descriptorRefs>
-                    <descriptorRef>hadoop-httpfs-dist</descriptorRef>
-                  </descriptorRefs>
-                </configuration>
-              </execution>
-            </executions>
-          </plugin>
-        </plugins>
-      </build>
-    </profile>
-  </profiles>
-</project>
diff --git a/hadoop-ozone/httpfsgateway/src/main/conf/httpfs-env.sh b/hadoop-ozone/httpfsgateway/src/main/conf/httpfs-env.sh
deleted file mode 100644
index 5b7b05d3ba..0000000000
--- a/hadoop-ozone/httpfsgateway/src/main/conf/httpfs-env.sh
+++ /dev/null
@@ -1,59 +0,0 @@
-#!/usr/bin/env bash
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License. See accompanying LICENSE file.
-#
-
-# Set httpfs specific environment variables here.
-#
-# hadoop-env.sh is read prior to this file.
-#
-
-# HTTPFS config directory
-#
-# export HTTPFS_CONFIG=${HADOOP_CONF_DIR}
-
-# HTTPFS log directory
-#
-# export HTTPFS_LOG=${HADOOP_LOG_DIR}
-
-# HTTPFS temporary directory
-#
-# export HTTPFS_TEMP=${HADOOP_HDFS_HOME}/temp
-
-# The HTTP port used by HTTPFS
-#
-# export HTTPFS_HTTP_PORT=14000
-
-# The maximum number of HTTP handler threads
-#
-# export HTTPFS_MAX_THREADS=1000
-
-# The hostname HttpFS server runs on
-#
-# export HTTPFS_HTTP_HOSTNAME=$(hostname -f)
-
-# The maximum size of HTTP header
-#
-# export HTTPFS_MAX_HTTP_HEADER_SIZE=65536
-
-# Whether SSL is enabled
-#
-# export HTTPFS_SSL_ENABLED=false
-
-# The location of the SSL keystore if using SSL
-#
-# export HTTPFS_SSL_KEYSTORE_FILE=${HOME}/.keystore
-
-# The password of the SSL keystore if using SSL
-#
-# export HTTPFS_SSL_KEYSTORE_PASS=password
\ No newline at end of file
diff --git a/hadoop-ozone/httpfsgateway/src/main/conf/httpfs-log4j.properties b/hadoop-ozone/httpfsgateway/src/main/conf/httpfs-log4j.properties
deleted file mode 100644
index 1d19733f31..0000000000
--- a/hadoop-ozone/httpfsgateway/src/main/conf/httpfs-log4j.properties
+++ /dev/null
@@ -1,35 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License. See accompanying LICENSE file.
-#
-
-# If the Java system property 'httpfs.log.dir' is not defined at HttpFSServer startup time,
-# setup sets its value to '${httpfs.home}/logs'
-
-log4j.appender.httpfs=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.httpfs.DatePattern='.'yyyy-MM-dd
-log4j.appender.httpfs.File=${httpfs.log.dir}/httpfs.log
-log4j.appender.httpfs.Append=true
-log4j.appender.httpfs.layout=org.apache.log4j.PatternLayout
-log4j.appender.httpfs.layout.ConversionPattern=%d{ISO8601} %5p %c{1} [%X{hostname}][%X{user}:%X{doAs}] %X{op} %m%n
-
-log4j.appender.httpfsaudit=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.httpfsaudit.DatePattern='.'yyyy-MM-dd
-log4j.appender.httpfsaudit.File=${httpfs.log.dir}/httpfs-audit.log
-log4j.appender.httpfsaudit.Append=true
-log4j.appender.httpfsaudit.layout=org.apache.log4j.PatternLayout
-log4j.appender.httpfsaudit.layout.ConversionPattern=%d{ISO8601} %5p [%X{hostname}][%X{user}:%X{doAs}] %X{op} %m%n
-
-log4j.logger.httpfsaudit=INFO, httpfsaudit
-
-log4j.logger.org.apache.ozone.fs.http.server=INFO, httpfs
-log4j.logger.org.apache.ozone.lib=INFO, httpfs
diff --git a/hadoop-ozone/httpfsgateway/src/main/conf/httpfs-site.xml b/hadoop-ozone/httpfsgateway/src/main/conf/httpfs-site.xml
deleted file mode 100644
index 4a718e1668..0000000000
--- a/hadoop-ozone/httpfsgateway/src/main/conf/httpfs-site.xml
+++ /dev/null
@@ -1,17 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-  http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<configuration>
-
-</configuration>
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/HttpFSConstants.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/HttpFSConstants.java
deleted file mode 100644
index 488e966343..0000000000
--- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/HttpFSConstants.java
+++ /dev/null
@@ -1,199 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ozone.fs.http;
-
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdds.annotation.InterfaceAudience;
-
-/**
- * Constants for the HttpFs server side implementations.
- */
-public interface HttpFSConstants {
-
-  String HTTP_GET = "GET";
-  String HTTP_PUT = "PUT";
-  String HTTP_POST = "POST";
-  String HTTP_DELETE = "DELETE";
-
-  String SCHEME = "webhdfs";
-  String OP_PARAM = "op";
-  String DO_AS_PARAM = "doas";
-  String OVERWRITE_PARAM = "overwrite";
-  String REPLICATION_PARAM = "replication";
-  String BLOCKSIZE_PARAM = "blocksize";
-  String PERMISSION_PARAM = "permission";
-  String UNMASKED_PERMISSION_PARAM = "unmaskedpermission";
-  String ACLSPEC_PARAM = "aclspec";
-  String DESTINATION_PARAM = "destination";
-  String RECURSIVE_PARAM = "recursive";
-  String SOURCES_PARAM = "sources";
-  String OWNER_PARAM = "owner";
-  String GROUP_PARAM = "group";
-  String MODIFICATION_TIME_PARAM = "modificationtime";
-  String ACCESS_TIME_PARAM = "accesstime";
-  String XATTR_NAME_PARAM = "xattr.name";
-  String XATTR_VALUE_PARAM = "xattr.value";
-  String XATTR_SET_FLAG_PARAM = "flag";
-  String XATTR_ENCODING_PARAM = "encoding";
-  String NEW_LENGTH_PARAM = "newlength";
-  String START_AFTER_PARAM = "startAfter";
-  String POLICY_NAME_PARAM = "storagepolicy";
-  String SNAPSHOT_NAME_PARAM = "snapshotname";
-  String OLD_SNAPSHOT_NAME_PARAM = "oldsnapshotname";
-  String FSACTION_MODE_PARAM = "fsaction";
-  String EC_POLICY_NAME_PARAM = "ecpolicy";
-  Short DEFAULT_PERMISSION = 0755;
-  String ACLSPEC_DEFAULT = "";
-  String RENAME_JSON = "boolean";
-  String TRUNCATE_JSON = "boolean";
-  String DELETE_JSON = "boolean";
-  String MKDIRS_JSON = "boolean";
-  String HOME_DIR_JSON = "Path";
-  String TRASH_DIR_JSON = "Path";
-  String SET_REPLICATION_JSON = "boolean";
-  String UPLOAD_CONTENT_TYPE = "application/octet-stream";
-  String SNAPSHOT_JSON = "Path";
-  String FILE_STATUSES_JSON = "FileStatuses";
-  String FILE_STATUS_JSON = "FileStatus";
-  String PATH_SUFFIX_JSON = "pathSuffix";
-  String TYPE_JSON = "type";
-  String LENGTH_JSON = "length";
-  String OWNER_JSON = "owner";
-  String GROUP_JSON = "group";
-  String PERMISSION_JSON = "permission";
-  String ACCESS_TIME_JSON = "accessTime";
-  String MODIFICATION_TIME_JSON = "modificationTime";
-  String BLOCK_SIZE_JSON = "blockSize";
-  String CHILDREN_NUM_JSON = "childrenNum";
-  String FILE_ID_JSON = "fileId";
-  String REPLICATION_JSON = "replication";
-  String STORAGEPOLICY_JSON = "storagePolicy";
-  String ECPOLICYNAME_JSON = "ecPolicy";
-  String XATTRS_JSON = "XAttrs";
-  String XATTR_NAME_JSON = "name";
-  String XATTR_VALUE_JSON = "value";
-  String XATTRNAMES_JSON = "XAttrNames";
-  String ECPOLICY_JSON = "ecPolicyObj";
-  String SYMLINK_JSON = "symlink";
-  String FILE_CHECKSUM_JSON = "FileChecksum";
-  String CHECKSUM_ALGORITHM_JSON = "algorithm";
-  String CHECKSUM_BYTES_JSON = "bytes";
-  String CHECKSUM_LENGTH_JSON = "length";
-  String CONTENT_SUMMARY_JSON = "ContentSummary";
-  String CONTENT_SUMMARY_DIRECTORY_COUNT_JSON
-      = "directoryCount";
-  String CONTENT_SUMMARY_ECPOLICY_JSON = "ecPolicy";
-  String CONTENT_SUMMARY_FILE_COUNT_JSON = "fileCount";
-  String CONTENT_SUMMARY_LENGTH_JSON = "length";
-  String QUOTA_USAGE_JSON = "QuotaUsage";
-  String QUOTA_USAGE_FILE_AND_DIRECTORY_COUNT_JSON =
-      "fileAndDirectoryCount";
-  String QUOTA_USAGE_QUOTA_JSON = "quota";
-  String QUOTA_USAGE_SPACE_CONSUMED_JSON = "spaceConsumed";
-  String QUOTA_USAGE_SPACE_QUOTA_JSON = "spaceQuota";
-  String QUOTA_USAGE_CONSUMED_JSON = "consumed";
-  String QUOTA_USAGE_TYPE_QUOTA_JSON = "typeQuota";
-  String ACL_STATUS_JSON = "AclStatus";
-  String ACL_STICKY_BIT_JSON = "stickyBit";
-  String ACL_ENTRIES_JSON = "entries";
-  String ACL_BIT_JSON = "aclBit";
-  String ENC_BIT_JSON = "encBit";
-  String EC_BIT_JSON = "ecBit";
-  String SNAPSHOT_BIT_JSON = "snapshotEnabled";
-  String DIRECTORY_LISTING_JSON = "DirectoryListing";
-  String PARTIAL_LISTING_JSON = "partialListing";
-  String REMAINING_ENTRIES_JSON = "remainingEntries";
-  String STORAGE_POLICIES_JSON = "BlockStoragePolicies";
-  String STORAGE_POLICY_JSON = "BlockStoragePolicy";
-  int HTTP_TEMPORARY_REDIRECT = 307;
-  String SERVICE_NAME = "/webhdfs";
-  String SERVICE_VERSION = "/v1";
-  String SERVICE_PATH = SERVICE_NAME + SERVICE_VERSION;
-  byte[] EMPTY_BYTES = {};
-
-  /**
-   * Converts a <code>FsPermission</code> to a Unix octal representation.
-   *
-   * @param p the permission.
-   *
-   * @return the Unix octal string representation.
-   */
-  static String permissionToString(FsPermission p) {
-    return  Integer.toString((p == null) ? DEFAULT_PERMISSION : p.toShort(), 8);
-  }
-
-  /**
-   * File types.
-   */
-  enum FILETYPE {
-    FILE, DIRECTORY, SYMLINK;
-
-    public static FILETYPE getType(FileStatus fileStatus) {
-      if (fileStatus.isFile()) {
-        return FILE;
-      }
-      if (fileStatus.isDirectory()) {
-        return DIRECTORY;
-      }
-      if (fileStatus.isSymlink()) {
-        return SYMLINK;
-      }
-      throw new IllegalArgumentException("Could not determine filetype for: " +
-          fileStatus.getPath());
-    }
-  }
-
-  /**
-   * Operation types.
-   */
-  @InterfaceAudience.Private
-  enum Operation {
-    OPEN(HTTP_GET), GETFILESTATUS(HTTP_GET), LISTSTATUS(HTTP_GET),
-    GETHOMEDIRECTORY(HTTP_GET), GETCONTENTSUMMARY(HTTP_GET),
-    GETQUOTAUSAGE(HTTP_GET), GETFILECHECKSUM(HTTP_GET),
-    GETFILEBLOCKLOCATIONS(HTTP_GET), INSTRUMENTATION(HTTP_GET),
-    GETACLSTATUS(HTTP_GET), GETTRASHROOT(HTTP_GET),
-    APPEND(HTTP_POST), CONCAT(HTTP_POST), TRUNCATE(HTTP_POST),
-    CREATE(HTTP_PUT), MKDIRS(HTTP_PUT), RENAME(HTTP_PUT), SETOWNER(HTTP_PUT),
-    SETPERMISSION(HTTP_PUT), SETREPLICATION(HTTP_PUT), SETTIMES(HTTP_PUT),
-    MODIFYACLENTRIES(HTTP_PUT), REMOVEACLENTRIES(HTTP_PUT),
-    REMOVEDEFAULTACL(HTTP_PUT), REMOVEACL(HTTP_PUT), SETACL(HTTP_PUT),
-    DELETE(HTTP_DELETE), SETXATTR(HTTP_PUT), GETXATTRS(HTTP_GET),
-    REMOVEXATTR(HTTP_PUT), LISTXATTRS(HTTP_GET), LISTSTATUS_BATCH(HTTP_GET),
-    GETALLSTORAGEPOLICY(HTTP_GET), GETSTORAGEPOLICY(HTTP_GET),
-    SETSTORAGEPOLICY(HTTP_PUT), UNSETSTORAGEPOLICY(HTTP_POST),
-    ALLOWSNAPSHOT(HTTP_PUT), DISALLOWSNAPSHOT(HTTP_PUT),
-    CREATESNAPSHOT(HTTP_PUT), DELETESNAPSHOT(HTTP_DELETE),
-    RENAMESNAPSHOT(HTTP_PUT), GETSNAPSHOTDIFF(HTTP_GET),
-    GETSNAPSHOTTABLEDIRECTORYLIST(HTTP_GET), GETSERVERDEFAULTS(HTTP_GET),
-    CHECKACCESS(HTTP_GET), SETECPOLICY(HTTP_PUT), GETECPOLICY(HTTP_GET),
-    UNSETECPOLICY(HTTP_POST), SATISFYSTORAGEPOLICY(HTTP_PUT);
-
-    private String httpMethod;
-
-    Operation(String httpMethod) {
-      this.httpMethod = httpMethod;
-    }
-
-    public String getMethod() {
-      return httpMethod;
-    }
-
-  }
-}
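The Operation enum above pairs every exposed operation with its expected HTTP verb, and OP_PARAM names the "op" query parameter that carries it. A minimal sketch of how a dispatcher could use the two to reject mismatched requests; this helper is hypothetical and is not taken from the reverted server code:

    import java.io.IOException;
    import javax.servlet.http.HttpServletRequest;
    import javax.servlet.http.HttpServletResponse;
    import org.apache.ozone.fs.http.HttpFSConstants;

    /** Hypothetical guard that checks a request's verb against the declared one. */
    final class OperationMethodGuard {
      private OperationMethodGuard() {
      }

      /** Returns the parsed operation, or null after sending an error response. */
      static HttpFSConstants.Operation check(HttpServletRequest req,
                                             HttpServletResponse res) throws IOException {
        String op = req.getParameter(HttpFSConstants.OP_PARAM); // the "op" query parameter
        if (op == null) {
          res.sendError(HttpServletResponse.SC_BAD_REQUEST, "Missing 'op' parameter");
          return null;
        }
        HttpFSConstants.Operation operation;
        try {
          operation = HttpFSConstants.Operation.valueOf(op.toUpperCase());
        } catch (IllegalArgumentException e) {
          res.sendError(HttpServletResponse.SC_BAD_REQUEST, "Unknown operation: " + op);
          return null;
        }
        if (!operation.getMethod().equals(req.getMethod())) {
          res.sendError(HttpServletResponse.SC_BAD_REQUEST,
              op + " requires HTTP " + operation.getMethod());
          return null;
        }
        return operation;
      }
    }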
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/package-info.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/package-info.java
deleted file mode 100644
index 4643f1188c..0000000000
--- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-/**
- * Basic server implementations for the HttpFS and its constants.
- */
-package org.apache.ozone.fs.http;
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/CheckUploadContentTypeFilter.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/CheckUploadContentTypeFilter.java
deleted file mode 100644
index 03e3f8e0a8..0000000000
--- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/CheckUploadContentTypeFilter.java
+++ /dev/null
@@ -1,115 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ozone.fs.http.server;
-
-
-import org.apache.hadoop.hdds.annotation.InterfaceAudience;
-import org.apache.ozone.fs.http.HttpFSConstants;
-import org.apache.hadoop.util.StringUtils;
-
-import javax.servlet.Filter;
-import javax.servlet.FilterChain;
-import javax.servlet.FilterConfig;
-import javax.servlet.ServletException;
-import javax.servlet.ServletRequest;
-import javax.servlet.ServletResponse;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-import java.io.IOException;
-import java.util.HashSet;
-import java.util.Set;
-
-/**
- * Filter that Enforces the content-type to be application/octet-stream for
- * POST and PUT requests.
- */
-@InterfaceAudience.Private
-public class CheckUploadContentTypeFilter implements Filter {
-
-  private static final Set<String> UPLOAD_OPERATIONS = new HashSet<String>();
-
-  static {
-    UPLOAD_OPERATIONS.add(HttpFSConstants.Operation.APPEND.toString());
-    UPLOAD_OPERATIONS.add(HttpFSConstants.Operation.CREATE.toString());
-  }
-
-  /**
-   * Initializes the filter.
-   * <p>
-   * This implementation is a NOP.
-   *
-   * @param config filter configuration.
-   *
-   * @throws ServletException thrown if the filter could not be initialized.
-   */
-  @Override
-  public void init(FilterConfig config) throws ServletException {
-  }
-
-  /**
-   * Enforces the content-type to be application/octet-stream for
-   * POST and PUT requests.
-   *
-   * @param request servlet request.
-   * @param response servlet response.
-   * @param chain filter chain.
-   *
-   * @throws IOException thrown if an IO error occurs.
-   * @throws ServletException thrown if a servlet error occurs.
-   */
-  @Override
-  public void doFilter(ServletRequest request, ServletResponse response,
-                       FilterChain chain)
-      throws IOException, ServletException {
-    boolean contentTypeOK = true;
-    HttpServletRequest httpReq = (HttpServletRequest) request;
-    HttpServletResponse httpRes = (HttpServletResponse) response;
-    String method = httpReq.getMethod();
-    if (method.equals("PUT") || method.equals("POST")) {
-      String op = httpReq.getParameter(HttpFSConstants.OP_PARAM);
-      if (op != null && UPLOAD_OPERATIONS.contains(
-          StringUtils.toUpperCase(op))) {
-        if ("true".equalsIgnoreCase(httpReq
-            .getParameter(HttpFSParametersProvider.DataParam.NAME))) {
-          String contentType = httpReq.getContentType();
-          contentTypeOK =
-              HttpFSConstants.UPLOAD_CONTENT_TYPE.equalsIgnoreCase(contentType);
-        }
-      }
-    }
-    if (contentTypeOK) {
-      chain.doFilter(httpReq, httpRes);
-    } else {
-      httpRes.sendError(HttpServletResponse.SC_BAD_REQUEST,
-                        "Data upload requests must have content-type set to '" +
-                            HttpFSConstants.UPLOAD_CONTENT_TYPE + "'");
-
-    }
-  }
-
-  /**
-   * Destroys the filter.
-   * <p>
-   * This implementation is a NOP.
-   */
-  @Override
-  public void destroy() {
-  }
-
-}
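Viewed from the client side, the filter above means any CREATE or APPEND request that actually carries data must declare Content-Type: application/octet-stream or receive 400 Bad Request. A sketch of a compliant upload, assuming the default port 14000 and that HttpFSParametersProvider.DataParam.NAME resolves to a query parameter named "data" (that class is not shown in this diff, so the name is an assumption):

    import java.io.OutputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;

    /** Illustrative upload that satisfies CheckUploadContentTypeFilter. */
    public class HttpFsCreateExample {
      public static void main(String[] args) throws Exception {
        URL url = new URL(
            "http://localhost:14000/webhdfs/v1/tmp/hello.txt"
                + "?op=CREATE&data=true&user.name=hdfs"); // "data" param name is an assumption
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("PUT");
        conn.setDoOutput(true);
        // Without this header the filter replies 400 Bad Request.
        conn.setRequestProperty("Content-Type", "application/octet-stream");
        try (OutputStream os = conn.getOutputStream()) {
          os.write("hello, httpfs".getBytes(StandardCharsets.UTF_8));
        }
        System.out.println("HTTP " + conn.getResponseCode());
        conn.disconnect();
      }
    }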
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/FSOperations.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/FSOperations.java
deleted file mode 100644
index 59945f829e..0000000000
--- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/FSOperations.java
+++ /dev/null
@@ -1,2105 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ozone.fs.http.server;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.BlockStoragePolicySpi;
-import org.apache.hadoop.fs.ContentSummary;
-import org.apache.hadoop.fs.FileChecksum;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FilterFileSystem;
-import org.apache.hadoop.fs.FsServerDefaults;
-import org.apache.hadoop.fs.GlobFilter;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PathFilter;
-import org.apache.hadoop.fs.QuotaUsage;
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.fs.XAttrCodec;
-import org.apache.hadoop.fs.XAttrSetFlag;
-import org.apache.ozone.fs.http.HttpFSConstants;
-import org.apache.ozone.fs.http.HttpFSConstants.FILETYPE;
-import org.apache.hadoop.fs.permission.AclEntry;
-import org.apache.hadoop.fs.permission.AclStatus;
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdds.annotation.InterfaceAudience;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
-import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
-import org.apache.ozone.lib.service.FileSystemAccess;
-import org.apache.hadoop.util.StringUtils;
-import org.json.simple.JSONArray;
-import org.json.simple.JSONObject;
-import org.apache.hadoop.fs.permission.FsCreateModes;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.util.Collection;
-import java.util.EnumSet;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.TreeMap;
-
-/**
- * FileSystem operation executors used by {@link HttpFSServer}.
- */
-@InterfaceAudience.Private
-public final class FSOperations {
-
-  private static int bufferSize = 4096;
-  private static final String HTTPFS_BUFFER_SIZE_KEY = "httpfs.buffer.size";
-  private static final int HTTP_BUFFER_SIZE_DEFAULT = 4096;
-
-  private FSOperations() {
-    // not called
-  }
-  /**
-   * Set the buffer size. The size is set during the initialization of
-   * HttpFSServerWebApp.
-   * @param conf the configuration to get the bufferSize
-   */
-  public static void setBufferSize(Configuration conf) {
-    bufferSize = conf.getInt(HTTPFS_BUFFER_SIZE_KEY,
-        HTTP_BUFFER_SIZE_DEFAULT);
-  }
-
-  /**
-   * @param fileStatus a FileStatus object
-   * @return JSON map suitable for wire transport
-   */
-  private static Map<String, Object> toJson(FileStatus fileStatus) {
-    Map<String, Object> json = new LinkedHashMap<>();
-    json.put(HttpFSConstants.FILE_STATUS_JSON, toJsonInner(fileStatus, true));
-    return json;
-  }
-
-  /**
-   * @param fileStatuses list of FileStatus objects
-   * @param isFile is the fileStatuses from a file path
-   * @return JSON map suitable for wire transport
-   */
-  @SuppressWarnings({"unchecked"})
-  private static Map<String, Object> toJson(FileStatus[] fileStatuses,
-      boolean isFile) {
-    Map<String, Object> json = new LinkedHashMap<>();
-    Map<String, Object> inner = new LinkedHashMap<>();
-    JSONArray statuses = new JSONArray();
-    for (FileStatus f : fileStatuses) {
-      statuses.add(toJsonInner(f, isFile));
-    }
-    inner.put(HttpFSConstants.FILE_STATUS_JSON, statuses);
-    json.put(HttpFSConstants.FILE_STATUSES_JSON, inner);
-    return json;
-  }
-
-  /**
-   * Not meant to be called directly except by the other toJson functions.
-   */
-  private static Map<String, Object> toJsonInner(FileStatus fileStatus,
-      boolean emptyPathSuffix) {
-    Map<String, Object> json = new LinkedHashMap<String, Object>();
-    json.put(HttpFSConstants.PATH_SUFFIX_JSON,
-        (emptyPathSuffix) ? "" : fileStatus.getPath().getName());
-    FILETYPE fileType = FILETYPE.getType(fileStatus);
-    json.put(HttpFSConstants.TYPE_JSON, fileType.toString());
-    if (fileType.equals(FILETYPE.SYMLINK)) {
-      // put the symlink into Json
-      try {
-        json.put(HttpFSConstants.SYMLINK_JSON,
-            fileStatus.getSymlink().getName());
-      } catch (IOException e) {
-        // Can't happen.
-      }
-    }
-    json.put(HttpFSConstants.LENGTH_JSON, fileStatus.getLen());
-    json.put(HttpFSConstants.OWNER_JSON, fileStatus.getOwner());
-    json.put(HttpFSConstants.GROUP_JSON, fileStatus.getGroup());
-    json.put(HttpFSConstants.PERMISSION_JSON,
-        HttpFSConstants.permissionToString(fileStatus.getPermission()));
-    json.put(HttpFSConstants.ACCESS_TIME_JSON, fileStatus.getAccessTime());
-    json.put(HttpFSConstants.MODIFICATION_TIME_JSON,
-        fileStatus.getModificationTime());
-    json.put(HttpFSConstants.BLOCK_SIZE_JSON, fileStatus.getBlockSize());
-    json.put(HttpFSConstants.REPLICATION_JSON, fileStatus.getReplication());
-    if (fileStatus instanceof HdfsFileStatus) {
-      // Add HDFS-specific fields to response
-      HdfsFileStatus hdfsFileStatus = (HdfsFileStatus) fileStatus;
-      json.put(HttpFSConstants.CHILDREN_NUM_JSON,
-          hdfsFileStatus.getChildrenNum());
-      json.put(HttpFSConstants.FILE_ID_JSON,
-          hdfsFileStatus.getFileId());
-      json.put(HttpFSConstants.STORAGEPOLICY_JSON,
-          hdfsFileStatus.getStoragePolicy());
-//      if (hdfsFileStatus.getErasureCodingPolicy() != null) {
-//        json.put(HttpFSFileSystem.ECPOLICYNAME_JSON,
-//            hdfsFileStatus.getErasureCodingPolicy().getName());
-//        json.put(HttpFSFileSystem.ECPOLICY_JSON,
-//            JsonUtil.getEcPolicyAsMap(
-//                hdfsFileStatus.getErasureCodingPolicy()));
-//      }
-    }
-    if (fileStatus.getPermission().getAclBit()) {
-      json.put(HttpFSConstants.ACL_BIT_JSON, true);
-    }
-    if (fileStatus.getPermission().getEncryptedBit()) {
-      json.put(HttpFSConstants.ENC_BIT_JSON, true);
-    }
-    if (fileStatus.getPermission().getErasureCodedBit()) {
-      json.put(HttpFSConstants.EC_BIT_JSON, true);
-    }
-    if (fileStatus.isSnapshotEnabled()) {
-      json.put(HttpFSConstants.SNAPSHOT_BIT_JSON, true);
-    }
-    return json;
-  }
-
-  /**
-   * Serializes a DirectoryEntries object into the JSON for a
-   * WebHDFS {@link org.apache.hadoop.hdfs.protocol.DirectoryListing}.
-   * <p>
-   * These two classes are slightly different, due to the impedance
-   * mismatches between the WebHDFS and FileSystem APIs.
-   * @param entries the directory entries to serialize
-   * @param isFile whether the entries come from a file path
-   * @return json
-   */
-  private static Map<String, Object> toJson(FileSystem.DirectoryEntries
-      entries, boolean isFile) {
-    Map<String, Object> json = new LinkedHashMap<>();
-    Map<String, Object> inner = new LinkedHashMap<>();
-    Map<String, Object> fileStatuses = toJson(entries.getEntries(), isFile);
-    inner.put(HttpFSConstants.PARTIAL_LISTING_JSON, fileStatuses);
-    inner.put(HttpFSConstants.REMAINING_ENTRIES_JSON, entries.hasMore() ? 1
-        : 0);
-    json.put(HttpFSConstants.DIRECTORY_LISTING_JSON, inner);
-    return json;
-  }
-
-  /** Converts an <code>AclStatus</code> object into a JSON object.
-   *
-   * @param aclStatus AclStatus object
-   *
-   * @return The JSON representation of the ACLs for the file
-   */
-  @SuppressWarnings({"unchecked"})
-  private static Map<String, Object> aclStatusToJSON(AclStatus aclStatus) {
-    Map<String, Object> json = new LinkedHashMap<String, Object>();
-    Map<String, Object> inner = new LinkedHashMap<String, Object>();
-    JSONArray entriesArray = new JSONArray();
-    inner.put(HttpFSConstants.OWNER_JSON, aclStatus.getOwner());
-    inner.put(HttpFSConstants.GROUP_JSON, aclStatus.getGroup());
-    inner.put(HttpFSConstants.PERMISSION_JSON,
-        HttpFSConstants.permissionToString(aclStatus.getPermission()));
-    inner.put(HttpFSConstants.ACL_STICKY_BIT_JSON, aclStatus.isStickyBit());
-    for (AclEntry e : aclStatus.getEntries()) {
-      entriesArray.add(e.toString());
-    }
-    inner.put(HttpFSConstants.ACL_ENTRIES_JSON, entriesArray);
-    json.put(HttpFSConstants.ACL_STATUS_JSON, inner);
-    return json;
-  }
-
-  /**
-   * Converts a <code>FileChecksum</code> object into a JSON array
-   * object.
-   *
-   * @param checksum file checksum.
-   *
-   * @return The JSON representation of the file checksum.
-   */
-  @SuppressWarnings({"unchecked"})
-  private static Map fileChecksumToJSON(FileChecksum checksum) {
-    Map json = new LinkedHashMap();
-    json.put(HttpFSConstants.CHECKSUM_ALGORITHM_JSON,
-        checksum.getAlgorithmName());
-    json.put(HttpFSConstants.CHECKSUM_BYTES_JSON,
-             org.apache.hadoop.util.StringUtils
-                 .byteToHexString(checksum.getBytes()));
-    json.put(HttpFSConstants.CHECKSUM_LENGTH_JSON, checksum.getLength());
-    Map response = new LinkedHashMap();
-    response.put(HttpFSConstants.FILE_CHECKSUM_JSON, json);
-    return response;
-  }
-
-  /**
-   * Converts xAttrs to a JSON object.
-   *
-   * @param xAttrs file xAttrs.
-   * @param encoding format of xattr values.
-   *
-   * @return The JSON representation of the xAttrs.
-   * @throws IOException 
-   */
-  @SuppressWarnings({"unchecked", "rawtypes"})
-  private static Map xAttrsToJSON(Map<String, byte[]> xAttrs, 
-      XAttrCodec encoding) throws IOException {
-    Map jsonMap = new LinkedHashMap();
-    JSONArray jsonArray = new JSONArray();
-    if (xAttrs != null) {
-      for (Entry<String, byte[]> e : xAttrs.entrySet()) {
-        Map json = new LinkedHashMap();
-        json.put(HttpFSConstants.XATTR_NAME_JSON, e.getKey());
-        if (e.getValue() != null) {
-          json.put(HttpFSConstants.XATTR_VALUE_JSON,
-              XAttrCodec.encodeValue(e.getValue(), encoding));
-        }
-        jsonArray.add(json);
-      }
-    }
-    jsonMap.put(HttpFSConstants.XATTRS_JSON, jsonArray);
-    return jsonMap;
-  }
-
-  /**
-   * Converts xAttr names to a JSON object.
-   *
-   * @param names file xAttr names.
-   *
-   * @return The JSON representation of the xAttr names.
-   * @throws IOException 
-   */
-  @SuppressWarnings({"unchecked", "rawtypes"})
-  private static Map xAttrNamesToJSON(List<String> names) throws IOException {
-    Map jsonMap = new LinkedHashMap();
-    jsonMap.put(HttpFSConstants.XATTRNAMES_JSON,
-        JSONArray.toJSONString(names));
-    return jsonMap;
-  }
-
-  /**
-   * Converts a <code>ContentSummary</code> object into a JSON array
-   * object.
-   *
-   * @param contentSummary the content summary
-   *
-   * @return The JSON representation of the content summary.
-   */
-  @SuppressWarnings({"unchecked"})
-  private static Map contentSummaryToJSON(ContentSummary contentSummary) {
-    Map json = new LinkedHashMap();
-    json.put(HttpFSConstants.CONTENT_SUMMARY_DIRECTORY_COUNT_JSON,
-        contentSummary.getDirectoryCount());
-    json.put(HttpFSConstants.CONTENT_SUMMARY_ECPOLICY_JSON,
-        contentSummary.getErasureCodingPolicy());
-    json.put(HttpFSConstants.CONTENT_SUMMARY_FILE_COUNT_JSON,
-        contentSummary.getFileCount());
-    json.put(HttpFSConstants.CONTENT_SUMMARY_LENGTH_JSON,
-        contentSummary.getLength());
-    Map<String, Object> quotaUsageMap = quotaUsageToMap(contentSummary);
-    for (Map.Entry<String, Object> e : quotaUsageMap.entrySet()) {
-      // For ContentSummary we don't need this since we already have
-      // separate count for file and directory.
-      if (!e.getKey().equals(
-          HttpFSConstants.QUOTA_USAGE_FILE_AND_DIRECTORY_COUNT_JSON)) {
-        json.put(e.getKey(), e.getValue());
-      }
-    }
-    Map response = new LinkedHashMap();
-    response.put(HttpFSConstants.CONTENT_SUMMARY_JSON, json);
-    return response;
-  }
-
-  /**
-   * Converts a <code>QuotaUsage</code> object into a JSON array
-   * object.
-   */
-  @SuppressWarnings({"unchecked"})
-  private static Map quotaUsageToJSON(QuotaUsage quotaUsage) {
-    Map response = new LinkedHashMap();
-    Map quotaUsageMap = quotaUsageToMap(quotaUsage);
-    response.put(HttpFSConstants.QUOTA_USAGE_JSON, quotaUsageMap);
-    return response;
-  }
-
-  private static Map<String, Object> quotaUsageToMap(QuotaUsage quotaUsage) {
-    Map<String, Object> result = new LinkedHashMap<>();
-    result.put(HttpFSConstants.QUOTA_USAGE_FILE_AND_DIRECTORY_COUNT_JSON,
-        quotaUsage.getFileAndDirectoryCount());
-    result.put(HttpFSConstants.QUOTA_USAGE_QUOTA_JSON, quotaUsage.getQuota());
-    result.put(HttpFSConstants.QUOTA_USAGE_SPACE_CONSUMED_JSON,
-        quotaUsage.getSpaceConsumed());
-    result.put(HttpFSConstants.QUOTA_USAGE_SPACE_QUOTA_JSON,
-        quotaUsage.getSpaceQuota());
-    Map<String, Map<String, Long>> typeQuota = new TreeMap<>();
-    for (StorageType t : StorageType.getTypesSupportingQuota()) {
-      long tQuota = quotaUsage.getTypeQuota(t);
-      if (tQuota != HdfsConstants.QUOTA_RESET) {
-        Map<String, Long> type = typeQuota.get(t.toString());
-        if (type == null) {
-          type = new TreeMap<>();
-          typeQuota.put(t.toString(), type);
-        }
-        type.put(HttpFSConstants.QUOTA_USAGE_QUOTA_JSON,
-            quotaUsage.getTypeQuota(t));
-        type.put(HttpFSConstants.QUOTA_USAGE_CONSUMED_JSON,
-            quotaUsage.getTypeConsumed(t));
-      }
-    }
-    result.put(HttpFSConstants.QUOTA_USAGE_TYPE_QUOTA_JSON, typeQuota);
-    return result;
-  }
-
-  /**
-   * Converts an object into a JSON map with one key-value entry.
-   * <p/>
-   * It assumes the given value is either a JSON primitive type or a
-   * <code>JsonAware</code> instance.
-   *
-   * @param name name for the key of the entry.
-   * @param value for the value of the entry.
-   *
-   * @return the JSON representation of the key-value pair.
-   */
-  @SuppressWarnings("unchecked")
-  private static JSONObject toJSON(String name, Object value) {
-    JSONObject json = new JSONObject();
-    json.put(name, value);
-    return json;
-  }
-
-  @SuppressWarnings({ "unchecked" })
-  private static JSONObject storagePolicyToJSON(BlockStoragePolicySpi policy) {
-    BlockStoragePolicy p = (BlockStoragePolicy) policy;
-    JSONObject policyJson = new JSONObject();
-    policyJson.put("id", p.getId());
-    policyJson.put("name", p.getName());
-    policyJson.put("storageTypes", toJsonArray(p.getStorageTypes()));
-    policyJson.put("creationFallbacks", toJsonArray(p.getCreationFallbacks()));
-    policyJson.put("replicationFallbacks",
-        toJsonArray(p.getReplicationFallbacks()));
-    policyJson.put("copyOnCreateFile", p.isCopyOnCreateFile());
-    return policyJson;
-  }
-
-  @SuppressWarnings("unchecked")
-  private static JSONArray toJsonArray(StorageType[] storageTypes) {
-    JSONArray jsonArray = new JSONArray();
-    for (StorageType type : storageTypes) {
-      jsonArray.add(type.toString());
-    }
-    return jsonArray;
-  }
-
-  @SuppressWarnings("unchecked")
-  private static JSONObject storagePoliciesToJSON(
-      Collection<? extends BlockStoragePolicySpi> storagePolicies) {
-    JSONObject json = new JSONObject();
-    JSONArray jsonArray = new JSONArray();
-    JSONObject policies = new JSONObject();
-    if (storagePolicies != null) {
-      for (BlockStoragePolicySpi policy : storagePolicies) {
-        JSONObject policyMap = storagePolicyToJSON(policy);
-        jsonArray.add(policyMap);
-      }
-    }
-    policies.put(HttpFSConstants.STORAGE_POLICY_JSON, jsonArray);
-    json.put(HttpFSConstants.STORAGE_POLICIES_JSON, policies);
-    return json;
-  }
-
-  /**
-   * Executor that performs an append FileSystemAccess files system operation.
-   */
-  @InterfaceAudience.Private
-  public static class FSAppend
-      implements FileSystemAccess.FileSystemExecutor<Void> {
-    private InputStream is;
-    private Path path;
-
-    /**
-     * Creates an Append executor.
-     *
-     * @param is input stream to append.
-     * @param path path of the file to append.
-     */
-    public FSAppend(InputStream is, String path) {
-      this.is = is;
-      this.path = new Path(path);
-    }
-
-    /**
-     * Executes the filesystem operation.
-     *
-     * @param fs filesystem instance to use.
-     *
-     * @return void.
-     *
-     * @throws IOException thrown if an IO error occurred.
-     */
-    @Override
-    public Void execute(FileSystem fs) throws IOException {
-      OutputStream os = fs.append(path, bufferSize);
-      long bytes = copyBytes(is, os);
-      HttpFSServerWebApp.get().getMetrics().incrBytesWritten(bytes);
-      return null;
-    }
-
-  }
-
-  /**
-   * Executor that performs a concat FileSystemAccess files system operation.
-   */
-  @InterfaceAudience.Private
-  public static class FSConcat
-      implements FileSystemAccess.FileSystemExecutor<Void> {
-    private Path path;
-    private Path[] sources;
-
-    /**
-     * Creates a Concat executor.
-     *
-     * @param path target path to concat to.
-     * @param sources comma separated absolute paths to use as sources.
-     */
-    public FSConcat(String path, String[] sources) {
-      this.sources = new Path[sources.length];
-
-      for (int i = 0; i < sources.length; i++) {
-        this.sources[i] = new Path(sources[i]);
-      }
-
-      this.path = new Path(path);
-    }
-
-    /**
-     * Executes the filesystem operation.
-     *
-     * @param fs filesystem instance to use.
-     *
-     * @return void.
-     *
-     * @throws IOException thrown if an IO error occurred.
-     */
-    @Override
-    public Void execute(FileSystem fs) throws IOException {
-      fs.concat(path, sources);
-      return null;
-    }
-
-  }
-
-  /**
-   * Executor that performs a truncate FileSystemAccess files system operation.
-   */
-  @InterfaceAudience.Private
-  public static class FSTruncate implements 
-      FileSystemAccess.FileSystemExecutor<JSONObject> {
-    private Path path;
-    private long newLength;
-
-    /**
-     * Creates a Truncate executor.
-     *
-     * @param path target path to truncate to.
-     * @param newLength The size the file is to be truncated to.
-     */
-    public FSTruncate(String path, long newLength) {
-      this.path = new Path(path);
-      this.newLength = newLength;
-    }
-
-    /**
-     * Executes the filesystem operation.
-     *
-     * @param fs filesystem instance to use.
-     *
-     * @return <code>true</code> if the file has been truncated to the desired length,
-     *         <code>false</code> if a background process of adjusting the 
-     *         length of the last block has been started, and clients should 
-     *         wait for it to complete before proceeding with further file 
-     *         updates.
-     *
-     * @throws IOException thrown if an IO error occurred.
-     */
-    @Override
-    public JSONObject execute(FileSystem fs) throws IOException {
-      boolean result = fs.truncate(path, newLength);
-      HttpFSServerWebApp.get().getMetrics().incrOpsTruncate();
-      return toJSON(
-          StringUtils.toLowerCase(HttpFSConstants.TRUNCATE_JSON), result);
-    }
-
-  }
-
-  /**
-   * Executor that performs a content-summary FileSystemAccess files system
-   * operation.
-   */
-  @InterfaceAudience.Private
-  public static class FSContentSummary
-      implements FileSystemAccess.FileSystemExecutor<Map> {
-    private Path path;
-
-    /**
-     * Creates a content-summary executor.
-     *
-     * @param path the path to retrieve the content-summary.
-     */
-    public FSContentSummary(String path) {
-      this.path = new Path(path);
-    }
-
-    /**
-     * Executes the filesystem operation.
-     *
-     * @param fs filesystem instance to use.
-     *
-     * @return a Map object (JSON friendly) with the content-summary.
-     *
-     * @throws IOException thrown if an IO error occurred.
-     */
-    @Override
-    public Map execute(FileSystem fs) throws IOException {
-      ContentSummary contentSummary = fs.getContentSummary(path);
-      return contentSummaryToJSON(contentSummary);
-    }
-
-  }
-
-  /**
-   * Executor that performs a quota-usage FileSystemAccess files system
-   * operation.
-   */
-  @InterfaceAudience.Private
-  public static class FSQuotaUsage
-      implements FileSystemAccess.FileSystemExecutor<Map> {
-    private Path path;
-
-    public FSQuotaUsage(String path) {
-      this.path = new Path(path);
-    }
-
-    @Override
-    public Map execute(FileSystem fs) throws IOException {
-      QuotaUsage quotaUsage = fs.getQuotaUsage(path);
-      return quotaUsageToJSON(quotaUsage);
-    }
-  }
-
-  /**
-   * Executor that performs a create FileSystemAccess files system operation.
-   */
-  @InterfaceAudience.Private
-  public static class FSCreate
-      implements FileSystemAccess.FileSystemExecutor<Void> {
-    private InputStream is;
-    private Path path;
-    private short permission;
-    private short unmaskedPermission;
-    private boolean override;
-    private short replication;
-    private long blockSize;
-
-    /**
-     * Creates a Create executor.
-     *
-     * @param is input stream for the file to create.
-     * @param path path of the file to create.
-     * @param perm permission for the file.
-     * @param override whether the file should be overwritten if it already exists.
-     * @param repl the replication factor for the file.
-     * @param blockSize the block size for the file.
-     * @param unmaskedPerm unmasked permissions for the file
-     */
-    public FSCreate(InputStream is, String path, short perm, boolean override,
-                    short repl, long blockSize, short unmaskedPerm) {
-      this.is = is;
-      this.path = new Path(path);
-      this.permission = perm;
-      this.unmaskedPermission = unmaskedPerm;
-      this.override = override;
-      this.replication = repl;
-      this.blockSize = blockSize;
-    }
-
-    /**
-     * Executes the filesystem operation.
-     *
-     * @param fs filesystem instance to use.
-     *
-     * @return void.
-     *
-     * @throws IOException thrown if an IO error occurred.
-     */
-    @Override
-    public Void execute(FileSystem fs) throws IOException {
-      if (replication == -1) {
-        replication = fs.getDefaultReplication(path);
-      }
-      if (blockSize == -1) {
-        blockSize = fs.getDefaultBlockSize(path);
-      }
-      FsPermission fsPermission = new FsPermission(permission);
-      if (unmaskedPermission != -1) {
-        fsPermission = FsCreateModes.create(fsPermission,
-            new FsPermission(unmaskedPermission));
-      }
-      OutputStream os = fs.create(path,
-          fsPermission,
-          override,
-          bufferSize,
-          replication,
-          blockSize,
-          null);
-      long bytes = copyBytes(is, os);
-      HttpFSServerWebApp.get().getMetrics().incrBytesWritten(bytes);
-      return null;
-    }
-
-  }
-
-  /**
-   * These copyBytes methods combine the two flavors used originally:
-   * one with a length and another with a buffer size.
-   * In this impl, buffer size is determined internally, which is a singleton
-   * normally set during initialization.
-   * @param in the inputStream
-   * @param out the outputStream
-   * @return the totalBytes
-   * @throws IOException the exception to be thrown.
-   */
-  public static long copyBytes(InputStream in, OutputStream out)
-      throws IOException {
-    return copyBytes(in, out, Long.MAX_VALUE);
-  }
-
-  public static long copyBytes(InputStream in, OutputStream out, long count)
-      throws IOException {
-    long totalBytes = 0;
-
-    // If bufferSize is not initialized use 4k. This will not happen
-    // if all callers check and set it.
-    byte[] buf = new byte[bufferSize];
-    long bytesRemaining = count;
-    int bytesRead;
-
-    try {
-      while (bytesRemaining > 0) {
-        int bytesToRead = (int)
-            (bytesRemaining < buf.length ? bytesRemaining : buf.length);
-
-        bytesRead = in.read(buf, 0, bytesToRead);
-        if (bytesRead == -1) {
-          break;
-        }
-
-        out.write(buf, 0, bytesRead);
-        bytesRemaining -= bytesRead;
-        totalBytes += bytesRead;
-      }
-      return totalBytes;
-    } finally {
-      // Originally IOUtils.copyBytes() was called with close=true, so we
-      // implement the same behavior here.
-      try {
-        in.close();
-      } finally {
-        out.close();
-      }
-    }
-  }
-
-  /**
-   * Executor that performs a delete FileSystemAccess files system operation.
-   */
-  @InterfaceAudience.Private
-  public static class FSDelete
-      implements FileSystemAccess.FileSystemExecutor<JSONObject> {
-    private Path path;
-    private boolean recursive;
-
-    /**
-     * Creates a Delete executor.
-     *
-     * @param path path to delete.
-     * @param recursive if the delete should be recursive or not.
-     */
-    public FSDelete(String path, boolean recursive) {
-      this.path = new Path(path);
-      this.recursive = recursive;
-    }
-
-    /**
-     * Executes the filesystem operation.
-     *
-     * @param fs filesystem instance to use.
-     *
-     * @return a JSON object stating whether the delete operation
-     *         was successful.
-     *
-     * @throws IOException thrown if an IO error occurred.
-     */
-    @Override
-    public JSONObject execute(FileSystem fs) throws IOException {
-      boolean deleted = fs.delete(path, recursive);
-      HttpFSServerWebApp.get().getMetrics().incrOpsDelete();
-      return toJSON(
-          StringUtils.toLowerCase(HttpFSConstants.DELETE_JSON), deleted);
-    }
-
-  }
-
-  /**
-   * Executor that performs a file-checksum FileSystemAccess file system
-   * operation.
-   */
-  @InterfaceAudience.Private
-  public static class FSFileChecksum
-      implements FileSystemAccess.FileSystemExecutor<Map> {
-    private Path path;
-
-    /**
-     * Creates a file-checksum executor.
-     *
-     * @param path the path to retrieve the checksum.
-     */
-    public FSFileChecksum(String path) {
-      this.path = new Path(path);
-    }
-
-    /**
-     * Executes the filesystem operation.
-     *
-     * @param fs filesystem instance to use.
-     *
-     * @return a Map object (JSON friendly) with the file checksum.
-     *
-     * @throws IOException thrown if an IO error occurred.
-     */
-    @Override
-    public Map execute(FileSystem fs) throws IOException {
-      FileChecksum checksum = fs.getFileChecksum(path);
-      return fileChecksumToJSON(checksum);
-    }
-
-  }
-
-  /**
-   * Executor that performs a file-status FileSystemAccess file system
-   * operation.
-   */
-  @InterfaceAudience.Private
-  public static class FSFileStatus
-      implements FileSystemAccess.FileSystemExecutor<Map> {
-    private Path path;
-
-    /**
-     * Creates a file-status executor.
-     *
-     * @param path the path to retrieve the status.
-     */
-    public FSFileStatus(String path) {
-      this.path = new Path(path);
-    }
-
-    /**
-     * Executes the filesystem getFileStatus operation and returns the
-     * result in a JSONish Map.
-     *
-     * @param fs filesystem instance to use.
-     *
-     * @return a Map object (JSON friendly) with the file status.
-     *
-     * @throws IOException thrown if an IO error occurred.
-     */
-    @Override
-    public Map execute(FileSystem fs) throws IOException {
-      FileStatus status = fs.getFileStatus(path);
-      HttpFSServerWebApp.get().getMetrics().incrOpsStat();
-      return toJson(status);
-    }
-
-  }
-
-  /**
-   * Executor that performs a home-dir FileSystemAccess file system operation.
-   */
-  @InterfaceAudience.Private
-  public static class FSHomeDir
-      implements FileSystemAccess.FileSystemExecutor<JSONObject> {
-
-    /**
-     * Executes the filesystem operation.
-     *
-     * @param fs filesystem instance to use.
-     *
-     * @return a JSON object with the user home directory.
-     *
-     * @throws IOException thrown if an IO error occurred.
-     */
-    @Override
-    @SuppressWarnings("unchecked")
-    public JSONObject execute(FileSystem fs) throws IOException {
-      Path homeDir = fs.getHomeDirectory();
-      JSONObject json = new JSONObject();
-      json.put(HttpFSConstants.HOME_DIR_JSON, homeDir.toUri().getPath());
-      return json;
-    }
-  }
-
-  /**
-   * Executor that performs a list-status FileSystemAccess file system
-   * operation.
-   */
-  @InterfaceAudience.Private
-  public static class FSListStatus
-      implements FileSystemAccess.FileSystemExecutor<Map>, PathFilter {
-    private Path path;
-    private PathFilter filter;
-
-    /**
-     * Creates a list-status executor.
-     *
-     * @param path the directory/file to retrieve the status of its contents.
-     * @param filter glob filter to use.
-     *
-     * @throws IOException thrown if the filter expression is incorrect.
-     */
-    public FSListStatus(String path, String filter) throws IOException {
-      this.path = new Path(path);
-      this.filter = (filter == null) ? this : new GlobFilter(filter);
-    }
-
-    /**
-     * Returns data for a JSON Map containing the information for
-     * the set of files in 'path' that match 'filter'.
-     *
-     * @param fs filesystem instance to use.
-     *
-     * @return a Map with the file status of the directory
-     *         contents that match the filter
-     *
-     * @throws IOException thrown if an IO error occurred.
-     */
-    @Override
-    public Map execute(FileSystem fs) throws IOException {
-      FileStatus[] fileStatuses = fs.listStatus(path, filter);
-      HttpFSServerWebApp.get().getMetrics().incrOpsListing();
-      return toJson(fileStatuses, fs.getFileStatus(path).isFile());
-    }
-
-    @Override
-    public boolean accept(Path p) {
-      return true;
-    }
-
-  }
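
The filter argument is a glob expression matched against each entry name;
when it is null the executor's own accept() admits everything. A small
sketch of org.apache.hadoop.fs.GlobFilter, assuming hadoop-common on the
classpath:

    import org.apache.hadoop.fs.GlobFilter;
    import org.apache.hadoop.fs.Path;

    public final class GlobFilterDemo {
      public static void main(String[] args) throws Exception {
        GlobFilter txtOnly = new GlobFilter("*.txt");
        // GlobFilter matches the final path component against the pattern.
        System.out.println(txtOnly.accept(new Path("/tmp/a.txt"))); // true
        System.out.println(txtOnly.accept(new Path("/tmp/a.log"))); // false
      }
    }
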
-
-  /**
-   * Executor that performs a batched directory listing.
-   */
-  @InterfaceAudience.Private
-  public static class FSListStatusBatch implements FileSystemAccess
-      .FileSystemExecutor<Map> {
-    private final Path path;
-    private final byte[] token;
-
-    public FSListStatusBatch(String path, byte[] token) throws IOException {
-      this.path = new Path(path);
-      this.token = token.clone();
-    }
-
-    /**
-     * Simple wrapper filesystem that exposes the protected batched
-     * listStatus API so we can use it.
-     */
-    private static class WrappedFileSystem extends FilterFileSystem {
-      WrappedFileSystem(FileSystem f) {
-        super(f);
-      }
-
-      @Override
-      public DirectoryEntries listStatusBatch(Path f, byte[] token) throws
-          FileNotFoundException, IOException {
-        return super.listStatusBatch(f, token);
-      }
-    }
-
-    @Override
-    public Map execute(FileSystem fs) throws IOException {
-      WrappedFileSystem wrappedFS = new WrappedFileSystem(fs);
-      FileSystem.DirectoryEntries entries =
-          wrappedFS.listStatusBatch(path, token);
-      return toJson(entries, wrappedFS.getFileStatus(path).isFile());
-    }
-  }
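
FileSystem.listStatusBatch is protected, which is why the executor routes
the call through a FilterFileSystem subclass that widens its visibility.
The same trick in miniature (all names here are hypothetical):

    // The base class exposes the operation only to subclasses.
    class Base {
      protected String batch(String token) {
        return "entries-after-" + token;
      }
    }

    // The wrapper re-declares the method as public and delegates to super,
    // making it callable from outside the class hierarchy.
    class Wrapper extends Base {
      @Override
      public String batch(String token) {
        return super.batch(token);
      }
    }

    public final class WidenDemo {
      public static void main(String[] args) {
        System.out.println(new Wrapper().batch("t0")); // entries-after-t0
      }
    }
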
-
-  /**
-   * Executor that performs a mkdirs FileSystemAccess file system operation.
-   */
-  @InterfaceAudience.Private
-  public static class FSMkdirs
-      implements FileSystemAccess.FileSystemExecutor<JSONObject> {
-
-    private Path path;
-    private short permission;
-    private short unmaskedPermission;
-
-    /**
-     * Creates a mkdirs executor.
-     *
-     * @param path directory path to create.
-     * @param permission permission to use.
-     * @param unmaskedPermission unmasked permissions for the directory
-     */
-    public FSMkdirs(String path, short permission,
-        short unmaskedPermission) {
-      this.path = new Path(path);
-      this.permission = permission;
-      this.unmaskedPermission = unmaskedPermission;
-    }
-
-    /**
-     * Executes the filesystem operation.
-     *
-     * @param fs filesystem instance to use.
-     *
-     * @return a JSON object stating whether the mkdirs operation
-     *         was successful.
-     *
-     * @throws IOException thrown if an IO error occurred.
-     */
-    @Override
-    public JSONObject execute(FileSystem fs) throws IOException {
-      FsPermission fsPermission = new FsPermission(permission);
-      if (unmaskedPermission != -1) {
-        fsPermission = FsCreateModes.create(fsPermission,
-            new FsPermission(unmaskedPermission));
-      }
-      boolean mkdirs = fs.mkdirs(path, fsPermission);
-      HttpFSServerWebApp.get().getMetrics().incrOpsMkdir();
-      return toJSON(HttpFSConstants.MKDIRS_JSON, mkdirs);
-    }
-
-  }
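
When an unmasked permission is supplied, the executor wraps both modes in
an FsCreateModes instance so the file system can apply its own umask
semantics. A sketch of the combination, assuming hadoop-common is on the
classpath:

    import org.apache.hadoop.fs.permission.FsCreateModes;
    import org.apache.hadoop.fs.permission.FsPermission;

    public final class CreateModesDemo {
      public static void main(String[] args) {
        FsPermission masked = new FsPermission((short) 0755);
        FsPermission unmasked = new FsPermission((short) 0777);
        // The result behaves as the masked permission while also carrying
        // the unmasked one for file systems that honor create modes.
        FsCreateModes both = FsCreateModes.create(masked, unmasked);
        System.out.println(both.toShort() == masked.toShort()); // true
        System.out.println(both.getUnmasked());                 // rwxrwxrwx
      }
    }
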
-
-  /**
-   * Executor that performs an open FileSystemAccess file system operation.
-   */
-  @InterfaceAudience.Private
-  public static class FSOpen
-      implements FileSystemAccess.FileSystemExecutor<InputStream> {
-    private Path path;
-
-    /**
-     * Creates an open executor.
-     *
-     * @param path file to open.
-     */
-    public FSOpen(String path) {
-      this.path = new Path(path);
-    }
-
-    /**
-     * Executes the filesystem operation.
-     *
-     * @param fs filesystem instance to use.
-     *
-     * @return The inputstream of the file.
-     *
-     * @throws IOException thrown if an IO error occurred.
-     */
-    @Override
-    public InputStream execute(FileSystem fs) throws IOException {
-      // Only updating ops count. bytesRead is updated in InputStreamEntity
-      HttpFSServerWebApp.get().getMetrics().incrOpsOpen();
-      return fs.open(path, bufferSize);
-    }
-
-  }
-
-  /**
-   * Executor that performs a rename FileSystemAccess file system operation.
-   */
-  @InterfaceAudience.Private
-  public static class FSRename
-      implements FileSystemAccess.FileSystemExecutor<JSONObject> {
-    private Path path;
-    private Path toPath;
-
-    /**
-     * Creates a rename executor.
-     *
-     * @param path path to rename.
-     * @param toPath new name.
-     */
-    public FSRename(String path, String toPath) {
-      this.path = new Path(path);
-      this.toPath = new Path(toPath);
-    }
-
-    /**
-     * Executes the filesystem operation.
-     *
-     * @param fs filesystem instance to use.
-     *
-     * @return a JSON object stating whether the rename operation
-     *         was successful.
-     *
-     * @throws IOException thrown if an IO error occurred.
-     */
-    @Override
-    public JSONObject execute(FileSystem fs) throws IOException {
-      boolean renamed = fs.rename(path, toPath);
-      HttpFSServerWebApp.get().getMetrics().incrOpsRename();
-      return toJSON(HttpFSConstants.RENAME_JSON, renamed);
-    }
-
-  }
-
-  /**
-   * Executor that performs a set-owner FileSystemAccess file system operation.
-   */
-  @InterfaceAudience.Private
-  public static class FSSetOwner
-      implements FileSystemAccess.FileSystemExecutor<Void> {
-    private Path path;
-    private String owner;
-    private String group;
-
-    /**
-     * Creates a set-owner executor.
-     *
-     * @param path the path to set the owner.
-     * @param owner owner to set.
-     * @param group group to set.
-     */
-    public FSSetOwner(String path, String owner, String group) {
-      this.path = new Path(path);
-      this.owner = owner;
-      this.group = group;
-    }
-
-    /**
-     * Executes the filesystem operation.
-     *
-     * @param fs filesystem instance to use.
-     *
-     * @return void.
-     *
-     * @throws IOException thrown if an IO error occurred.
-     */
-    @Override
-    public Void execute(FileSystem fs) throws IOException {
-      fs.setOwner(path, owner, group);
-      return null;
-    }
-
-  }
-
-  /**
-   * Executor that performs a set-permission FileSystemAccess file system
-   * operation.
-   */
-  @InterfaceAudience.Private
-  public static class FSSetPermission
-      implements FileSystemAccess.FileSystemExecutor<Void> {
-
-    private Path path;
-    private short permission;
-
-    /**
-     * Creates a set-permission executor.
-     *
-     * @param path path to set the permission.
-     * @param permission permission to set.
-     */
-    public FSSetPermission(String path, short permission) {
-      this.path = new Path(path);
-      this.permission = permission;
-    }
-
-    /**
-     * Executes the filesystem operation.
-     *
-     * @param fs filesystem instance to use.
-     *
-     * @return void.
-     *
-     * @throws IOException thrown if an IO error occurred.
-     */
-    @Override
-    public Void execute(FileSystem fs) throws IOException {
-      FsPermission fsPermission = new FsPermission(permission);
-      fs.setPermission(path, fsPermission);
-      return null;
-    }
-
-  }
-
-  /**
-   * Executor that sets the acl for a file in a FileSystem.
-   */
-  @InterfaceAudience.Private
-  public static class FSSetAcl
-      implements FileSystemAccess.FileSystemExecutor<Void> {
-
-    private Path path;
-    private List<AclEntry> aclEntries;
-
-    /**
-     * Creates a set-acl executor.
-     *
-     * @param path path to set the acl.
-     * @param aclSpec acl to set.
-     */
-    public FSSetAcl(String path, String aclSpec) {
-      this.path = new Path(path);
-      this.aclEntries = AclEntry.parseAclSpec(aclSpec, true);
-    }
-
-    /**
-     * Executes the filesystem operation.
-     *
-     * @param fs filesystem instance to use.
-     *
-     * @return void.
-     *
-     * @throws IOException thrown if an IO error occurred.
-     */
-    @Override
-    public Void execute(FileSystem fs) throws IOException {
-      fs.setAcl(path, aclEntries);
-      return null;
-    }
-
-  }
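
The aclSpec string is parsed by the standard Hadoop helper; a quick sketch
of what it yields (hadoop-common assumed, entry values illustrative):

    import java.util.List;
    import org.apache.hadoop.fs.permission.AclEntry;

    public final class AclSpecDemo {
      public static void main(String[] args) {
        // 'true' requires each entry to carry permissions, as when
        // setting (rather than removing) ACL entries.
        List<AclEntry> entries =
            AclEntry.parseAclSpec("user:alice:rwx,group::r-x", true);
        entries.forEach(System.out::println);
        // user:alice:rwx
        // group::r-x
      }
    }
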
-
-  /**
-   * Executor that removes all acls from a file in a FileSystem.
-   */
-  @InterfaceAudience.Private
-  public static class FSRemoveAcl
-      implements FileSystemAccess.FileSystemExecutor<Void> {
-
-    private Path path;
-
-    /**
-     * Creates a remove-acl executor.
-     *
-     * @param path path from which to remove the acl.
-     */
-    public FSRemoveAcl(String path) {
-      this.path = new Path(path);
-    }
-
-    /**
-     * Executes the filesystem operation.
-     *
-     * @param fs filesystem instance to use.
-     *
-     * @return void.
-     *
-     * @throws IOException thrown if an IO error occurred.
-     */
-    @Override
-    public Void execute(FileSystem fs) throws IOException {
-      fs.removeAcl(path);
-      return null;
-    }
-
-  }
-
-  /**
-   * Executor that modifies acl entries for a file in a FileSystem.
-   */
-  @InterfaceAudience.Private
-  public static class FSModifyAclEntries
-      implements FileSystemAccess.FileSystemExecutor<Void> {
-
-    private Path path;
-    private List<AclEntry> aclEntries;
-
-    /**
-     * Creates a modify-acl executor.
-     *
-     * @param path path to set the acl.
-     * @param aclSpec acl to set.
-     */
-    public FSModifyAclEntries(String path, String aclSpec) {
-      this.path = new Path(path);
-      this.aclEntries = AclEntry.parseAclSpec(aclSpec, true);
-    }
-
-    /**
-     * Executes the filesystem operation.
-     *
-     * @param fs filesystem instance to use.
-     *
-     * @return void.
-     *
-     * @throws IOException thrown if an IO error occurred.
-     */
-    @Override
-    public Void execute(FileSystem fs) throws IOException {
-      fs.modifyAclEntries(path, aclEntries);
-      return null;
-    }
-
-  }
-
-  /**
-   * Executor that removes acl entries from a file in a FileSystem.
-   */
-  @InterfaceAudience.Private
-  public static class FSRemoveAclEntries
-      implements FileSystemAccess.FileSystemExecutor<Void> {
-
-    private Path path;
-    private List<AclEntry> aclEntries;
-
-    /**
-     * Creates a remove acl entry executor.
-     *
-     * @param path path from which to remove the acl entries.
-     * @param aclSpec acl parts to remove.
-     */
-    public FSRemoveAclEntries(String path, String aclSpec) {
-      this.path = new Path(path);
-      this.aclEntries = AclEntry.parseAclSpec(aclSpec, false);
-    }
-
-    /**
-     * Executes the filesystem operation.
-     *
-     * @param fs filesystem instance to use.
-     *
-     * @return void.
-     *
-     * @throws IOException thrown if an IO error occurred.
-     */
-    @Override
-    public Void execute(FileSystem fs) throws IOException {
-      fs.removeAclEntries(path, aclEntries);
-      return null;
-    }
-
-  }
-
-  /**
-   * Executor that removes the default acl from a directory in a FileSystem.
-   */
-  @InterfaceAudience.Private
-  public static class FSRemoveDefaultAcl
-      implements FileSystemAccess.FileSystemExecutor<Void> {
-
-    private Path path;
-
-    /**
-     * Creates an executor for removing the default acl.
-     *
-     * @param path path from which to remove the default acl.
-     */
-    public FSRemoveDefaultAcl(String path) {
-      this.path = new Path(path);
-    }
-
-    /**
-     * Executes the filesystem operation.
-     *
-     * @param fs filesystem instance to use.
-     *
-     * @return void.
-     *
-     * @throws IOException thrown if an IO error occurred.
-     */
-    @Override
-    public Void execute(FileSystem fs) throws IOException {
-      fs.removeDefaultAcl(path);
-      return null;
-    }
-
-  }
-
-  /**
-   * Executor that performs a get-trash-root FileSystemAccess
-   * file system operation.
-   */
-  @InterfaceAudience.Private
-  public static class FSTrashRoot
-      implements FileSystemAccess.FileSystemExecutor<JSONObject> {
-    private Path path;
-    public FSTrashRoot(String path) {
-      this.path = new Path(path);
-    }
-
-    @Override
-    @SuppressWarnings("unchecked")
-    public JSONObject execute(FileSystem fs) throws IOException {
-      Path trashRoot = fs.getTrashRoot(this.path);
-      JSONObject json = new JSONObject();
-      json.put(HttpFSConstants.TRASH_DIR_JSON, trashRoot.toUri().getPath());
-      return json;
-    }
-
-  }
-
-  /**
-   * Executor that gets the ACL information for a given file.
-   */
-  @InterfaceAudience.Private
-  public static class FSAclStatus
-      implements FileSystemAccess.FileSystemExecutor<Map> {
-    private Path path;
-
-    /**
-     * Creates an executor for getting the ACLs for a file.
-     *
-     * @param path the path to retrieve the ACLs.
-     */
-    public FSAclStatus(String path) {
-      this.path = new Path(path);
-    }
-
-    /**
-     * Executes the filesystem operation.
-     *
-     * @param fs filesystem instance to use.
-     *
-     * @return a Map object (JSON friendly) with the ACL status.
-     *
-     * @throws IOException thrown if an IO error occurred.
-     */
-    @Override
-    public Map execute(FileSystem fs) throws IOException {
-      AclStatus status = fs.getAclStatus(path);
-      return aclStatusToJSON(status);
-    }
-
-  }
-
-  /**
-   * Executor that performs a set-replication FileSystemAccess file system
-   * operation.
-   */
-  @InterfaceAudience.Private
-  public static class FSSetReplication
-      implements FileSystemAccess.FileSystemExecutor<JSONObject> {
-    private Path path;
-    private short replication;
-
-    /**
-     * Creates a set-replication executor.
-     *
-     * @param path path to set the replication factor.
-     * @param replication replication factor to set.
-     */
-    public FSSetReplication(String path, short replication) {
-      this.path = new Path(path);
-      this.replication = replication;
-    }
-
-    /**
-     * Executes the filesystem operation.
-     *
-     * @param fs filesystem instance to use.
-     *
-     * @return a JSON object stating whether the replication value
-     *         was set.
-     *
-     * @throws IOException thrown if an IO error occurred.
-     */
-    @Override
-    @SuppressWarnings("unchecked")
-    public JSONObject execute(FileSystem fs) throws IOException {
-      boolean ret = fs.setReplication(path, replication);
-      JSONObject json = new JSONObject();
-      json.put(HttpFSConstants.SET_REPLICATION_JSON, ret);
-      return json;
-    }
-
-  }
-
-  /**
-   * Executor that performs a set-times FileSystemAccess file system operation.
-   */
-  @InterfaceAudience.Private
-  public static class FSSetTimes
-      implements FileSystemAccess.FileSystemExecutor<Void> {
-    private Path path;
-    private long mTime;
-    private long aTime;
-
-    /**
-     * Creates a set-times executor.
-     *
-     * @param path path to set the times.
-     * @param mTime modified time to set.
-     * @param aTime access time to set.
-     */
-    public FSSetTimes(String path, long mTime, long aTime) {
-      this.path = new Path(path);
-      this.mTime = mTime;
-      this.aTime = aTime;
-    }
-
-    /**
-     * Executes the filesystem operation.
-     *
-     * @param fs filesystem instance to use.
-     *
-     * @return void.
-     *
-     * @throws IOException thrown if an IO error occurred.
-     */
-    @Override
-    public Void execute(FileSystem fs) throws IOException {
-      fs.setTimes(path, mTime, aTime);
-      return null;
-    }
-
-  }
-
-  /**
-   * Executor that performs a setxattr FileSystemAccess file system operation.
-   */
-  @InterfaceAudience.Private
-  public static class FSSetXAttr implements 
-      FileSystemAccess.FileSystemExecutor<Void> {
-
-    private Path path;
-    private String name;
-    private byte[] value;
-    private EnumSet<XAttrSetFlag> flag;
-
-    public FSSetXAttr(String path, String name, String encodedValue, 
-        EnumSet<XAttrSetFlag> flag) throws IOException {
-      this.path = new Path(path);
-      this.name = name;
-      this.value = XAttrCodec.decodeValue(encodedValue);
-      this.flag = flag;
-    }
-
-    @Override
-    public Void execute(FileSystem fs) throws IOException {
-      fs.setXAttr(path, name, value, flag);
-      return null;
-    }
-  }
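
Incoming xattr values arrive encoded; XAttrCodec accepts quoted text,
hex ("0x...") and base64 ("0s...") forms. A sketch of the round trip,
assuming hadoop-common:

    import java.io.IOException;
    import org.apache.hadoop.fs.XAttrCodec;

    public final class XAttrCodecDemo {
      public static void main(String[] args) throws IOException {
        byte[] text = XAttrCodec.decodeValue("\"hello\"");
        byte[] hex = XAttrCodec.decodeValue("0x68656c6c6f");
        System.out.println(new String(text, "UTF-8")); // hello
        System.out.println(new String(hex, "UTF-8"));  // hello
        System.out.println(XAttrCodec.encodeValue(text, XAttrCodec.HEX));
        // 0x68656c6c6f
      }
    }
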
-
-  /**
-   * Executor that performs a removexattr FileSystemAccess file system
-   * operation.
-   */
-  @InterfaceAudience.Private
-  public static class FSRemoveXAttr implements 
-      FileSystemAccess.FileSystemExecutor<Void> {
-
-    private Path path;
-    private String name;
-
-    public FSRemoveXAttr(String path, String name) {
-      this.path = new Path(path);
-      this.name = name;
-    }
-
-    @Override
-    public Void execute(FileSystem fs) throws IOException {
-      fs.removeXAttr(path, name);
-      return null;
-    }
-  }
-
-  /**
-   * Executor that performs a list-xattrs FileSystemAccess file system
-   * operation.
-   */
-  @SuppressWarnings("rawtypes")
-  @InterfaceAudience.Private
-  public static class FSListXAttrs implements 
-      FileSystemAccess.FileSystemExecutor<Map> {
-    private Path path;
-
-    /**
-     * Creates listing xattrs executor.
-     *
-     * @param path the path to retrieve the xattrs.
-     */
-    public FSListXAttrs(String path) {
-      this.path = new Path(path);
-    }
-
-    /**
-     * Executes the filesystem operation.
-     *
-     * @param fs filesystem instance to use.
-     *
-     * @return Map a map object (JSON friendly) with the xattr names.
-     *
-     * @throws IOException thrown if an IO error occurred.
-     */
-    @Override
-    public Map execute(FileSystem fs) throws IOException {
-      List<String> names = fs.listXAttrs(path);
-      return xAttrNamesToJSON(names);
-    }
-  }
-
-  /**
-   * Executor that performs a get-xattrs FileSystemAccess file system
-   * operation.
-   */
-  @SuppressWarnings("rawtypes")
-  @InterfaceAudience.Private
-  public static class FSGetXAttrs implements 
-      FileSystemAccess.FileSystemExecutor<Map> {
-    private Path path;
-    private List<String> names;
-    private XAttrCodec encoding;
-
-    /**
-     * Creates getting xattrs executor.
-     *
-     * @param path the path to retrieve the xattrs.
-     */
-    public FSGetXAttrs(String path, List<String> names, XAttrCodec encoding) {
-      this.path = new Path(path);
-      this.names = names;
-      this.encoding = encoding;
-    }
-
-    /**
-     * Executes the filesystem operation.
-     *
-     * @param fs filesystem instance to use.
-     *
-     * @return Map a map object (JSON friendly) with the xattrs.
-     *
-     * @throws IOException thrown if an IO error occurred.
-     */
-    @Override
-    public Map execute(FileSystem fs) throws IOException {
-      Map<String, byte[]> xattrs = null;
-      if (names != null && !names.isEmpty()) {
-        xattrs = fs.getXAttrs(path, names);
-      } else {
-        xattrs = fs.getXAttrs(path);
-      }
-      return xAttrsToJSON(xattrs, encoding);
-    }
-  }
-
-  /**
-   * Executor that performs a getAllStoragePolicies FileSystemAccess file
-   * system operation.
-   */
-  @SuppressWarnings({ "unchecked" })
-  @InterfaceAudience.Private
-  public static class FSGetAllStoragePolicies implements
-      FileSystemAccess.FileSystemExecutor<JSONObject> {
-
-    @Override
-    public JSONObject execute(FileSystem fs) throws IOException {
-      Collection<? extends BlockStoragePolicySpi> storagePolicies = fs
-          .getAllStoragePolicies();
-      return storagePoliciesToJSON(storagePolicies);
-    }
-  }
-
-  /**
-   * Executor that performs a getStoragePolicy FileSystemAccess file system
-   * operation.
-   */
-  @SuppressWarnings({ "unchecked" })
-  @InterfaceAudience.Private
-  public static class FSGetStoragePolicy implements
-      FileSystemAccess.FileSystemExecutor<JSONObject> {
-
-    private Path path;
-
-    public FSGetStoragePolicy(String path) {
-      this.path = new Path(path);
-    }
-
-    @Override
-    public JSONObject execute(FileSystem fs) throws IOException {
-      BlockStoragePolicySpi storagePolicy = fs.getStoragePolicy(path);
-      JSONObject json = new JSONObject();
-      json.put(HttpFSConstants.STORAGE_POLICY_JSON,
-          storagePolicyToJSON(storagePolicy));
-      return json;
-    }
-  }
-
-  /**
-   * Executor that performs a setStoragePolicy FileSystemAccess file system
-   * operation.
-   */
-  @InterfaceAudience.Private
-  public static class FSSetStoragePolicy implements
-      FileSystemAccess.FileSystemExecutor<Void> {
-
-    private Path path;
-    private String policyName;
-
-    public FSSetStoragePolicy(String path, String policyName) {
-      this.path = new Path(path);
-      this.policyName = policyName;
-    }
-
-    @Override
-    public Void execute(FileSystem fs) throws IOException {
-      fs.setStoragePolicy(path, policyName);
-      return null;
-    }
-  }
-
-  /**
-   * Executor that performs an unsetStoragePolicy FileSystemAccess file system
-   * operation.
-   */
-  @InterfaceAudience.Private
-  public static class FSUnsetStoragePolicy implements
-      FileSystemAccess.FileSystemExecutor<Void> {
-
-    private Path path;
-
-    public FSUnsetStoragePolicy(String path) {
-      this.path = new Path(path);
-    }
-
-    @Override
-    public Void execute(FileSystem fs) throws IOException {
-      fs.unsetStoragePolicy(path);
-      return null;
-    }
-  }
-
-  /**
-   *  Executor that performs an allowSnapshot operation.
-   */
-  @InterfaceAudience.Private
-  public static class FSAllowSnapshot implements
-      FileSystemAccess.FileSystemExecutor<Void> {
-
-    private Path path;
-
-    /**
-     * Creates an allowSnapshot executor.
-     * @param path directory path to allow snapshot.
-     */
-    public FSAllowSnapshot(String path) {
-      this.path = new Path(path);
-    }
-
-    /**
-     * Executes the filesystem operation.
-     * @param fs filesystem instance to use.
-     * @throws IOException thrown if an IO error occurred.
-     */
-    @Override
-    public Void execute(FileSystem fs) throws IOException {
-      if (fs instanceof DistributedFileSystem) {
-        DistributedFileSystem dfs = (DistributedFileSystem) fs;
-        dfs.allowSnapshot(path);
-      } else {
-        throw new UnsupportedOperationException("allowSnapshot is not "
-            + "supported for HttpFs on " + fs.getClass()
-            + ". Please check your fs.defaultFS configuration");
-      }
-      return null;
-    }
-  }
-
-  /**
-   *  Executor that performs a disallowSnapshot operation.
-   */
-  @InterfaceAudience.Private
-  public static class FSDisallowSnapshot implements
-      FileSystemAccess.FileSystemExecutor<Void> {
-
-    private Path path;
-
-    /**
-     * Creates a disallowSnapshot executor.
-     * @param path directory path to disallow snapshot.
-     */
-    public FSDisallowSnapshot(String path) {
-      this.path = new Path(path);
-    }
-
-    /**
-     * Executes the filesystem operation.
-     * @param fs filesystem instance to use.
-     * @throws IOException thrown if an IO error occurred.
-     */
-    @Override
-    public Void execute(FileSystem fs) throws IOException {
-      if (fs instanceof DistributedFileSystem) {
-        DistributedFileSystem dfs = (DistributedFileSystem) fs;
-        dfs.disallowSnapshot(path);
-      } else {
-        throw new UnsupportedOperationException("disallowSnapshot is not "
-            + "supported for HttpFs on " + fs.getClass()
-            + ". Please check your fs.defaultFS configuration");
-      }
-      return null;
-    }
-  }
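
This executor and FSAllowSnapshot above gate on the concrete FileSystem
type because the operation only exists on DistributedFileSystem. The
pattern in miniature (all names here are hypothetical):

    // Capability gating by concrete type: only DfsLike supports snapshots.
    interface Fs { }

    class DfsLike implements Fs {
      void allowSnapshot(String path) {
        System.out.println("snapshots enabled on " + path);
      }
    }

    public final class GateDemo {
      static void allow(Fs fs, String path) {
        if (fs instanceof DfsLike) {
          ((DfsLike) fs).allowSnapshot(path);
        } else {
          throw new UnsupportedOperationException(
              "allowSnapshot not supported on " + fs.getClass());
        }
      }

      public static void main(String[] args) {
        allow(new DfsLike(), "/dir");      // snapshots enabled on /dir
        try {
          allow(new Fs() { }, "/dir");
        } catch (UnsupportedOperationException e) {
          System.out.println("rejected: " + e.getMessage());
        }
      }
    }
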
-
-  /**
-   *  Executor that performs a createSnapshot FileSystemAccess operation.
-   */
-  @InterfaceAudience.Private
-  public static class FSCreateSnapshot implements
-      FileSystemAccess.FileSystemExecutor<String> {
-
-    private Path path;
-    private String snapshotName;
-
-    /**
-     * Creates a createSnapshot executor.
-     * @param path directory path to be snapshotted.
-     * @param snapshotName the snapshot name.
-     */
-    public FSCreateSnapshot(String path, String snapshotName) {
-      this.path = new Path(path);
-      this.snapshotName = snapshotName;
-    }
-
-    /**
-     * Executes the filesystem operation.
-     * @param fs filesystem instance to use.
-     * @return a JSON string with the complete path of the newly created
-     *         snapshot.
-     * @throws IOException thrown if an IO error occurred.
-     */
-    @Override
-    public String execute(FileSystem fs) throws IOException {
-      Path snapshotPath = fs.createSnapshot(path, snapshotName);
-      JSONObject json = toJSON(HttpFSConstants.HOME_DIR_JSON,
-          snapshotPath.toString());
-      return json.toJSONString().replaceAll("\\\\", "");
-    }
-  }
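
The trailing replaceAll is there because json-simple escapes forward
slashes, which would mangle the returned snapshot path. A sketch of the
behavior (org.json.simple assumed; the key name is illustrative):

    import org.json.simple.JSONObject;

    public final class EscapeDemo {
      @SuppressWarnings("unchecked")
      public static void main(String[] args) {
        JSONObject json = new JSONObject();
        json.put("Path", "/dir/.snapshot/s1");
        String raw = json.toJSONString();
        System.out.println(raw);
        // {"Path":"\/dir\/.snapshot\/s1"}
        System.out.println(raw.replaceAll("\\\\", ""));
        // {"Path":"/dir/.snapshot/s1"}
      }
    }
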
-
-  /**
-   *  Executor that performs a deleteSnapshot FileSystemAccess operation.
-   */
-  @InterfaceAudience.Private
-  public static class FSDeleteSnapshot implements
-      FileSystemAccess.FileSystemExecutor<Void> {
-
-    private Path path;
-    private String snapshotName;
-
-    /**
-     * Creates a deleteSnapshot executor.
-     * @param path path for the snapshot to be deleted.
-     * @param snapshotName snapshot name.
-     */
-    public FSDeleteSnapshot(String path, String snapshotName) {
-      this.path = new Path(path);
-      this.snapshotName = snapshotName;
-    }
-
-    /**
-     * Executes the filesystem operation.
-     * @param fs filesystem instance to use.
-     * @return void
-     * @throws IOException thrown if an IO error occurred.
-     */
-    @Override
-    public Void execute(FileSystem fs) throws IOException {
-      fs.deleteSnapshot(path, snapshotName);
-      return null;
-    }
-  }
-
-  /**
-   *  Executor that performs a renameSnapshot FileSystemAccess operation.
-   */
-  @InterfaceAudience.Private
-  public static class FSRenameSnapshot implements
-      FileSystemAccess.FileSystemExecutor<Void> {
-    private Path path;
-    private String oldSnapshotName;
-    private String snapshotName;
-
-    /**
-     * Creates a renameSnapshot executor.
-     * @param path directory path of the snapshot to be renamed.
-     * @param oldSnapshotName current snapshot name.
-     * @param snapshotName new snapshot name to be set.
-     */
-    public FSRenameSnapshot(String path, String oldSnapshotName,
-                            String snapshotName) {
-      this.path = new Path(path);
-      this.oldSnapshotName = oldSnapshotName;
-      this.snapshotName = snapshotName;
-    }
-
-    /**
-     * Executes the filesystem operation.
-     * @param fs filesystem instance to use.
-     * @return void
-     * @throws IOException thrown if an IO error occurred.
-     */
-    @Override
-    public Void execute(FileSystem fs) throws IOException {
-      fs.renameSnapshot(path, oldSnapshotName, snapshotName);
-      return null;
-    }
-  }
-
-  /**
-   *  Executor that performs a getSnapshotDiff operation.
-   */
-  @InterfaceAudience.Private
-  public static class FSGetSnapshotDiff implements
-      FileSystemAccess.FileSystemExecutor<String> {
-    private Path path;
-    private String oldSnapshotName;
-    private String snapshotName;
-
-    /**
-     * Creates a getSnapshotDiff executor.
-     * @param path directory path of the snapshots to be examined.
-     * @param oldSnapshotName Older snapshot name.
-     * @param snapshotName Newer snapshot name.
-     */
-    public FSGetSnapshotDiff(String path, String oldSnapshotName,
-        String snapshotName) {
-      this.path = new Path(path);
-      this.oldSnapshotName = oldSnapshotName;
-      this.snapshotName = snapshotName;
-    }
-
-    /**
-     * Executes the filesystem operation.
-     * @param fs filesystem instance to use.
-     * @return A serialized JSON string of snapshot diffs.
-     * @throws IOException thrown if an IO error occurred.
-     */
-    @Override
-    public String execute(FileSystem fs) throws IOException {
-      SnapshotDiffReport sdr = null;
-      if (fs instanceof DistributedFileSystem) {
-        DistributedFileSystem dfs = (DistributedFileSystem) fs;
-        sdr = dfs.getSnapshotDiffReport(path, oldSnapshotName, snapshotName);
-      } else {
-        throw new UnsupportedOperationException("getSnapshotDiff is not "
-            + "supported for HttpFs on " + fs.getClass()
-            + ". Please check your fs.defaultFS configuration");
-      }
-      if (sdr != null) {
-        return JsonUtil.toJsonString(sdr);
-      } else {
-        return "";
-      }
-    }
-  }
-
-  /**
-   *  Executor that performs a getSnapshottableDirListing operation.
-   */
-  @InterfaceAudience.Private
-  public static class FSGetSnapshottableDirListing implements
-      FileSystemAccess.FileSystemExecutor<String> {
-
-    /**
-     * Creates a getSnapshottableDirListing executor.
-     */
-    public FSGetSnapshottableDirListing() {
-    }
-
-    /**
-     * Executes the filesystem operation.
-     * @param fs filesystem instance to use.
-     * @return A JSON string of all snapshottable directories.
-     * @throws IOException thrown if an IO error occurred.
-     */
-    @Override
-    public String execute(FileSystem fs) throws IOException {
-      SnapshottableDirectoryStatus[] sds = null;
-      if (fs instanceof DistributedFileSystem) {
-        DistributedFileSystem dfs = (DistributedFileSystem) fs;
-        sds = dfs.getSnapshottableDirListing();
-      } else {
-        throw new UnsupportedOperationException("getSnapshottableDirListing is "
-            + "not supported for HttpFs on " + fs.getClass()
-            + ". Please check your fs.defaultFS configuration");
-      }
-      return JsonUtil.toJsonString(sds);
-    }
-  }
-
-  /**
-   * Executor that performs a getServerDefaults operation.
-   */
-  @InterfaceAudience.Private
-  public static class FSGetServerDefaults
-      implements FileSystemAccess.FileSystemExecutor<String> {
-
-    /**
-     * Creates a getServerDefaults executor.
-     */
-    public FSGetServerDefaults() {
-    }
-
-    /**
-     * Executes the filesystem operation.
-     * @param fs filesystem instance to use.
-     * @return A JSON string.
-     * @throws IOException thrown if an IO error occurred.
-     */
-    @Override
-    public String execute(FileSystem fs) throws IOException {
-      FsServerDefaults sds = null;
-      if (fs instanceof DistributedFileSystem) {
-        DistributedFileSystem dfs = (DistributedFileSystem) fs;
-        sds = dfs.getServerDefaults();
-      } else {
-        throw new UnsupportedOperationException("getServerDefaults is "
-            + "not supported for HttpFs on " + fs.getClass()
-            + ". Please check your fs.defaultFS configuration");
-      }
-      return JsonUtil.toJsonString(sds);
-    }
-  }
-
-  /**
-   * Executor that performs a check access operation.
-   */
-  @InterfaceAudience.Private
-  public static class FSAccess
-      implements FileSystemAccess.FileSystemExecutor<Void> {
-
-    private Path path;
-    private FsAction mode;
-
-    /**
-     * Creates an access executor.
-     */
-    public FSAccess(String path, FsAction mode) {
-      this.path = new Path(path);
-      this.mode = mode;
-    }
-
-    /**
-     * Executes the filesystem operation.
-     * @param fs filesystem instance to use.
-     * @throws IOException thrown if an IO error occurred.
-     */
-    @Override
-    public Void execute(FileSystem fs) throws IOException {
-      if (fs instanceof DistributedFileSystem) {
-        DistributedFileSystem dfs = (DistributedFileSystem) fs;
-        dfs.access(path, mode);
-        HttpFSServerWebApp.get().getMetrics().incrOpsCheckAccess();
-      } else {
-        throw new UnsupportedOperationException("checkaccess is "
-            + "not supported for HttpFs on " + fs.getClass()
-            + ". Please check your fs.defaultFS configuration");
-      }
-      return null;
-    }
-  }
-
-  /**
-   * Executor that performs a setErasureCodingPolicy operation.
-   */
-  @InterfaceAudience.Private
-  public static class FSSetErasureCodingPolicy
-      implements FileSystemAccess.FileSystemExecutor<Void> {
-
-    private Path path;
-    private String policyName;
-
-    public FSSetErasureCodingPolicy(String path, String policyName) {
-      this.path = new Path(path);
-      this.policyName = policyName;
-    }
-
-    @Override
-    public Void execute(FileSystem fs) throws IOException {
-      if (fs instanceof DistributedFileSystem) {
-        DistributedFileSystem dfs = (DistributedFileSystem) fs;
-        dfs.setErasureCodingPolicy(path, policyName);
-      } else {
-        throw new UnsupportedOperationException("setErasureCodingPolicy is "
-            + "not supported for HttpFs on " + fs.getClass()
-            + ". Please check your fs.defaultFS configuration");
-      }
-      return null;
-    }
-  }
-
-  /**
-   * Executor that performs a getErasureCodingPolicy operation.
-   */
-  @InterfaceAudience.Private
-  public static class FSGetErasureCodingPolicy
-      implements FileSystemAccess.FileSystemExecutor<String> {
-
-    private Path path;
-
-    public FSGetErasureCodingPolicy(String path) {
-      this.path = new Path(path);
-    }
-
-    @Override
-    public String execute(FileSystem fs) throws IOException {
-      ErasureCodingPolicy policy = null;
-      if (fs instanceof DistributedFileSystem) {
-        DistributedFileSystem dfs = (DistributedFileSystem) fs;
-        policy = dfs.getErasureCodingPolicy(path);
-      } else {
-        throw new UnsupportedOperationException("getErasureCodingPolicy is "
-            + "not supported for HttpFs on " + fs.getClass()
-            + ". Please check your fs.defaultFS configuration");
-      }
-      return JsonUtil.toJsonString(policy);
-    }
-  }
-
-  /**
-   * Executor that performs an unsetErasureCodingPolicy operation.
-   */
-  @InterfaceAudience.Private
-  public static class FSUnSetErasureCodingPolicy
-      implements FileSystemAccess.FileSystemExecutor<Void> {
-
-    private Path path;
-
-    public FSUnSetErasureCodingPolicy(String path) {
-      this.path = new Path(path);
-    }
-
-    @Override
-    public Void execute(FileSystem fs) throws IOException {
-      if (fs instanceof DistributedFileSystem) {
-        DistributedFileSystem dfs = (DistributedFileSystem) fs;
-        dfs.unsetErasureCodingPolicy(path);
-      } else {
-        throw new UnsupportedOperationException("unsetErasureCodingPolicy is "
-            + "not supported for HttpFs on " + fs.getClass()
-            + ". Please check your fs.defaultFS configuration");
-      }
-      return null;
-    }
-  }
-}
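
All of the classes above share one shape: a single-method executor that
receives an opened FileSystem and returns a JSON-friendly result. A
minimal sketch of the pattern, with a stand-in interface for
FileSystemAccess.FileSystemExecutor (hadoop-common assumed):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class ExecutorPatternDemo {

      // Stand-in for FileSystemAccess.FileSystemExecutor<T>.
      interface FsExecutor<T> {
        T execute(FileSystem fs) throws IOException;
      }

      public static void main(String[] args) throws IOException {
        FsExecutor<Boolean> exists = fs -> fs.exists(new Path("/tmp"));
        try (FileSystem local = FileSystem.getLocal(new Configuration())) {
          System.out.println(exists.execute(local));
        }
      }
    }
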
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/HttpFSAuthenticationFilter.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/HttpFSAuthenticationFilter.java
deleted file mode 100644
index 52bc890f06..0000000000
--- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/HttpFSAuthenticationFilter.java
+++ /dev/null
@@ -1,144 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ozone.fs.http.server;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.annotation.InterfaceAudience;
-import org.apache.ozone.hdfs.web.WebHdfsConstants;
-import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
-import org.apache.hadoop.security.authentication.util.RandomSignerSecretProvider;
-import org.apache.hadoop.security.authentication.util.SignerSecretProvider;
-import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticationFilter;
-import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticationHandler;
-
-import javax.servlet.FilterConfig;
-import javax.servlet.ServletException;
-
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.io.Reader;
-import java.nio.charset.StandardCharsets;
-import java.nio.file.Files;
-import java.nio.file.Paths;
-import java.util.Map;
-import java.util.Properties;
-
-/**
- * Subclass of hadoop-auth <code>AuthenticationFilter</code> that obtains its
- * configuration from HttpFSServer's server configuration.
- */
-@InterfaceAudience.Private
-public class HttpFSAuthenticationFilter
-    extends DelegationTokenAuthenticationFilter {
-
-  static final String CONF_PREFIX = "httpfs.authentication.";
-
-  static final String HADOOP_HTTP_CONF_PREFIX = "hadoop.http.authentication.";
-
-  private static final String SIGNATURE_SECRET_FILE = SIGNATURE_SECRET
-      + ".file";
-
-  /**
-   * Returns the hadoop-auth configuration from HttpFSServer's configuration.
-   * <p>
-   * It returns all HttpFSServer's configuration properties prefixed with
-   * <code>hadoop.http.authentication</code>, with that prefix removed from
-   * the returned property names. Properties prefixed with
-   * <code>httpfs.authentication</code> then override them.
-   *
-   * @param configPrefix parameter not used.
-   * @param filterConfig filter configuration, used to determine whether the
-   *        signer secret provider is random.
-   *
-   * @return hadoop-auth configuration read from HttpFSServer's configuration.
-   */
-  @Override
-  protected Properties getConfiguration(String configPrefix,
-      FilterConfig filterConfig) throws ServletException {
-    Properties props = new Properties();
-    Configuration conf = HttpFSServerWebApp.get().getConfig();
-
-    props.setProperty(AuthenticationFilter.COOKIE_PATH, "/");
-    for (Map.Entry<String, String> entry : conf) {
-      String name = entry.getKey();
-      if (name.startsWith(HADOOP_HTTP_CONF_PREFIX)) {
-        name = name.substring(HADOOP_HTTP_CONF_PREFIX.length());
-        props.setProperty(name, entry.getValue());
-      }
-    }
-
-    // Replace Hadoop Http Authentication Configs with HttpFS specific Configs
-    for (Map.Entry<String, String> entry : conf) {
-      String name = entry.getKey();
-      if (name.startsWith(CONF_PREFIX)) {
-        String value = conf.get(name);
-        name = name.substring(CONF_PREFIX.length());
-        props.setProperty(name, value);
-      }
-    }
-
-    String signatureSecretFile = props.getProperty(SIGNATURE_SECRET_FILE, null);
-    if (signatureSecretFile == null) {
-      throw new RuntimeException("Undefined property: "
-          + SIGNATURE_SECRET_FILE);
-    }
-
-    if (!isRandomSecret(filterConfig)) {
-      try (Reader reader = new InputStreamReader(Files.newInputStream(
-          Paths.get(signatureSecretFile)), StandardCharsets.UTF_8)) {
-        StringBuilder secret = new StringBuilder();
-        int c = reader.read();
-        while (c > -1) {
-          secret.append((char) c);
-          c = reader.read();
-        }
-        props.setProperty(AuthenticationFilter.SIGNATURE_SECRET,
-            secret.toString());
-      } catch (IOException ex) {
-        throw new RuntimeException("Could not read HttpFS signature "
-            + "secret file: " + signatureSecretFile);
-      }
-    }
-    setAuthHandlerClass(props);
-    String dtkind = WebHdfsConstants.WEBHDFS_TOKEN_KIND.toString();
-    if (conf.getBoolean(HttpFSServerWebServer.SSL_ENABLED_KEY, false)) {
-      dtkind = WebHdfsConstants.SWEBHDFS_TOKEN_KIND.toString();
-    }
-    props.setProperty(KerberosDelegationTokenAuthenticationHandler.TOKEN_KIND,
-                      dtkind);
-    return props;
-  }
-
-  protected Configuration getProxyuserConfiguration(FilterConfig filterConfig) {
-    Map<String, String> proxyuserConf = HttpFSServerWebApp.get().getConfig().
-        getValByRegex("httpfs\\.proxyuser\\.");
-    Configuration conf = new Configuration(false);
-    for (Map.Entry<String, String> entry : proxyuserConf.entrySet()) {
-      conf.set(entry.getKey().substring("httpfs.".length()), entry.getValue());
-    }
-    return conf;
-  }
-
-  private boolean isRandomSecret(FilterConfig filterConfig) {
-    SignerSecretProvider secretProvider = (SignerSecretProvider) filterConfig
-        .getServletContext().getAttribute(SIGNER_SECRET_PROVIDER_ATTRIBUTE);
-    if (secretProvider == null) {
-      return false;
-    }
-    return secretProvider.getClass() == RandomSignerSecretProvider.class;
-  }
-}
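
The filter assembles its hadoop-auth properties by scanning the server
Configuration twice: generic hadoop.http.authentication.* keys first, then
httpfs.authentication.* overrides, stripping the prefix each time. The
mechanism in isolation (hadoop-common assumed, key values illustrative):

    import java.util.Map;
    import java.util.Properties;
    import org.apache.hadoop.conf.Configuration;

    public final class PrefixStripDemo {
      public static void main(String[] args) {
        Configuration conf = new Configuration(false);
        conf.set("hadoop.http.authentication.type", "simple");
        conf.set("httpfs.authentication.type", "kerberos");

        Properties props = new Properties();
        for (Map.Entry<String, String> e : conf) {
          if (e.getKey().startsWith("hadoop.http.authentication.")) {
            props.setProperty(
                e.getKey().substring("hadoop.http.authentication.".length()),
                e.getValue());
          }
        }
        // The HttpFS-specific prefix is applied second, so it wins.
        for (Map.Entry<String, String> e : conf) {
          if (e.getKey().startsWith("httpfs.authentication.")) {
            props.setProperty(
                e.getKey().substring("httpfs.authentication.".length()),
                e.getValue());
          }
        }
        System.out.println(props.getProperty("type")); // kerberos
      }
    }
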
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/HttpFSExceptionProvider.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/HttpFSExceptionProvider.java
deleted file mode 100644
index 7da02b14bc..0000000000
--- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/HttpFSExceptionProvider.java
+++ /dev/null
@@ -1,113 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ozone.fs.http.server;
-
-import com.sun.jersey.api.container.ContainerException;
-import org.apache.hadoop.hdds.annotation.InterfaceAudience;
-import org.apache.ozone.lib.service.FileSystemAccessException;
-import org.apache.ozone.lib.wsrs.ExceptionProvider;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.slf4j.MDC;
-
-import javax.ws.rs.core.Response;
-import javax.ws.rs.ext.Provider;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-
-/**
- * JAX-RS <code>ExceptionMapper</code> implementation that maps HttpFSServer's
- * exceptions to HTTP status codes.
- */
-@Provider
-@InterfaceAudience.Private
-public class HttpFSExceptionProvider extends ExceptionProvider {
-  private static final Logger AUDIT_LOG
-      = LoggerFactory.getLogger("httpfsaudit");
-  private static final Logger LOG
-      = LoggerFactory.getLogger(HttpFSExceptionProvider.class);
-
-  /**
-   * Maps different exceptions thrown by HttpFSServer to HTTP status codes.
-   * <ul>
-   * <li>SecurityException : HTTP UNAUTHORIZED</li>
-   * <li>FileNotFoundException : HTTP NOT_FOUND</li>
-   * <li>IOException : HTTP INTERNAL_SERVER_ERROR</li>
-   * <li>UnsupportedOperationException : HTTP BAD_REQUEST</li>
-   * <li>IllegalArgumentException : HTTP BAD_REQUEST</li>
-   * <li>all other exceptions : HTTP INTERNAL_SERVER_ERROR</li>
-   * </ul>
-   *
-   * @param throwable exception thrown.
-   *
-   * @return mapped HTTP status code
-   */
-  @Override
-  public Response toResponse(Throwable throwable) {
-    Response.Status status;
-    if (throwable instanceof FileSystemAccessException) {
-      throwable = throwable.getCause();
-    }
-    if (throwable instanceof ContainerException) {
-      throwable = throwable.getCause();
-    }
-    if (throwable instanceof SecurityException) {
-      status = Response.Status.UNAUTHORIZED;
-    } else if (throwable instanceof FileNotFoundException) {
-      status = Response.Status.NOT_FOUND;
-    } else if (throwable instanceof IOException) {
-      status = Response.Status.INTERNAL_SERVER_ERROR;
-      logErrorFully(status, throwable);
-    } else if (throwable instanceof UnsupportedOperationException) {
-      status = Response.Status.BAD_REQUEST;
-      logErrorFully(status, throwable);
-    } else if (throwable instanceof IllegalArgumentException) {
-      status = Response.Status.BAD_REQUEST;
-      logErrorFully(status, throwable);
-    } else {
-      status = Response.Status.INTERNAL_SERVER_ERROR;
-      logErrorFully(status, throwable);
-    }
-    return createResponse(status, throwable);
-  }
-
-  /**
-   * Logs the HTTP status code and exception in HttpFSServer's log.
-   *
-   * @param status HTTP status code.
-   * @param throwable exception thrown.
-   */
-  @Override
-  protected void log(Response.Status status, Throwable throwable) {
-    String method = MDC.get("method");
-    String path = MDC.get("path");
-    String message = getOneLineMessage(throwable);
-    AUDIT_LOG.warn("FAILED [{}:{}] response [{}] {}",
-        new Object[]{method, path, status, message});
-    LOG.warn("[{}:{}] response [{}] {}",
-        method,
-        path,
-        status,
-        message,
-        throwable);
-  }
-
-  private void logErrorFully(Response.Status status, Throwable throwable) {
-    LOG.debug("Failed with {}", status, throwable);
-  }
-}
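
The mapping above boils down to a type switch from throwable class to HTTP
status, with FileNotFoundException tested before its IOException
superclass. Sketched standalone (the enum stands in for JAX-RS
Response.Status):

    import java.io.FileNotFoundException;
    import java.io.IOException;

    public final class StatusMapDemo {
      enum Status {
        UNAUTHORIZED, NOT_FOUND, BAD_REQUEST, INTERNAL_SERVER_ERROR
      }

      static Status toStatus(Throwable t) {
        if (t instanceof SecurityException) {
          return Status.UNAUTHORIZED;
        } else if (t instanceof FileNotFoundException) {
          // Must precede IOException: FileNotFoundException extends it.
          return Status.NOT_FOUND;
        } else if (t instanceof IOException) {
          return Status.INTERNAL_SERVER_ERROR;
        } else if (t instanceof UnsupportedOperationException
            || t instanceof IllegalArgumentException) {
          return Status.BAD_REQUEST;
        }
        return Status.INTERNAL_SERVER_ERROR;
      }

      public static void main(String[] args) {
        System.out.println(toStatus(new FileNotFoundException())); // NOT_FOUND
        System.out.println(toStatus(new IllegalArgumentException()));
        // BAD_REQUEST
      }
    }
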
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/HttpFSParametersProvider.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/HttpFSParametersProvider.java
deleted file mode 100644
index 421c7f1fa6..0000000000
--- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/HttpFSParametersProvider.java
+++ /dev/null
@@ -1,724 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ozone.fs.http.server;
-
-import org.apache.hadoop.fs.XAttrCodec;
-import org.apache.hadoop.fs.XAttrSetFlag;
-import org.apache.hadoop.hdds.annotation.InterfaceAudience;
-import org.apache.ozone.fs.http.HttpFSConstants;
-import org.apache.ozone.fs.http.HttpFSConstants.Operation;
-import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-import org.apache.ozone.lib.service.FileSystemAccess;
-import org.apache.ozone.lib.wsrs.BooleanParam;
-import org.apache.ozone.lib.wsrs.EnumParam;
-import org.apache.ozone.lib.wsrs.EnumSetParam;
-import org.apache.ozone.lib.wsrs.LongParam;
-import org.apache.ozone.lib.wsrs.Param;
-import org.apache.ozone.lib.wsrs.ParametersProvider;
-import org.apache.ozone.lib.wsrs.ShortParam;
-import org.apache.ozone.lib.wsrs.StringParam;
-import org.apache.hadoop.util.StringUtils;
-
-import javax.ws.rs.ext.Provider;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.regex.Pattern;
-
-/**
- * HttpFS ParametersProvider.
- */
-@Provider
-@InterfaceAudience.Private
-@SuppressWarnings("unchecked")
-public class HttpFSParametersProvider extends ParametersProvider {
-
-  private static final Map<Enum, Class<Param<?>>[]> PARAMS_DEF
-      = new HashMap<Enum, Class<Param<?>>[]>();
-
-  static {
-    PARAMS_DEF.put(Operation.OPEN,
-        new Class[]{OffsetParam.class, LenParam.class, NoRedirectParam.class});
-    PARAMS_DEF.put(Operation.GETFILESTATUS, new Class[]{});
-    PARAMS_DEF.put(Operation.LISTSTATUS, new Class[]{FilterParam.class});
-    PARAMS_DEF.put(Operation.GETHOMEDIRECTORY, new Class[]{});
-    PARAMS_DEF.put(Operation.GETCONTENTSUMMARY, new Class[]{});
-    PARAMS_DEF.put(Operation.GETQUOTAUSAGE, new Class[]{});
-    PARAMS_DEF.put(Operation.GETFILECHECKSUM,
-        new Class[]{NoRedirectParam.class});
-    PARAMS_DEF.put(Operation.GETFILEBLOCKLOCATIONS, new Class[]{});
-    PARAMS_DEF.put(Operation.GETACLSTATUS, new Class[]{});
-    PARAMS_DEF.put(Operation.GETTRASHROOT, new Class[]{});
-    PARAMS_DEF.put(Operation.INSTRUMENTATION, new Class[]{});
-    PARAMS_DEF.put(Operation.APPEND,
-        new Class[]{DataParam.class, NoRedirectParam.class});
-    PARAMS_DEF.put(Operation.CONCAT, new Class[]{SourcesParam.class});
-    PARAMS_DEF.put(Operation.TRUNCATE, new Class[]{NewLengthParam.class});
-    PARAMS_DEF.put(Operation.CREATE,
-        new Class[]{PermissionParam.class, OverwriteParam.class,
-            ReplicationParam.class, BlockSizeParam.class, DataParam.class,
-            UnmaskedPermissionParam.class, NoRedirectParam.class});
-    PARAMS_DEF.put(Operation.MKDIRS, new Class[]{PermissionParam.class,
-        UnmaskedPermissionParam.class});
-    PARAMS_DEF.put(Operation.RENAME, new Class[]{DestinationParam.class});
-    PARAMS_DEF.put(Operation.SETOWNER,
-        new Class[]{OwnerParam.class, GroupParam.class});
-    PARAMS_DEF.put(Operation.SETPERMISSION, new Class[]{PermissionParam.class});
-    PARAMS_DEF.put(Operation.SETREPLICATION,
-        new Class[]{ReplicationParam.class});
-    PARAMS_DEF.put(Operation.SETTIMES,
-        new Class[]{ModifiedTimeParam.class, AccessTimeParam.class});
-    PARAMS_DEF.put(Operation.DELETE, new Class[]{RecursiveParam.class});
-    PARAMS_DEF.put(Operation.SETACL, new Class[]{AclPermissionParam.class});
-    PARAMS_DEF.put(Operation.REMOVEACL, new Class[]{});
-    PARAMS_DEF.put(Operation.MODIFYACLENTRIES,
-        new Class[]{AclPermissionParam.class});
-    PARAMS_DEF.put(Operation.REMOVEACLENTRIES,
-        new Class[]{AclPermissionParam.class});
-    PARAMS_DEF.put(Operation.REMOVEDEFAULTACL, new Class[]{});
-    PARAMS_DEF.put(Operation.SETXATTR,
-        new Class[]{XAttrNameParam.class,
-            XAttrValueParam.class,
-            XAttrSetFlagParam.class});
-    PARAMS_DEF.put(Operation.REMOVEXATTR, new Class[]{XAttrNameParam.class});
-    PARAMS_DEF.put(Operation.GETXATTRS,
-        new Class[]{XAttrNameParam.class, XAttrEncodingParam.class});
-    PARAMS_DEF.put(Operation.LISTXATTRS, new Class[]{});
-    PARAMS_DEF.put(Operation.LISTSTATUS_BATCH,
-        new Class[]{StartAfterParam.class});
-    PARAMS_DEF.put(Operation.GETALLSTORAGEPOLICY, new Class[] {});
-    PARAMS_DEF.put(Operation.GETSTORAGEPOLICY, new Class[] {});
-    PARAMS_DEF.put(Operation.SETSTORAGEPOLICY,
-        new Class[] {PolicyNameParam.class});
-    PARAMS_DEF.put(Operation.UNSETSTORAGEPOLICY, new Class[] {});
-    PARAMS_DEF.put(Operation.ALLOWSNAPSHOT, new Class[] {});
-    PARAMS_DEF.put(Operation.DISALLOWSNAPSHOT, new Class[] {});
-    PARAMS_DEF.put(Operation.CREATESNAPSHOT,
-            new Class[] {SnapshotNameParam.class});
-    PARAMS_DEF.put(Operation.DELETESNAPSHOT,
-            new Class[] {SnapshotNameParam.class});
-    PARAMS_DEF.put(Operation.RENAMESNAPSHOT,
-            new Class[] {OldSnapshotNameParam.class,
-                SnapshotNameParam.class});
-    PARAMS_DEF.put(Operation.GETSNAPSHOTDIFF,
-        new Class[] {OldSnapshotNameParam.class,
-            SnapshotNameParam.class});
-    PARAMS_DEF.put(Operation.GETSNAPSHOTTABLEDIRECTORYLIST, new Class[] {});
-    PARAMS_DEF.put(Operation.GETSERVERDEFAULTS, new Class[] {});
-    PARAMS_DEF.put(Operation.CHECKACCESS, new Class[] {FsActionParam.class});
-    PARAMS_DEF.put(Operation.SETECPOLICY, new Class[] {ECPolicyParam.class});
-    PARAMS_DEF.put(Operation.GETECPOLICY, new Class[] {});
-    PARAMS_DEF.put(Operation.UNSETECPOLICY, new Class[] {});
-    PARAMS_DEF.put(Operation.SATISFYSTORAGEPOLICY, new Class[] {});
-  }
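
PARAMS_DEF drives request validation: for each op value the provider only
accepts the query parameters registered here. Conceptually (plain Java,
parameter names illustrative):

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public final class ParamsDefDemo {
      public static void main(String[] args) {
        Map<String, List<String>> paramsDef = new HashMap<>();
        paramsDef.put("DELETE", Arrays.asList("recursive"));
        paramsDef.put("RENAME", Arrays.asList("destination"));
        // A ?op=DELETE request may carry only the parameters listed here.
        System.out.println(paramsDef.get("DELETE")); // [recursive]
      }
    }
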
-
-  public HttpFSParametersProvider() {
-    super(HttpFSConstants.OP_PARAM, HttpFSConstants.Operation.class,
-          PARAMS_DEF);
-  }
-
-  /**
-   * Class for access-time parameter.
-   */
-  @InterfaceAudience.Private
-  public static class AccessTimeParam extends LongParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSConstants.ACCESS_TIME_PARAM;
-    /**
-     * Constructor.
-     */
-    public AccessTimeParam() {
-      super(NAME, -1L);
-    }
-  }
-
-  /**
-   * Class for block-size parameter.
-   */
-  @InterfaceAudience.Private
-  public static class BlockSizeParam extends LongParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSConstants.BLOCKSIZE_PARAM;
-
-    /**
-     * Constructor.
-     */
-    public BlockSizeParam() {
-      super(NAME, -1L);
-    }
-  }
-
-  /**
-   * Class for data parameter.
-   */
-  @InterfaceAudience.Private
-  public static class DataParam extends BooleanParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = "data";
-
-    /**
-     * Constructor.
-     */
-    public DataParam() {
-      super(NAME, false);
-    }
-  }
-
-  /**
-   * Class for noredirect parameter.
-   */
-  @InterfaceAudience.Private
-  public static class NoRedirectParam extends BooleanParam {
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = "noredirect";
-    /**
-     * Constructor.
-     */
-    public NoRedirectParam() {
-      super(NAME, false);
-    }
-  }
-
-  /**
-   * Class for operation parameter.
-   */
-  @InterfaceAudience.Private
-  public static class OperationParam
-      extends EnumParam<HttpFSConstants.Operation> {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSConstants.OP_PARAM;
-    /**
-     * Constructor.
-     */
-    public OperationParam(String operation) {
-      super(NAME, HttpFSConstants.Operation.class,
-          HttpFSConstants.Operation.valueOf(
-                StringUtils.toUpperCase(operation)));
-    }
-  }
-
-  /**
-   * Class for delete's recursive parameter.
-   */
-  @InterfaceAudience.Private
-  public static class RecursiveParam extends BooleanParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSConstants.RECURSIVE_PARAM;
-
-    /**
-     * Constructor.
-     */
-    public RecursiveParam() {
-      super(NAME, false);
-    }
-  }
-
-  /**
-   * Class for filter parameter.
-   */
-  @InterfaceAudience.Private
-  public static class FilterParam extends StringParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = "filter";
-
-    /**
-     * Constructor.
-     */
-    public FilterParam() {
-      super(NAME, null);
-    }
-
-  }
-
-  /**
-   * Class for group parameter.
-   */
-  @InterfaceAudience.Private
-  public static class GroupParam extends StringParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSConstants.GROUP_PARAM;
-
-    /**
-     * Constructor.
-     */
-    public GroupParam() {
-      super(NAME, null);
-    }
-
-  }
-
-  /**
-   * Class for len parameter.
-   */
-  @InterfaceAudience.Private
-  public static class LenParam extends LongParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = "length";
-
-    /**
-     * Constructor.
-     */
-    public LenParam() {
-      super(NAME, -1L);
-    }
-  }
-
-  /**
-   * Class for modified-time parameter.
-   */
-  @InterfaceAudience.Private
-  public static class ModifiedTimeParam extends LongParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSConstants.MODIFICATION_TIME_PARAM;
-
-    /**
-     * Constructor.
-     */
-    public ModifiedTimeParam() {
-      super(NAME, -1L);
-    }
-  }
-
-  /**
-   * Class for offset parameter.
-   */
-  @InterfaceAudience.Private
-  public static class OffsetParam extends LongParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = "offset";
-
-    /**
-     * Constructor.
-     */
-    public OffsetParam() {
-      super(NAME, 0L);
-    }
-  }
-
-  /**
-   * Class for newlength parameter.
-   */
-  @InterfaceAudience.Private
-  public static class NewLengthParam extends LongParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSConstants.NEW_LENGTH_PARAM;
-
-    /**
-     * Constructor.
-     */
-    public NewLengthParam() {
-      super(NAME, 0L);
-    }
-  }
-
-  /**
-   * Class for overwrite parameter.
-   */
-  @InterfaceAudience.Private
-  public static class OverwriteParam extends BooleanParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSConstants.OVERWRITE_PARAM;
-
-    /**
-     * Constructor.
-     */
-    public OverwriteParam() {
-      super(NAME, true);
-    }
-  }
-
-  /**
-   * Class for owner parameter.
-   */
-  @InterfaceAudience.Private
-  public static class OwnerParam extends StringParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSConstants.OWNER_PARAM;
-
-    /**
-     * Constructor.
-     */
-    public OwnerParam() {
-      super(NAME, null);
-    }
-
-  }
-
-  /**
-   * Class for permission parameter.
-   */
-  @InterfaceAudience.Private
-  public static class PermissionParam extends ShortParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSConstants.PERMISSION_PARAM;
-
-
-    /**
-     * Constructor.
-     */
-    public PermissionParam() {
-      super(NAME, HttpFSConstants.DEFAULT_PERMISSION, 8);
-    }
-
-  }
-
-  /**
-   * Class for unmaskedpermission parameter.
-   */
-  @InterfaceAudience.Private
-  public static class UnmaskedPermissionParam extends ShortParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME =
-        HttpFSConstants.UNMASKED_PERMISSION_PARAM;
-
-
-    /**
-     * Constructor.
-     */
-    public UnmaskedPermissionParam() {
-      super(NAME, (short) -1, 8);
-    }
-
-  }
-
-  /**
-   * Class for AclPermission parameter.
-   */
-  @InterfaceAudience.Private
-  public static class AclPermissionParam extends StringParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSConstants.ACLSPEC_PARAM;
-
-    /**
-     * Constructor.
-     */
-    public AclPermissionParam() {
-      super(NAME,
-          HttpFSConstants.ACLSPEC_DEFAULT,
-          Pattern.compile(HttpFSServerWebApp.get()
-              .get(FileSystemAccess.class)
-              .getFileSystemConfiguration()
-              .get(HdfsClientConfigKeys.DFS_WEBHDFS_ACL_PERMISSION_PATTERN_KEY,
-                  HdfsClientConfigKeys
-                      .DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT)));
-    }
-  }
-
-  /**
-   * Class for replication parameter.
-   */
-  @InterfaceAudience.Private
-  public static class ReplicationParam extends ShortParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSConstants.REPLICATION_PARAM;
-
-    /**
-     * Constructor.
-     */
-    public ReplicationParam() {
-      super(NAME, (short) -1);
-    }
-  }
-
-  /**
-   * Class for concat sources parameter.
-   */
-  @InterfaceAudience.Private
-  public static class SourcesParam extends StringParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSConstants.SOURCES_PARAM;
-
-    /**
-     * Constructor.
-     */
-    public SourcesParam() {
-      super(NAME, null);
-    }
-  }
-
-  /**
-   * Class for to-path parameter.
-   */
-  @InterfaceAudience.Private
-  public static class DestinationParam extends StringParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSConstants.DESTINATION_PARAM;
-
-    /**
-     * Constructor.
-     */
-    public DestinationParam() {
-      super(NAME, null);
-    }
-  }
-  
-  /**
-   * Class for xattr parameter.
-   */
-  @InterfaceAudience.Private
-  public static class XAttrNameParam extends StringParam {
-    public static final String XATTR_NAME_REGEX =
-        "^(user\\.|trusted\\.|system\\.|security\\.).+";
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSConstants.XATTR_NAME_PARAM;
-    private static final Pattern PATTERN = Pattern.compile(XATTR_NAME_REGEX);
-
-    /**
-     * Constructor.
-     */
-    public XAttrNameParam() {
-      super(NAME, null, PATTERN);
-    }
-  }
-
-  /**
-   * Class for xattr parameter.
-   */
-  @InterfaceAudience.Private
-  public static class XAttrValueParam extends StringParam {
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSConstants.XATTR_VALUE_PARAM;
-
-    /**
-     * Constructor.
-     */
-    public XAttrValueParam() {
-      super(NAME, null);
-    }
-  }
-
-  /**
-   * Class for xattr parameter.
-   */
-  @InterfaceAudience.Private
-  public static class XAttrSetFlagParam extends EnumSetParam<XAttrSetFlag> {
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSConstants.XATTR_SET_FLAG_PARAM;
-
-    /**
-     * Constructor.
-     */
-    public XAttrSetFlagParam() {
-      super(NAME, XAttrSetFlag.class, null);
-    }
-  }
-
-  /**
-   * Class for xattr parameter.
-   */
-  @InterfaceAudience.Private
-  public static class XAttrEncodingParam extends EnumParam<XAttrCodec> {
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSConstants.XATTR_ENCODING_PARAM;
-
-    /**
-     * Constructor.
-     */
-    public XAttrEncodingParam() {
-      super(NAME, XAttrCodec.class, null);
-    }
-  }
-
-  /**
-   * Class for startafter parameter.
-   */
-  @InterfaceAudience.Private
-  public static class StartAfterParam extends StringParam {
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSConstants.START_AFTER_PARAM;
-
-    /**
-     * Constructor.
-     */
-    public StartAfterParam() {
-      super(NAME, null);
-    }
-  }
-
-  /**
-   * Class for policyName parameter.
-   */
-  @InterfaceAudience.Private
-  public static class PolicyNameParam extends StringParam {
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSConstants.POLICY_NAME_PARAM;
-
-    /**
-     * Constructor.
-     */
-    public PolicyNameParam() {
-      super(NAME, null);
-    }
-  }
-
-  /**
-   * Class for SnapshotName parameter.
-   */
-  public static class SnapshotNameParam extends StringParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSConstants.SNAPSHOT_NAME_PARAM;
-
-    /**
-     * Constructor.
-     */
-    public SnapshotNameParam() {
-      super(NAME, null);
-    }
-
-  }
-
-  /**
-   * Class for OldSnapshotName parameter.
-   */
-  public static class OldSnapshotNameParam extends StringParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSConstants.OLD_SNAPSHOT_NAME_PARAM;
-
-    /**
-     * Constructor.
-     */
-    public OldSnapshotNameParam() {
-      super(NAME, null);
-    }
-  }
-
-  /**
-   * Class for FsAction parameter.
-   */
-  @InterfaceAudience.Private
-  public static class FsActionParam extends StringParam {
-
-    private static final String FILE_SYSTEM_ACTION = "[r-][w-][x-]";
-    private static final Pattern FSACTION_PATTERN =
-        Pattern.compile(FILE_SYSTEM_ACTION);
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSConstants.FSACTION_MODE_PARAM;
-
-    /**
-     * Constructor.
-     */
-    public FsActionParam() {
-      super(NAME, null);
-    }
-
-    /**
-     * Constructor.
-     * @param str a string representation of the parameter value.
-     */
-    public FsActionParam(final String str) {
-      super(NAME, str, FSACTION_PATTERN);
-    }
-  }
-
-  /**
-   * Class for ecpolicy parameter.
-   */
-  @InterfaceAudience.Private
-  public static class ECPolicyParam extends StringParam {
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSConstants.EC_POLICY_NAME_PARAM;
-
-    /**
-     * Constructor.
-     */
-    public ECPolicyParam() {
-      super(NAME, null);
-    }
-  }
-}
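
Aside on the file above: HttpFSParametersProvider is a static registry that
maps each WebHDFS-style operation to the query-parameter classes it accepts,
while the nested per-parameter classes simply bind a name to a type and a
default value. A minimal, self-contained sketch of that registry pattern,
with illustrative names that are assumptions for the example rather than the
reverted API:

    import java.util.HashMap;
    import java.util.Map;

    public final class ParamsRegistrySketch {
      enum Operation { OPEN, DELETE }

      // Each operation is registered with the parameter types it accepts,
      // mirroring the PARAMS_DEF map in the reverted class.
      private static final Map<Operation, Class<?>[]> PARAMS_DEF =
          new HashMap<>();
      static {
        PARAMS_DEF.put(Operation.OPEN, new Class<?>[]{Long.class, Long.class});
        PARAMS_DEF.put(Operation.DELETE, new Class<?>[]{Boolean.class});
      }

      public static void main(String[] args) {
        // A request handler looks up the expected parameter types for the
        // operation named by the "op" query parameter before parsing them.
        for (Class<?> type : PARAMS_DEF.get(Operation.DELETE)) {
          System.out.println("DELETE accepts " + type.getSimpleName());
        }
      }
    }
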
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/HttpFSReleaseFilter.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/HttpFSReleaseFilter.java
deleted file mode 100644
index 4689090cf9..0000000000
--- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/HttpFSReleaseFilter.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ozone.fs.http.server;
-
-import org.apache.hadoop.hdds.annotation.InterfaceAudience;
-import org.apache.ozone.lib.service.FileSystemAccess;
-import org.apache.ozone.lib.servlet.FileSystemReleaseFilter;
-
-/**
- * Filter that releases FileSystemAccess filesystem instances upon HTTP request
- * completion.
- */
-@InterfaceAudience.Private
-public class HttpFSReleaseFilter extends FileSystemReleaseFilter {
-
-  /**
-   * Returns the {@link FileSystemAccess} service to which the filesystem
-   * instance is returned.
-   *
-   * @return the FileSystemAccess service.
-   */
-  @Override
-  protected FileSystemAccess getFileSystemAccess() {
-    return HttpFSServerWebApp.get().get(FileSystemAccess.class);
-  }
-
-}
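
The filter above plugs into a shared FileSystemReleaseFilter base class:
request handlers hand a per-request filesystem instance to the filter through
a static hook (HttpFSServer calls FileSystemReleaseFilter.setFileSystem
below), and the filter releases the instance when the request completes. A
ThreadLocal is the usual vehicle for such a hand-off; the following
self-contained sketch shows the pattern under assumed names
(ReleaseFilterSketch and its members are illustrative, not the reverted API):

    import java.io.Closeable;
    import java.io.IOException;

    public final class ReleaseFilterSketch {
      // Holds the resource opened while serving the current request.
      private static final ThreadLocal<Closeable> RESOURCE =
          new ThreadLocal<>();

      public static void set(Closeable resource) {
        RESOURCE.set(resource);
      }

      // Stand-in for Filter#doFilter: run the request, then release.
      public static void doFilter(Runnable request) throws IOException {
        try {
          request.run();
        } finally {
          Closeable resource = RESOURCE.get();
          if (resource != null) {
            resource.close();   // return the filesystem instance
            RESOURCE.remove();  // avoid leaking across pooled threads
          }
        }
      }
    }
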
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/HttpFSServer.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/HttpFSServer.java
deleted file mode 100644
index e35ac3660d..0000000000
--- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/HttpFSServer.java
+++ /dev/null
@@ -1,1394 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ozone.fs.http.server;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.XAttrCodec;
-import org.apache.hadoop.fs.XAttrSetFlag;
-import org.apache.ozone.fs.http.HttpFSConstants;
-import org.apache.ozone.fs.http.server.HttpFSParametersProvider.AccessTimeParam;
-import org.apache.ozone.fs.http.server.HttpFSParametersProvider.AclPermissionParam;
-import org.apache.ozone.fs.http.server.HttpFSParametersProvider.BlockSizeParam;
-import org.apache.ozone.fs.http.server.HttpFSParametersProvider.DataParam;
-import org.apache.ozone.fs.http.server.HttpFSParametersProvider.DestinationParam;
-import org.apache.ozone.fs.http.server.HttpFSParametersProvider.ECPolicyParam;
-import org.apache.ozone.fs.http.server.HttpFSParametersProvider.FilterParam;
-import org.apache.ozone.fs.http.server.HttpFSParametersProvider.FsActionParam;
-import org.apache.ozone.fs.http.server.HttpFSParametersProvider.GroupParam;
-import org.apache.ozone.fs.http.server.HttpFSParametersProvider.LenParam;
-import org.apache.ozone.fs.http.server.HttpFSParametersProvider.ModifiedTimeParam;
-import org.apache.ozone.fs.http.server.HttpFSParametersProvider.NewLengthParam;
-import org.apache.ozone.fs.http.server.HttpFSParametersProvider.NoRedirectParam;
-import org.apache.ozone.fs.http.server.HttpFSParametersProvider.OffsetParam;
-import org.apache.ozone.fs.http.server.HttpFSParametersProvider.OldSnapshotNameParam;
-import org.apache.ozone.fs.http.server.HttpFSParametersProvider.OperationParam;
-import org.apache.ozone.fs.http.server.HttpFSParametersProvider.OverwriteParam;
-import org.apache.ozone.fs.http.server.HttpFSParametersProvider.OwnerParam;
-import org.apache.ozone.fs.http.server.HttpFSParametersProvider.PermissionParam;
-import org.apache.ozone.fs.http.server.HttpFSParametersProvider.PolicyNameParam;
-import org.apache.ozone.fs.http.server.HttpFSParametersProvider.RecursiveParam;
-import org.apache.ozone.fs.http.server.HttpFSParametersProvider.ReplicationParam;
-import org.apache.ozone.fs.http.server.HttpFSParametersProvider.SourcesParam;
-import org.apache.ozone.fs.http.server.HttpFSParametersProvider.UnmaskedPermissionParam;
-import org.apache.ozone.fs.http.server.HttpFSParametersProvider.SnapshotNameParam;
-import org.apache.ozone.fs.http.server.HttpFSParametersProvider.XAttrEncodingParam;
-import org.apache.ozone.fs.http.server.HttpFSParametersProvider.XAttrNameParam;
-import org.apache.ozone.fs.http.server.HttpFSParametersProvider.XAttrSetFlagParam;
-import org.apache.ozone.fs.http.server.HttpFSParametersProvider.XAttrValueParam;
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.hdds.annotation.InterfaceAudience;
-import org.apache.hadoop.http.JettyUtils;
-import org.apache.ozone.lib.service.FileSystemAccess;
-import org.apache.ozone.lib.service.FileSystemAccessException;
-import org.apache.ozone.lib.service.Groups;
-import org.apache.ozone.lib.service.Instrumentation;
-import org.apache.ozone.lib.servlet.FileSystemReleaseFilter;
-import org.apache.ozone.lib.wsrs.InputStreamEntity;
-import org.apache.ozone.lib.wsrs.Parameters;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.delegation.web.HttpUserGroupInformation;
-import org.json.simple.JSONObject;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.slf4j.MDC;
-
-import javax.servlet.http.HttpServletRequest;
-import javax.ws.rs.Consumes;
-import javax.ws.rs.DELETE;
-import javax.ws.rs.GET;
-import javax.ws.rs.POST;
-import javax.ws.rs.PUT;
-import javax.ws.rs.Path;
-import javax.ws.rs.PathParam;
-import javax.ws.rs.Produces;
-import javax.ws.rs.QueryParam;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.MediaType;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.UriBuilder;
-import javax.ws.rs.core.UriInfo;
-import java.io.IOException;
-import java.io.InputStream;
-import java.net.URI;
-import java.nio.charset.StandardCharsets;
-import java.security.AccessControlException;
-import java.security.PrivilegedExceptionAction;
-import java.text.MessageFormat;
-import java.util.EnumSet;
-import java.util.List;
-import java.util.Map;
-
-/**
- * Main class of HttpFSServer server.
- * <p>
- * The <code>HttpFSServer</code> class uses Jersey JAX-RS to bind HTTP
- * requests to the different operations.
- */
-@Path(HttpFSConstants.SERVICE_VERSION)
-@InterfaceAudience.Private
-public class HttpFSServer {
-
-  enum AccessMode {
-    READWRITE, WRITEONLY, READONLY;
-  }
-  private static final Logger AUDIT_LOG
-      = LoggerFactory.getLogger("httpfsaudit");
-  private static final Logger LOG = LoggerFactory.getLogger(HttpFSServer.class);
-  private AccessMode accessMode = AccessMode.READWRITE;
-
-  public HttpFSServer() {
-    Configuration conf = HttpFSServerWebApp.get().getConfig();
-    final String accessModeString
-        = conf.get("httpfs.access.mode", "read-write")
-        .toLowerCase();
-    if (accessModeString.compareTo("write-only") == 0) {
-      accessMode = AccessMode.WRITEONLY;
-    } else if (accessModeString.compareTo("read-only") == 0) {
-      accessMode = AccessMode.READONLY;
-    } else {
-      accessMode = AccessMode.READWRITE;
-    }
-  }
-
-
-  // First try getting a user through HttpUserGroupInformation. This returns
-  // null if the built-in hadoop auth filter is not used.  Fall back to getting
-  // the authenticated user from the request.
-  private UserGroupInformation getHttpUGI(HttpServletRequest request) {
-    UserGroupInformation user = HttpUserGroupInformation.get();
-    if (user != null) {
-      return user;
-    }
-
-    return UserGroupInformation
-        .createRemoteUser(request.getUserPrincipal().getName());
-  }
-
-
-  /**
-   * Executes a {@link FileSystemAccess.FileSystemExecutor} using a filesystem
-   * for the effective user.
-   *
-   * @param ugi user making the request.
-   * @param executor FileSystemExecutor to execute.
-   *
-   * @return FileSystemExecutor response
-   *
-   * @throws IOException thrown if an IO error occurs.
-   * @throws FileSystemAccessException thrown if a FileSystemAccess related
-   * error occurred. Thrown exceptions are handled by
-   * {@link HttpFSExceptionProvider}.
-   */
-  private <T> T fsExecute(UserGroupInformation ugi,
-                          FileSystemAccess.FileSystemExecutor<T> executor)
-      throws IOException, FileSystemAccessException {
-    FileSystemAccess fsAccess
-        = HttpFSServerWebApp.get().get(FileSystemAccess.class);
-    Configuration conf = HttpFSServerWebApp.get().get(FileSystemAccess.class)
-        .getFileSystemConfiguration();
-    return fsAccess.execute(ugi.getShortUserName(), conf, executor);
-  }
-
-  /**
-   * Returns a filesystem instance. The filesystem instance is wired for release
-   * at the completion of the current Servlet request via the
-   * {@link FileSystemReleaseFilter}.
-   * <p>
-   * If a do-as user is specified, the current user must be a valid proxyuser,
-   * otherwise an <code>AccessControlException</code> will be thrown.
-   *
-   * @param ugi principal for whom the filesystem instance is.
-   *
-   * @return a filesystem for the specified user or do-as user.
-   *
-   * @throws IOException thrown if an IO error occurred. Thrown exceptions are
-   * handled by {@link HttpFSExceptionProvider}.
-   * @throws FileSystemAccessException thrown if a FileSystemAccess related
-   * error occurred. Thrown exceptions are handled by
-   * {@link HttpFSExceptionProvider}.
-   */
-  private FileSystem createFileSystem(UserGroupInformation ugi)
-      throws IOException, FileSystemAccessException {
-    String hadoopUser = ugi.getShortUserName();
-    FileSystemAccess fsAccess = HttpFSServerWebApp.get()
-        .get(FileSystemAccess.class);
-    Configuration conf = HttpFSServerWebApp.get().get(FileSystemAccess.class)
-        .getFileSystemConfiguration();
-    FileSystem fs = fsAccess.createFileSystem(hadoopUser, conf);
-    FileSystemReleaseFilter.setFileSystem(fs);
-    return fs;
-  }
-
-  private void enforceRootPath(HttpFSConstants.Operation op, String path) {
-    if (!path.equals("/")) {
-      throw new UnsupportedOperationException(
-        MessageFormat.format("Operation [{0}], invalid path [{1}], must be '/'",
-                             op, path));
-    }
-  }
-
-  /**
-   * Special binding for '/' as it is not handled by the wildcard binding.
-   *
-   * @param uriInfo uri info of the request.
-   * @param op the HttpFS operation of the request.
-   * @param params the HttpFS parameters of the request.
-   *
-   * @return the request response.
-   *
-   * @throws IOException thrown if an IO error occurred. Thrown exceptions are
-   * handled by {@link HttpFSExceptionProvider}.
-   * @throws FileSystemAccessException thrown if a FileSystemAccess related
-   * error occurred. Thrown exceptions are handled by
-   * {@link HttpFSExceptionProvider}.
-   */
-  @GET
-  @Produces(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8)
-  public Response getRoot(@Context UriInfo uriInfo,
-                          @QueryParam(OperationParam.NAME) OperationParam op,
-                          @Context Parameters params,
-                          @Context HttpServletRequest request)
-      throws IOException, FileSystemAccessException {
-    return get("", uriInfo, op, params, request);
-  }
-
-  private String makeAbsolute(String path) {
-    return "/" + ((path != null) ? path : "");
-  }
-
-  /**
-   * Binding to handle GET requests for the supported operations.
-   *
-   * @param path the path for operation.
-   * @param uriInfo uri info of the request.
-   * @param op the HttpFS operation of the request.
-   * @param params the HttpFS parameters of the request.
-   *
-   * @return the request response.
-   *
-   * @throws IOException thrown if an IO error occurred. Thrown exceptions are
-   * handled by {@link HttpFSExceptionProvider}.
-   * @throws FileSystemAccessException thrown if a FileSystemAccess related
-   * error occurred. Thrown exceptions are handled by
-   * {@link HttpFSExceptionProvider}.
-   */
-  @GET
-  @Path("{path:.*}")
-  @Produces({MediaType.APPLICATION_OCTET_STREAM + "; " + JettyUtils.UTF_8,
-      MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8})
-  public Response get(@PathParam("path") String path,
-                      @Context UriInfo uriInfo,
-                      @QueryParam(OperationParam.NAME) OperationParam op,
-                      @Context Parameters params,
-                      @Context HttpServletRequest request)
-      throws IOException, FileSystemAccessException,
-      UnsupportedOperationException {
-    // Restrict access to only GETFILESTATUS and LISTSTATUS in write-only mode
-    if ((op.value() != HttpFSConstants.Operation.GETFILESTATUS) &&
-            (op.value() != HttpFSConstants.Operation.LISTSTATUS) &&
-            accessMode == AccessMode.WRITEONLY) {
-      return Response.status(Response.Status.FORBIDDEN).build();
-    }
-    UserGroupInformation user = HttpUserGroupInformation.get();
-    Response response;
-    path = makeAbsolute(path);
-    MDC.put(HttpFSConstants.OP_PARAM, op.value().name());
-    MDC.put("hostname", request.getRemoteAddr());
-    switch (op.value()) {
-    case OPEN:
-      response = handleOpen(path, uriInfo, params, user);
-      break;
-    case GETFILESTATUS:
-      response = handleGetFileStatus(path, user);
-      break;
-    case LISTSTATUS:
-      response = handleListStatus(path, params, user);
-      break;
-    case GETHOMEDIRECTORY:
-      response = handleGetHomeDir(path, op, user);
-      break;
-    case INSTRUMENTATION:
-      response = handleInstrumentation(path, op, user);
-      break;
-    case GETCONTENTSUMMARY:
-      response = handleGetContentSummary(path, user);
-      break;
-    case GETQUOTAUSAGE:
-      response = handleGetQuotaUsage(path, user);
-      break;
-    case GETFILECHECKSUM:
-      throw new UnsupportedOperationException(getClass().getSimpleName()
-          + " doesn't support GETFILECHECKSUM");
-      // response = handleGetFileCheckSum(path, uriInfo, params, user);
-      // break;
-    case GETFILEBLOCKLOCATIONS:
-      response = Response.status(Response.Status.BAD_REQUEST).build();
-      break;
-    case GETACLSTATUS:
-      response = handleGetACLStatus(path, user);
-      break;
-    case GETXATTRS:
-      response = handleGetXAttrs(path, params, user);
-      break;
-    case LISTXATTRS:
-      response = handleListXAttrs(path, user);
-      break;
-    case LISTSTATUS_BATCH:
-      throw new UnsupportedOperationException(getClass().getSimpleName()
-          + " doesn't support LISTSTATUS_BATCH");
-      //response = handleListStatusBatch(path, params, user);
-      //break;
-    case GETTRASHROOT:
-      response = handleGetTrashRoot(path, user);
-      break;
-    case GETALLSTORAGEPOLICY:
-      response = handleGetAllStoragePolicy(path, user);
-      break;
-    case GETSTORAGEPOLICY:
-      response = handleGetStoragePolicy(path, user);
-      break;
-    case GETSNAPSHOTDIFF:
-      response = handleGetSnapshotDiff(path, params, user);
-      break;
-    case GETSNAPSHOTTABLEDIRECTORYLIST:
-      response = handleGetSnapshottableDirectoryList(user);
-      break;
-    case GETSERVERDEFAULTS:
-      response = handleGetServerDefaults(user);
-      break;
-    case CHECKACCESS:
-      response = handleCheckAccess(path, params, user);
-      break;
-    case GETECPOLICY:
-      response = handleGetECPolicy(path, user);
-      break;
-    default:
-      throw new IOException(
-          MessageFormat.format("Invalid HTTP GET operation [{0}]", op.value()));
-    }
-    return response;
-  }
-
-  private Response handleGetECPolicy(String path, UserGroupInformation user)
-      throws IOException, FileSystemAccessException {
-    Response response;
-    FSOperations.FSGetErasureCodingPolicy command =
-        new FSOperations.FSGetErasureCodingPolicy(path);
-    String js = fsExecute(user, command);
-    AUDIT_LOG.info("[{}]", path);
-    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
-    return response;
-  }
-
-  private Response handleCheckAccess(String path,
-                                     Parameters params,
-                                     UserGroupInformation user)
-      throws IOException, FileSystemAccessException {
-    Response response;
-    String mode = params.get(FsActionParam.NAME, FsActionParam.class);
-    FsActionParam fsparam = new FsActionParam(mode);
-    FSOperations.FSAccess command = new FSOperations.FSAccess(path,
-        FsAction.getFsAction(fsparam.value()));
-    fsExecute(user, command);
-    AUDIT_LOG.info("[{}]", "/");
-    response = Response.ok().build();
-    return response;
-  }
-
-  private Response handleGetServerDefaults(UserGroupInformation user)
-      throws IOException, FileSystemAccessException {
-    Response response;
-    FSOperations.FSGetServerDefaults command =
-        new FSOperations.FSGetServerDefaults();
-    String js = fsExecute(user, command);
-    AUDIT_LOG.info("[{}]", "/");
-    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
-    return response;
-  }
-
-  private Response handleGetSnapshottableDirectoryList(UserGroupInformation user)
-      throws IOException, FileSystemAccessException {
-    Response response;
-    FSOperations.FSGetSnapshottableDirListing command =
-        new FSOperations.FSGetSnapshottableDirListing();
-    String js = fsExecute(user, command);
-    AUDIT_LOG.info("[{}]", "/");
-    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
-    return response;
-  }
-
-  private Response handleGetSnapshotDiff(String path,
-                                         Parameters params,
-                                         UserGroupInformation user)
-      throws IOException, FileSystemAccessException {
-    Response response;
-    String oldSnapshotName = params.get(OldSnapshotNameParam.NAME,
-        OldSnapshotNameParam.class);
-    String snapshotName = params.get(SnapshotNameParam.NAME,
-        SnapshotNameParam.class);
-    FSOperations.FSGetSnapshotDiff command =
-        new FSOperations.FSGetSnapshotDiff(path, oldSnapshotName,
-            snapshotName);
-    String js = fsExecute(user, command);
-    AUDIT_LOG.info("[{}]", path);
-    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
-    return response;
-  }
-
-  private Response handleGetStoragePolicy(String path,
-                                          UserGroupInformation user)
-      throws IOException, FileSystemAccessException {
-    Response response;
-    FSOperations.FSGetStoragePolicy command =
-        new FSOperations.FSGetStoragePolicy(path);
-    JSONObject json = fsExecute(user, command);
-    AUDIT_LOG.info("[{}]", path);
-    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
-    return response;
-  }
-
-  private Response handleGetAllStoragePolicy(String path,
-                                             UserGroupInformation user)
-      throws IOException, FileSystemAccessException {
-    Response response;
-    FSOperations.FSGetAllStoragePolicies command =
-        new FSOperations.FSGetAllStoragePolicies();
-    JSONObject json = fsExecute(user, command);
-    AUDIT_LOG.info("[{}]", path);
-    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
-    return response;
-  }
-
-  private Response handleGetTrashRoot(String path, UserGroupInformation user)
-      throws IOException, FileSystemAccessException {
-    Response response;
-    FSOperations.FSTrashRoot command = new FSOperations.FSTrashRoot(path);
-    JSONObject json = fsExecute(user, command);
-    AUDIT_LOG.info("[{}]", path);
-    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
-    return response;
-  }
-
-  private Response handleListStatusBatch(String path,
-                                         Parameters params,
-                                         UserGroupInformation user)
-      throws IOException, FileSystemAccessException {
-    Response response;
-    String startAfter = params.get(
-        HttpFSParametersProvider.StartAfterParam.NAME,
-        HttpFSParametersProvider.StartAfterParam.class);
-    byte[] token = HttpFSConstants.EMPTY_BYTES;
-    if (startAfter != null) {
-      token = startAfter.getBytes(StandardCharsets.UTF_8);
-    }
-    FSOperations.FSListStatusBatch command = new FSOperations
-        .FSListStatusBatch(path, token);
-    @SuppressWarnings("rawtypes") Map json = fsExecute(user, command);
-    AUDIT_LOG.info("[{}] token [{}]", path, token);
-    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
-    return response;
-  }
-
-  private Response handleListXAttrs(String path, UserGroupInformation user)
-      throws IOException, FileSystemAccessException {
-    Response response;
-    FSOperations.FSListXAttrs command = new FSOperations.FSListXAttrs(path);
-    @SuppressWarnings("rawtypes") Map json = fsExecute(user, command);
-    AUDIT_LOG.info("XAttr names for [{}]", path);
-    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
-    return response;
-  }
-
-  private Response handleGetXAttrs(String path,
-                                   Parameters params,
-                                   UserGroupInformation user)
-      throws IOException, FileSystemAccessException {
-    Response response;
-    List<String> xattrNames =
-        params.getValues(XAttrNameParam.NAME, XAttrNameParam.class);
-    XAttrCodec encoding =
-        params.get(XAttrEncodingParam.NAME, XAttrEncodingParam.class);
-    FSOperations.FSGetXAttrs command =
-        new FSOperations.FSGetXAttrs(path, xattrNames, encoding);
-    @SuppressWarnings("rawtypes") Map json = fsExecute(user, command);
-    AUDIT_LOG.info("XAttrs for [{}]", path);
-    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
-    return response;
-  }
-
-  private Response handleGetACLStatus(String path, UserGroupInformation user)
-      throws IOException, FileSystemAccessException {
-    Response response;
-    FSOperations.FSAclStatus command = new FSOperations.FSAclStatus(path);
-    Map json = fsExecute(user, command);
-    AUDIT_LOG.info("ACL status for [{}]", path);
-    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
-    return response;
-  }
-
-  private Response handleGetFileCheckSum(String path,
-                                         UriInfo uriInfo,
-                                         Parameters params,
-                                         UserGroupInformation user)
-      throws IOException, FileSystemAccessException {
-    Response response;
-    FSOperations.FSFileChecksum command =
-        new FSOperations.FSFileChecksum(path);
-
-    Boolean noRedirect = params.get(
-        NoRedirectParam.NAME, NoRedirectParam.class);
-    AUDIT_LOG.info("[{}]", path);
-    if (noRedirect) {
-      URI redirectURL = createOpenRedirectionURL(uriInfo);
-      final String js = JsonUtil.toJsonString("Location", redirectURL);
-      response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
-    } else {
-      Map json = fsExecute(user, command);
-      response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
-    }
-    return response;
-  }
-
-  private Response handleGetQuotaUsage(String path, UserGroupInformation user)
-      throws IOException, FileSystemAccessException {
-    Response response;
-    FSOperations.FSQuotaUsage command =
-        new FSOperations.FSQuotaUsage(path);
-    Map json = fsExecute(user, command);
-    AUDIT_LOG.info("Quota Usage for [{}]", path);
-    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
-    return response;
-  }
-
-  private Response handleGetContentSummary(String path,
-                                           UserGroupInformation user)
-      throws IOException, FileSystemAccessException {
-    Response response;
-    FSOperations.FSContentSummary command =
-        new FSOperations.FSContentSummary(path);
-    Map json = fsExecute(user, command);
-    AUDIT_LOG.info("Content summary for [{}]", path);
-    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
-    return response;
-  }
-
-  private Response handleInstrumentation(String path,
-                                         OperationParam op,
-                                         UserGroupInformation user)
-      throws IOException {
-    Response response;
-    enforceRootPath(op.value(), path);
-    Groups groups = HttpFSServerWebApp.get().get(Groups.class);
-    List<String> userGroups = groups.getGroups(user.getShortUserName());
-    if (!userGroups.contains(HttpFSServerWebApp.get().getAdminGroup())) {
-      throw new AccessControlException(
-          "User not in HttpFSServer admin group");
-    }
-    Instrumentation instrumentation =
-        HttpFSServerWebApp.get().get(Instrumentation.class);
-    Map snapshot = instrumentation.getSnapshot();
-    response = Response.ok(snapshot).build();
-    return response;
-  }
-
-  private Response handleGetHomeDir(String path,
-                                    OperationParam op,
-                                    UserGroupInformation user)
-      throws IOException, FileSystemAccessException {
-    Response response;
-    enforceRootPath(op.value(), path);
-    FSOperations.FSHomeDir command = new FSOperations.FSHomeDir();
-    JSONObject json = fsExecute(user, command);
-    AUDIT_LOG.info("Home Directory for [{}]", user);
-    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
-    return response;
-  }
-
-  private Response handleListStatus(String path,
-                                    Parameters params,
-                                    UserGroupInformation user)
-      throws IOException, FileSystemAccessException {
-    Response response;
-    String filter = params.get(FilterParam.NAME, FilterParam.class);
-    FSOperations.FSListStatus command =
-        new FSOperations.FSListStatus(path, filter);
-    Map json = fsExecute(user, command);
-    AUDIT_LOG.info("[{}] filter [{}]", path, (filter != null) ? filter : "-");
-    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
-    return response;
-  }
-
-  private Response handleGetFileStatus(String path, UserGroupInformation user)
-      throws IOException, FileSystemAccessException {
-    Response response;
-    FSOperations.FSFileStatus command = new FSOperations.FSFileStatus(path);
-    Map json = fsExecute(user, command);
-    AUDIT_LOG.info("[{}]", path);
-    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
-    return response;
-  }
-
-  private Response handleOpen(String path,
-                              UriInfo uriInfo,
-                              Parameters params,
-                              UserGroupInformation user)
-      throws IOException, FileSystemAccessException {
-    Response response;
-    Boolean noRedirect = params
-        .get(NoRedirectParam.NAME, NoRedirectParam.class);
-    if (noRedirect) {
-      URI redirectURL = createOpenRedirectionURL(uriInfo);
-      final String js = JsonUtil.toJsonString("Location", redirectURL);
-      response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
-    } else {
-      //Invoking the command directly using an unmanaged FileSystem that is
-      // released by the FileSystemReleaseFilter
-      final FSOperations.FSOpen command = new FSOperations.FSOpen(path);
-      final FileSystem fs = createFileSystem(user);
-      InputStream is = null;
-      UserGroupInformation ugi = UserGroupInformation
-          .createProxyUser(user.getShortUserName(),
-              UserGroupInformation.getLoginUser());
-      try {
-        is = ugi.doAs(new PrivilegedExceptionAction<InputStream>() {
-          @Override
-          public InputStream run() throws Exception {
-            return command.execute(fs);
-          }
-        });
-      } catch (InterruptedException ie) {
-        LOG.warn("Open interrupted.", ie);
-        Thread.currentThread().interrupt();
-      }
-      Long offset = params.get(OffsetParam.NAME, OffsetParam.class);
-      Long len = params.get(LenParam.NAME, LenParam.class);
-      AUDIT_LOG.info("[{}] offset [{}] len [{}]",
-          new Object[]{path, offset, len});
-      InputStreamEntity entity = new InputStreamEntity(is, offset, len);
-      response = Response.ok(entity).type(MediaType.APPLICATION_OCTET_STREAM)
-          .build();
-    }
-    return response;
-  }
-
-  /**
-   * Create an open redirection URL from a request. It points to the same
-   * HttpFS endpoint but removes the "redirect" parameter.
-   * @param uriInfo uri info of the request.
-   * @return URL for the redirected location.
-   */
-  private URI createOpenRedirectionURL(UriInfo uriInfo) {
-    UriBuilder uriBuilder = uriInfo.getRequestUriBuilder();
-    uriBuilder.replaceQueryParam(NoRedirectParam.NAME, (Object[])null);
-    return uriBuilder.build((Object[])null);
-  }
-
-  /**
-   * Binding to handle DELETE requests.
-   *
-   * @param path the path for operation.
-   * @param op the HttpFS operation of the request.
-   * @param params the HttpFS parameters of the request.
-   *
-   * @return the request response.
-   *
-   * @throws IOException thrown if an IO error occurred. Thrown exceptions are
-   * handled by {@link HttpFSExceptionProvider}.
-   * @throws FileSystemAccessException thrown if a FileSystemAccess related
-   * error occurred. Thrown exceptions are handled by
-   * {@link HttpFSExceptionProvider}.
-   */
-  @DELETE
-  @Path("{path:.*}")
-  @Produces(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8)
-  public Response delete(@PathParam("path") String path,
-                         @QueryParam(OperationParam.NAME) OperationParam op,
-                         @Context Parameters params,
-                         @Context HttpServletRequest request)
-      throws IOException, FileSystemAccessException {
-    // Do not allow DELETE commands in read-only mode
-    if (accessMode == AccessMode.READONLY) {
-      return Response.status(Response.Status.FORBIDDEN).build();
-    }
-    UserGroupInformation user = HttpUserGroupInformation.get();
-    Response response;
-    path = makeAbsolute(path);
-    MDC.put(HttpFSConstants.OP_PARAM, op.value().name());
-    MDC.put("hostname", request.getRemoteAddr());
-    switch (op.value()) {
-    case DELETE:
-      response = handleDelete(path, params, user);
-      break;
-    case DELETESNAPSHOT:
-      response = handleDeleteSnapshot(path, params, user);
-      break;
-    default:
-      throw new IOException(
-        MessageFormat.format("Invalid HTTP DELETE operation [{0}]",
-                             op.value()));
-    }
-    return response;
-  }
-
-  private Response handleDeleteSnapshot(String path,
-                                        Parameters params,
-                                        UserGroupInformation user)
-      throws IOException, FileSystemAccessException {
-    Response response;
-    String snapshotName = params.get(SnapshotNameParam.NAME,
-        SnapshotNameParam.class);
-    FSOperations.FSDeleteSnapshot command
-        = new FSOperations.FSDeleteSnapshot(path, snapshotName);
-    fsExecute(user, command);
-    AUDIT_LOG.info("[{}] deleted snapshot [{}]", path, snapshotName);
-    response = Response.ok().build();
-    return response;
-  }
-
-  private Response handleDelete(String path,
-                                Parameters params,
-                                UserGroupInformation user)
-      throws IOException, FileSystemAccessException {
-    Response response;
-    Boolean recursive
-        = params.get(RecursiveParam.NAME,  RecursiveParam.class);
-    AUDIT_LOG.info("[{}] recursive [{}]", path, recursive);
-    FSOperations.FSDelete command
-        = new FSOperations.FSDelete(path, recursive);
-    JSONObject json = fsExecute(user, command);
-    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
-    return response;
-  }
-
-  /**
-   * Special binding for '/' as it is not handled by the wildcard binding.
-   * @param is the inputstream for the request payload.
-   * @param uriInfo the uri info of the request.
-   * @param op the HttpFS operation of the request.
-   * @param params the HttpFS parameters of the request.
-   *
-   * @return the request response.
-   *
-   * @throws IOException thrown if an IO error occurred. Thrown exceptions are
-   *           handled by {@link HttpFSExceptionProvider}.
-   * @throws FileSystemAccessException thrown if a FileSystemAccess related
-   *           error occurred. Thrown exceptions are handled by
-   *           {@link HttpFSExceptionProvider}.
-   */
-  @POST
-  @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8 })
-  public Response postRoot(InputStream is, @Context UriInfo uriInfo,
-      @QueryParam(OperationParam.NAME) OperationParam op,
-      @Context Parameters params, @Context HttpServletRequest request)
-      throws IOException, FileSystemAccessException {
-    return post(is, uriInfo, "/", op, params, request);
-  }
-
-  /**
-   * Binding to handle POST requests.
-   *
-   * @param is the inputstream for the request payload.
-   * @param uriInfo the uri info of the request.
-   * @param path the path for operation.
-   * @param op the HttpFS operation of the request.
-   * @param params the HttpFS parameters of the request.
-   *
-   * @return the request response.
-   *
-   * @throws IOException thrown if an IO error occurred. Thrown exceptions are
-   * handled by {@link HttpFSExceptionProvider}.
-   * @throws FileSystemAccessException thrown if a FileSystemAccess related
-   * error occurred. Thrown exceptions are handled by
-   * {@link HttpFSExceptionProvider}.
-   */
-  @POST
-  @Path("{path:.*}")
-  @Consumes({"*/*"})
-  @Produces({MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8})
-  public Response post(InputStream is,
-                       @Context UriInfo uriInfo,
-                       @PathParam("path") String path,
-                       @QueryParam(OperationParam.NAME) OperationParam op,
-                       @Context Parameters params,
-                       @Context HttpServletRequest request)
-      throws IOException, FileSystemAccessException {
-    // Do not allow POST commands in read-only mode
-    if (accessMode == AccessMode.READONLY) {
-      return Response.status(Response.Status.FORBIDDEN).build();
-    }
-    UserGroupInformation user = HttpUserGroupInformation.get();
-    Response response;
-    path = makeAbsolute(path);
-    MDC.put(HttpFSConstants.OP_PARAM, op.value().name());
-    MDC.put("hostname", request.getRemoteAddr());
-    switch (op.value()) {
-    case APPEND:
-      response = handleAppend(is, uriInfo, path, params, user);
-      break;
-    case CONCAT:
-      response = handleConcat(path, params, user);
-      break;
-    case TRUNCATE:
-      response = handleTruncate(path, params, user);
-      break;
-    case UNSETSTORAGEPOLICY:
-      response = handleUnsetStoragePolicy(path, user);
-      break;
-    case UNSETECPOLICY:
-      response = handleUnsetECPolicy(path, user);
-      break;
-    default:
-      throw new IOException(
-        MessageFormat.format("Invalid HTTP POST operation [{0}]",
-                             op.value()));
-    }
-    return response;
-  }
-
-  private Response handleUnsetECPolicy(String path,
-                                       UserGroupInformation user)
-      throws IOException, FileSystemAccessException {
-    Response response;
-    FSOperations.FSUnSetErasureCodingPolicy command
-        = new FSOperations.FSUnSetErasureCodingPolicy(path);
-    fsExecute(user, command);
-    AUDIT_LOG.info("Unset ec policy [{}]", path);
-    response = Response.ok().build();
-    return response;
-  }
-
-  private Response handleUnsetStoragePolicy(String path,
-                                            UserGroupInformation user)
-      throws IOException, FileSystemAccessException {
-    Response response;
-    FSOperations.FSUnsetStoragePolicy command
-        = new FSOperations.FSUnsetStoragePolicy(path);
-    fsExecute(user, command);
-    AUDIT_LOG.info("Unset storage policy [{}]", path);
-    response = Response.ok().build();
-    return response;
-  }
-
-  private Response handleTruncate(String path,
-                                  Parameters params,
-                                  UserGroupInformation user)
-      throws IOException, FileSystemAccessException {
-    Response response;
-    Long newLength = params.get(NewLengthParam.NAME, NewLengthParam.class);
-    FSOperations.FSTruncate command
-        = new FSOperations.FSTruncate(path, newLength);
-    JSONObject json = fsExecute(user, command);
-    AUDIT_LOG.info("Truncate [{}] to length [{}]", path, newLength);
-    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
-    return response;
-  }
-
-  private Response handleConcat(String path,
-                                Parameters params,
-                                UserGroupInformation user)
-      throws IOException, FileSystemAccessException {
-    Response response;
-    String sources = params.get(SourcesParam.NAME, SourcesParam.class);
-    FSOperations.FSConcat command
-        = new FSOperations.FSConcat(path, sources.split(","));
-    fsExecute(user, command);
-    AUDIT_LOG.info("[{}]", path);
-    response = Response.ok().build();
-    return response;
-  }
-
-  private Response handleAppend(InputStream is,
-                                UriInfo uriInfo,
-                                String path,
-                                Parameters params,
-                                UserGroupInformation user)
-      throws IOException, FileSystemAccessException {
-    Response response;
-    Boolean hasData = params.get(DataParam.NAME, DataParam.class);
-    URI redirectURL = createUploadRedirectionURL(uriInfo,
-        HttpFSConstants.Operation.APPEND);
-    Boolean noRedirect
-        = params.get(NoRedirectParam.NAME, NoRedirectParam.class);
-    if (noRedirect) {
-      final String js = JsonUtil.toJsonString("Location", redirectURL);
-      response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
-    } else if (hasData) {
-      FSOperations.FSAppend command
-          = new FSOperations.FSAppend(is, path);
-      fsExecute(user, command);
-      AUDIT_LOG.info("[{}]", path);
-      response = Response.ok().type(MediaType.APPLICATION_JSON).build();
-    } else {
-      response = Response.temporaryRedirect(redirectURL).build();
-    }
-    return response;
-  }
-
-  /**
-   * Creates the URL for an upload operation (create or append).
-   *
-   * @param uriInfo uri info of the request.
-   * @param uploadOperation operation for the upload URL.
-   *
-   * @return the URI for uploading data.
-   */
-  protected URI createUploadRedirectionURL(UriInfo uriInfo,
-                                           Enum<?> uploadOperation) {
-    UriBuilder uriBuilder = uriInfo.getRequestUriBuilder();
-    uriBuilder = uriBuilder.replaceQueryParam(OperationParam.NAME,
-        uploadOperation)
-        .queryParam(DataParam.NAME, Boolean.TRUE)
-        .replaceQueryParam(NoRedirectParam.NAME, (Object[]) null);
-    return uriBuilder.build((Object[]) null);
-  }
-
-  /**
-   * Special binding for '/' as it is not handled by the wildcard binding.
-   * @param is the inputstream for the request payload.
-   * @param uriInfo the uri info of the request.
-   * @param op the HttpFS operation of the request.
-   * @param params the HttpFS parameters of the request.
-   *
-   * @return the request response.
-   *
-   * @throws IOException thrown if an IO error occurred. Thrown exceptions are
-   *           handled by {@link HttpFSExceptionProvider}.
-   * @throws FileSystemAccessException thrown if a FileSystemAccess related
-   *           error occurred. Thrown exceptions are handled by
-   *           {@link HttpFSExceptionProvider}.
-   */
-  @PUT
-  @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8 })
-  public Response putRoot(InputStream is, @Context UriInfo uriInfo,
-      @QueryParam(OperationParam.NAME) OperationParam op,
-      @Context Parameters params, @Context HttpServletRequest request)
-      throws IOException, FileSystemAccessException {
-    return put(is, uriInfo, "/", op, params, request);
-  }
-
-  /**
-   * Binding to handle PUT requests.
-   *
-   * @param is the inputstream for the request payload.
-   * @param uriInfo the uri info of the request.
-   * @param path the path for operation.
-   * @param op the HttpFS operation of the request.
-   * @param params the HttpFS parameters of the request.
-   *
-   * @return the request response.
-   *
-   * @throws IOException thrown if an IO error occurred. Thrown exceptions are
-   * handled by {@link HttpFSExceptionProvider}.
-   * @throws FileSystemAccessException thrown if a FileSystemAccess related
-   * error occurred. Thrown exceptions are handled by
-   * {@link HttpFSExceptionProvider}.
-   */
-  @PUT
-  @Path("{path:.*}")
-  @Consumes({"*/*"})
-  @Produces({MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8})
-  public Response put(InputStream is,
-                       @Context UriInfo uriInfo,
-                       @PathParam("path") String path,
-                       @QueryParam(OperationParam.NAME) OperationParam op,
-                       @Context Parameters params,
-                       @Context HttpServletRequest request)
-      throws IOException, FileSystemAccessException {
-    // Do not allow PUT commands in read-only mode
-    if (accessMode == AccessMode.READONLY) {
-      return Response.status(Response.Status.FORBIDDEN).build();
-    }
-    UserGroupInformation user = HttpUserGroupInformation.get();
-    Response response;
-    path = makeAbsolute(path);
-    MDC.put(HttpFSConstants.OP_PARAM, op.value().name());
-    MDC.put("hostname", request.getRemoteAddr());
-    switch (op.value()) {
-    case CREATE:
-      response = handleCreate(is, uriInfo, path, params, user);
-      break;
-    case ALLOWSNAPSHOT:
-      response = handleAllowSnapshot(path, user);
-      break;
-    case DISALLOWSNAPSHOT:
-      response = handleDisallowSnapshot(path, user);
-      break;
-    case CREATESNAPSHOT:
-      response = handleCreateSnapshot(path, params, user);
-      break;
-    case SETXATTR:
-      response = handleSetXAttr(path, params, user);
-      break;
-    case RENAMESNAPSHOT:
-      response = handleRenameSnapshot(path, params, user);
-      break;
-    case REMOVEXATTR:
-      response = handleRemoveXAttr(path, params, user);
-      break;
-    case MKDIRS:
-      response = handleMkdirs(path, params, user);
-      break;
-    case RENAME:
-      response = handleRename(path, params, user);
-      break;
-    case SETOWNER:
-      throw new UnsupportedOperationException(getClass().getSimpleName()
-          + " doesn't support SETOWNER");
-      //response = handleSetOwner(path, params, user);
-      //break;
-    case SETPERMISSION:
-      throw new UnsupportedOperationException(getClass().getSimpleName()
-          + " doesn't support SETPERMISSION");
-      //response = handleSetPermission(path, params, user);
-      //break;
-    case SETREPLICATION:
-      throw new UnsupportedOperationException(getClass().getSimpleName()
-          + " doesn't support SETREPLICATION");
-      //response = handleSetReplication(path, params, user);
-      //break;
-    case SETTIMES:
-      throw new UnsupportedOperationException(getClass().getSimpleName()
-          + " doesn't support SETTIMES");
-      //response = handleSetTimes(path, params, user);
-      //break;
-    case SETACL:
-      response = handleSetACL(path, params, user);
-      break;
-    case REMOVEACL:
-      response = handleRemoveACL(path, user);
-      break;
-    case MODIFYACLENTRIES:
-      response = handleModifyACLEntries(path, params, user);
-      break;
-    case REMOVEACLENTRIES:
-      response = handleRemoveACLEntries(path, params, user);
-      break;
-    case REMOVEDEFAULTACL:
-      response = handleRemoveDefaultACL(path, user);
-      break;
-    case SETSTORAGEPOLICY:
-      response = handleSetStoragePolicy(path, params, user);
-      break;
-    case SETECPOLICY:
-      response = handleSetECPolicy(path, params, user);
-      break;
-    case SATISFYSTORAGEPOLICY:
-      throw new UnsupportedOperationException(getClass().getSimpleName()
-          + " doesn't support SATISFYSTORAGEPOLICY");
-    default:
-      throw new IOException(
-        MessageFormat.format("Invalid HTTP PUT operation [{0}]",
-                             op.value()));
-    }
-    return response;
-  }
-
-  private Response handleSetECPolicy(String path,
-                                     Parameters params,
-                                     UserGroupInformation user)
-      throws IOException, FileSystemAccessException {
-    Response response;
-    String policyName = params.get(ECPolicyParam.NAME, ECPolicyParam.class);
-    FSOperations.FSSetErasureCodingPolicy command
-        = new FSOperations.FSSetErasureCodingPolicy(path, policyName);
-    fsExecute(user, command);
-    AUDIT_LOG.info("[{}] to policy [{}]", path, policyName);
-    response = Response.ok().build();
-    return response;
-  }
-
-  private Response handleSetStoragePolicy(String path,
-                                          Parameters params,
-                                          UserGroupInformation user)
-      throws IOException, FileSystemAccessException {
-    Response response;
-    String policyName = params.get(PolicyNameParam.NAME,
-        PolicyNameParam.class);
-    FSOperations.FSSetStoragePolicy command
-        = new FSOperations.FSSetStoragePolicy(path, policyName);
-    fsExecute(user, command);
-    AUDIT_LOG.info("[{}] to policy [{}]", path, policyName);
-    response = Response.ok().build();
-    return response;
-  }
-
-  private Response handleRemoveDefaultACL(String path,
-                                          UserGroupInformation user)
-      throws IOException, FileSystemAccessException {
-    Response response;
-    FSOperations.FSRemoveDefaultAcl command
-        = new FSOperations.FSRemoveDefaultAcl(path);
-    fsExecute(user, command);
-    AUDIT_LOG.info("[{}] remove default acl", path);
-    response = Response.ok().build();
-    return response;
-  }
-
-  private Response handleRemoveACLEntries(String path,
-                                          Parameters params,
-                                          UserGroupInformation user)
-      throws IOException, FileSystemAccessException {
-    Response response;
-    String aclSpec = params.get(AclPermissionParam.NAME,
-            AclPermissionParam.class);
-    FSOperations.FSRemoveAclEntries command
-        = new FSOperations.FSRemoveAclEntries(path, aclSpec);
-    fsExecute(user, command);
-    AUDIT_LOG.info("[{}] remove acl entry [{}]", path, aclSpec);
-    response = Response.ok().build();
-    return response;
-  }
-
-  private Response handleModifyACLEntries(String path,
-                                          Parameters params,
-                                          UserGroupInformation user)
-      throws IOException, FileSystemAccessException {
-    Response response;
-    String aclSpec = params.get(AclPermissionParam.NAME,
-            AclPermissionParam.class);
-    FSOperations.FSModifyAclEntries command
-        = new FSOperations.FSModifyAclEntries(path, aclSpec);
-    fsExecute(user, command);
-    AUDIT_LOG.info("[{}] modify acl entry with [{}]", path, aclSpec);
-    response = Response.ok().build();
-    return response;
-  }
-
-  private Response handleRemoveACL(String path, UserGroupInformation user)
-      throws IOException, FileSystemAccessException {
-    Response response;
-    FSOperations.FSRemoveAcl command
-        = new FSOperations.FSRemoveAcl(path);
-    fsExecute(user, command);
-    AUDIT_LOG.info("[{}] removed acl", path);
-    response = Response.ok().build();
-    return response;
-  }
-
-  private Response handleSetACL(String path,
-                                Parameters params,
-                                UserGroupInformation user)
-      throws IOException, FileSystemAccessException {
-    Response response;
-    String aclSpec = params.get(AclPermissionParam.NAME,
-            AclPermissionParam.class);
-    FSOperations.FSSetAcl command
-        = new FSOperations.FSSetAcl(path, aclSpec);
-    fsExecute(user, command);
-    AUDIT_LOG.info("[{}] to acl [{}]", path, aclSpec);
-    response = Response.ok().build();
-    return response;
-  }
-
-  private Response handleSetTimes(String path,
-                                  Parameters params,
-                                  UserGroupInformation user)
-      throws IOException, FileSystemAccessException {
-    Response response;
-    Long modifiedTime = params.get(ModifiedTimeParam.NAME,
-                                   ModifiedTimeParam.class);
-    Long accessTime = params.get(AccessTimeParam.NAME,
-                                 AccessTimeParam.class);
-    FSOperations.FSSetTimes command
-        = new FSOperations.FSSetTimes(path, modifiedTime, accessTime);
-    fsExecute(user, command);
-    AUDIT_LOG.info("[{}] to (M/A)[{}]", path,
-                   modifiedTime + ":" + accessTime);
-    response = Response.ok().build();
-    return response;
-  }
-
-  private Response handleSetReplication(String path,
-                                        Parameters params,
-                                        UserGroupInformation user)
-      throws IOException, FileSystemAccessException {
-    Response response;
-    Short replication = params.get(ReplicationParam.NAME,
-                                   ReplicationParam.class);
-    FSOperations.FSSetReplication command
-        = new FSOperations.FSSetReplication(path, replication);
-    JSONObject json = fsExecute(user, command);
-    AUDIT_LOG.info("[{}] to [{}]", path, replication);
-    response = Response.ok(json).build();
-    return response;
-  }
-
-  private Response handleSetPermission(String path,
-                                       Parameters params,
-                                       UserGroupInformation user)
-      throws IOException, FileSystemAccessException {
-    Response response;
-    Short permission = params.get(PermissionParam.NAME,
-                                  PermissionParam.class);
-    FSOperations.FSSetPermission command
-        = new FSOperations.FSSetPermission(path, permission);
-    fsExecute(user, command);
-    AUDIT_LOG.info("[{}] to [{}]", path, permission);
-    response = Response.ok().build();
-    return response;
-  }
-
-  private Response handleSetOwner(String path,
-                                  Parameters params,
-                                  UserGroupInformation user)
-      throws IOException, FileSystemAccessException {
-    Response response;
-    String owner = params.get(OwnerParam.NAME, OwnerParam.class);
-    String group = params.get(GroupParam.NAME, GroupParam.class);
-    FSOperations.FSSetOwner command
-        = new FSOperations.FSSetOwner(path, owner, group);
-    fsExecute(user, command);
-    AUDIT_LOG.info("[{}] to (O/G)[{}]", path, owner + ":" + group);
-    response = Response.ok().build();
-    return response;
-  }
-
-  private Response handleRename(String path,
-                                Parameters params,
-                                UserGroupInformation user)
-      throws IOException, FileSystemAccessException {
-    Response response;
-    String toPath = params.get(DestinationParam.NAME, DestinationParam.class);
-    FSOperations.FSRename command
-        = new FSOperations.FSRename(path, toPath);
-    JSONObject json = fsExecute(user, command);
-    AUDIT_LOG.info("[{}] to [{}]", path, toPath);
-    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
-    return response;
-  }
-
-  private Response handleMkdirs(String path,
-                                Parameters params,
-                                UserGroupInformation user)
-      throws IOException, FileSystemAccessException {
-    Response response;
-    Short permission = params.get(PermissionParam.NAME,
-                                   PermissionParam.class);
-    Short unmaskedPermission = params.get(UnmaskedPermissionParam.NAME,
-        UnmaskedPermissionParam.class);
-    FSOperations.FSMkdirs command =
-        new FSOperations.FSMkdirs(path, permission, unmaskedPermission);
-    JSONObject json = fsExecute(user, command);
-    AUDIT_LOG.info("[{}] permission [{}] unmaskedpermission [{}]",
-        path, permission, unmaskedPermission);
-    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
-    return response;
-  }
-
-  private Response handleRemoveXAttr(String path,
-                                     Parameters params,
-                                     UserGroupInformation user)
-      throws IOException, FileSystemAccessException {
-    Response response;
-    String xattrName = params.get(XAttrNameParam.NAME, XAttrNameParam.class);
-    FSOperations.FSRemoveXAttr command
-        = new FSOperations.FSRemoveXAttr(path, xattrName);
-    fsExecute(user, command);
-    AUDIT_LOG.info("[{}] removed xAttr [{}]", path, xattrName);
-    response = Response.ok().build();
-    return response;
-  }
-
-  private Response handleRenameSnapshot(String path,
-                                        Parameters params,
-                                        UserGroupInformation user)
-      throws IOException, FileSystemAccessException {
-    Response response;
-    String oldSnapshotName = params.get(OldSnapshotNameParam.NAME,
-        OldSnapshotNameParam.class);
-    String snapshotName = params.get(SnapshotNameParam.NAME,
-        SnapshotNameParam.class);
-    FSOperations.FSRenameSnapshot command
-        = new FSOperations.FSRenameSnapshot(path,
-        oldSnapshotName,
-        snapshotName);
-    fsExecute(user, command);
-    AUDIT_LOG.info("[{}] renamed snapshot [{}] to [{}]", path,
-        oldSnapshotName, snapshotName);
-    response = Response.ok().build();
-    return response;
-  }
-
-  private Response handleSetXAttr(String path,
-                                  Parameters params,
-                                  UserGroupInformation user)
-      throws IOException, FileSystemAccessException {
-    Response response;
-    String xattrName = params.get(XAttrNameParam.NAME,
-        XAttrNameParam.class);
-    String xattrValue = params.get(XAttrValueParam.NAME,
-        XAttrValueParam.class);
-    EnumSet<XAttrSetFlag> flag = params.get(XAttrSetFlagParam.NAME,
-        XAttrSetFlagParam.class);
-
-    FSOperations.FSSetXAttr command = new FSOperations.FSSetXAttr(
-        path, xattrName, xattrValue, flag);
-    fsExecute(user, command);
-    AUDIT_LOG.info("[{}] to xAttr [{}]", path, xattrName);
-    response = Response.ok().build();
-    return response;
-  }
-
-  private Response handleCreateSnapshot(String path,
-                                        Parameters params,
-                                        UserGroupInformation user)
-      throws IOException, FileSystemAccessException {
-    Response response;
-    String snapshotName = params.get(SnapshotNameParam.NAME,
-        SnapshotNameParam.class);
-    FSOperations.FSCreateSnapshot command
-        = new FSOperations.FSCreateSnapshot(path, snapshotName);
-    String json = fsExecute(user, command);
-    AUDIT_LOG.info("[{}] snapshot created as [{}]", path, snapshotName);
-    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
-    return response;
-  }
-
-  private Response handleDisallowSnapshot(String path,
-                                          UserGroupInformation user)
-      throws IOException, FileSystemAccessException {
-    Response response;
-    FSOperations.FSDisallowSnapshot command
-        = new FSOperations.FSDisallowSnapshot(path);
-    fsExecute(user, command);
-    AUDIT_LOG.info("[{}] disallowed snapshot", path);
-    response = Response.ok().build();
-    return response;
-  }
-
-  private Response handleAllowSnapshot(String path, UserGroupInformation user)
-      throws IOException, FileSystemAccessException {
-    Response response;
-    FSOperations.FSAllowSnapshot command
-        = new FSOperations.FSAllowSnapshot(path);
-    fsExecute(user, command);
-    AUDIT_LOG.info("[{}] allowed snapshot", path);
-    response = Response.ok().build();
-    return response;
-  }
-
-  private Response handleCreate(InputStream is,
-                                UriInfo uriInfo,
-                                String path,
-                                Parameters params,
-                                UserGroupInformation user)
-      throws IOException, FileSystemAccessException {
-    Response response;
-    Boolean hasData = params.get(DataParam.NAME, DataParam.class);
-    URI redirectURL = createUploadRedirectionURL(uriInfo,
-        HttpFSConstants.Operation.CREATE);
-    Boolean noRedirect
-        = params.get(NoRedirectParam.NAME, NoRedirectParam.class);
-    if (noRedirect) {
-      final String js = JsonUtil.toJsonString("Location", redirectURL);
-      response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
-    } else if (hasData) {
-      Short permission = params.get(PermissionParam.NAME,
-                                     PermissionParam.class);
-      Short unmaskedPermission = params.get(UnmaskedPermissionParam.NAME,
-          UnmaskedPermissionParam.class);
-      Boolean override = params.get(OverwriteParam.NAME,
-                                    OverwriteParam.class);
-      Short replication = params.get(ReplicationParam.NAME,
-                                     ReplicationParam.class);
-      Long blockSize = params.get(BlockSizeParam.NAME,
-                                  BlockSizeParam.class);
-      FSOperations.FSCreate command
-          = new FSOperations.FSCreate(is, path, permission, override,
-            replication, blockSize, unmaskedPermission);
-      fsExecute(user, command);
-      AUDIT_LOG.info("[{}] permission [{}] override [{}] replication [{}] " +
-              "blockSize [{}] unmaskedpermission [{}]",
-              new Object[]{path,
-                  permission,
-                  override,
-                  replication,
-                  blockSize,
-                  unmaskedPermission});
-      final String js = JsonUtil.toJsonString(
-          "Location", uriInfo.getAbsolutePath());
-      response = Response.created(uriInfo.getAbsolutePath())
-          .type(MediaType.APPLICATION_JSON).entity(js).build();
-    } else {
-      response = Response.temporaryRedirect(redirectURL).build();
-    }
-    return response;
-  }
-}
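
For context, each case of the PUT dispatch deleted above maps to a WebHDFS-style "op" query parameter. A minimal sketch driving the MKDIRS branch from a client, assuming a gateway listening on localhost:14000 and a placeholder user name (both illustrative, not values taken from this patch):

    import java.io.IOException;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class MkdirsExample {
      public static void main(String[] args) throws IOException {
        // op=MKDIRS selects the handleMkdirs() branch of HttpFSServer#put.
        URL url = new URL("http://localhost:14000/webhdfs/v1/tmp/demo"
            + "?op=MKDIRS&user.name=hadoop");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("PUT");
        // A 403 here would correspond to the read-only guard at the top of put().
        System.out.println("HTTP status: " + conn.getResponseCode());
        conn.disconnect();
      }
    }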
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/HttpFSServerWebApp.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/HttpFSServerWebApp.java
deleted file mode 100644
index 6d0acc5e88..0000000000
--- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/HttpFSServerWebApp.java
+++ /dev/null
@@ -1,162 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ozone.fs.http.server;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.ozone.fs.http.server.metrics.HttpFSServerMetrics;
-import org.apache.ozone.lib.server.ServerException;
-import org.apache.ozone.lib.service.FileSystemAccess;
-import org.apache.ozone.lib.servlet.ServerWebApp;
-import org.apache.hadoop.hdds.annotation.InterfaceAudience;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.util.JvmPauseMonitor;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-
-/**
- * Bootstrap class that manages the initialization and destruction of the
- * HttpFSServer server; it is a <code>javax.servlet.ServletContextListener
- * </code> implementation that is wired in HttpFSServer's WAR
- * <code>WEB-INF/web.xml</code>.
- * <p>
- * It provides access to the server context via the singleton {@link #get}.
- * <p>
- * All the configuration is loaded from configuration properties prefixed
- * with <code>httpfs.</code>.
- */
-@InterfaceAudience.Private
-public class HttpFSServerWebApp extends ServerWebApp {
-  private static final Logger LOG
-      = LoggerFactory.getLogger(HttpFSServerWebApp.class);
-
-  /**
-   * Server name and prefix for all configuration properties.
-   */
-  public static final String NAME = "httpfs";
-
-  /**
-   * Configuration property that defines HttpFSServer admin group.
-   */
-  public static final String CONF_ADMIN_GROUP = "admin.group";
-
-  private static HttpFSServerWebApp server;
-  private static HttpFSServerMetrics metrics;
-
-  private String adminGroup;
-
-  /**
-   * Default constructor.
-   *
-   * @throws IOException thrown if the home/conf/log/temp directory paths
-   * could not be resolved.
-   */
-  public HttpFSServerWebApp() throws IOException {
-    super(NAME);
-  }
-
-  /**
-   * Constructor used for testing purposes.
-   */
-  public HttpFSServerWebApp(String homeDir, String configDir, String logDir,
-                               String tempDir, Configuration config) {
-    super(NAME, homeDir, configDir, logDir, tempDir, config);
-  }
-
-  /**
-   * Constructor used for testing purposes.
-   */
-  public HttpFSServerWebApp(String homeDir, Configuration config) {
-    super(NAME, homeDir, config);
-  }
-
-  /**
-   * Initializes the HttpFSServer server, loads configuration and required
-   * services.
-   *
-   * @throws ServerException thrown if HttpFSServer server could not be
-   * initialized.
-   */
-  @Override
-  public void init() throws ServerException {
-    if (server != null) {
-      throw new RuntimeException("HttpFSServer server already initialized");
-    }
-    server = this;
-    super.init();
-    adminGroup = getConfig().get(getPrefixedName(CONF_ADMIN_GROUP), "admin");
-    LOG.info("Connects to Namenode [{}]",
-             get().get(FileSystemAccess.class).getFileSystemConfiguration().
-               get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY));
-    setMetrics(getConfig());
-  }
-
-  /**
-   * Shuts down all running services.
-   */
-  @Override
-  public void destroy() {
-    server = null;
-    if (metrics != null) {
-      metrics.shutdown();
-    }
-    super.destroy();
-  }
-
-  private static void setMetrics(Configuration config) {
-    LOG.info("Initializing HttpFSServerMetrics");
-    metrics = HttpFSServerMetrics.create(config, "HttpFSServer");
-    JvmPauseMonitor pauseMonitor = new JvmPauseMonitor();
-    pauseMonitor.init(config);
-    pauseMonitor.start();
-    metrics.getJvmMetrics().setPauseMonitor(pauseMonitor);
-    FSOperations.setBufferSize(config);
-    DefaultMetricsSystem.initialize("HttpFSServer");
-  }
-  /**
-   * Returns HttpFSServer server singleton; configuration and services are
-   * accessible through it.
-   *
-   * @return the HttpFSServer server singleton.
-   */
-  public static HttpFSServerWebApp get() {
-    return server;
-  }
-
-  /**
-   * Gets the HttpFSServerMetrics instance.
-   * @return the HttpFSServerMetrics singleton.
-   */
-  public static HttpFSServerMetrics getMetrics() {
-    return metrics;
-  }
-
-  /**
-   * Returns HttpFSServer admin group.
-   *
-   * @return httpfs admin group.
-   */
-  public String getAdminGroup() {
-    return adminGroup;
-  }
-
-}
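
As its javadoc notes, HttpFSServerWebApp is wired as a servlet context listener, so its init()/destroy() pair brackets the web app lifecycle and guards a process-wide singleton. A stripped-down sketch of that pattern (class name hypothetical; javax.servlet-api assumed on the classpath):

    import javax.servlet.ServletContextEvent;
    import javax.servlet.ServletContextListener;

    public class SingletonWebApp implements ServletContextListener {
      private static SingletonWebApp instance;

      @Override
      public void contextInitialized(ServletContextEvent sce) {
        if (instance != null) {
          throw new RuntimeException("server already initialized");
        }
        instance = this;   // mirrors the guard in init() above
      }

      @Override
      public void contextDestroyed(ServletContextEvent sce) {
        instance = null;   // mirrors destroy() above
      }

      public static SingletonWebApp get() {
        return instance;
      }
    }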
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/HttpFSServerWebServer.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/HttpFSServerWebServer.java
deleted file mode 100644
index 143bb74e0f..0000000000
--- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/HttpFSServerWebServer.java
+++ /dev/null
@@ -1,188 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ozone.fs.http.server;
-
-import static org.apache.hadoop.util.StringUtils.startupShutdownMessage;
-
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.net.MalformedURLException;
-import java.net.URI;
-import java.net.URL;
-import java.util.LinkedHashSet;
-import java.util.Set;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.annotation.InterfaceAudience;
-import org.apache.hadoop.http.HttpServer2;
-import org.apache.hadoop.security.AuthenticationFilterInitializer;
-import org.apache.hadoop.security.authentication.server.ProxyUserAuthenticationFilterInitializer;
-import org.apache.hadoop.security.authorize.AccessControlList;
-import org.apache.hadoop.security.ssl.SSLFactory;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * The HttpFS web server.
- */
-@InterfaceAudience.Private
-public class HttpFSServerWebServer {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(HttpFSServerWebServer.class);
-
-  private static final String HTTPFS_DEFAULT_XML = "httpfs-default.xml";
-  private static final String HTTPFS_SITE_XML = "httpfs-site.xml";
-
-  // HTTP properties
-  static final String HTTP_PORT_KEY = "httpfs.http.port";
-  private static final int HTTP_PORT_DEFAULT = 14000;
-  static final String HTTP_HOSTNAME_KEY = "httpfs.http.hostname";
-  private static final String HTTP_HOSTNAME_DEFAULT = "0.0.0.0";
-
-  // SSL properties
-  static final String SSL_ENABLED_KEY = "httpfs.ssl.enabled";
-  private static final boolean SSL_ENABLED_DEFAULT = false;
-
-  private static final String HTTP_ADMINS_KEY = "httpfs.http.administrators";
-
-  private static final String NAME = "webhdfs";
-  private static final String SERVLET_PATH = "/webhdfs";
-
-  static {
-    Configuration.addDefaultResource(HTTPFS_DEFAULT_XML);
-    Configuration.addDefaultResource(HTTPFS_SITE_XML);
-  }
-
-  private final HttpServer2 httpServer;
-  private final String scheme;
-
-  HttpFSServerWebServer(Configuration conf, Configuration sslConf) throws
-      Exception {
-    // Override configuration with deprecated environment variables.
-    deprecateEnv("HTTPFS_HTTP_HOSTNAME", conf, HTTP_HOSTNAME_KEY,
-        HTTPFS_SITE_XML);
-    deprecateEnv("HTTPFS_HTTP_PORT", conf, HTTP_PORT_KEY,
-        HTTPFS_SITE_XML);
-    deprecateEnv("HTTPFS_MAX_THREADS", conf,
-        HttpServer2.HTTP_MAX_THREADS_KEY, HTTPFS_SITE_XML);
-    deprecateEnv("HTTPFS_MAX_HTTP_HEADER_SIZE", conf,
-        HttpServer2.HTTP_MAX_REQUEST_HEADER_SIZE_KEY, HTTPFS_SITE_XML);
-    deprecateEnv("HTTPFS_MAX_HTTP_HEADER_SIZE", conf,
-        HttpServer2.HTTP_MAX_RESPONSE_HEADER_SIZE_KEY, HTTPFS_SITE_XML);
-    deprecateEnv("HTTPFS_SSL_ENABLED", conf, SSL_ENABLED_KEY,
-        HTTPFS_SITE_XML);
-    deprecateEnv("HTTPFS_SSL_KEYSTORE_FILE", sslConf,
-        SSLFactory.SSL_SERVER_KEYSTORE_LOCATION,
-        SSLFactory.SSL_SERVER_CONF_DEFAULT);
-    deprecateEnv("HTTPFS_SSL_KEYSTORE_PASS", sslConf,
-        SSLFactory.SSL_SERVER_KEYSTORE_PASSWORD,
-        SSLFactory.SSL_SERVER_CONF_DEFAULT);
-
-    boolean sslEnabled = conf.getBoolean(SSL_ENABLED_KEY,
-        SSL_ENABLED_DEFAULT);
-    scheme = sslEnabled ? HttpServer2.HTTPS_SCHEME : HttpServer2.HTTP_SCHEME;
-
-    String host = conf.get(HTTP_HOSTNAME_KEY, HTTP_HOSTNAME_DEFAULT);
-    int port = conf.getInt(HTTP_PORT_KEY, HTTP_PORT_DEFAULT);
-    URI endpoint = new URI(scheme, null, host, port, null, null, null);
-
-    // Allow the default authFilter HttpFSAuthenticationFilter
-    String configuredInitializers = conf.get(HttpServer2.
-        FILTER_INITIALIZER_PROPERTY);
-    if (configuredInitializers != null) {
-      Set<String> target = new LinkedHashSet<String>();
-      String[] parts = configuredInitializers.split(",");
-      for (String filterInitializer : parts) {
-        if (!filterInitializer.equals(AuthenticationFilterInitializer.class.
-            getName()) && !filterInitializer.equals(
-            ProxyUserAuthenticationFilterInitializer.class.getName())) {
-          target.add(filterInitializer);
-        }
-      }
-      String actualInitializers =
-          org.apache.commons.lang3.StringUtils.join(target, ",");
-      conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY, actualInitializers);
-    }
-
-    httpServer = new HttpServer2.Builder()
-        .setName(NAME)
-        .setConf(conf)
-        .setSSLConf(sslConf)
-        .authFilterConfigurationPrefix(HttpFSAuthenticationFilter.CONF_PREFIX)
-        .setACL(new AccessControlList(conf.get(HTTP_ADMINS_KEY, " ")))
-        .addEndpoint(endpoint)
-        .build();
-  }
-
-  /**
-   * Load the deprecated environment variable into the configuration.
-   *
-   * @param varName the environment variable name
-   * @param conf the configuration
-   * @param propName the configuration property name
-   * @param confFile the configuration file name
-   */
-  private static void deprecateEnv(String varName, Configuration conf,
-                                   String propName, String confFile) {
-    String value = System.getenv(varName);
-    if (value == null) {
-      return;
-    }
-    LOG.warn("Environment variable {} is deprecated and overriding"
-            + " property {}', please set the property in {} instead.",
-        varName, propName, confFile);
-    conf.set(propName, value, "environment variable " + varName);
-  }
-
-  public void start() throws IOException {
-    httpServer.start();
-  }
-
-  public void join() throws InterruptedException {
-    httpServer.join();
-  }
-
-  public void stop() throws Exception {
-    httpServer.stop();
-  }
-
-  public URL getUrl() {
-    InetSocketAddress addr = httpServer.getConnectorAddress(0);
-    if (null == addr) {
-      return null;
-    }
-    try {
-      return new URL(scheme, addr.getHostName(), addr.getPort(),
-          SERVLET_PATH);
-    } catch (MalformedURLException ex) {
-      throw new RuntimeException("It should never happen: " + ex.getMessage(),
-          ex);
-    }
-  }
-
-  public static void main(String[] args) throws Exception {
-    startupShutdownMessage(HttpFSServerWebServer.class, args, LOG);
-    Configuration conf = new Configuration(true);
-    Configuration sslConf = SSLFactory.readSSLConfiguration(conf,
-        SSLFactory.Mode.SERVER);
-    HttpFSServerWebServer webServer =
-        new HttpFSServerWebServer(conf, sslConf);
-    webServer.start();
-    webServer.join();
-  }
-}
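
The deprecateEnv() helper above implements a simple precedence rule: an exported environment variable overrides the XML value and is recorded as the property's source. A self-contained sketch of the same rule (hadoop-common assumed on the classpath; the variable and property names are the real ones used above):

    import org.apache.hadoop.conf.Configuration;

    public class EnvOverrideExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration(false);
        conf.set("httpfs.http.port", "14000");
        String value = System.getenv("HTTPFS_HTTP_PORT");
        if (value != null) {
          // The third argument records the value's origin, as deprecateEnv() does.
          conf.set("httpfs.http.port", value,
              "environment variable HTTPFS_HTTP_PORT");
        }
        System.out.println(conf.get("httpfs.http.port"));
      }
    }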
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/JsonUtil.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/JsonUtil.java
deleted file mode 100644
index 75d66e1b51..0000000000
--- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/JsonUtil.java
+++ /dev/null
@@ -1,512 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ozone.fs.http.server;
-
-import com.fasterxml.jackson.databind.ObjectMapper;
-
-import org.apache.hadoop.fs.BlockLocation;
-import org.apache.hadoop.fs.ContentSummary;
-import org.apache.hadoop.fs.FileChecksum;
-import org.apache.hadoop.fs.FsServerDefaults;
-import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
-import org.apache.hadoop.fs.QuotaUsage;
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.fs.XAttr;
-import org.apache.hadoop.fs.XAttrCodec;
-import org.apache.hadoop.fs.permission.AclEntry;
-import org.apache.hadoop.fs.permission.AclStatus;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSUtilClient;
-import org.apache.hadoop.hdfs.XAttrHelper;
-import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
-import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.util.StringUtils;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
-
-/** JSON Utilities. */
-final class JsonUtil {
-  private static final Object[] EMPTY_OBJECT_ARRAY = {};
-
-  private JsonUtil() {
-  }
-
-  // Reuse ObjectMapper instance for improving performance.
-  // ObjectMapper is thread safe as long as we always configure instance
-  // before use. We don't have a re-entrant call pattern in WebHDFS,
-  // so we just need to worry about thread-safety.
-  private static final ObjectMapper MAPPER = new ObjectMapper();
-
-  /** Convert a token object to a Json string. */
-  public static String toJsonString(final Token<?
-      extends TokenIdentifier> token) throws IOException {
-    return toJsonString(Token.class, toJsonMap(token));
-  }
-
-  private static Map<String, Object> toJsonMap(
-      final Token<? extends TokenIdentifier> token) throws IOException {
-    if (token == null) {
-      return null;
-    }
-
-    final Map<String, Object> m = new TreeMap<String, Object>();
-    m.put("urlString", token.encodeToUrlString());
-    return m;
-  }
-
-  /** Convert an exception object to a Json string. */
-  public static String toJsonString(final Exception e) {
-    final Map<String, Object> m = new TreeMap<String, Object>();
-    m.put("exception", e.getClass().getSimpleName());
-    m.put("message", e.getMessage());
-    m.put("javaClassName", e.getClass().getName());
-    return toJsonString(RemoteException.class, m);
-  }
-
-  private static String toJsonString(final Class<?> clazz, final Object value) {
-    return toJsonString(clazz.getSimpleName(), value);
-  }
-
-  /** Convert a key-value pair to a Json string. */
-  public static String toJsonString(final String key, final Object value) {
-    final Map<String, Object> m = new TreeMap<String, Object>();
-    m.put(key, value);
-    try {
-      return MAPPER.writeValueAsString(m);
-    } catch (IOException ignored) {
-    }
-    return null;
-  }
-
-  /** Convert a FsPermission object to a string. */
-  private static String toString(final FsPermission permission) {
-    return String.format("%o", permission.toShort());
-  }
-
-  /** Convert an ExtendedBlock to a Json map. */
-  private static Map<String, Object> toJsonMap(
-      final ExtendedBlock extendedblock) {
-    if (extendedblock == null) {
-      return null;
-    }
-
-    final Map<String, Object> m = new TreeMap<String, Object>();
-    m.put("blockPoolId", extendedblock.getBlockPoolId());
-    m.put("blockId", extendedblock.getBlockId());
-    m.put("numBytes", extendedblock.getNumBytes());
-    m.put("generationStamp", extendedblock.getGenerationStamp());
-    return m;
-  }
-
-  /** Convert a DatanodeInfo to a Json map. */
-  static Map<String, Object> toJsonMap(final DatanodeInfo datanodeinfo) {
-    if (datanodeinfo == null) {
-      return null;
-    }
-
-    // TODO: Fix storageID
-    final Map<String, Object> m = new TreeMap<String, Object>();
-    m.put("ipAddr", datanodeinfo.getIpAddr());
-    // 'name' is equivalent to ipAddr:xferPort. Older clients (1.x, 0.23.x)
-    // expect this instead of the two fields.
-    m.put("name", datanodeinfo.getXferAddr());
-    m.put("hostName", datanodeinfo.getHostName());
-    m.put("storageID", datanodeinfo.getDatanodeUuid());
-    m.put("xferPort", datanodeinfo.getXferPort());
-    m.put("infoPort", datanodeinfo.getInfoPort());
-    m.put("infoSecurePort", datanodeinfo.getInfoSecurePort());
-    m.put("ipcPort", datanodeinfo.getIpcPort());
-
-    m.put("capacity", datanodeinfo.getCapacity());
-    m.put("dfsUsed", datanodeinfo.getDfsUsed());
-    m.put("remaining", datanodeinfo.getRemaining());
-    m.put("blockPoolUsed", datanodeinfo.getBlockPoolUsed());
-    m.put("cacheCapacity", datanodeinfo.getCacheCapacity());
-    m.put("cacheUsed", datanodeinfo.getCacheUsed());
-    m.put("lastUpdate", datanodeinfo.getLastUpdate());
-    m.put("lastUpdateMonotonic", datanodeinfo.getLastUpdateMonotonic());
-    m.put("xceiverCount", datanodeinfo.getXceiverCount());
-    m.put("networkLocation", datanodeinfo.getNetworkLocation());
-    m.put("adminState", datanodeinfo.getAdminState().name());
-    if (datanodeinfo.getUpgradeDomain() != null) {
-      m.put("upgradeDomain", datanodeinfo.getUpgradeDomain());
-    }
-    m.put("lastBlockReportTime", datanodeinfo.getLastBlockReportTime());
-    m.put("lastBlockReportMonotonic",
-        datanodeinfo.getLastBlockReportMonotonic());
-    return m;
-  }
-
-  /** Convert a DatanodeInfo[] to a Json array. */
-  private static Object[] toJsonArray(final DatanodeInfo[] array) {
-    if (array == null) {
-      return null;
-    } else if (array.length == 0) {
-      return EMPTY_OBJECT_ARRAY;
-    } else {
-      final Object[] a = new Object[array.length];
-      for (int i = 0; i < array.length; i++) {
-        a[i] = toJsonMap(array[i]);
-      }
-      return a;
-    }
-  }
-
-  /** Convert a StorageType[] to a Json array. */
-  private static Object[] toJsonArray(final StorageType[] array) {
-    if (array == null) {
-      return null;
-    } else if (array.length == 0) {
-      return EMPTY_OBJECT_ARRAY;
-    } else {
-      final Object[] a = new Object[array.length];
-      for (int i = 0; i < array.length; i++) {
-        a[i] = array[i];
-      }
-      return a;
-    }
-  }
-
-  /** Convert a LocatedBlock to a Json map. */
-  private static Map<String, Object> toJsonMap(
-      final LocatedBlock locatedblock) throws IOException {
-    if (locatedblock == null) {
-      return null;
-    }
- 
-    final Map<String, Object> m = new TreeMap<String, Object>();
-    m.put("blockToken", toJsonMap(locatedblock.getBlockToken()));
-    m.put("isCorrupt", locatedblock.isCorrupt());
-    m.put("startOffset", locatedblock.getStartOffset());
-    m.put("block", toJsonMap(locatedblock.getBlock()));
-    m.put("storageTypes", toJsonArray(locatedblock.getStorageTypes()));
-    m.put("locations", toJsonArray(locatedblock.getLocations()));
-    m.put("cachedLocations", toJsonArray(locatedblock.getCachedLocations()));
-    return m;
-  }
-
-  /** Convert a LocatedBlock[] to a Json array. */
-  private static Object[] toJsonArray(
-      final List<LocatedBlock> array) throws IOException {
-    if (array == null) {
-      return null;
-    } else if (array.size() == 0) {
-      return EMPTY_OBJECT_ARRAY;
-    } else {
-      final Object[] a = new Object[array.size()];
-      for (int i = 0; i < array.size(); i++) {
-        a[i] = toJsonMap(array.get(i));
-      }
-      return a;
-    }
-  }
-
-  /** Convert LocatedBlocks to a Json string. */
-  public static String toJsonString(
-      final LocatedBlocks locatedblocks) throws IOException {
-    if (locatedblocks == null) {
-      return null;
-    }
-
-    final Map<String, Object> m = new TreeMap<String, Object>();
-    m.put("fileLength", locatedblocks.getFileLength());
-    m.put("isUnderConstruction", locatedblocks.isUnderConstruction());
-
-    m.put("locatedBlocks", toJsonArray(locatedblocks.getLocatedBlocks()));
-    m.put("lastLocatedBlock", toJsonMap(locatedblocks.getLastLocatedBlock()));
-    m.put("isLastBlockComplete", locatedblocks.isLastBlockComplete());
-    return toJsonString(LocatedBlocks.class, m);
-  }
-
-  /** Convert a ContentSummary to a Json string. */
-  public static String toJsonString(final ContentSummary contentsummary) {
-    if (contentsummary == null) {
-      return null;
-    }
-
-    final Map<String, Object> m = new TreeMap<String, Object>();
-    m.put("length", contentsummary.getLength());
-    m.put("fileCount", contentsummary.getFileCount());
-    m.put("directoryCount", contentsummary.getDirectoryCount());
-    m.put("ecPolicy", contentsummary.getErasureCodingPolicy());
-    // For ContentSummary we don't need the combined fileAndDirectoryCount
-    // since we already have separate counts for files and directories.
-    m.putAll(toJsonMap(contentsummary, false));
-    m.put("snapshotLength", contentsummary.getSnapshotLength());
-    m.put("snapshotFileCount", contentsummary.getSnapshotFileCount());
-    m.put("snapshotDirectoryCount",
-        contentsummary.getSnapshotDirectoryCount());
-    m.put("snapshotSpaceConsumed", contentsummary.getSnapshotSpaceConsumed());
-    return toJsonString(ContentSummary.class, m);
-  }
-
-  /** Convert a QuotaUsage to a JSON string. */
-  public static String toJsonString(final QuotaUsage quotaUsage) {
-    if (quotaUsage == null) {
-      return null;
-    }
-    return toJsonString(QuotaUsage.class, toJsonMap(quotaUsage, true));
-  }
-
-  private static Map<String, Object> toJsonMap(
-      final QuotaUsage quotaUsage, boolean includeFileAndDirectoryCount) {
-    final Map<String, Object> m = new TreeMap<>();
-    if (includeFileAndDirectoryCount) {
-      m.put("fileAndDirectoryCount", quotaUsage.getFileAndDirectoryCount());
-    }
-    m.put("quota", quotaUsage.getQuota());
-    m.put("spaceConsumed", quotaUsage.getSpaceConsumed());
-    m.put("spaceQuota", quotaUsage.getSpaceQuota());
-    final Map<String, Map<String, Long>> typeQuota = new TreeMap<>();
-    for (StorageType t : StorageType.getTypesSupportingQuota()) {
-      long tQuota = quotaUsage.getTypeQuota(t);
-      if (tQuota != HdfsConstants.QUOTA_RESET) {
-        Map<String, Long> type = typeQuota.get(t.toString());
-        if (type == null) {
-          type = new TreeMap<>();
-          typeQuota.put(t.toString(), type);
-        }
-        type.put("quota", quotaUsage.getTypeQuota(t));
-        type.put("consumed", quotaUsage.getTypeConsumed(t));
-      }
-    }
-    m.put("typeQuota", typeQuota);
-    return m;
-  }
-
-  /** Convert an MD5MD5CRC32FileChecksum to a Json string. */
-  public static String toJsonString(final MD5MD5CRC32FileChecksum checksum) {
-    if (checksum == null) {
-      return null;
-    }
-
-    final Map<String, Object> m = new TreeMap<String, Object>();
-    m.put("algorithm", checksum.getAlgorithmName());
-    m.put("length", checksum.getLength());
-    m.put("bytes", StringUtils.byteToHexString(checksum.getBytes()));
-    return toJsonString(FileChecksum.class, m);
-  }
-
-  /** Convert an AclStatus object to a Json string. */
-  public static String toJsonString(final AclStatus status) {
-    if (status == null) {
-      return null;
-    }
-
-    final Map<String, Object> m = new TreeMap<String, Object>();
-    m.put("owner", status.getOwner());
-    m.put("group", status.getGroup());
-    m.put("stickyBit", status.isStickyBit());
-
-    final List<String> stringEntries = new ArrayList<>();
-    for (AclEntry entry : status.getEntries()) {
-      stringEntries.add(entry.toStringStable());
-    }
-    m.put("entries", stringEntries);
-
-    FsPermission perm = status.getPermission();
-    if (perm != null) {
-      m.put("permission", toString(perm));
-    }
-    final Map<String, Map<String, Object>> finalMap =
-        new TreeMap<String, Map<String, Object>>();
-    finalMap.put(AclStatus.class.getSimpleName(), m);
-
-    try {
-      return MAPPER.writeValueAsString(finalMap);
-    } catch (IOException ignored) {
-    }
-    return null;
-  }
-
-  private static Map<String, Object> toJsonMap(final XAttr xAttr,
-      final XAttrCodec encoding) throws IOException {
-    if (xAttr == null) {
-      return null;
-    }
- 
-    final Map<String, Object> m = new TreeMap<String, Object>();
-    m.put("name", XAttrHelper.getPrefixedName(xAttr));
-    m.put("value", xAttr.getValue() != null ?
-        XAttrCodec.encodeValue(xAttr.getValue(), encoding) : null);
-    return m;
-  }
-  
-  private static Object[] toJsonArray(final List<XAttr> array,
-      final XAttrCodec encoding) throws IOException {
-    if (array == null) {
-      return null;
-    } else if (array.size() == 0) {
-      return EMPTY_OBJECT_ARRAY;
-    } else {
-      final Object[] a = new Object[array.size()];
-      for (int i = 0; i < array.size(); i++) {
-        a[i] = toJsonMap(array.get(i), encoding);
-      }
-      return a;
-    }
-  }
-  
-  public static String toJsonString(final List<XAttr> xAttrs,
-      final XAttrCodec encoding) throws IOException {
-    final Map<String, Object> finalMap = new TreeMap<String, Object>();
-    finalMap.put("XAttrs", toJsonArray(xAttrs, encoding));
-    return MAPPER.writeValueAsString(finalMap);
-  }
-  
-  public static String toJsonString(final List<XAttr> xAttrs)
-      throws IOException {
-    final List<String> names = new ArrayList<>(xAttrs.size());
-    for (XAttr xAttr : xAttrs) {
-      names.add(XAttrHelper.getPrefixedName(xAttr));
-    }
-    String ret = MAPPER.writeValueAsString(names);
-    final Map<String, Object> finalMap = new TreeMap<String, Object>();
-    finalMap.put("XAttrNames", ret);
-    return MAPPER.writeValueAsString(finalMap);
-  }
-
-  public static String toJsonString(Object obj) throws IOException {
-    return MAPPER.writeValueAsString(obj);
-  }
-
-  public static String toJsonString(BlockStoragePolicy[] storagePolicies) {
-    final Map<String, Object> blockStoragePolicies = new TreeMap<>();
-    Object[] a = null;
-    if (storagePolicies != null && storagePolicies.length > 0) {
-      a = new Object[storagePolicies.length];
-      for (int i = 0; i < storagePolicies.length; i++) {
-        a[i] = toJsonMap(storagePolicies[i]);
-      }
-    }
-    blockStoragePolicies.put("BlockStoragePolicy", a);
-    return toJsonString("BlockStoragePolicies", blockStoragePolicies);
-  }
-
-  private static Object toJsonMap(BlockStoragePolicy blockStoragePolicy) {
-    final Map<String, Object> m = new TreeMap<String, Object>();
-    m.put("id", blockStoragePolicy.getId());
-    m.put("name", blockStoragePolicy.getName());
-    m.put("storageTypes", blockStoragePolicy.getStorageTypes());
-    m.put("creationFallbacks", blockStoragePolicy.getCreationFallbacks());
-    m.put("replicationFallbacks", blockStoragePolicy.getReplicationFallbacks());
-    m.put("copyOnCreateFile", blockStoragePolicy.isCopyOnCreateFile());
-    return m;
-  }
-
-  public static String toJsonString(BlockStoragePolicy storagePolicy) {
-    return toJsonString(BlockStoragePolicy.class, toJsonMap(storagePolicy));
-  }
-
-  public static String toJsonString(FsServerDefaults serverDefaults) {
-    return toJsonString(FsServerDefaults.class, toJsonMap(serverDefaults));
-  }
-
-  private static Object toJsonMap(FsServerDefaults serverDefaults) {
-    final Map<String, Object> m = new HashMap<String, Object>();
-    m.put("blockSize", serverDefaults.getBlockSize());
-    m.put("bytesPerChecksum", serverDefaults.getBytesPerChecksum());
-    m.put("writePacketSize", serverDefaults.getWritePacketSize());
-    m.put("replication", serverDefaults.getReplication());
-    m.put("fileBufferSize", serverDefaults.getFileBufferSize());
-    m.put("encryptDataTransfer", serverDefaults.getEncryptDataTransfer());
-    m.put("trashInterval", serverDefaults.getTrashInterval());
-    m.put("checksumType", serverDefaults.getChecksumType().id);
-    m.put("keyProviderUri", serverDefaults.getKeyProviderUri());
-    m.put("defaultStoragePolicyId", serverDefaults.getDefaultStoragePolicyId());
-    return m;
-  }
-
-  public static String toJsonString(SnapshotDiffReport diffReport) {
-    return toJsonString(SnapshotDiffReport.class.getSimpleName(),
-        toJsonMap(diffReport));
-  }
-
-  private static Object toJsonMap(SnapshotDiffReport diffReport) {
-    final Map<String, Object> m = new TreeMap<String, Object>();
-    m.put("snapshotRoot", diffReport.getSnapshotRoot());
-    m.put("fromSnapshot", diffReport.getFromSnapshot());
-    m.put("toSnapshot", diffReport.getLaterSnapshotName());
-    Object[] diffList = new Object[diffReport.getDiffList().size()];
-    for (int i = 0; i < diffReport.getDiffList().size(); i++) {
-      diffList[i] = toJsonMap(diffReport.getDiffList().get(i));
-    }
-    m.put("diffList", diffList);
-    return m;
-  }
-
-  private static Object toJsonMap(
-      SnapshotDiffReport.DiffReportEntry diffReportEntry) {
-    final Map<String, Object> m = new TreeMap<String, Object>();
-    m.put("type", diffReportEntry.getType());
-    if (diffReportEntry.getSourcePath() != null) {
-      m.put("sourcePath",
-          DFSUtilClient.bytes2String(diffReportEntry.getSourcePath()));
-    }
-    if (diffReportEntry.getTargetPath() != null) {
-      m.put("targetPath",
-          DFSUtilClient.bytes2String(diffReportEntry.getTargetPath()));
-    }
-    return m;
-  }
-
-  private static Map<String, Object> toJsonMap(
-      final BlockLocation blockLocation) throws IOException {
-    if (blockLocation == null) {
-      return null;
-    }
-
-    final Map<String, Object> m = new HashMap<>();
-    m.put("length", blockLocation.getLength());
-    m.put("offset", blockLocation.getOffset());
-    m.put("corrupt", blockLocation.isCorrupt());
-    m.put("storageTypes", toJsonArray(blockLocation.getStorageTypes()));
-    m.put("cachedHosts", blockLocation.getCachedHosts());
-    m.put("hosts", blockLocation.getHosts());
-    m.put("names", blockLocation.getNames());
-    m.put("topologyPaths", blockLocation.getTopologyPaths());
-    return m;
-  }
-
-  public static String toJsonString(BlockLocation[] locations)
-      throws IOException {
-    if (locations == null) {
-      return null;
-    }
-    final Map<String, Object> m = new HashMap<>();
-    Object[] blockLocations = new Object[locations.length];
-    for (int i = 0; i < locations.length; i++) {
-      blockLocations[i] = toJsonMap(locations[i]);
-    }
-    m.put(BlockLocation.class.getSimpleName(), blockLocations);
-    return toJsonString("BlockLocations", m);
-  }
-}
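
Most of JsonUtil reduces to one idiom: build a TreeMap, wrap it under a single key (usually a class name), and serialize with the shared ObjectMapper. A minimal sketch of that shape (jackson-databind assumed on the classpath; the key and value are illustrative):

    import com.fasterxml.jackson.databind.ObjectMapper;

    import java.util.Map;
    import java.util.TreeMap;

    public class JsonWrapExample {
      public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        Map<String, Object> m = new TreeMap<>();
        m.put("Location", "http://example.com/webhdfs/v1/tmp/file");
        // Same shape as JsonUtil.toJsonString(key, value) above.
        System.out.println(mapper.writeValueAsString(m));
        // prints: {"Location":"http://example.com/webhdfs/v1/tmp/file"}
      }
    }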
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/metrics/HttpFSServerMetrics.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/metrics/HttpFSServerMetrics.java
deleted file mode 100644
index ffd043e8ff..0000000000
--- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/metrics/HttpFSServerMetrics.java
+++ /dev/null
@@ -1,164 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ozone.fs.http.server.metrics;
-
-import static org.apache.hadoop.metrics2.impl.MsInfo.SessionId;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.annotation.InterfaceAudience;
-import org.apache.hadoop.metrics2.MetricsSystem;
-import org.apache.hadoop.metrics2.annotation.Metric;
-import org.apache.hadoop.metrics2.annotation.Metrics;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.metrics2.lib.MetricsRegistry;
-import org.apache.hadoop.metrics2.lib.MutableCounterLong;
-import org.apache.hadoop.metrics2.source.JvmMetrics;
-
-import java.util.concurrent.ThreadLocalRandom;
-
-/**
- *
- * This class is for maintaining the various HttpFSServer statistics
- * and publishing them through the metrics interfaces.
- * This also registers the JMX MBean for RPC.
- * <p>
- * This class has a number of metrics variables that are publicly accessible;
- * these variables (objects) have methods to update their values;
- * for example:
- * <p> {@link #bytesRead}.incr()
- *
- */
-@InterfaceAudience.Private
-@Metrics(about = "HttpFSServer metrics", context = "httpfs")
-public class HttpFSServerMetrics {
-
-  private static final String DFS_METRICS_SESSION_ID_KEY
-      = "dfs.metrics.session-id";
-  private @Metric MutableCounterLong bytesWritten;
-  private @Metric MutableCounterLong bytesRead;
-
-  // Write ops
-  private @Metric MutableCounterLong opsCreate;
-  private @Metric MutableCounterLong opsAppend;
-  private @Metric MutableCounterLong opsTruncate;
-  private @Metric MutableCounterLong opsDelete;
-  private @Metric MutableCounterLong opsRename;
-  private @Metric MutableCounterLong opsMkdir;
-
-  // Read ops
-  private @Metric MutableCounterLong opsOpen;
-  private @Metric MutableCounterLong opsListing;
-  private @Metric MutableCounterLong opsStat;
-  private @Metric MutableCounterLong opsCheckAccess;
-
-  private final MetricsRegistry registry = new MetricsRegistry("httpfsserver");
-  private final String name;
-  private JvmMetrics jvmMetrics = null;
-
-  public HttpFSServerMetrics(String name, String sessionId,
-      final JvmMetrics jvmMetrics) {
-    this.name = name;
-    this.jvmMetrics = jvmMetrics;
-    registry.tag(SessionId, sessionId);
-  }
-
-  public static HttpFSServerMetrics create(Configuration conf,
-      String serverName) {
-    String sessionId = conf.get(DFS_METRICS_SESSION_ID_KEY);
-    MetricsSystem ms = DefaultMetricsSystem.instance();
-    JvmMetrics jm = JvmMetrics.create("HttpFSServer", sessionId, ms);
-    String name = "ServerActivity-" + (serverName.isEmpty()
-        ? "UndefinedServer" + ThreadLocalRandom.current().nextInt()
-        : serverName.replace(':', '-'));
-
-    return ms.register(name, null, new HttpFSServerMetrics(name,
-        sessionId, jm));
-  }
-
-  public String name() {
-    return name;
-  }
-
-  public JvmMetrics getJvmMetrics() {
-    return jvmMetrics;
-  }
-
-  public void incrBytesWritten(long bytes) {
-    bytesWritten.incr(bytes);
-  }
-
-  public void incrBytesRead(long bytes) {
-    bytesRead.incr(bytes);
-  }
-
-  public void incrOpsCreate() {
-    opsCreate.incr();
-  }
-
-  public void incrOpsAppend() {
-    opsAppend.incr();
-  }
-
-  public void incrOpsTruncate() {
-    opsTruncate.incr();
-  }
-
-  public void incrOpsDelete() {
-    opsDelete.incr();
-  }
-
-  public void incrOpsRename() {
-    opsRename.incr();
-  }
-
-  public void incrOpsMkdir() {
-    opsMkdir.incr();
-  }
-
-  public void incrOpsOpen() {
-    opsOpen.incr();
-  }
-
-  public void incrOpsListing() {
-    opsListing.incr();
-  }
-
-  public void incrOpsStat() {
-    opsStat.incr();
-  }
-
-  public void incrOpsCheckAccess() {
-    opsCheckAccess.incr();
-  }
-
-  public void shutdown() {
-    DefaultMetricsSystem.shutdown();
-  }
-
-  public long getOpsMkdir() {
-    return opsMkdir.value();
-  }
-
-  public long getOpsListing() {
-    return opsListing.value();
-  }
-
-  public long getOpsStat() {
-    return opsStat.value();
-  }
-}
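
Each incrOps*() method above wraps a metrics2 MutableCounterLong; the annotated @Metric fields are registered automatically by the metrics processor, and the explicit newCounter() call below is the programmatic equivalent. A minimal sketch of that counter idiom in isolation (hadoop-common assumed on the classpath; the counter name mirrors the one above):

    import org.apache.hadoop.metrics2.lib.MetricsRegistry;
    import org.apache.hadoop.metrics2.lib.MutableCounterLong;

    public class CounterExample {
      public static void main(String[] args) {
        MetricsRegistry registry = new MetricsRegistry("httpfsserver");
        MutableCounterLong opsMkdir =
            registry.newCounter("opsMkdir", "number of mkdir ops", 0L);
        opsMkdir.incr();   // what incrOpsMkdir() does above
        System.out.println("opsMkdir = " + opsMkdir.value());
      }
    }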
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/metrics/package-info.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/metrics/package-info.java
deleted file mode 100644
index da21f0daf6..0000000000
--- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/metrics/package-info.java
+++ /dev/null
@@ -1,26 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-/**
- * A package to implement metrics for the HttpFS Server.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Evolving
-package org.apache.ozone.fs.http.server.metrics;
-
-import org.apache.hadoop.hdds.annotation.InterfaceAudience;
-import org.apache.hadoop.hdds.annotation.InterfaceStability;
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/package-info.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/package-info.java
deleted file mode 100644
index 7762e23e86..0000000000
--- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-/**
- * Basic server implementations for HttpFS.
- */
-package org.apache.ozone.fs.http.server;
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/hdfs/web/WebHdfsConstants.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/hdfs/web/WebHdfsConstants.java
deleted file mode 100644
index 7256a7c04e..0000000000
--- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/hdfs/web/WebHdfsConstants.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ozone.hdfs.web;
-
-import org.apache.hadoop.hdds.annotation.InterfaceAudience;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.io.Text;
-
-/**
- * Declared WebHdfs constants.
- */
-@InterfaceAudience.Private
-public final class WebHdfsConstants {
-  public static final String WEBHDFS_SCHEME = "webhdfs";
-  public static final String SWEBHDFS_SCHEME = "swebhdfs";
-  public static final Text WEBHDFS_TOKEN_KIND
-      = new Text("WEBHDFS delegation");
-  public static final Text SWEBHDFS_TOKEN_KIND
-      = new Text("SWEBHDFS delegation");
-
-  private WebHdfsConstants() {
-  }
-
-  enum PathType {
-    FILE, DIRECTORY, SYMLINK;
-
-    static PathType valueOf(HdfsFileStatus status) {
-      if (status.isDirectory()) {
-        return DIRECTORY;
-      }
-      if (status.isSymlink()) {
-        return SYMLINK;
-      }
-      return FILE;
-    }
-  }
-}
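
One detail worth noting in PathType.valueOf above is the check order: the directory test runs before the symlink test. A stand-alone sketch of the same precedence, using a hypothetical status class in place of HdfsFileStatus:

    // Hypothetical stand-in for HdfsFileStatus, for illustration only.
    final class SimpleStatus {
      final boolean directory;
      final boolean symlink;

      SimpleStatus(boolean directory, boolean symlink) {
        this.directory = directory;
        this.symlink = symlink;
      }
    }

    enum SimplePathType {
      FILE, DIRECTORY, SYMLINK;

      // Directory is checked first, mirroring PathType.valueOf above.
      static SimplePathType of(SimpleStatus s) {
        if (s.directory) {
          return DIRECTORY;
        }
        if (s.symlink) {
          return SYMLINK;
        }
        return FILE;
      }
    }
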
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/hdfs/web/package-info.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/hdfs/web/package-info.java
deleted file mode 100644
index 02c9467ee9..0000000000
--- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/hdfs/web/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-/**
- * Webhdfs implementations.
- */
-package org.apache.ozone.hdfs.web;
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/lang/RunnableCallable.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/lang/RunnableCallable.java
deleted file mode 100644
index b8ce703835..0000000000
--- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/lang/RunnableCallable.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ozone.lib.lang;
-
-import org.apache.hadoop.hdds.annotation.InterfaceAudience;
-import org.apache.ozone.lib.util.Check;
-
-import java.util.concurrent.Callable;
-
-/**
- * Adapter class that allows <code>Runnable</code>s and <code>Callable</code>s
- * to be treated interchangeably.
- */
-@InterfaceAudience.Private
-public class RunnableCallable implements Callable<Void>, Runnable {
-  private Runnable runnable;
-  private Callable<?> callable;
-
-  /**
-   * Constructor that takes a runnable.
-   *
-   * @param runnable runnable.
-   */
-  public RunnableCallable(Runnable runnable) {
-    this.runnable = Check.notNull(runnable, "runnable");
-  }
-
-  /**
-   * Constructor that takes a callable.
-   *
-   * @param callable callable.
-   */
-  public RunnableCallable(Callable<?> callable) {
-    this.callable = Check.notNull(callable, "callable");
-  }
-
-  /**
-   * Invokes the wrapped callable/runnable as a callable.
-   *
-   * @return void
-   *
-   * @throws Exception thrown by the wrapped callable/runnable invocation.
-   */
-  @Override
-  public Void call() throws Exception {
-    if (runnable != null) {
-      runnable.run();
-    } else {
-      callable.call();
-    }
-    return null;
-  }
-
-  /**
-   * Invokes the wrapped callable/runnable as a runnable.
-   *
-   * @throws RuntimeException thrown by the wrapped callable/runnable
-   * invocation.
-   */
-  @Override
-  public void run() {
-    if (runnable != null) {
-      runnable.run();
-    } else {
-      try {
-        callable.call();
-      } catch (Exception ex) {
-        throw new RuntimeException(ex);
-      }
-    }
-  }
-
-  /**
-   * Returns the class name of the wrapped callable/runnable.
-   *
-   * @return the class name of the wrapped callable/runnable.
-   */
-  @Override
-  public String toString() {
-    return (runnable != null) ? runnable.getClass().getSimpleName()
-        : callable.getClass().getSimpleName();
-  }
-}
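
A brief usage sketch of the adapter above, assuming the RunnableCallable class is on the classpath (the task and pool are made up):

    import java.util.concurrent.Callable;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    public final class RunnableCallableDemo {
      public static void main(String[] args) throws Exception {
        // Wrap a Callable so it can also run where a Runnable is expected.
        Callable<String> task = () -> "done";
        RunnableCallable adapter = new RunnableCallable(task);

        Thread t = new Thread(adapter); // used as a Runnable
        t.start();
        t.join();

        ExecutorService pool = Executors.newSingleThreadExecutor();
        pool.submit((Callable<Void>) adapter).get(); // used as a Callable
        pool.shutdown();
      }
    }
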
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/lang/XException.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/lang/XException.java
deleted file mode 100644
index f20395f96b..0000000000
--- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/lang/XException.java
+++ /dev/null
@@ -1,140 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ozone.lib.lang;
-
-import org.apache.hadoop.hdds.annotation.InterfaceAudience;
-import org.apache.ozone.lib.util.Check;
-
-import java.text.MessageFormat;
-
-/**
- * Generic exception that requires an error code and uses a message
- * template from the error code.
- */
-@InterfaceAudience.Private
-public class XException extends Exception {
-
-  /**
-   * Interface to define error codes.
-   */
-  public interface ERROR {
-
-    /**
-     * Returns the template for the error.
-     *
-     * @return the template for the error, the template must be in JDK
-     *         <code>MessageFormat</code> syntax (using {#} positional
-     *         parameters).
-     */
-    String getTemplate();
-
-  }
-
-  private ERROR error;
-
-  /**
-   * Private constructor used by the public constructors.
-   *
-   * @param error error code.
-   * @param message error message.
-   * @param cause exception cause if any.
-   */
-  private XException(ERROR error, String message, Throwable cause) {
-    super(message, cause);
-    this.error = error;
-  }
-
-  /**
-   * Creates an XException using another XException as cause.
-   * <p>
-   * The error code and error message are extracted from the cause.
-   *
-   * @param cause exception cause.
-   */
-  public XException(XException cause) {
-    this(cause.getError(), cause.getMessage(), cause);
-  }
-
-  /**
-   * Creates an XException using the specified error code. The exception
-   * message is resolved using the error code template and the passed
-   * parameters.
-   *
-   * @param error error code for the XException.
-   * @param params parameters to use when creating the error message
-   * with the error code template.
-   */
-  @SuppressWarnings({"ThrowableResultOfMethodCallIgnored"})
-  public XException(ERROR error, Object... params) {
-    this(Check.notNull(error, "error"),
-        format(error, params),
-        getCause(params));
-  }
-
-  /**
-   * Returns the error code of the exception.
-   *
-   * @return the error code of the exception.
-   */
-  public ERROR getError() {
-    return error;
-  }
-
-  /**
-   * Creates a message using an error message template and arguments.
-   * <p>
-   * The template must be in JDK <code>MessageFormat</code> syntax
-   * (using {#} positional parameters).
-   *
-   * @param error error code, to get the template from.
-   * @param args arguments to use for creating the message.
-   *
-   * @return the resolved error message.
-   */
-  private static String format(ERROR error, Object... args) {
-    String template = error.getTemplate();
-    if (template == null) {
-      StringBuilder sb = new StringBuilder();
-      for (int i = 0; i < args.length; i++) {
-        sb.append(" {").append(i).append("}");
-      }
-      template = sb.deleteCharAt(0).toString();
-    }
-    return error + ": " + MessageFormat.format(template, args);
-  }
-
-  /**
-   * Returns the last parameter if it is an instance of
-   * <code>Throwable</code>, otherwise returns NULL.
-   *
-   * @param params parameters to look for a cause.
-   *
-   * @return the last parameter if it is an instance of
-   *         <code>Throwable</code>, otherwise NULL.
-   */
-  private static Throwable getCause(Object... params) {
-    Throwable throwable = null;
-    if (params != null && params.length > 0 && params[params.length - 1]
-        instanceof Throwable) {
-      throwable = (Throwable) params[params.length - 1];
-    }
-    return throwable;
-  }
-
-}
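
To make the template mechanism above concrete, here is a sketch of a caller-defined error enum (the code and template are invented for illustration). The message resolves via java.text.MessageFormat, and a trailing Throwable argument doubles as the cause:

    // Illustrative error codes for the XException pattern above.
    public enum DemoError implements XException.ERROR {
      D01("Could not open [{0}], {1}");

      private final String template;

      DemoError(String template) {
        this.template = template;
      }

      @Override
      public String getTemplate() {
        return template;
      }
    }

    // Usage, mirroring the call sites in this module:
    //   throw new XException(DemoError.D01, "data.txt", ex.getMessage(), ex);
    // The message resolves to "D01: Could not open [data.txt], <message>";
    // the trailing Throwable has no placeholder, so it only becomes the cause.
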
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/lang/package-info.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/lang/package-info.java
deleted file mode 100644
index 357cc62c07..0000000000
--- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/lang/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-/**
- * Basic functionalities.
- */
-package org.apache.ozone.lib.lang;
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/server/BaseService.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/server/BaseService.java
deleted file mode 100644
index e295447065..0000000000
--- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/server/BaseService.java
+++ /dev/null
@@ -1,185 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ozone.lib.server;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.annotation.InterfaceAudience;
-import org.apache.ozone.lib.util.ConfigurationUtils;
-
-import java.util.Map;
-
-/**
- * Convenience class implementing the {@link Service} interface.
- */
-@InterfaceAudience.Private
-public abstract class BaseService implements Service {
-  private String prefix;
-  private Server server;
-  private Configuration serviceConfig;
-
-  /**
-   * Service constructor.
-   *
-   * @param prefix service prefix.
-   */
-  public BaseService(String prefix) {
-    this.prefix = prefix;
-  }
-
-  /**
-   * Initializes the service.
-   * <p>
-   * It collects all service properties (properties having the
-   * <code>#SERVER#.#SERVICE#.</code> prefix). The
-   * <code>#SERVER#.#SERVICE#.</code> prefix is then trimmed from the
-   * property names.
-   * <p>
-   * After collecting the service properties it delegates to the
-   * {@link #init()} method.
-   *
-   * @param s the server initializing the service, gives access to the
-   * server context.
-   *
-   * @throws ServiceException thrown if the service could not be initialized.
-   */
-  @Override
-  public final void init(Server s) throws ServiceException {
-    this.server = s;
-    String servicePrefix = getPrefixedName("");
-    serviceConfig = new Configuration(false);
-    for (Map.Entry<String, String> entry : ConfigurationUtils
-        .resolve(s.getConfig())) {
-      String key = entry.getKey();
-      if (key.startsWith(servicePrefix)) {
-        serviceConfig.set(key.substring(servicePrefix.length()),
-            entry.getValue());
-      }
-    }
-    init();
-  }
-
-
-  /**
-   * Post initializes the service. This method is called by the
-   * {@link Server} after all services of the server have been initialized.
-   * <p>
-   * This method does a NOP.
-   *
-   * @throws ServiceException thrown if the service could not be
-   * post-initialized.
-   */
-  @Override
-  public void postInit() throws ServiceException {
-  }
-
-  /**
-   * Destroy the services.  This method is called once, when the
-   * {@link Server} owning the service is being destroyed.
-   * <p>
-   * This method does a NOP.
-   */
-  @Override
-  public void destroy() {
-  }
-
-  /**
-   * Returns the service dependencies of this service. The service will be
-   * instantiated only if all the service dependencies are already initialized.
-   * <p>
-   * This method returns an empty array (size 0)
-   *
-   * @return an empty array (size 0).
-   */
-  @Override
-  public Class[] getServiceDependencies() {
-    return new Class[0];
-  }
-
-  /**
-   * Notification callback when the server changes its status.
-   * <p>
-   * This method does a NOP.
-   *
-   * @param oldStatus old server status.
-   * @param newStatus new server status.
-   *
-   * @throws ServiceException thrown if the service could not process the status
-   * change.
-   */
-  @Override
-  public void serverStatusChange(Server.Status oldStatus,
-                                 Server.Status newStatus)
-      throws ServiceException {
-  }
-
-  /**
-   * Returns the service prefix.
-   *
-   * @return the service prefix.
-   */
-  protected String getPrefix() {
-    return prefix;
-  }
-
-  /**
-   * Returns the server owning the service.
-   *
-   * @return the server owning the service.
-   */
-  protected Server getServer() {
-    return server;
-  }
-
-  /**
-   * Returns the full prefixed name of a service property.
-   *
-   * @param name of the property.
-   *
-   * @return prefixed name of the property.
-   */
-  protected String getPrefixedName(String name) {
-    return server.getPrefixedName(prefix + "." + name);
-  }
-
-  /**
-   * Returns the service configuration properties. Property
-   * names are trimmed off from its prefix.
-   * <p>
-   * The service configuration properties are all properties
-   * with names starting with <code>#SERVER#.#SERVICE#.</code>
-   * in the server configuration.
-   *
-   * @return the service configuration properties with names
-   *         trimmed off from their <code>#SERVER#.#SERVICE#.</code>
-   *         prefix.
-   */
-  protected Configuration getServiceConfig() {
-    return serviceConfig;
-  }
-
-  /**
-   * Initializes the service.
-   * <p>
-   * This method is called by {@link #init(Server)} after all service
-   * properties (properties prefixed with
-   * <code>#SERVER#.#SERVICE#.</code>) have been collected.
-   *
-   * @throws ServiceException thrown if the service could not be initialized.
-   */
-  protected abstract void init() throws ServiceException;
-
-}
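
The property scoping performed in init(Server) above is easy to see in isolation; a self-contained sketch with plain java.util.Properties (the key names are invented):

    import java.util.Properties;

    public final class PrefixTrimDemo {
      public static void main(String[] args) {
        Properties server = new Properties();
        server.setProperty("httpfs.scheduler.threads", "5");
        server.setProperty("httpfs.other.key", "x");

        // Collect properties under "httpfs.scheduler." and trim the
        // prefix, mirroring BaseService#init(Server).
        String prefix = "httpfs.scheduler.";
        Properties service = new Properties();
        for (String key : server.stringPropertyNames()) {
          if (key.startsWith(prefix)) {
            service.setProperty(key.substring(prefix.length()),
                server.getProperty(key));
          }
        }
        System.out.println(service.getProperty("threads")); // prints 5
      }
    }
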
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/server/Server.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/server/Server.java
deleted file mode 100644
index eb86981e75..0000000000
--- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/server/Server.java
+++ /dev/null
@@ -1,841 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ozone.lib.server;
-
-import org.apache.hadoop.conf.ConfigRedactor;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.annotation.InterfaceAudience;
-import org.apache.ozone.lib.util.Check;
-import org.apache.ozone.lib.util.ConfigurationUtils;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.log4j.LogManager;
-import org.apache.log4j.PropertyConfigurator;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.nio.file.Files;
-import java.text.MessageFormat;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-
-/**
- * A Server class provides standard configuration, logging and {@link Service}
- * lifecycle management.
- * <p>
- * A Server normally has a home directory, a configuration directory, a temp
- * directory and logs directory.
- * <p>
- * The Server configuration is loaded from 2 overlapped files,
- * <code>#SERVER#-default.xml</code> and <code>#SERVER#-site.xml</code>. The
- * default file is loaded from the classpath, the site file is loaded from the
- * configuration directory.
- * <p>
- * The Server collects all configuration properties prefixed with
- * <code>#SERVER#</code>. The property names are then trimmed from the
- * <code>#SERVER#</code> prefix.
- * <p>
- * The Server log configuration is loaded from the
- * <code>#SERVICE#-log4j.properties</code> file in the configuration directory.
- * <p>
- * The lifecycle of a server is defined by the {@link Server.Status} enum.
- * When a server is created its status is UNDEF, while being initialized it
- * is BOOTING, and once initialization is complete it transitions to NORMAL
- * by default.
- * The <code>#SERVER#.startup.status</code> configuration property can be used
- * to specify a different startup status (NORMAL, ADMIN or HALTED).
- * <p>
- * Services classes are defined in the <code>#SERVER#.services</code> and
- * <code>#SERVER#.services.ext</code> properties. They are loaded in order
- * (services first, then services.ext).
- * <p>
- * Before initializing the services, they are traversed and duplicate service
- * interfaces are removed from the service list. The last service using a given
- * interface wins (this enables a simple override mechanism).
- * <p>
- * After the services have been resolved by interface de-duplication they are
- * initialized in order. Once all services are initialized they are
- * post-initialized (this enables late/conditional service bindings).
- */
-@InterfaceAudience.Private
-public class Server {
-  private Logger log;
-
-  /**
-   * Server property name that defines the service classes.
-   */
-  public static final String CONF_SERVICES = "services";
-
-  /**
-   * Server property name that defines the service extension classes.
-   */
-  public static final String CONF_SERVICES_EXT = "services.ext";
-
-  /**
-   * Server property name that defines server startup status.
-   */
-  public static final String CONF_STARTUP_STATUS = "startup.status";
-
-  /**
-   * Enumeration that defines the server status.
-   */
-  @InterfaceAudience.Private
-  public enum Status {
-    UNDEF(false, false),
-    BOOTING(false, true),
-    HALTED(true, true),
-    ADMIN(true, true),
-    NORMAL(true, true),
-    SHUTTING_DOWN(false, true),
-    SHUTDOWN(false, false);
-
-    private boolean settable;
-    private boolean operational;
-
-    /**
-     * Status constructor.
-     *
-     * @param settable indicates if the status is settable.
-     * @param operational indicates if the server is operational
-     * when in this status.
-     */
-    Status(boolean settable, boolean operational) {
-      this.settable = settable;
-      this.operational = operational;
-    }
-
-    /**
-     * Returns if this server status is operational.
-     *
-     * @return if this server status is operational.
-     */
-    public boolean isOperational() {
-      return operational;
-    }
-  }
-
-  /**
-   * Name of the log4j configuration file the Server will load from the
-   * classpath if the <code>#SERVER#-log4j.properties</code> is not defined
-   * in the server configuration directory.
-   */
-  public static final String DEFAULT_LOG4J_PROPERTIES
-      = "default-log4j.properties";
-
-  private Status status;
-  private String name;
-  private String homeDir;
-  private String configDir;
-  private String logDir;
-  private String tempDir;
-  private Configuration config;
-  private Map<Class, Service> services = new LinkedHashMap<Class, Service>();
-
-  /**
-   * Creates a server instance.
-   * <p>
-   * The config, log and temp directories are all under the specified home
-   * directory.
-   *
-   * @param name server name.
-   * @param homeDir server home directory.
-   */
-  public Server(String name, String homeDir) {
-    this(name, homeDir, null);
-  }
-
-  /**
-   * Creates a server instance.
-   *
-   * @param name server name.
-   * @param homeDir server home directory.
-   * @param configDir config directory.
-   * @param logDir log directory.
-   * @param tempDir temp directory.
-   */
-  public Server(String name,
-                String homeDir,
-                String configDir,
-                String logDir,
-                String tempDir) {
-    this(name, homeDir, configDir, logDir, tempDir, null);
-  }
-
-  /**
-   * Creates a server instance.
-   * <p>
-   * The config, log and temp directories are all under the specified home
-   * directory.
-   * <p>
-   * It uses the provided configuration instead of loading it from the config
-   * dir.
-   *
-   * @param name server name.
-   * @param homeDir server home directory.
-   * @param config server configuration.
-   */
-  public Server(String name, String homeDir, Configuration config) {
-    this(name,
-        homeDir,
-        homeDir + "/conf",
-        homeDir + "/log",
-        homeDir + "/temp",
-        config);
-  }
-
-  /**
-   * Creates a server instance.
-   * <p>
-   * It uses the provided configuration instead of loading it from the config
-   * dir.
-   *
-   * @param name server name.
-   * @param homeDir server home directory.
-   * @param configDir config directory.
-   * @param logDir log directory.
-   * @param tempDir temp directory.
-   * @param config server configuration.
-   */
-  public Server(String name,
-                String homeDir,
-                String configDir,
-                String logDir,
-                String tempDir,
-                Configuration config) {
-    this.name = StringUtils
-        .toLowerCase(Check.notEmpty(name, "name").trim());
-    this.homeDir = Check.notEmpty(homeDir, "homeDir");
-    this.configDir = Check.notEmpty(configDir, "configDir");
-    this.logDir = Check.notEmpty(logDir, "logDir");
-    this.tempDir = Check.notEmpty(tempDir, "tempDir");
-    checkAbsolutePath(homeDir, "homeDir");
-    checkAbsolutePath(configDir, "configDir");
-    checkAbsolutePath(logDir, "logDir");
-    checkAbsolutePath(tempDir, "tempDir");
-    if (config != null) {
-      this.config = new Configuration(false);
-      ConfigurationUtils.copy(config, this.config);
-    }
-    status = Status.UNDEF;
-  }
-
-  /**
-   * Validates that the specified value is an absolute path (starts with '/').
-   *
-   * @param value value to verify it is an absolute path.
-   * @param n name to use in the exception if the value is not an absolute
-   * path.
-   *
-   * @return the value.
-   *
-   * @throws IllegalArgumentException thrown if the value is not an absolute
-   * path.
-   */
-  private String checkAbsolutePath(String value, String n) {
-    if (!new File(value).isAbsolute()) {
-      throw new IllegalArgumentException(
-        MessageFormat.format("[{0}] must be an absolute path [{1}]",
-            n,
-            value));
-    }
-    return value;
-  }
-
-  /**
-   * Returns the current server status.
-   *
-   * @return the current server status.
-   */
-  public Status getStatus() {
-    return status;
-  }
-
-  /**
-   * Sets a new server status.
-   * <p>
-   * The status must be settable.
-   * <p>
-   * All services will be notified of the status change via the
-   * {@link Service#serverStatusChange(Server.Status, Server.Status)} method.
-   * If a service throws an exception during the notification, the server will
-   * be destroyed.
-   *
-   * @param status status to set.
-   *
-   * @throws ServerException thrown if the server has been destroyed because
-   * of a failed notification to a service.
-   */
-  public void setStatus(Status status) throws ServerException {
-    Check.notNull(status, "status");
-    if (status.settable) {
-      if (status != this.status) {
-        Status oldStatus = this.status;
-        this.status = status;
-        for (Service service : services.values()) {
-          try {
-            service.serverStatusChange(oldStatus, status);
-          } catch (Exception ex) {
-            log.error("Service [{}] exception during status change to [{}] " +
-                    "-server shutting down-,  {}",
-                new Object[]{service.getInterface().getSimpleName(),
-                    status,
-                    ex.getMessage(),
-                    ex});
-            destroy();
-            throw new ServerException(ServerException.ERROR.S11,
-                service.getInterface().getSimpleName(),
-                status,
-                ex.getMessage(),
-                ex);
-          }
-        }
-      }
-    } else {
-      throw new IllegalArgumentException("Status [" + status
-          + "] is not settable");
-    }
-  }
-
-  /**
-   * Verifies the server is operational.
-   *
-   * @throws IllegalStateException thrown if the server is not operational.
-   */
-  protected void ensureOperational() {
-    if (!getStatus().isOperational()) {
-      throw new IllegalStateException("Server is not running");
-    }
-  }
-
-  /**
-   * Convenience method that returns a resource as an input stream from the
-   * classpath.
-   * <p>
-   * It first attempts to use the thread's context classloader and if not
-   * set it uses the <code>Server</code> class classloader.
-   *
-   * @param name resource to retrieve.
-   *
-   * @return inputstream with the resource, NULL if the resource does not
-   *         exist.
-   */
-  static InputStream getResource(String name) {
-    Check.notEmpty(name, "name");
-    ClassLoader cl = Thread.currentThread().getContextClassLoader();
-    if (cl == null) {
-      cl = Server.class.getClassLoader();
-    }
-    return cl.getResourceAsStream(name);
-  }
-
-  /**
-   * Initializes the Server.
-   * <p>
-   * The initialization steps are:
-   * <ul>
-   * <li>It verifies the service home and temp directories exist</li>
-   * <li>Loads the Server <code>#SERVER#-default.xml</code>
-   * configuration file from the classpath</li>
-   * <li>Initializes log4j logging. If the
-   * <code>#SERVER#-log4j.properties</code> file does not exist in the config
-   * directory it loads <code>default-log4j.properties</code> from the
-   * classpath
-   * </li>
-   * <li>Loads the <code>#SERVER#-site.xml</code> file from the server config
-   * directory and merges it with the default configuration.</li>
-   * <li>Loads the services</li>
-   * <li>Initializes the services</li>
-   * <li>Post-initializes the services</li>
-   * <li>Sets the server startup status</li>
-   * </ul>
-   *
-   * @throws ServerException thrown if the server could not be initialized.
-   */
-  public void init() throws ServerException {
-    if (status != Status.UNDEF) {
-      throw new IllegalStateException("Server already initialized");
-    }
-    status = Status.BOOTING;
-    verifyDir(homeDir);
-    verifyDir(tempDir);
-    Properties serverInfo = new Properties();
-    try {
-      InputStream is = getResource(name + ".properties");
-      serverInfo.load(is);
-      is.close();
-    } catch (IOException ex) {
-      throw new RuntimeException("Could not load server information file: "
-          + name + ".properties");
-    }
-    initLog();
-    log.info("++++++++++++++++++++++++++++++++++++++++++++++++++++++");
-    log.info("Server [{}] starting", name);
-    log.info("  Built information:");
-    log.info("    Version           : {}", serverInfo.getProperty(name
-        + ".version", "undef"));
-    log.info("    Source Repository : {}", serverInfo.getProperty(name
-        + ".source.repository", "undef"));
-    log.info("    Source Revision   : {}", serverInfo.getProperty(name
-        + ".source.revision", "undef"));
-    log.info("    Built by          : {}", serverInfo.getProperty(name
-        + ".build.username", "undef"));
-    log.info("    Built timestamp   : {}", serverInfo.getProperty(name
-        + ".build.timestamp", "undef"));
-    log.info("  Runtime information:");
-    log.info("    Home   dir: {}", homeDir);
-    log.info("    Config dir: {}", (config == null) ? configDir : "-");
-    log.info("    Log    dir: {}", logDir);
-    log.info("    Temp   dir: {}", tempDir);
-    initConfig();
-    log.debug("Loading services");
-    List<Service> list = loadServices();
-    try {
-      log.debug("Initializing services");
-      initServices(list);
-      log.info("Services initialized");
-    } catch (ServerException ex) {
-      log.error("Services initialization failure, " +
-          "destroying initialized services");
-      destroyServices();
-      throw ex;
-    }
-    Status s = Status.valueOf(getConfig()
-        .get(getPrefixedName(CONF_STARTUP_STATUS), Status.NORMAL.toString()));
-    setStatus(s);
-    log.info("Server [{}] started!, status [{}]", name, s);
-  }
-
-  /**
-   * Verifies the specified directory exists.
-   *
-   * @param dir directory to verify it exists.
-   *
-   * @throws ServerException thrown if the directory does not exist or if the
-   * path is not a directory.
-   */
-  private void verifyDir(String dir) throws ServerException {
-    File file = new File(dir);
-    if (!file.exists()) {
-      throw new ServerException(ServerException.ERROR.S01, dir);
-    }
-    if (!file.isDirectory()) {
-      throw new ServerException(ServerException.ERROR.S02, dir);
-    }
-  }
-
-  /**
-   * Initializes Log4j logging.
-   *
-   * @throws ServerException thrown if Log4j could not be initialized.
-   */
-  protected void initLog() throws ServerException {
-    verifyDir(logDir);
-    LogManager.resetConfiguration();
-    File log4jFile = new File(configDir, name + "-log4j.properties");
-    if (log4jFile.exists()) {
-      PropertyConfigurator
-          .configureAndWatch(log4jFile.toString(), 10 * 1000); //every 10 secs
-      log = LoggerFactory.getLogger(Server.class);
-    } else {
-      Properties props = new Properties();
-      try {
-        InputStream is = getResource(DEFAULT_LOG4J_PROPERTIES);
-        try {
-          props.load(is);
-        } finally {
-          is.close();
-        }
-      } catch (IOException ex) {
-        throw new ServerException(ServerException.ERROR.S03,
-            DEFAULT_LOG4J_PROPERTIES,
-            ex.getMessage(),
-            ex);
-      }
-      PropertyConfigurator.configure(props);
-      log = LoggerFactory.getLogger(Server.class);
-      log.warn("Log4j [{}] configuration file not found, using default " +
-          "configuration from classpath", log4jFile);
-    }
-  }
-
-  /**
-   * Loads and initializes the server configuration.
-   *
-   * @throws ServerException thrown if the configuration could not be
-   * loaded/initialized.
-   */
-  protected void initConfig() throws ServerException {
-    verifyDir(configDir);
-    File file = new File(configDir);
-    Configuration defaultConf;
-    String defaultConfig = name + "-default.xml";
-    ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
-    InputStream inputStream = classLoader.getResourceAsStream(defaultConfig);
-    if (inputStream == null) {
-      log.warn("Default configuration file not available in classpath [{}]",
-          defaultConfig);
-      defaultConf = new Configuration(false);
-    } else {
-      try {
-        defaultConf = new Configuration(false);
-        ConfigurationUtils.load(defaultConf, inputStream);
-      } catch (Exception ex) {
-        throw new ServerException(ServerException.ERROR.S03,
-            defaultConfig,
-            ex.getMessage(),
-            ex);
-      }
-    }
-
-    if (config == null) {
-      Configuration siteConf;
-      File siteFile = new File(file, name + "-site.xml");
-      if (!siteFile.exists()) {
-        log.warn("Site configuration file [{}] not found in config directory",
-            siteFile);
-        siteConf = new Configuration(false);
-      } else {
-        if (!siteFile.isFile()) {
-          throw new ServerException(ServerException.ERROR.S05,
-              siteFile.getAbsolutePath());
-        }
-        try {
-          log.debug("Loading site configuration from [{}]", siteFile);
-          inputStream = Files.newInputStream(siteFile.toPath());
-          siteConf = new Configuration(false);
-          ConfigurationUtils.load(siteConf, inputStream);
-        } catch (IOException ex) {
-          throw new ServerException(ServerException.ERROR.S06,
-              siteFile,
-              ex.getMessage(),
-              ex);
-        }
-      }
-
-      config = new Configuration(false);
-      ConfigurationUtils.copy(siteConf, config);
-    }
-
-    ConfigurationUtils.injectDefaults(defaultConf, config);
-    ConfigRedactor redactor = new ConfigRedactor(config);
-    for (String s : System.getProperties().stringPropertyNames()) {
-      String value = System.getProperty(s);
-      if (s.startsWith(getPrefix() + ".")) {
-        config.set(s, value);
-        String redacted = redactor.redact(s, value);
-        log.info("System property sets  {}: {}", s, redacted);
-      }
-    }
-
-    log.debug("Loaded Configuration:");
-    log.debug("------------------------------------------------------");
-    for (Map.Entry<String, String> entry : config) {
-      String key = entry.getKey();
-      String value = config.get(entry.getKey());
-      String redacted = redactor.redact(key, value);
-      log.debug("  {}: {}", entry.getKey(), redacted);
-    }
-    log.debug("------------------------------------------------------");
-  }
-
-  /**
-   * Loads the specified services.
-   *
-   * @param classes services classes to load.
-   * @param list list of loaded service in order of appearance in the
-   * configuration.
-   *
-   * @throws ServerException thrown if a service class could not be loaded.
-   */
-  private void loadServices(Class[] classes, List<Service> list)
-      throws ServerException {
-    for (Class klass : classes) {
-      try {
-        Service service = (Service) klass.newInstance();
-        log.debug("Loading service [{}] implementation [{}]",
-            service.getInterface(),
-            service.getClass());
-        if (!service.getInterface().isInstance(service)) {
-          throw new ServerException(ServerException.ERROR.S04,
-              klass,
-              service.getInterface().getName());
-        }
-        list.add(service);
-      } catch (ServerException ex) {
-        throw ex;
-      } catch (Exception ex) {
-        throw new ServerException(ServerException.ERROR.S07,
-            klass,
-            ex.getMessage(),
-            ex);
-      }
-    }
-  }
-
-  /**
-   * Loads services defined in <code>services</code> and
-   * <code>services.ext</code> and de-dups them.
-   *
-   * @return List of final services to initialize.
-   *
-   * @throws ServerException thrown if the services could not be loaded.
-   */
-  protected List<Service> loadServices() throws ServerException {
-    try {
-      Map<Class, Service> map = new LinkedHashMap<Class, Service>();
-      Class[] classes = getConfig().getClasses(getPrefixedName(CONF_SERVICES));
-      Class[] classesExt
-          = getConfig().getClasses(getPrefixedName(CONF_SERVICES_EXT));
-      List<Service> list = new ArrayList<Service>();
-      loadServices(classes, list);
-      loadServices(classesExt, list);
-
-      //removing duplicate services, strategy: last one wins
-      for (Service service : list) {
-        if (map.containsKey(service.getInterface())) {
-          log.debug("Replacing service [{}] implementation [{}]",
-              service.getInterface(),
-              service.getClass());
-        }
-        map.put(service.getInterface(), service);
-      }
-      list = new ArrayList<Service>();
-      for (Map.Entry<Class, Service> entry : map.entrySet()) {
-        list.add(entry.getValue());
-      }
-      return list;
-    } catch (RuntimeException ex) {
-      throw new ServerException(ServerException.ERROR.S08, ex.getMessage(), ex);
-    }
-  }
-
-  /**
-   * Initializes the given list of services.
-   *
-   * @param serviceList services to initialize, it must be a de-duplicated
-   *                    list of services.
-   *
-   * @throws ServerException thrown if the services could not be initialized.
-   */
-  protected void initServices(List<Service> serviceList)
-      throws ServerException {
-    for (Service service : serviceList) {
-      log.debug("Initializing service [{}]", service.getInterface());
-      checkServiceDependencies(service);
-      service.init(this);
-      this.services.put(service.getInterface(), service);
-    }
-    for (Service service : serviceList) {
-      service.postInit();
-    }
-  }
-
-  /**
-   * Checks if all service dependencies of a service are available.
-   *
-   * @param service service to check if all its dependencies are available.
-   *
-   * @throws ServerException thrown if a service dependency is missing.
-   */
-  protected void checkServiceDependencies(Service service)
-      throws ServerException {
-    if (service.getServiceDependencies() != null) {
-      for (Class dependency : service.getServiceDependencies()) {
-        if (services.get(dependency) == null) {
-          throw new ServerException(ServerException.ERROR.S10,
-              service.getClass(),
-              dependency);
-        }
-      }
-    }
-  }
-
-  /**
-   * Destroys the server services.
-   */
-  protected void destroyServices() {
-    List<Service> list = new ArrayList<Service>(services.values());
-    Collections.reverse(list);
-    for (Service service : list) {
-      try {
-        log.debug("Destroying service [{}]", service.getInterface());
-        service.destroy();
-      } catch (Throwable ex) {
-        log.error("Could not destroy service [{}], {}",
-                  new Object[]{service.getInterface(), ex.getMessage(), ex});
-      }
-    }
-    log.info("Services destroyed");
-  }
-
-  /**
-   * Destroys the server.
-   * <p>
-   * All services are destroyed in reverse order of initialization, then the
-   * Log4j framework is shut down.
-   */
-  public void destroy() {
-    ensureOperational();
-    destroyServices();
-    log.info("Server [{}] shutdown!", name);
-    log.info("======================================================");
-    if (!Boolean.getBoolean("test.circus")) {
-      LogManager.shutdown();
-    }
-    status = Status.SHUTDOWN;
-  }
-
-  /**
-   * Returns the name of the server.
-   *
-   * @return the server name.
-   */
-  public String getName() {
-    return name;
-  }
-
-  /**
-   * Returns the server prefix for server configuration properties.
-   * <p>
-   * By default it is the server name.
-   *
-   * @return the prefix for server configuration properties.
-   */
-  public String getPrefix() {
-    return getName();
-  }
-
-  /**
-   * Returns the prefixed name of a server property.
-   *
-   * @param s name of the property.
-   *
-   * @return prefixed name of the property.
-   */
-  public String getPrefixedName(String s) {
-    return getPrefix() + "." + Check.notEmpty(s, "s");
-  }
-
-  /**
-   * Returns the server home dir.
-   *
-   * @return the server home dir.
-   */
-  public String getHomeDir() {
-    return homeDir;
-  }
-
-  /**
-   * Returns the server config dir.
-   *
-   * @return the server config dir.
-   */
-  public String getConfigDir() {
-    return configDir;
-  }
-
-  /**
-   * Returns the server log dir.
-   *
-   * @return the server log dir.
-   */
-  public String getLogDir() {
-    return logDir;
-  }
-
-  /**
-   * Returns the server temp dir.
-   *
-   * @return the server temp dir.
-   */
-  public String getTempDir() {
-    return tempDir;
-  }
-
-  /**
-   * Returns the server configuration.
-   *
-   * @return the server configuration.
-   */
-  public Configuration getConfig() {
-    return config;
-  }
-
-  /**
-   * Returns the {@link Service} associated to the specified interface.
-   *
-   * @param serviceKlass service interface.
-   *
-   * @return the service implementation.
-   */
-  @SuppressWarnings("unchecked")
-  public <T> T get(Class<T> serviceKlass) {
-    ensureOperational();
-    Check.notNull(serviceKlass, "serviceKlass");
-    return (T) services.get(serviceKlass);
-  }
-
-  /**
-   * Adds a service programmatically.
-   * <p>
-   * If a service with the same interface exists, it will be destroyed and
-   * removed before the given one is initialized and added.
-   * <p>
-   * If an exception is thrown the server is destroyed.
-   *
-   * @param klass service class to add.
-   *
-   * @throws ServerException thrown if the service could not be
-   * initialized/added to the server.
-   */
-  public void setService(Class<? extends Service> klass)
-      throws ServerException {
-    ensureOperational();
-    Check.notNull(klass, "serviceKlass");
-    if (getStatus() == Status.SHUTTING_DOWN) {
-      throw new IllegalStateException("Server shutting down");
-    }
-    try {
-      Service newService = klass.newInstance();
-      Service oldService = services.get(newService.getInterface());
-      if (oldService != null) {
-        try {
-          oldService.destroy();
-        } catch (Throwable ex) {
-          log.error("Could not destroy service [{}], {}",
-              new Object[]{oldService.getInterface(), ex.getMessage(), ex});
-        }
-      }
-      newService.init(this);
-      services.put(newService.getInterface(), newService);
-    } catch (Exception ex) {
-      log.error("Could not set service [{}] programmatically -server shutting "
-          + "down-, {}", klass, ex);
-      destroy();
-      throw new ServerException(ServerException.ERROR.S09,
-          klass,
-          ex.getMessage(),
-          ex);
-    }
-  }
-
-}
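
The "last one wins" de-duplication described in the class comment reduces to re-insertion into a LinkedHashMap keyed by service interface; a trimmed-down sketch with hypothetical services:

    import java.util.ArrayList;
    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    public final class DedupDemo {
      interface Groups { }
      static class BaseGroups implements Groups { }
      static class CustomGroups implements Groups { }

      public static void main(String[] args) {
        List<Groups> loaded = new ArrayList<>();
        loaded.add(new BaseGroups());    // from "services"
        loaded.add(new CustomGroups());  // from "services.ext", wins

        // Keyed by interface: a later entry replaces an earlier one,
        // while LinkedHashMap keeps the original ordering.
        Map<Class<?>, Groups> byInterface = new LinkedHashMap<>();
        for (Groups g : loaded) {
          byInterface.put(Groups.class, g);
        }
        System.out.println(byInterface.get(Groups.class).getClass()
            .getSimpleName()); // prints CustomGroups
      }
    }
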
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/server/ServerException.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/server/ServerException.java
deleted file mode 100644
index 8f937db70d..0000000000
--- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/server/ServerException.java
+++ /dev/null
@@ -1,98 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ozone.lib.server;
-
-import org.apache.hadoop.hdds.annotation.InterfaceAudience;
-import org.apache.ozone.lib.lang.XException;
-
-/**
- * Exception thrown by the {@link Server} class.
- */
-@InterfaceAudience.Private
-public class ServerException extends XException {
-
-  /**
-   * Error codes use by the {@link Server} class.
-   */
-  @InterfaceAudience.Private
-  public enum ERROR implements XException.ERROR {
-    S01("Dir [{0}] does not exist"),
-    S02("[{0}] is not a directory"),
-    S03("Could not load file from classpath [{0}], {1}"),
-    S04("Service [{0}] does not implement declared interface [{1}]"),
-    S05("[{0}] is not a file"),
-    S06("Could not load file [{0}], {1}"),
-    S07("Could not instanciate service class [{0}], {1}"),
-    S08("Could not load service classes, {0}"),
-    S09("Could not set service [{0}] programmatically " +
-        "-server shutting down-, {1}"),
-    S10("Service [{0}] requires service [{1}]"),
-    S11("Service [{0}] exception during status change to [{1}] " +
-        "-server shutting down-, {2}"),
-    S12("Could not start service [{0}], {1}"),
-    S13("Missing system property [{0}]"),
-    S14("Could not initialize server, {0}");
-
-    private String msg;
-
-    /**
-     * Constructor for the error code enum.
-     *
-     * @param msg message template.
-     */
-    ERROR(String msg) {
-      this.msg = msg;
-    }
-
-    /**
-     * Returns the message template for the error code.
-     *
-     * @return the message template for the error code.
-     */
-    @Override
-    public String getTemplate() {
-      return msg;
-    }
-  }
-
-  /**
-   * Constructor for sub-classes.
-   *
-   * @param error error code for the XException.
-   * @param params parameters to use when creating the error message
-   * with the error code template.
-   */
-  protected ServerException(XException.ERROR error, Object... params) {
-    super(error, params);
-  }
-
-  /**
-   * Creates a server exception using the specified error code.
-   * The exception message is resolved using the error code template
-   * and the passed parameters.
-   *
-   * @param error error code for the XException.
-   * @param params parameters to use when creating the error message
-   * with the error code template.
-   */
-  public ServerException(ERROR error, Object... params) {
-    super(error, params);
-  }
-
-}
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/server/Service.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/server/Service.java
deleted file mode 100644
index 7745bf5cee..0000000000
--- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/server/Service.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ozone.lib.server;
-
-import org.apache.hadoop.hdds.annotation.InterfaceAudience;
-
-/**
- * Service interface for components to be managed by the {@link Server} class.
- */
-@InterfaceAudience.Private
-public interface Service {
-
-  /**
-   * Initializes the service. This method is called once, when the
-   * {@link Server} owning the service is being initialized.
-   *
-   * @param server the server initializing the service, give access to the
-   * server context.
-   *
-   * @throws ServiceException thrown if the service could not be initialized.
-   */
-  void init(Server server) throws ServiceException;
-
-  /**
-   * Post initializes the service. This method is called by the
-   * {@link Server} after all services of the server have been initialized.
-   *
-   * @throws ServiceException thrown if the service could not be
-   * post-initialized.
-   */
-  void postInit() throws ServiceException;
-
-  /**
-   * Destroy the services.  This method is called once, when the
-   * {@link Server} owning the service is being destroyed.
-   */
-  void destroy();
-
-  /**
-   * Returns the service dependencies of this service. The service will be
-   * instantiated only if all the service dependencies are already initialized.
-   *
-   * @return the service dependencies.
-   */
-  Class[] getServiceDependencies();
-
-  /**
-   * Returns the interface implemented by this service. This interface is used
-   * by the {@link Server} when the {@link Server#get(Class)} method is used to
-   * retrieve a service.
-   *
-   * @return the interface that identifies the service.
-   */
-  Class getInterface();
-
-  /**
-   * Notification callback when the server changes its status.
-   *
-   * @param oldStatus old server status.
-   * @param newStatus new server status.
-   *
-   * @throws ServiceException thrown if the service could not process the status
-   * change.
-   */
-  void serverStatusChange(Server.Status oldStatus, Server.Status newStatus)
-      throws ServiceException;
-
-}
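
For orientation, a skeletal implementation of the interface above (the classes are hypothetical; real services in this module extend BaseService rather than implementing Service directly):

    // Hypothetical marker interface identifying the service to Server#get.
    interface PingService {
      void ping();
    }

    public class PingServiceImpl implements Service, PingService {
      @Override
      public void init(Server server) throws ServiceException {
        // Acquire resources; the owning server is still BOOTING here.
      }

      @Override
      public void postInit() {
        // All sibling services are initialized; safe to look them up now.
      }

      @Override
      public void destroy() {
        // Release resources; called in reverse initialization order.
      }

      @Override
      public Class[] getServiceDependencies() {
        return new Class[0]; // no required services
      }

      @Override
      public Class getInterface() {
        return PingService.class; // key used by Server#get(Class)
      }

      @Override
      public void serverStatusChange(Server.Status oldStatus,
                                     Server.Status newStatus) {
        // React to ADMIN/HALTED transitions if needed.
      }

      @Override
      public void ping() {
      }
    }
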
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/server/ServiceException.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/server/ServiceException.java
deleted file mode 100644
index c8f3b15e18..0000000000
--- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/server/ServiceException.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ozone.lib.server;
-
-import org.apache.hadoop.hdds.annotation.InterfaceAudience;
-import org.apache.ozone.lib.lang.XException;
-
-/**
- * Exception thrown by {@link Service} implementations.
- */
-@InterfaceAudience.Private
-public class ServiceException extends ServerException {
-
-  /**
-   * Creates a service exception using the specified error code.
-   * The exception message is resolved using the error code template
-   * and the passed parameters.
-   *
-   * @param error error code for the XException.
-   * @param params parameters to use when creating the error message
-   * with the error code template.
-   */
-  public ServiceException(XException.ERROR error, Object... params) {
-    super(error, params);
-  }
-
-}
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/server/package-info.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/server/package-info.java
deleted file mode 100644
index 5c13c69c04..0000000000
--- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/server/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-/**
- * Basic server implementations.
- */
-package org.apache.ozone.lib.server;
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/service/FileSystemAccess.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/service/FileSystemAccess.java
deleted file mode 100644
index dcd6ac292f..0000000000
--- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/service/FileSystemAccess.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ozone.lib.service;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hdds.annotation.InterfaceAudience;
-
-import java.io.IOException;
-
-/**
- * Interface for accessing the filesystem.
- */
-@InterfaceAudience.Private
-public interface FileSystemAccess {
-
-  /**
-   * Executor for filesystem operations.
-   *
-   * @param <T> return type of operations.
-   */
-  interface FileSystemExecutor<T> {
-
-    T execute(FileSystem fs) throws IOException;
-  }
-
-  <T> T execute(String user,
-                Configuration conf,
-                FileSystemExecutor<T> executor)
-      throws FileSystemAccessException;
-
-  FileSystem createFileSystem(String user, Configuration conf)
-      throws IOException, FileSystemAccessException;
-
-  void releaseFileSystem(FileSystem fs) throws IOException;
-
-  Configuration getFileSystemConfiguration();
-
-}
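
For context on the interface removed above: FileSystemAccess lets a caller
run filesystem work as a proxied user while the service owns FileSystem
creation, caching and release. A minimal sketch of how a caller might have
used it; the HomeDirListing class and the root-listing logic are
illustrative, not taken from the reverted code:

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.ozone.lib.service.FileSystemAccess;
    import org.apache.ozone.lib.service.FileSystemAccessException;

    // Illustrative caller, not part of the reverted code.
    public class HomeDirListing {
      private final FileSystemAccess access;

      public HomeDirListing(FileSystemAccess access) {
        this.access = access;
      }

      // Runs the listing as 'user'; the service performs the proxy-user
      // doAs and returns the FileSystem to its cache when the executor
      // finishes, so the caller never closes fs itself.
      public int countRootEntries(String user, Configuration fsConf)
          throws FileSystemAccessException {
        return access.execute(user, fsConf,
            new FileSystemAccess.FileSystemExecutor<Integer>() {
              @Override
              public Integer execute(FileSystem fs) throws IOException {
                FileStatus[] entries = fs.listStatus(new Path("/"));
                return entries.length;
              }
            });
      }
    }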
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/service/FileSystemAccessException.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/service/FileSystemAccessException.java
deleted file mode 100644
index 189d7b6940..0000000000
--- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/service/FileSystemAccessException.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ozone.lib.service;
-
-import org.apache.hadoop.hdds.annotation.InterfaceAudience;
-import org.apache.ozone.lib.lang.XException;
-
-/**
- * Exception thrown when a filesystem access problem occurs.
- */
-@InterfaceAudience.Private
-public class FileSystemAccessException extends XException {
-
-  /**
-   * Error codes.
-   */
-  public enum ERROR implements XException.ERROR {
-    H01("Service property [{0}] not defined"),
-    H02("Kerberos initialization failed, {0}"),
-    H03("FileSystemExecutor error, {0}"),
-    H04("Invalid configuration, it has not be created by the " +
-        "FileSystemAccessService"),
-    H05("[{0}] validation failed, {1}"),
-    H06("Property [{0}] not defined in configuration object"),
-    H07("[{0}] not healthy, {1}"),
-    H08("{0}"),
-    H09("Invalid FileSystemAccess security mode [{0}]"),
-    H10("Hadoop config directory not found [{0}]"),
-    H11("Could not load Hadoop config files, {0}");
-
-    private String template;
-
-    ERROR(String template) {
-      this.template = template;
-    }
-
-    @Override
-    public String getTemplate() {
-      return template;
-    }
-  }
-
-  public FileSystemAccessException(ERROR error, Object... params) {
-    super(error, params);
-  }
-
-}
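
The ERROR enum above follows the gateway's XException convention: each code
carries a java.text.MessageFormat-style template, and the constructor
parameters fill its {0}, {1} placeholders. A rough sketch of the resulting
message, assuming XException renders templates with MessageFormat as the
upstream Hadoop code it was copied from does:

    import org.apache.ozone.lib.service.FileSystemAccessException;

    public final class ErrorDemo {
      private ErrorDemo() {
      }

      public static void main(String[] args) {
        // H06's template is "Property [{0}] not defined in configuration
        // object", so the message should render along the lines of
        // "H06: Property [fs.defaultFS] not defined in configuration object"
        // (the exact prefix depends on XException's formatting).
        FileSystemAccessException ex = new FileSystemAccessException(
            FileSystemAccessException.ERROR.H06, "fs.defaultFS");
        System.out.println(ex.getMessage());
      }
    }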
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/service/Groups.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/service/Groups.java
deleted file mode 100644
index 9a7542bb9c..0000000000
--- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/service/Groups.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ozone.lib.service;
-
-import org.apache.hadoop.hdds.annotation.InterfaceAudience;
-
-import java.io.IOException;
-import java.util.List;
-
-/**
- * Groups interface.
- */
-@InterfaceAudience.Private
-public interface Groups {
-
-  List<String> getGroups(String user) throws IOException;
-
-}
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/service/Instrumentation.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/service/Instrumentation.java
deleted file mode 100644
index 71697a0b1d..0000000000
--- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/service/Instrumentation.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ozone.lib.service;
-
-import org.apache.hadoop.hdds.annotation.InterfaceAudience;
-
-import java.util.Map;
-
-/**
- * Hadoop server instrumentation interface.
- */
-@InterfaceAudience.Private
-public interface Instrumentation {
-
-  /**
-   * Cron interface.
-   */
-  interface Cron {
-
-    Cron start();
-
-    Cron stop();
-  }
-
-  /**
-   * Variable interface.
-   *
-   * @param <T> the type of the variable.
-   */
-  interface Variable<T> {
-
-    T getValue();
-  }
-
-  Cron createCron();
-
-  void incr(String group, String name, long count);
-
-  void addCron(String group, String name, Cron cron);
-
-  void addVariable(String group, String name, Variable<?> variable);
-
-  // Sampling happens once per second.
-  void addSampler(String group,
-                  String name,
-                  int samplingSize,
-                  Variable<Long> variable);
-
-  Map<String, Map<String, ?>> getSnapshot();
-
-}
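
The Cron type above is the gateway's timing primitive: create one, start it
around the measured work, stop it, then register it under a group/name pair.
FileSystemAccessService.execute() further down in this diff uses exactly this
pattern. A short sketch; the "demo" group name and the Runnable wrapper are
illustrative:

    import org.apache.ozone.lib.service.Instrumentation;

    public final class TimedOperation {
      private TimedOperation() {
      }

      // Times one operation; the "demo" group is invented for this sketch.
      public static void runTimed(Instrumentation instrumentation,
                                  Runnable work) {
        Instrumentation.Cron cron = instrumentation.createCron();
        try {
          cron.start();
          work.run();
        } finally {
          cron.stop();
          instrumentation.addCron("demo",
              work.getClass().getSimpleName(), cron);
        }
      }
    }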
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/service/Scheduler.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/service/Scheduler.java
deleted file mode 100644
index 9d06b89215..0000000000
--- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/service/Scheduler.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ozone.lib.service;
-
-import org.apache.hadoop.hdds.annotation.InterfaceAudience;
-
-import java.util.concurrent.Callable;
-import java.util.concurrent.TimeUnit;
-
-/**
- * Scheduler interface.
- */
-@InterfaceAudience.Private
-public interface Scheduler {
-
-  void schedule(Callable<?> callable,
-                long delay,
-                long interval,
-                TimeUnit unit);
-
-  void schedule(Runnable runnable,
-                long delay,
-                long interval,
-                TimeUnit unit);
-
-}
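
Both overloads schedule a recurring task with an initial delay and a repeat
interval; the cache purger in FileSystemAccessService below is the in-tree
user. A hedged sketch with an invented heartbeat task:

    import java.util.concurrent.TimeUnit;

    import org.apache.ozone.lib.service.Scheduler;

    public final class HeartbeatSetup {
      private HeartbeatSetup() {
      }

      // Prints a heartbeat every 60 seconds, starting 60 seconds after
      // startup, the same shape as the cache-purger scheduling below.
      // The task itself is illustrative.
      public static void start(Scheduler scheduler) {
        Runnable heartbeat =
            () -> System.out.println("httpfs gateway heartbeat");
        scheduler.schedule(heartbeat, 60, 60, TimeUnit.SECONDS);
      }
    }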
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/service/hadoop/FileSystemAccessService.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/service/hadoop/FileSystemAccessService.java
deleted file mode 100644
index 986e79b7e9..0000000000
--- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/service/hadoop/FileSystemAccessService.java
+++ /dev/null
@@ -1,465 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ozone.lib.service.hadoop;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdds.annotation.InterfaceAudience;
-import org.apache.ozone.lib.server.BaseService;
-import org.apache.ozone.lib.server.ServiceException;
-import org.apache.ozone.lib.service.FileSystemAccess;
-import org.apache.ozone.lib.service.FileSystemAccessException;
-import org.apache.ozone.lib.service.Instrumentation;
-import org.apache.ozone.lib.service.Scheduler;
-import org.apache.ozone.lib.util.Check;
-import org.apache.ozone.lib.util.ConfigurationUtils;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.util.VersionInfo;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.net.URI;
-import java.security.PrivilegedExceptionAction;
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
-
-/**
- * Provides authenticated filesystem access.
- */
-@InterfaceAudience.Private
-public class FileSystemAccessService extends BaseService
-    implements FileSystemAccess {
-  private static final Logger LOG
-      = LoggerFactory.getLogger(FileSystemAccessService.class);
-
-  public static final String PREFIX = "hadoop";
-
-  private static final String INSTRUMENTATION_GROUP = "hadoop";
-
-  public static final String AUTHENTICATION_TYPE = "authentication.type";
-  public static final String KERBEROS_KEYTAB = "authentication.kerberos.keytab";
-  public static final String KERBEROS_PRINCIPAL
-      = "authentication.kerberos.principal";
-  public static final String FS_CACHE_PURGE_FREQUENCY
-      = "filesystem.cache.purge.frequency";
-  public static final String FS_CACHE_PURGE_TIMEOUT
-      = "filesystem.cache.purge.timeout";
-
-  public static final String NAME_NODE_WHITELIST = "name.node.whitelist";
-
-  public static final String HADOOP_CONF_DIR = "config.dir";
-
-  private static final String[] HADOOP_CONF_FILES
-      = {"core-site.xml", "hdfs-site.xml"};
-
-  private static final String FILE_SYSTEM_SERVICE_CREATED
-      = "FileSystemAccessService.created";
-
-  private static class CachedFileSystem {
-    private FileSystem fs;
-    private long lastUse;
-    private long timeout;
-    private int count;
-
-    CachedFileSystem(long timeout) {
-      this.timeout = timeout;
-      lastUse = -1;
-      count = 0;
-    }
-
-    synchronized FileSystem getFileSystem(Configuration conf)
-        throws IOException {
-      if (fs == null) {
-        fs = FileSystem.get(conf);
-      }
-      lastUse = -1;
-      count++;
-      return fs;
-    }
-
-    synchronized void release() throws IOException {
-      count--;
-      if (count == 0) {
-        if (timeout == 0) {
-          fs.close();
-          fs = null;
-          lastUse = -1;
-        } else {
-          lastUse = System.currentTimeMillis();
-        }
-      }
-    }
-
-    // To avoid race conditions when adding/removing entries from the map
-    // cache, an entry in the cache remains forever; it just closes/opens
-    // filesystems based on their utilization. Worst case scenario, the
-    // penalty we'll pay is that the number of entries in the cache will be
-    // the total number of users in HDFS (which seems a reasonable overhead).
-    synchronized boolean purgeIfIdle() throws IOException {
-      boolean ret = false;
-      if (count == 0 && lastUse != -1 &&
-          (System.currentTimeMillis() - lastUse) > timeout) {
-        fs.close();
-        fs = null;
-        lastUse = -1;
-        ret = true;
-      }
-      return ret;
-    }
-
-  }
-
-  public FileSystemAccessService() {
-    super(PREFIX);
-  }
-
-  private Collection<String> nameNodeWhitelist;
-
-  // Suppressed because serviceHadoopConf is only used in this class and in
-  // the tests, which will be removed later.
-  @SuppressWarnings("checkstyle:VisibilityModifier")
-  Configuration serviceHadoopConf;
-  private Configuration fileSystemConf;
-
-  private AtomicInteger unmanagedFileSystems = new AtomicInteger();
-
-  private ConcurrentHashMap<String, CachedFileSystem> fsCache =
-      new ConcurrentHashMap<String, CachedFileSystem>();
-
-  private long purgeTimeout;
-
-  @Override
-  protected void init() throws ServiceException {
-    LOG.info("Using FileSystemAccess JARs version [{}]",
-        VersionInfo.getVersion());
-    String security = getServiceConfig()
-        .get(AUTHENTICATION_TYPE, "simple").trim();
-    if (security.equals("kerberos")) {
-      String defaultName = getServer().getName();
-      String keytab = System
-          .getProperty("user.home") + "/" + defaultName + ".keytab";
-      keytab = getServiceConfig().get(KERBEROS_KEYTAB, keytab).trim();
-      if (keytab.length() == 0) {
-        throw new ServiceException(FileSystemAccessException.ERROR.H01,
-            KERBEROS_KEYTAB);
-      }
-      String principal = defaultName + "/localhost@LOCALHOST";
-      principal = getServiceConfig().get(KERBEROS_PRINCIPAL, principal).trim();
-      if (principal.length() == 0) {
-        throw new ServiceException(FileSystemAccessException.ERROR.H01,
-            KERBEROS_PRINCIPAL);
-      }
-      Configuration conf = new Configuration();
-      conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
-      UserGroupInformation.setConfiguration(conf);
-      try {
-        UserGroupInformation.loginUserFromKeytab(principal, keytab);
-      } catch (IOException ex) {
-        throw new ServiceException(FileSystemAccessException.ERROR.H02,
-            ex.getMessage(),
-            ex);
-      }
-      LOG.info("Using FileSystemAccess Kerberos authentication, " +
-          "principal [{}] keytab [{}]", principal, keytab);
-    } else if (security.equals("simple")) {
-      Configuration conf = new Configuration();
-      conf.set(HADOOP_SECURITY_AUTHENTICATION, "simple");
-      UserGroupInformation.setConfiguration(conf);
-      LOG.info("Using FileSystemAccess simple/pseudo authentication, " +
-          "principal [{}]", System.getProperty("user.name"));
-    } else {
-      throw new ServiceException(FileSystemAccessException.ERROR.H09, security);
-    }
-
-    String hadoopConfDirProp = getServiceConfig().get(HADOOP_CONF_DIR,
-        getServer().getConfigDir());
-    File hadoopConfDir = new File(hadoopConfDirProp).getAbsoluteFile();
-    if (!hadoopConfDir.exists()) {
-      hadoopConfDir = new File(getServer().getConfigDir()).getAbsoluteFile();
-    }
-    if (!hadoopConfDir.exists()) {
-      throw new ServiceException(FileSystemAccessException.ERROR.H10,
-          hadoopConfDir);
-    }
-    try {
-      serviceHadoopConf = loadHadoopConf(hadoopConfDir);
-      fileSystemConf = getNewFileSystemConfiguration();
-    } catch (IOException ex) {
-      throw new ServiceException(FileSystemAccessException.ERROR.H11,
-          ex.toString(),
-          ex);
-    }
-
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("FileSystemAccess FileSystem configuration:");
-      for (Map.Entry entry : serviceHadoopConf) {
-        LOG.debug("  {} = {}", entry.getKey(), entry.getValue());
-      }
-    }
-    setRequiredServiceHadoopConf(serviceHadoopConf);
-
-    nameNodeWhitelist = toLowerCase(getServiceConfig()
-        .getTrimmedStringCollection(NAME_NODE_WHITELIST));
-  }
-
-  private Configuration loadHadoopConf(File dir) throws IOException {
-    Configuration hadoopConf = new Configuration(false);
-    for (String file : HADOOP_CONF_FILES) {
-      File f = new File(dir, file);
-      if (f.exists()) {
-        hadoopConf.addResource(new Path(f.getAbsolutePath()));
-      }
-    }
-    return hadoopConf;
-  }
-
-  private Configuration getNewFileSystemConfiguration() {
-    Configuration conf = new Configuration(true);
-    ConfigurationUtils.copy(serviceHadoopConf, conf);
-    conf.setBoolean(FILE_SYSTEM_SERVICE_CREATED, true);
-
-    // Force-clear server-side umask to make HttpFS match WebHDFS behavior
-    conf.set(FsPermission.UMASK_LABEL, "000");
-    return conf;
-  }
-
-  @Override
-  public void postInit() throws ServiceException {
-    super.postInit();
-    Instrumentation instrumentation = getServer().get(Instrumentation.class);
-    instrumentation.addVariable(INSTRUMENTATION_GROUP,
-        "unmanaged.fs",
-        new Instrumentation.Variable<Integer>() {
-          @Override
-          public Integer getValue() {
-            return unmanagedFileSystems.get();
-          }
-        });
-    instrumentation.addSampler(INSTRUMENTATION_GROUP,
-        "unmanaged.fs",
-        60,
-        new Instrumentation.Variable<Long>() {
-          @Override
-          public Long getValue() {
-            return (long) unmanagedFileSystems.get();
-          }
-        });
-    Scheduler scheduler = getServer().get(Scheduler.class);
-    int purgeInterval = getServiceConfig().getInt(FS_CACHE_PURGE_FREQUENCY, 60);
-    purgeTimeout = getServiceConfig().getLong(FS_CACHE_PURGE_TIMEOUT, 60);
-    purgeTimeout = (purgeTimeout > 0) ? purgeTimeout : 0;
-    if (purgeTimeout > 0) {
-      scheduler.schedule(new FileSystemCachePurger(),
-                         purgeInterval, purgeInterval, TimeUnit.SECONDS);
-    }
-  }
-
-  private class FileSystemCachePurger implements Runnable {
-
-    @Override
-    public void run() {
-      int count = 0;
-      for (CachedFileSystem cacheFs : fsCache.values()) {
-        try {
-          count += cacheFs.purgeIfIdle() ? 1 : 0;
-        } catch (Throwable ex) {
-          LOG.warn("Error while purging filesystem, " + ex.toString(), ex);
-        }
-      }
-      LOG.debug("Purged [{}] filesystem instances", count);
-    }
-  }
-
-  private Set<String> toLowerCase(Collection<String> collection) {
-    Set<String> set = new HashSet<String>();
-    for (String value : collection) {
-      set.add(StringUtils.toLowerCase(value));
-    }
-    return set;
-  }
-
-  @Override
-  public Class getInterface() {
-    return FileSystemAccess.class;
-  }
-
-  @Override
-  public Class[] getServiceDependencies() {
-    return new Class[]{Instrumentation.class, Scheduler.class};
-  }
-
-  protected UserGroupInformation getUGI(String user) throws IOException {
-    return UserGroupInformation.createProxyUser(user,
-        UserGroupInformation.getLoginUser());
-  }
-
-  protected void setRequiredServiceHadoopConf(Configuration conf) {
-    conf.set("fs.hdfs.impl.disable.cache", "true");
-  }
-
-  private static final String HTTPFS_FS_USER = "httpfs.fs.user";
-
-  protected FileSystem createFileSystem(Configuration namenodeConf)
-      throws IOException {
-    String user = UserGroupInformation.getCurrentUser().getShortUserName();
-    CachedFileSystem newCachedFS = new CachedFileSystem(purgeTimeout);
-    CachedFileSystem cachedFS = fsCache.putIfAbsent(user, newCachedFS);
-    if (cachedFS == null) {
-      cachedFS = newCachedFS;
-    }
-    Configuration conf = new Configuration(namenodeConf);
-    conf.set(HTTPFS_FS_USER, user);
-    return cachedFS.getFileSystem(conf);
-  }
-
-  protected void closeFileSystem(FileSystem fs) throws IOException {
-    if (fsCache.containsKey(fs.getConf().get(HTTPFS_FS_USER))) {
-      fsCache.get(fs.getConf().get(HTTPFS_FS_USER)).release();
-    }
-  }
-
-  protected void validateNamenode(String namenode)
-      throws FileSystemAccessException {
-    if (!nameNodeWhitelist.isEmpty() && !nameNodeWhitelist.contains("*")) {
-      if (!nameNodeWhitelist.contains(
-          StringUtils.toLowerCase(namenode))) {
-        throw new FileSystemAccessException(FileSystemAccessException.ERROR.H05,
-            namenode,
-            "not in whitelist");
-      }
-    }
-  }
-
-  protected void checkNameNodeHealth(FileSystem fileSystem)
-      throws FileSystemAccessException {
-  }
-
-  @Override
-  public <T> T execute(String user,
-                       final Configuration conf,
-                       final FileSystemExecutor<T> executor)
-      throws FileSystemAccessException {
-    Check.notEmpty(user, "user");
-    Check.notNull(conf, "conf");
-    Check.notNull(executor, "executor");
-    if (!conf.getBoolean(FILE_SYSTEM_SERVICE_CREATED, false)) {
-      throw new FileSystemAccessException(FileSystemAccessException.ERROR.H04);
-    }
-    if (conf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY) == null ||
-        conf.getTrimmed(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY)
-            .length() == 0) {
-      throw new FileSystemAccessException(FileSystemAccessException.ERROR.H06,
-                                          CommonConfigurationKeysPublic
-                                              .FS_DEFAULT_NAME_KEY);
-    }
-    try {
-      validateNamenode(
-          new URI(conf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY))
-            .getAuthority());
-      UserGroupInformation ugi = getUGI(user);
-      return ugi.doAs(new PrivilegedExceptionAction<T>() {
-        @Override
-        public T run() throws Exception {
-          FileSystem fs = createFileSystem(conf);
-          Instrumentation instrumentation = getServer()
-              .get(Instrumentation.class);
-          Instrumentation.Cron cron = instrumentation.createCron();
-          try {
-            checkNameNodeHealth(fs);
-            cron.start();
-            return executor.execute(fs);
-          } finally {
-            cron.stop();
-            instrumentation.addCron(INSTRUMENTATION_GROUP,
-                executor.getClass().getSimpleName(),
-                cron);
-            closeFileSystem(fs);
-          }
-        }
-      });
-    } catch (FileSystemAccessException ex) {
-      throw ex;
-    } catch (Exception ex) {
-      throw new FileSystemAccessException(FileSystemAccessException.ERROR.H03,
-          ex);
-    }
-  }
-
-  public FileSystem createFileSystemInternal(String user,
-                                             final Configuration conf)
-      throws IOException, FileSystemAccessException {
-    Check.notEmpty(user, "user");
-    Check.notNull(conf, "conf");
-    if (!conf.getBoolean(FILE_SYSTEM_SERVICE_CREATED, false)) {
-      throw new FileSystemAccessException(FileSystemAccessException.ERROR.H04);
-    }
-    try {
-      validateNamenode(new URI(conf
-          .get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY))
-          .getAuthority());
... 3925 lines suppressed ...
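
Tying the service together: execute() rejects any Configuration that was not
produced by the service itself (error H04) and any without fs.defaultFS set
(error H06), so callers are expected to start from
getFileSystemConfiguration(). A sketch under those assumptions; the user
handling and target path are illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.ozone.lib.service.FileSystemAccess;
    import org.apache.ozone.lib.service.FileSystemAccessException;

    public final class MkdirExample {
      private MkdirExample() {
      }

      // Creates a directory as the proxied user. Copying the service's own
      // configuration keeps the FileSystemAccessService.created marker that
      // execute() checks (error H04). The path is invented for this sketch.
      public static boolean mkdir(FileSystemAccess access, String user)
          throws FileSystemAccessException {
        Configuration conf =
            new Configuration(access.getFileSystemConfiguration());
        return access.execute(user, conf,
            fs -> fs.mkdirs(new Path("/tmp/httpfs-demo")));
      }
    }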


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org