Posted to common-issues@hadoop.apache.org by GitBox <gi...@apache.org> on 2019/01/09 23:01:08 UTC

[GitHub] elek closed pull request #457: HDDS-965. Ozone: checkstyle improvements and code quality scripts.

URL: https://github.com/apache/hadoop/pull/457

This is a PR merged from a forked repository. As GitHub hides the
original diff once a foreign (fork) pull request is merged, the diff is
displayed below for the sake of provenance:

diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
index b1a70c01140a..f8d02deb262d 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
@@ -190,12 +190,6 @@ public long getReplicatedMinCommitIndex() {
     return minIndex.isPresent() ? minIndex.getAsLong() : 0;
   }
 
-  private void getFailedServer(
-      Collection<RaftProtos.CommitInfoProto> commitInfos) {
-    for (RaftProtos.CommitInfoProto proto : commitInfos) {
-
-    }
-  }
 
   @Override
   public long watchForCommit(long index, long timeout)
@@ -217,7 +211,7 @@ public long watchForCommit(long index, long timeout)
         .sendWatchAsync(index, RaftProtos.ReplicationLevel.ALL_COMMITTED);
     RaftClientReply reply;
     try {
-      reply = replyFuture.get(timeout, TimeUnit.MILLISECONDS);
+      replyFuture.get(timeout, TimeUnit.MILLISECONDS);
     } catch (TimeoutException toe) {
       LOG.warn("3 way commit failed ", toe);
 
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
index 32c6b6ae9864..b62f7b6a6674 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
@@ -122,6 +122,7 @@
    * @param watchTimeout          watch timeout
    * @param checksum              checksum
    */
+  @SuppressWarnings("parameternumber")
   public BlockOutputStream(BlockID blockID, String key,
       XceiverClientManager xceiverClientManager, XceiverClientSpi xceiverClient,
       String traceID, int chunkSize, long streamBufferFlushSize,
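
A note on the @SuppressWarnings("parameternumber") annotations this patch
adds here and on other wide constructors below: checkstyle's ParameterNumber
check (default limit: seven parameters) honors the annotation when the build
registers the SuppressWarningsHolder check together with the
SuppressWarningsFilter, matching the value case-insensitively against the
check name. A minimal sketch, assuming that filter setup and a hypothetical
class:

public class WideConstructorExample {
  private final String[] parts;

  // "parameternumber" is checkstyle's ParameterNumber check name,
  // lowercased; without the annotation the check flags this constructor
  // for exceeding the default limit of seven parameters.
  @SuppressWarnings("parameternumber")
  public WideConstructorExample(String a, String b, String c, String d,
      String e, String f, String g, String h) {
    this.parts = new String[] {a, b, c, d, e, f, g, h};
  }
}
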
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
index 18637af7a881..08be429231f4 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
@@ -18,22 +18,6 @@
 
 package org.apache.hadoop.hdds;
 
-import com.google.common.base.Strings;
-import com.google.common.net.HostAndPort;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.metrics2.util.MBeans;
-import org.apache.hadoop.net.DNS;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import javax.management.ObjectName;
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
@@ -47,13 +31,26 @@
 import java.util.Optional;
 import java.util.TimeZone;
 
-import static org.apache.hadoop.hdfs.DFSConfigKeys
-    .DFS_DATANODE_DNS_INTERFACE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys
-    .DFS_DATANODE_DNS_NAMESERVER_KEY;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.metrics2.util.MBeans;
+import org.apache.hadoop.net.DNS;
+import org.apache.hadoop.net.NetUtils;
+
+import com.google.common.base.Strings;
+import com.google.common.net.HostAndPort;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_INTERFACE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_NAMESERVER_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED_DEFAULT;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * HDDS specific stateless utility functions.
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/HddsVersionProvider.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/HddsVersionProvider.java
index 7110839546f7..ef7a56e6b2b6 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/HddsVersionProvider.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/HddsVersionProvider.java
@@ -28,7 +28,7 @@
   @Override
   public String[] getVersion() throws Exception {
     String[] result = new String[] {
-        HddsVersionInfo.getBuildVersion()
+        HddsVersionInfo.HDDS_VERSION_INFO.getBuildVersion()
     };
     return result;
   }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java
index 677b752bd9c8..712e8d37e046 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java
@@ -139,6 +139,9 @@ static void writeResponse(Configuration conf,
     }
   }
 
+  /**
+   * Exception for signaling a bad content type.
+   */
   public static class BadFormatException extends Exception {
 
     private static final long serialVersionUID = 1L;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
index 0f4d3c5a10f1..05d4e77117c9 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
@@ -81,6 +81,7 @@
   @JsonIgnore
   private byte[] data;
 
+  @SuppressWarnings("parameternumber")
   ContainerInfo(
       long containerID,
       HddsProtos.LifeCycleState state,
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/UnknownPipelineStateException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/UnknownPipelineStateException.java
index 1aa935803b60..7c75fc0a1397 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/UnknownPipelineStateException.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/UnknownPipelineStateException.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.hdds.scm.pipeline;
 
-    import java.io.IOException;
+import java.io.IOException;
 
 /**
  * Signals that a pipeline state is not recognized.
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
index a24ba88a396c..44f86948dbc6 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
@@ -67,7 +67,6 @@
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
-import java.util.stream.Collectors;
 
 /**
  * This class is the client-side translator to translate the requests made on
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/HddsVersionInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/HddsVersionInfo.java
index e7f697a0c50c..88b61f6f993e 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/HddsVersionInfo.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/HddsVersionInfo.java
@@ -36,9 +36,13 @@
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public class HddsVersionInfo {
+
   private static final Logger LOG = LoggerFactory.getLogger(
       HddsVersionInfo.class);
 
+  public static final HddsVersionInfo HDDS_VERSION_INFO =
+      new HddsVersionInfo("hdds");
+
   private Properties info;
 
   protected HddsVersionInfo(String component) {
@@ -46,7 +50,8 @@ protected HddsVersionInfo(String component) {
     String versionInfoFile = component + "-version-info.properties";
     InputStream is = null;
     try {
-      is = ThreadUtil.getResourceAsStream(HddsVersionInfo.class.getClassLoader(),
+      is = ThreadUtil.getResourceAsStream(
+          HddsVersionInfo.class.getClassLoader(),
           versionInfoFile);
       info.load(is);
     } catch (IOException ex) {
@@ -57,127 +62,56 @@ protected HddsVersionInfo(String component) {
     }
   }
 
-  protected String _getVersion() {
+  protected String getVersion() {
     return info.getProperty("version", "Unknown");
   }
 
-  protected String _getRevision() {
+  protected String getRevision() {
     return info.getProperty("revision", "Unknown");
   }
 
-  protected String _getBranch() {
+  protected String getBranch() {
     return info.getProperty("branch", "Unknown");
   }
 
-  protected String _getDate() {
+  protected String getDate() {
     return info.getProperty("date", "Unknown");
   }
 
-  protected String _getUser() {
+  protected String getUser() {
     return info.getProperty("user", "Unknown");
   }
 
-  protected String _getUrl() {
+  protected String getUrl() {
     return info.getProperty("url", "Unknown");
   }
 
-  protected String _getSrcChecksum() {
+  protected String getSrcChecksum() {
     return info.getProperty("srcChecksum", "Unknown");
   }
 
-  protected String _getBuildVersion(){
-    return _getVersion() +
-      " from " + _getRevision() +
-      " by " + _getUser() +
-      " source checksum " + _getSrcChecksum();
+  public String getBuildVersion() {
+    return HDDS_VERSION_INFO.getVersion() +
+        " from " + HDDS_VERSION_INFO.getRevision() +
+        " by " + getUser() +
+        " source checksum " + getSrcChecksum();
   }
 
-  protected String _getProtocVersion() {
+  protected String getProtocVersion() {
     return info.getProperty("protocVersion", "Unknown");
   }
 
-  private static final HddsVersionInfo HDDS_VERSION_INFO =
-      new HddsVersionInfo("hdds");
-  /**
-   * Get the HDDS version.
-   * @return the Hdds version string, eg. "0.6.3-dev"
-   */
-  public static String getVersion() {
-    return HDDS_VERSION_INFO._getVersion();
-  }
-
-  /**
-   * Get the Git commit hash of the repository when compiled.
-   * @return the commit hash, eg. "18f64065d5db6208daf50b02c1b5ed4ee3ce547a"
-   */
-  public static String getRevision() {
-    return HDDS_VERSION_INFO._getRevision();
-  }
-
-  /**
-   * Get the branch on which this originated.
-   * @return The branch name, e.g. "trunk" or "branches/branch-0.20"
-   */
-  public static String getBranch() {
-    return HDDS_VERSION_INFO._getBranch();
-  }
-
-  /**
-   * The date that HDDS was compiled.
-   * @return the compilation date in unix date format
-   */
-  public static String getDate() {
-    return HDDS_VERSION_INFO._getDate();
-  }
-
-  /**
-   * The user that compiled HDDS.
-   * @return the username of the user
-   */
-  public static String getUser() {
-    return HDDS_VERSION_INFO._getUser();
-  }
-
-  /**
-   * Get the URL for the HDDS repository.
-   * @return the URL of the Hdds repository
-   */
-  public static String getUrl() {
-    return HDDS_VERSION_INFO._getUrl();
-  }
-
-  /**
-   * Get the checksum of the source files from which HDDS was built.
-   * @return the checksum of the source files
-   */
-  public static String getSrcChecksum() {
-    return HDDS_VERSION_INFO._getSrcChecksum();
-  }
-
-  /**
-   * Returns the buildVersion which includes version,
-   * revision, user and date.
-   * @return the buildVersion
-   */
-  public static String getBuildVersion(){
-    return HDDS_VERSION_INFO._getBuildVersion();
-  }
-
-  /**
-   * Returns the protoc version used for the build.
-   * @return the protoc version
-   */
-  public static String getProtocVersion(){
-    return HDDS_VERSION_INFO._getProtocVersion();
-  }
-
   public static void main(String[] args) {
-    System.out.println("Using HDDS " + getVersion());
-    System.out.println("Source code repository " + getUrl() + " -r " +
-        getRevision());
-    System.out.println("Compiled by " + getUser() + " on " + getDate());
-    System.out.println("Compiled with protoc " + getProtocVersion());
-    System.out.println("From source with checksum " + getSrcChecksum());
+    System.out.println("Using HDDS " + HDDS_VERSION_INFO.getVersion());
+    System.out.println(
+        "Source code repository " + HDDS_VERSION_INFO.getUrl() + " -r " +
+            HDDS_VERSION_INFO.getRevision());
+    System.out.println("Compiled by " + HDDS_VERSION_INFO.getUser() + " on "
+        + HDDS_VERSION_INFO.getDate());
+    System.out.println(
+        "Compiled with protoc " + HDDS_VERSION_INFO.getProtocVersion());
+    System.out.println(
+        "From source with checksum " + HDDS_VERSION_INFO.getSrcChecksum());
     LOG.debug("This command was run using " +
         ClassUtil.findContainingJar(HddsVersionInfo.class));
   }
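
The net effect of the HddsVersionInfo rewrite above: the private singleton
plus the static _get* wrappers are gone, and callers reach the one public
method through the now-public HDDS_VERSION_INFO constant (as
HddsVersionProvider does earlier in this diff). A minimal call-site sketch
with a hypothetical class name; note that only getBuildVersion() is public,
while the remaining getters stay protected for use inside the class:

import org.apache.hadoop.utils.HddsVersionInfo;

public class PrintHddsVersion {
  public static void main(String[] args) {
    // Formerly the static HddsVersionInfo.getBuildVersion(); now an
    // instance call through the public singleton constant.
    System.out.println(
        "Using HDDS " + HddsVersionInfo.HDDS_VERSION_INFO.getBuildVersion());
  }
}
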
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataStoreBuilder.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataStoreBuilder.java
index a1c9b85c2a42..9df4206bd245 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataStoreBuilder.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataStoreBuilder.java
@@ -18,10 +18,20 @@
 
 package org.apache.hadoop.utils;
 
-import com.google.common.annotations.VisibleForTesting;
+import java.io.File;
+import java.io.IOException;
+import java.util.Optional;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
+
+import com.google.common.annotations.VisibleForTesting;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF;
 import org.iq80.leveldb.Options;
 import org.rocksdb.BlockBasedTableConfig;
 import org.rocksdb.Statistics;
@@ -29,22 +39,6 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.File;
-import java.io.IOException;
-import java.util.Optional;
-import java.util.function.Supplier;
-
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_METADATA_STORE_IMPL_LEVELDB;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_METADATA_STORE_IMPL_ROCKSDB;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_METADATA_STORE_ROCKSDB_STATISTICS;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF;
-
 /**
  * Builder for metadata store.
  */
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java
index 972eb8316f22..cd93e4857e30 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java
@@ -45,6 +45,7 @@
  * Dispatcher sends ContainerCommandRequests to Handler. Each Container Type
  * should have an implementation for Handler.
  */
+@SuppressWarnings("visibilitymodifier")
 public abstract class Handler {
 
   protected final Configuration conf;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
index 2d00da86c981..e4c0eb16aab9 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
@@ -86,7 +86,8 @@ public VersionEndpointTask(EndpointStateMachine rpcEndPoint,
           Preconditions.checkNotNull(clusterId, "Reply from SCM: clusterId " +
               "cannot be null");
 
-          // If version file does not exist create version file and also set scmId
+          // If version file does not exist
+          // create version file and also set scmId
 
           for (Map.Entry<String, HddsVolume> entry : volumeMap.entrySet()) {
             HddsVolume hddsVolume = entry.getValue();
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/DispatcherContext.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/DispatcherContext.java
index 4ca99e0fbe9b..28033aa50c9b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/DispatcherContext.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/DispatcherContext.java
@@ -26,7 +26,7 @@
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
-public class DispatcherContext {
+public final class DispatcherContext {
   /**
    * Determines which stage of writeChunk a write chunk request is for.
    */
@@ -82,8 +82,8 @@ public WriteChunkStage getStage() {
      * @param stage WriteChunk Stage
      * @return DispatcherContext.Builder
      */
-    public Builder setStage(WriteChunkStage stage) {
-      this.stage = stage;
+    public Builder setStage(WriteChunkStage writeChunkStage) {
+      this.stage = writeChunkStage;
       return this;
     }
 
@@ -93,8 +93,8 @@ public Builder setStage(WriteChunkStage stage) {
      * @param readFromTmpFile whether to read from tmp chunk file or not
      * @return DispatcherContext.Builder
      */
-    public Builder setReadFromTmpFile(boolean readFromTmpFile) {
-      this.readFromTmpFile = readFromTmpFile;
+    public Builder setReadFromTmpFile(boolean setReadFromTmpFile) {
+      this.readFromTmpFile = setReadFromTmpFile;
       return this;
     }
 
@@ -104,8 +104,8 @@ public Builder setReadFromTmpFile(boolean readFromTmpFile) {
      * @param term current term
      * @return DispatcherContext.Builder
      */
-    public Builder setTerm(long term) {
-      this.term = term;
+    public Builder setTerm(long currentTerm) {
+      this.term = currentTerm;
       return this;
     }
 
@@ -115,8 +115,8 @@ public Builder setTerm(long term) {
      * @param logIndex log index
      * @return DispatcherContext.Builder
      */
-    public Builder setLogIndex(long logIndex) {
-      this.logIndex = logIndex;
+    public Builder setLogIndex(long index) {
+      this.logIndex = index;
       return this;
     }
 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
index 5abc18541c28..37aade76b044 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
@@ -156,58 +156,19 @@ private RaftProperties newRaftProperties(Configuration conf) {
     final RaftProperties properties = new RaftProperties();
 
     // Set rpc type
-    final String rpcType = conf.get(
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY,
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT);
-    final RpcType rpc = SupportedRpcType.valueOfIgnoreCase(rpcType);
-    RaftConfigKeys.Rpc.setType(properties, rpc);
+    final RpcType rpc = setRpcType(conf, properties);
 
     // set raft segment size
-    final int raftSegmentSize = (int)conf.getStorageSize(
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY,
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT,
-        StorageUnit.BYTES);
-    RaftServerConfigKeys.Log.setSegmentSizeMax(properties,
-        SizeInBytes.valueOf(raftSegmentSize));
+    setRaftSegmentSize(conf, properties);
 
     // set raft segment pre-allocated size
-    final int raftSegmentPreallocatedSize = (int) conf.getStorageSize(
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY,
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT,
-        StorageUnit.BYTES);
-    int logAppenderQueueNumElements = conf.getInt(
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS,
-        OzoneConfigKeys
-            .DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT);
-    final int logAppenderQueueByteLimit = (int) conf.getStorageSize(
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT,
-        OzoneConfigKeys
-            .DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT,
-        StorageUnit.BYTES);
-    RaftServerConfigKeys.Log.Appender
-        .setBufferElementLimit(properties, logAppenderQueueNumElements);
-    RaftServerConfigKeys.Log.Appender.setBufferByteLimit(properties,
-        SizeInBytes.valueOf(logAppenderQueueByteLimit));
-    RaftServerConfigKeys.Log.setPreallocatedSize(properties,
-        SizeInBytes.valueOf(raftSegmentPreallocatedSize));
+    final int raftSegmentPreallocatedSize =
+        setRaftSegmentPreallocatedSize(conf, properties);
 
     // Set max write buffer size, which is the scm chunk size
-    final int maxChunkSize = OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE;
-    RaftServerConfigKeys.Log.setWriteBufferSize(properties,
-        SizeInBytes.valueOf(maxChunkSize));
-
-    // Set the client requestTimeout
-    TimeUnit timeUnit =
-        OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_DEFAULT
-            .getUnit();
-    long duration = conf.getTimeDuration(
-        OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_KEY,
-        OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_DEFAULT
-            .getDuration(), timeUnit);
-    final TimeDuration clientRequestTimeout =
-        TimeDuration.valueOf(duration, timeUnit);
-    RaftClientConfigKeys.Rpc
-        .setRequestTimeout(properties, clientRequestTimeout);
+    final int maxChunkSize = setMaxWriteBuffer(conf, properties);
+    TimeUnit timeUnit;
+    long duration;
 
     // set the configs enable and set the stateMachineData sync timeout
     RaftServerConfigKeys.Log.StateMachineData.setSync(properties, true);
@@ -224,66 +185,19 @@ private RaftProperties newRaftProperties(Configuration conf) {
         .setSyncTimeout(properties, dataSyncTimeout);
 
     // Set the server Request timeout
-    timeUnit = OzoneConfigKeys.DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_DEFAULT
-        .getUnit();
-    duration = conf.getTimeDuration(
-        OzoneConfigKeys.DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_KEY,
-        OzoneConfigKeys.DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_DEFAULT
-            .getDuration(), timeUnit);
-    final TimeDuration serverRequestTimeout =
-        TimeDuration.valueOf(duration, timeUnit);
-    RaftServerConfigKeys.Rpc
-        .setRequestTimeout(properties, serverRequestTimeout);
+    setServerRequestTimeout(conf, properties);
 
     // set timeout for a retry cache entry
-    timeUnit =
-        OzoneConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT
-            .getUnit();
-    duration = conf.getTimeDuration(
-        OzoneConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY,
-        OzoneConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT
-            .getDuration(), timeUnit);
-    final TimeDuration retryCacheTimeout =
-        TimeDuration.valueOf(duration, timeUnit);
-    RaftServerConfigKeys.RetryCache
-        .setExpiryTime(properties, retryCacheTimeout);
+    setTimeoutForRetryCache(conf, properties);
 
     // Set the ratis leader election timeout
-    TimeUnit leaderElectionMinTimeoutUnit =
-        OzoneConfigKeys.
-            DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT
-            .getUnit();
-    duration = conf.getTimeDuration(
-        OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY,
-        OzoneConfigKeys.
-            DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT
-            .getDuration(), leaderElectionMinTimeoutUnit);
-    final TimeDuration leaderElectionMinTimeout =
-        TimeDuration.valueOf(duration, leaderElectionMinTimeoutUnit);
-    RaftServerConfigKeys.Rpc
-        .setTimeoutMin(properties, leaderElectionMinTimeout);
-    long leaderElectionMaxTimeout =
-        leaderElectionMinTimeout.toLong(TimeUnit.MILLISECONDS) + 200;
-    RaftServerConfigKeys.Rpc.setTimeoutMax(properties,
-        TimeDuration.valueOf(leaderElectionMaxTimeout, TimeUnit.MILLISECONDS));
+    setRatisLeaderElectionTimeout(conf, properties);
 
     // Set the maximum cache segments
     RaftServerConfigKeys.Log.setMaxCachedSegmentNum(properties, 2);
 
     // set the node failure timeout
-    timeUnit = OzoneConfigKeys.DFS_RATIS_SERVER_FAILURE_DURATION_DEFAULT
-        .getUnit();
-    duration = conf.getTimeDuration(
-        OzoneConfigKeys.DFS_RATIS_SERVER_FAILURE_DURATION_KEY,
-        OzoneConfigKeys.DFS_RATIS_SERVER_FAILURE_DURATION_DEFAULT
-            .getDuration(), timeUnit);
-    final TimeDuration nodeFailureTimeout =
-        TimeDuration.valueOf(duration, timeUnit);
-    RaftServerConfigKeys.setLeaderElectionTimeout(properties,
-        nodeFailureTimeout);
-    RaftServerConfigKeys.Rpc.setSlownessTimeout(properties,
-        nodeFailureTimeout);
-    nodeFailureTimeoutMs = nodeFailureTimeout.toLong(TimeUnit.MILLISECONDS);
+    setNodeFailureTimeout(conf, properties);
 
     // Set the ratis storage directory
     String storageDir = HddsServerUtil.getOzoneDatanodeRatisDirectory(conf);
@@ -331,6 +245,143 @@ private RaftProperties newRaftProperties(Configuration conf) {
     return properties;
   }
 
+  private void setNodeFailureTimeout(Configuration conf,
+                                     RaftProperties properties) {
+    TimeUnit timeUnit;
+    long duration;
+    timeUnit = OzoneConfigKeys.DFS_RATIS_SERVER_FAILURE_DURATION_DEFAULT
+        .getUnit();
+    duration = conf.getTimeDuration(
+        OzoneConfigKeys.DFS_RATIS_SERVER_FAILURE_DURATION_KEY,
+        OzoneConfigKeys.DFS_RATIS_SERVER_FAILURE_DURATION_DEFAULT
+            .getDuration(), timeUnit);
+    final TimeDuration nodeFailureTimeout =
+        TimeDuration.valueOf(duration, timeUnit);
+    RaftServerConfigKeys.setLeaderElectionTimeout(properties,
+        nodeFailureTimeout);
+    RaftServerConfigKeys.Rpc.setSlownessTimeout(properties,
+        nodeFailureTimeout);
+    nodeFailureTimeoutMs = nodeFailureTimeout.toLong(TimeUnit.MILLISECONDS);
+  }
+
+  private void setRatisLeaderElectionTimeout(Configuration conf,
+                                             RaftProperties properties) {
+    long duration;
+    TimeUnit leaderElectionMinTimeoutUnit =
+        OzoneConfigKeys.
+            DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT
+            .getUnit();
+    duration = conf.getTimeDuration(
+        OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY,
+        OzoneConfigKeys.
+            DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT
+            .getDuration(), leaderElectionMinTimeoutUnit);
+    final TimeDuration leaderElectionMinTimeout =
+        TimeDuration.valueOf(duration, leaderElectionMinTimeoutUnit);
+    RaftServerConfigKeys.Rpc
+        .setTimeoutMin(properties, leaderElectionMinTimeout);
+    long leaderElectionMaxTimeout =
+        leaderElectionMinTimeout.toLong(TimeUnit.MILLISECONDS) + 200;
+    RaftServerConfigKeys.Rpc.setTimeoutMax(properties,
+        TimeDuration.valueOf(leaderElectionMaxTimeout, TimeUnit.MILLISECONDS));
+  }
+
+  private void setTimeoutForRetryCache(Configuration conf,
+                                       RaftProperties properties) {
+    TimeUnit timeUnit;
+    long duration;
+    timeUnit =
+        OzoneConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT
+            .getUnit();
+    duration = conf.getTimeDuration(
+        OzoneConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY,
+        OzoneConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT
+            .getDuration(), timeUnit);
+    final TimeDuration retryCacheTimeout =
+        TimeDuration.valueOf(duration, timeUnit);
+    RaftServerConfigKeys.RetryCache
+        .setExpiryTime(properties, retryCacheTimeout);
+  }
+
+  private void setServerRequestTimeout(Configuration conf,
+                                       RaftProperties properties) {
+    TimeUnit timeUnit;
+    long duration;
+    timeUnit = OzoneConfigKeys.DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_DEFAULT
+        .getUnit();
+    duration = conf.getTimeDuration(
+        OzoneConfigKeys.DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_KEY,
+        OzoneConfigKeys.DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_DEFAULT
+            .getDuration(), timeUnit);
+    final TimeDuration serverRequestTimeout =
+        TimeDuration.valueOf(duration, timeUnit);
+    RaftServerConfigKeys.Rpc
+        .setRequestTimeout(properties, serverRequestTimeout);
+  }
+
+  private int setMaxWriteBuffer(Configuration conf, RaftProperties properties) {
+    final int maxChunkSize = OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE;
+    RaftServerConfigKeys.Log.setWriteBufferSize(properties,
+        SizeInBytes.valueOf(maxChunkSize));
+
+    // Set the client requestTimeout
+    TimeUnit timeUnit =
+        OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_DEFAULT
+            .getUnit();
+    long duration = conf.getTimeDuration(
+        OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_KEY,
+        OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_DEFAULT
+            .getDuration(), timeUnit);
+    final TimeDuration clientRequestTimeout =
+        TimeDuration.valueOf(duration, timeUnit);
+    RaftClientConfigKeys.Rpc
+        .setRequestTimeout(properties, clientRequestTimeout);
+    return maxChunkSize;
+  }
+
+  private int setRaftSegmentPreallocatedSize(Configuration conf,
+                                             RaftProperties properties) {
+    final int raftSegmentPreallocatedSize = (int) conf.getStorageSize(
+        OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY,
+        OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT,
+        StorageUnit.BYTES);
+    int logAppenderQueueNumElements = conf.getInt(
+        OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS,
+        OzoneConfigKeys
+            .DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT);
+    final int logAppenderQueueByteLimit = (int) conf.getStorageSize(
+        OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT,
+        OzoneConfigKeys
+            .DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT,
+        StorageUnit.BYTES);
+    RaftServerConfigKeys.Log.Appender
+        .setBufferElementLimit(properties, logAppenderQueueNumElements);
+    RaftServerConfigKeys.Log.Appender.setBufferByteLimit(properties,
+        SizeInBytes.valueOf(logAppenderQueueByteLimit));
+    RaftServerConfigKeys.Log.setPreallocatedSize(properties,
+        SizeInBytes.valueOf(raftSegmentPreallocatedSize));
+    return raftSegmentPreallocatedSize;
+  }
+
+  private void setRaftSegmentSize(Configuration conf,
+                                  RaftProperties properties) {
+    final int raftSegmentSize = (int)conf.getStorageSize(
+        OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY,
+        OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT,
+        StorageUnit.BYTES);
+    RaftServerConfigKeys.Log.setSegmentSizeMax(properties,
+        SizeInBytes.valueOf(raftSegmentSize));
+  }
+
+  private RpcType setRpcType(Configuration conf, RaftProperties properties) {
+    final String rpcType = conf.get(
+        OzoneConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY,
+        OzoneConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT);
+    final RpcType rpc = SupportedRpcType.valueOfIgnoreCase(rpcType);
+    RaftConfigKeys.Rpc.setType(properties, rpc);
+    return rpc;
+  }
+
   public static XceiverServerRatis newXceiverServerRatis(
       DatanodeDetails datanodeDetails, Configuration ozoneConf,
       ContainerDispatcher dispatcher, StateContext context) throws IOException {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java
index 0de9f18cf981..0d698982dfa4 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java
@@ -31,7 +31,7 @@
 /**
  * Stores information about a disk/volume.
  */
-public class VolumeInfo {
+public final class VolumeInfo {
 
   private static final Logger LOG = LoggerFactory.getLogger(VolumeInfo.class);
 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java
index 0a81ed8449e4..0c7a04e51da3 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java
@@ -41,10 +41,12 @@ private KeyValueContainerLocationUtil() {
    * @return containerMetadata Path to container metadata location where
    * .container file will be stored.
    */
-  public static File getContainerMetaDataPath(String hddsVolumeDir, String scmId,
+  public static File getContainerMetaDataPath(String hddsVolumeDir,
+                                              String scmId,
                                               long containerId) {
-    String containerMetaDataPath = getBaseContainerLocation(hddsVolumeDir, scmId,
-        containerId);
+    String containerMetaDataPath =
+        getBaseContainerLocation(hddsVolumeDir, scmId,
+            containerId);
     containerMetaDataPath = containerMetaDataPath + File.separator +
         OzoneConsts.CONTAINER_META_PATH;
     return new File(containerMetaDataPath);
@@ -72,8 +74,9 @@ public static File getChunksLocationPath(String baseDir, String scmId,
    * @param containerId
    * @return base directory for container.
    */
-  private static String getBaseContainerLocation(String hddsVolumeDir, String scmId,
-                                        long containerId) {
+  private static String getBaseContainerLocation(String hddsVolumeDir,
+                                                 String scmId,
+                                                 long containerId) {
     Preconditions.checkNotNull(hddsVolumeDir, "Base Directory cannot be null");
     Preconditions.checkNotNull(scmId, "scmUuid cannot be null");
     Preconditions.checkState(containerId >= 0,
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationService.java
index 30a251d4e364..0deaf0042d8a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationService.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationService.java
@@ -57,10 +57,10 @@ public void download(CopyContainerRequestProto request,
     LOG.info("Streaming container data ({}) to other datanode",
         request.getContainerID());
     try {
-        GrpcOutputStream outputStream =
-            new GrpcOutputStream(responseObserver, request.getContainerID());
-        containerReplicationSource
-            .copyData(request.getContainerID(), outputStream);
+      GrpcOutputStream outputStream =
+          new GrpcOutputStream(responseObserver, request.getContainerID());
+      containerReplicationSource
+          .copyData(request.getContainerID(), outputStream);
 
     } catch (IOException e) {
       LOG.error("Can't stream the container data", e);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SimpleContainerDownloader.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SimpleContainerDownloader.java
index a461a98f2361..032dc7db9bbc 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SimpleContainerDownloader.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SimpleContainerDownloader.java
@@ -30,7 +30,6 @@
 import java.util.function.Function;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteBlockCommandStatus.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteBlockCommandStatus.java
index 2659ab370416..e9ccb08a1411 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteBlockCommandStatus.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteBlockCommandStatus.java
@@ -24,13 +24,17 @@
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto;
 
+/**
+ * Command status to report about block deletion.
+ */
 public class DeleteBlockCommandStatus extends CommandStatus {
 
   private ContainerBlocksDeletionACKProto blocksDeletionAck = null;
 
   public DeleteBlockCommandStatus(Type type, Long cmdId,
       StorageContainerDatanodeProtocolProtos.CommandStatus.Status status,
-      String msg, ContainerBlocksDeletionACKProto blocksDeletionAck) {
+      String msg,
+      ContainerBlocksDeletionACKProto blocksDeletionAck) {
     super(type, cmdId, status, msg);
     this.blocksDeletionAck = blocksDeletionAck;
   }
@@ -53,7 +57,8 @@ public CommandStatus getFromProtoBuf(
   }
 
   @Override
-  public StorageContainerDatanodeProtocolProtos.CommandStatus getProtoBufMessage() {
+  public StorageContainerDatanodeProtocolProtos.CommandStatus
+      getProtoBufMessage() {
     StorageContainerDatanodeProtocolProtos.CommandStatus.Builder builder =
         StorageContainerDatanodeProtocolProtos.CommandStatus.newBuilder()
             .setCmdId(this.getCmdId())
@@ -68,6 +73,9 @@ public CommandStatus getFromProtoBuf(
     return builder.build();
   }
 
+  /**
+   * Builder for DeleteBlockCommandStatus.
+   */
   public static final class DeleteBlockCommandStatusBuilder
       extends CommandStatusBuilder {
     private ContainerBlocksDeletionACKProto blocksDeletionAck = null;
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java
index 8e8e4f0369d4..514c8224bca9 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java
@@ -16,28 +16,24 @@
  */
 package org.apache.hadoop.ozone.container.common;
 
-import com.google.protobuf.BlockingService;
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.ServerSocket;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos
-    .StorageContainerDatanodeProtocolService;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageContainerDatanodeProtocolService;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol;
 import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolPB;
-import org.apache.hadoop.ozone.protocolPB
-    .StorageContainerDatanodeProtocolServerSideTranslatorPB;
+import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolServerSideTranslatorPB;
 import org.apache.hadoop.test.GenericTestUtils;
 
-import java.io.IOException;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.net.ServerSocket;
-
+import com.google.protobuf.BlockingService;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
 
 /**
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java
index fb851949997c..a6ba103174e6 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java
@@ -94,8 +94,8 @@ public void testGetHandlerForInvalidContainerType() {
     Assert.assertEquals("New ContainerType detected. Not an invalid " +
         "containerType", invalidContainerType, null);
 
-    Handler handler = dispatcher.getHandler(invalidContainerType);
+    Handler dispatcherHandler = dispatcher.getHandler(invalidContainerType);
     Assert.assertEquals("Get Handler for Invalid ContainerType should " +
-        "return null.", handler, null);
+        "return null.", dispatcherHandler, null);
   }
 }
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java
index ba5078b402cc..a118c3ed607e 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java
@@ -66,10 +66,10 @@
 
   private final String name;
 
-  protected final Map<Long, TIMEOUT_PAYLOAD> trackedEventsByID =
+  private final Map<Long, TIMEOUT_PAYLOAD> trackedEventsByID =
       new ConcurrentHashMap<>();
 
-  protected final Set<TIMEOUT_PAYLOAD> trackedEvents = new HashSet<>();
+  private final Set<TIMEOUT_PAYLOAD> trackedEvents = new HashSet<>();
 
   private final Map<Long, Long> startTrackingTimes = new HashedMap();
 
@@ -206,4 +206,12 @@ protected EventWatcherMetrics getMetrics() {
   public TIMEOUT_PAYLOAD getTrackedEventbyId(long id) {
     return trackedEventsByID.get(id);
   }
+
+  public Map<Long, TIMEOUT_PAYLOAD> getTrackedEventsByID() {
+    return trackedEventsByID;
+  }
+
+  public Set<TIMEOUT_PAYLOAD> getTrackedEvents() {
+    return trackedEvents;
+  }
 }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/PendingDeleteHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/PendingDeleteHandler.java
index 736daac54c29..4090f6bb8731 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/PendingDeleteHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/PendingDeleteHandler.java
@@ -20,6 +20,9 @@
 import org.apache.hadoop.hdds.server.events.EventHandler;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 
+/**
+ * Event handler for PendingDeleteStatusList events.
+ */
 public class PendingDeleteHandler implements
     EventHandler<PendingDeleteStatusList> {
 
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java
index d65e45f90d33..6ff334098dcc 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java
@@ -22,6 +22,7 @@
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.Objects;
 import java.util.Set;
 import java.util.UUID;
@@ -211,9 +212,7 @@ public void run() {
                     .add(replica.getDatanodeDetails());
               });
 
-          for(UUID originId : originIdToDnMap.keySet()) {
-            final List<DatanodeDetails> listOfReplica =
-                originIdToDnMap.get(originId);
+          for (List<DatanodeDetails> listOfReplica : originIdToDnMap.values()) {
             if (listOfReplica.size() > 1) {
               final int toDelete = Math.min(listOfReplica.size() - 1,
                   numberOfReplicasToDelete);
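
The ReplicationManager hunk above also replaces a loop over keySet()
followed by get() with direct iteration over the map's values, saving a
lookup per key when the key itself is unused. A minimal standalone sketch
of the same idiom, with hypothetical types and method name:

import java.util.List;
import java.util.Map;
import java.util.UUID;

public final class ValuesIterationSketch {
  private ValuesIterationSketch() {
  }

  // Touches each map entry once via values(); the old shape paid an
  // extra originIdToDnMap.get(originId) lookup on every iteration.
  static int surplusReplicas(Map<UUID, List<String>> originIdToDnMap) {
    int surplus = 0;
    for (List<String> replicas : originIdToDnMap.values()) {
      if (replicas.size() > 1) {
        surplus += replicas.size() - 1;
      }
    }
    return surplus;
  }
}
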
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ObjectsMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ObjectsMap.java
index e49a79c64f6b..37525b0076e8 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ObjectsMap.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ObjectsMap.java
@@ -38,6 +38,8 @@
  * This information is built from the DN container reports.
  */
 public class Node2ObjectsMap<T> {
+
+  @SuppressWarnings("visibilitymodifier")
   protected final Map<UUID, Set<T>> dn2ObjectMap;
 
   /**
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2PipelineMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2PipelineMap.java
index bf19261d4b7e..20fe797c38d4 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2PipelineMap.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2PipelineMap.java
@@ -27,10 +27,12 @@
 import java.util.UUID;
 
 /**
- * This data structure maintains the list of pipelines which the given datanode is a part of. This
- * information will be added whenever a new pipeline allocation happens.
+ * This data structure maintains the list of pipelines which the given
+ * datanode is a part of. This information will be added whenever a new
+ * pipeline allocation happens.
  *
- * <p>TODO: this information needs to be regenerated from pipeline reports on SCM restart
+ * <p>TODO: this information needs to be regenerated from pipeline reports
+ * on SCM restart
  */
 public class Node2PipelineMap extends Node2ObjectsMap<PipelineID> {
 
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
index debde6523a96..e99739cf58dc 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
@@ -400,6 +400,7 @@ public static ContainerReplicaProto getRandomContainerInfo(
    *
    * @return ContainerInfo
    */
+  @SuppressWarnings("parameternumber")
   public static ContainerReplicaProto createContainerInfo(
       long containerId, long size, long keyCount, long bytesUsed,
       long readCount, long readBytes, long writeCount, long writeBytes) {
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java
index 279acf04b6f3..9fea7db60615 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java
@@ -45,6 +45,9 @@
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertFalse;
 
+/**
+ * Unit test for command status report handler.
+ */
 public class TestCommandStatusReportHandler implements EventPublisher {
 
   private static final Logger LOG = LoggerFactory
@@ -67,7 +70,6 @@ public void testCommandStatusReport() {
     assertFalse(logCapturer.getOutput().contains("Delete_Block_Status"));
     assertFalse(logCapturer.getOutput().contains("Replicate_Command_Status"));
 
-
     report = this.getStatusReport(this.getCommandStatusList());
     cmdStatusReportHandler.onMessage(report, this);
     assertTrue(logCapturer.getOutput().contains("firing event of type " +
@@ -92,8 +94,8 @@ private CommandStatusReportFromDatanode getStatusReport(
   }
 
   @Override
-  public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void fireEvent
-      (EVENT_TYPE event, PAYLOAD payload) {
+  public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void
+      fireEvent(EVENT_TYPE event, PAYLOAD payload) {
     LOG.info("firing event of type {}, payload {}", event.getName(), payload
         .toString());
   }
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java
index f37b447c7a77..43aaa85a4d97 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java
@@ -212,8 +212,8 @@ public void testCloseContainer() throws IOException {
         HddsProtos.LifeCycleEvent.FINALIZE);
     containerManager.updateContainerState(id,
         HddsProtos.LifeCycleEvent.CLOSE);
-   ContainerInfo closedContainer = containerManager.getContainer(id);
-   Assert.assertEquals(LifeCycleState.CLOSED, closedContainer.getState());
+    ContainerInfo closedContainer = containerManager.getContainer(id);
+    Assert.assertEquals(LifeCycleState.CLOSED, closedContainer.getState());
   }
 
   /**
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
index 764daff77585..f406016a4628 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
@@ -36,6 +36,9 @@
 import org.mockito.Mockito;
 import static org.mockito.Mockito.when;
 
+/**
+ * Test for the scm container placement.
+ */
 public class TestSCMContainerPlacementCapacity {
   @Test
   public void chooseDatanodes() throws SCMException {
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java
index b652b6b76b5f..d285a3f5ab40 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java
@@ -34,6 +34,9 @@
 import org.mockito.Mockito;
 import static org.mockito.Mockito.when;
 
+/**
+ * Test for the random container placement.
+ */
 public class TestSCMContainerPlacementRandom {
 
   @Test
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
index 0e7e04c8eaf9..9fbefcb3f19b 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
@@ -259,11 +259,11 @@ public void testOnMessageReplicaFailure() throws Exception {
         "DeadNode event for a unregistered node"));
   }
 
-  private void registerReplicas(ContainerManager containerManager,
+  private void registerReplicas(ContainerManager contManager,
       ContainerInfo container, DatanodeDetails... datanodes)
       throws ContainerNotFoundException {
     for (DatanodeDetails datanode : datanodes) {
-      containerManager.updateContainerReplica(
+      contManager.updateContainerReplica(
           new ContainerID(container.getContainerID()),
           ContainerReplica.newBuilder()
               .setContainerState(ContainerReplicaProto.State.OPEN)
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java
index f9b139294f78..fa163eb23af7 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java
@@ -35,6 +35,9 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+/**
+ * Test for the Node Report Handler.
+ */
 public class TestNodeReportHandler implements EventPublisher {
 
   private static final Logger LOG = LoggerFactory
diff --git a/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/resourcegz/ResourceGzMojo.java b/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/resourcegz/ResourceGzMojo.java
index 5bf84c21fea7..aa838a8f02b5 100644
--- a/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/resourcegz/ResourceGzMojo.java
+++ b/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/resourcegz/ResourceGzMojo.java
@@ -25,6 +25,7 @@
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
+import java.nio.charset.Charset;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.util.List;
@@ -113,7 +114,7 @@ public void accept(Path path) {
               BufferedReader is = Files.newBufferedReader(path)
           ) {
             getLog().info("Compressing " + path + " to " + outFile);
-            IOUtils.copy(is, os);
+            IOUtils.copy(is, os, Charset.forName("UTF-8"));
           }
         } else {
           throw new IOException("Directory " + outFile.getParent()
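
The charset change above is more than style: the two-argument IOUtils.copy(Reader, OutputStream) it replaces uses the platform default encoding (and is deprecated in recent commons-io for exactly that reason), so the compressed output could differ between build machines. A minimal sketch of the fixed pattern, with hypothetical file names; StandardCharsets.UTF_8 is equivalent to Charset.forName("UTF-8") without the runtime lookup:

    import java.io.BufferedReader;
    import java.io.IOException;
    import java.io.OutputStream;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    import java.util.zip.GZIPOutputStream;

    import org.apache.commons.io.IOUtils;

    public class GzipCopySketch {
      public static void main(String[] args) throws IOException {
        try (BufferedReader in = Files.newBufferedReader(Paths.get("site.js"));
             OutputStream out = new GZIPOutputStream(
                 Files.newOutputStream(Paths.get("site.js.gz")))) {
          // Explicit charset: the bytes written no longer depend on the
          // JVM's platform default encoding.
          IOUtils.copy(in, out, StandardCharsets.UTF_8);
        }
      }
    }
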
diff --git a/hadoop-ozone/Jenkinsfile b/hadoop-ozone/Jenkinsfile
new file mode 100644
index 000000000000..3ecd6f2083b8
--- /dev/null
+++ b/hadoop-ozone/Jenkinsfile
@@ -0,0 +1,105 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+node("ubuntu") {
+    docker.image('elek/ozone-build').pull()
+    docker.image('elek/ozone-build').inside {
+
+        stage('Checkout') {
+            checkout scm
+        }
+
+        stage('Clean') {
+            status = sh returnStatus: true, script: 'mvn clean'
+        }
+
+        stageRunner('Author', "author", {})
+
+        stageRunner('Isolation', "isolation", {})
+
+
+        stageRunner('Build', "build", {})
+
+        stageRunner('Licence', "rat", {
+            archiveArtifacts 'target/rat-aggregated.txt'
+        }, 'artifact/target/rat-aggregated.txt/*view*/')
+
+        stageRunner('Unit test', "unit", {
+            junit '**/target/surefire-reports/*.xml'
+        }, 'testReport/')
+
+        stageRunner('Findbugs', "findbugs", {
+            archiveArtifacts 'target/findbugs-all.txt'
+
+        }, 'artifact/target/findbugs-all.txt/*view*/')
+
+        stageRunner('Checkstyle', "checkstyle", {
+            checkstyle canComputeNew: false, canRunOnFailed: true, defaultEncoding: '', healthy: '', pattern: '**/checkstyle-result.xml', unHealthy: ''
+        }, 'checkstyleResult')
+
+    }
+
+}
+
+def stageRunner(name, type, processResult, url = '') {
+    try {
+        stage(name) {
+            prStatusStart(type)
+            status = sh returnStatus: true, script: 'hadoop-ozone/dev-support/checks/' + type + '.sh'
+            processResult()
+            prStatusResult(status, type, url)
+        }
+        return true
+    } catch (RuntimeException ex) {
+        currentBuild.result = "FAILED"
+        return false
+    }
+}
+
+def prStatusStart(name) {
+    if (env.CHANGE_ID) {
+        pullRequest.createStatus(status: "pending",
+                context: 'continuous-integration/jenkins/pr-merge/' + name,
+                description: name + " is started")
+    }
+}
+
+def prStatusResult(responseCode, name, url = '') {
+    status = "error"
+    desc = "failed"
+    if (responseCode == 0) {
+        status = "success"
+        desc = "passed"
+    }
+    message = name + " is " + desc
+    //System.out.println(responseCode)
+    if (env.CHANGE_ID) {
+        if (url) {
+            pullRequest.createStatus(status: status,
+                    context: 'continuous-integration/jenkins/pr-merge/' + name,
+                    description: message,
+                    targetUrl: env.BUILD_URL + url)
+        } else {
+            pullRequest.createStatus(status: status,
+                    context: 'continuous-integration/jenkins/pr-merge/' + name,
+                    description: message)
+        }
+    }
+    if (responseCode != 0) {
+        throw new RuntimeException(message)
+    }
+}
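
The Jenkinsfile above runs each check with returnStatus: true, so a failing script does not abort the pipeline on the spot; prStatusResult() first publishes the outcome to the pull request as a GitHub commit status and only then turns a nonzero exit code into an exception that marks the stage failed. A rough Java rendering of that ordering (all names hypothetical, purely illustrative):

    /** Illustrative sketch of the Jenkinsfile's prStatusResult() control flow. */
    public final class CheckResultReporter {

      /** Publish the outcome of a named check, then fail if it was nonzero. */
      public static void report(int exitCode, String name) {
        String status = exitCode == 0 ? "success" : "error";
        String desc = exitCode == 0 ? "passed" : "failed";
        String message = name + " is " + desc;
        publishCommitStatus(status, message);
        if (exitCode != 0) {
          // Throw only after the status is published, so the PR always
          // shows a result even for failed checks.
          throw new RuntimeException(message);
        }
      }

      // Stand-in for the GitHub commit status API call.
      private static void publishCommitStatus(String status, String message) {
        System.out.println("[" + status + "] " + message);
      }
    }
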
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
index 9d1571699e64..34382c531aa8 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
@@ -103,6 +103,7 @@
    * @param versioning versioning status of the bucket.
    * @param creationTime creation time of the bucket.
    */
+  @SuppressWarnings("parameternumber")
   public OzoneBucket(Configuration conf, ClientProtocol proxy,
                      String volumeName, String bucketName,
                      List<OzoneAcl> acls, StorageType storageType,
@@ -125,6 +126,7 @@ public OzoneBucket(Configuration conf, ClientProtocol proxy,
   }
 
   @VisibleForTesting
+  @SuppressWarnings("parameternumber")
   OzoneBucket(String volumeName, String name,
       ReplicationFactor defaultReplication,
       ReplicationType defaultReplicationType,
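
The @SuppressWarnings("parameternumber") annotations that recur through this patch target checkstyle's ParameterNumber rule, which by default flags any method or constructor with more than seven parameters; the lowercase value follows checkstyle's naming convention and (an assumption about this tree's checkstyle.xml) only takes effect when the SuppressWarningsHolder check and SuppressWarningsFilter are configured. A minimal sketch with a hypothetical eight-argument constructor:

    /** Hypothetical type whose constructor would trip ParameterNumber. */
    public class WideConstructor {

      @SuppressWarnings("parameternumber")
      public WideConstructor(int a, int b, int c, int d,
                             int e, int f, int g, int h) {
        // Silences the checkstyle warning for this constructor only; javac
        // itself ignores the unrecognized "parameternumber" value.
      }
    }
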
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKeyDetails.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKeyDetails.java
index 7c22f1d82aef..97ab2cf3d3ed 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKeyDetails.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKeyDetails.java
@@ -35,6 +35,7 @@
   /**
    * Constructs OzoneKeyDetails from OmKeyInfo.
    */
+  @SuppressWarnings("parameternumber")
   public OzoneKeyDetails(String volumeName, String bucketName, String keyName,
                          long size, long creationTime, long modificationTime,
                          List<OzoneKeyLocation> ozoneKeyLocations,
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java
index e451b1ac24bf..e52f67903a85 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java
@@ -82,6 +82,7 @@
    * @param creationTime creation time of the volume
    * @param acls ACLs associated with the volume.
    */
+  @SuppressWarnings("parameternumber")
   public OzoneVolume(Configuration conf, ClientProtocol proxy, String name,
                      String admin, String owner, long quotaInBytes,
                      long creationTime, List<OzoneAcl> acls) {
@@ -265,12 +266,13 @@ public void deleteBucket(String bucketName) throws IOException {
 
 
     /**
-     * Creates an Iterator to iterate over all buckets after prevBucket in the volume.
+     * Creates an Iterator to iterate over all buckets after prevBucket in
+     * the volume.
      * If prevBucket is null it iterates from the first bucket in the volume.
      * The returned buckets match bucket prefix.
      * @param bucketPrefix
      */
-    public BucketIterator(String bucketPrefix, String prevBucket) {
+    BucketIterator(String bucketPrefix, String prevBucket) {
       this.bucketPrefix = bucketPrefix;
       this.currentValue = null;
       this.currentIterator = getNextListOfBuckets(prevBucket).iterator();
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
index 56327566aea7..0cded71af630 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
@@ -136,7 +136,7 @@ public synchronized int read(byte[] b, int off, int len) throws IOException {
       off += numBytesRead;
       len -= numBytesRead;
       if (current.getRemaining() <= 0 &&
-        ((currentStreamIndex + 1) < streamEntries.size())) {
+          ((currentStreamIndex + 1) < streamEntries.size())) {
         currentStreamIndex += 1;
       }
     }
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
index 5e7cb9baeb14..1c82ef4f516c 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
@@ -86,6 +86,7 @@
    * A constructor for testing purpose only.
    */
   @VisibleForTesting
+  @SuppressWarnings("parameternumber")
   public KeyOutputStream() {
     streamEntries = new ArrayList<>();
     omClient = null;
@@ -143,6 +144,8 @@ public XceiverClientManager getXceiverClientManager() {
     return locationInfoList;
   }
 
+
+  @SuppressWarnings("parameternumber")
   public KeyOutputStream(OpenKeySession handler,
       XceiverClientManager xceiverClientManager,
       StorageContainerLocationProtocolClientSideTranslatorPB scmClient,
@@ -654,6 +657,7 @@ public KeyOutputStream build() throws IOException {
     private final long watchTimeout;
     private List<ByteBuffer> bufferList;
 
+    @SuppressWarnings("parameternumber")
     BlockOutputStreamEntry(BlockID blockID, String key,
         XceiverClientManager xceiverClientManager,
         XceiverClientSpi xceiverClient, String requestId, int chunkSize,
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/hdds/protocol/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/hdds/protocol/package-info.java
new file mode 100644
index 000000000000..89d7de088991
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/hdds/protocol/package-info.java
@@ -0,0 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.hdds.protocol;
+/**
+ * Helper classes for the hdds protocol.
+ */
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
index abd60a8a00b2..dd6fee9d87cf 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.ozone;
 
 import java.io.File;
-import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.util.Collection;
 import java.util.Optional;
@@ -27,11 +26,8 @@
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.server.ServerUtils;
 import org.apache.hadoop.net.NetUtils;
-
 import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import static org.apache.hadoop.hdds.HddsUtils.getHostNameFromConfigKeys;
 import static org.apache.hadoop.hdds.HddsUtils.getPortNumberFromConfigKeys;
@@ -40,6 +36,8 @@
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_BIND_PORT_DEFAULT;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_PORT_DEFAULT;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Stateless helper functions for the server and client side of OM
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/OzoneGetConf.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/OzoneGetConf.java
index ffbca6a2b5d7..3c60e5956d47 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/OzoneGetConf.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/OzoneGetConf.java
@@ -115,6 +115,8 @@ public String getDescription() {
    * {@link OzoneGetConf.Command}.
    */
   static class CommandHandler {
+
+    @SuppressWarnings("visibilitymodifier")
     protected String key; // Configuration key to lookup
 
     CommandHandler() {
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmBucketInfoCodec.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmBucketInfoCodec.java
index 7f5e5d011d50..ba421939c4e3 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmBucketInfoCodec.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmBucketInfoCodec.java
@@ -39,7 +39,8 @@
   @Override
   public OmBucketInfo fromPersistedFormat(byte[] rawData) {
     Preconditions
-        .checkNotNull("Null byte array can't converted to real object.");
+        .checkNotNull(rawData,
+            "Null byte array can't converted to real object.");
     try {
       return OmBucketInfo.getFromProtobuf(BucketInfo.parseFrom(rawData));
     } catch (InvalidProtocolBufferException e) {
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmKeyInfoCodec.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmKeyInfoCodec.java
index 93aaeb4a1e48..e71085d3ff93 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmKeyInfoCodec.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmKeyInfoCodec.java
@@ -39,7 +39,8 @@
   @Override
   public OmKeyInfo fromPersistedFormat(byte[] rawData) {
     Preconditions
-        .checkNotNull("Null byte array can't converted to real object.");
+        .checkNotNull(rawData,
+            "Null byte array can't converted to real object.");
     try {
       return OmKeyInfo.getFromProtobuf(KeyInfo.parseFrom(rawData));
     } catch (InvalidProtocolBufferException e) {
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmMultipartKeyInfoCodec.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmMultipartKeyInfoCodec.java
index 4445bf3ea2ee..5071bfd853fa 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmMultipartKeyInfoCodec.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmMultipartKeyInfoCodec.java
@@ -43,7 +43,7 @@
    * return null.
    */
   public OmMultipartKeyInfo fromPersistedFormat(byte[] rawData) {
-    Preconditions.checkNotNull(
+    Preconditions.checkNotNull(rawData,
         "Null byte array can't converted to real object.");
     try {
       return OmMultipartKeyInfo.getFromProto(OzoneManagerProtocolProtos
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmVolumeArgsCodec.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmVolumeArgsCodec.java
index f31d45024659..fb7ce72ab890 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmVolumeArgsCodec.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmVolumeArgsCodec.java
@@ -39,7 +39,8 @@
   @Override
   public OmVolumeArgs fromPersistedFormat(byte[] rawData) {
     Preconditions
-        .checkNotNull("Null byte array can't converted to real object.");
+        .checkNotNull(rawData,
+            "Null byte array can't converted to real object.");
     try {
       return OmVolumeArgs.getFromProtobuf(VolumeInfo.parseFrom(rawData));
     } catch (InvalidProtocolBufferException e) {
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/VolumeListCodec.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/VolumeListCodec.java
index f71da2519f28..dca3e478eba6 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/VolumeListCodec.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/VolumeListCodec.java
@@ -38,7 +38,8 @@
   @Override
   public VolumeList fromPersistedFormat(byte[] rawData) {
     Preconditions
-        .checkNotNull("Null byte array can't converted to real object.");
+        .checkNotNull(rawData,
+            "Null byte array can't converted to real object.");
     try {
       return VolumeList.parseFrom(rawData);
     } catch (InvalidProtocolBufferException e) {
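
The five codec changes above all fix the same latent bug: Guava's one-argument Preconditions.checkNotNull(Object) null-checks whatever it is handed, so passing only the error message meant the string literal itself was tested (and a literal is never null) while rawData went unvalidated. A minimal before/after sketch:

    import com.google.common.base.Preconditions;

    public class CheckNotNullSketch {

      static void before(byte[] rawData) {
        // Bug: checks the message string, not rawData. Can never fail.
        Preconditions.checkNotNull("Null byte array can't be converted.");
      }

      static void after(byte[] rawData) {
        // Fixed: rawData is the reference under test, the string is the
        // message used if the check fails.
        Preconditions.checkNotNull(rawData,
            "Null byte array can't be converted.");
      }
    }
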
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java
index a3ae7e5f1002..3708315892d9 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java
@@ -22,16 +22,15 @@
 import java.util.Map;
 import java.util.stream.Collectors;
 
-import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.protocol.StorageType;
-import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.audit.Auditable;
-import org.apache.hadoop.ozone.protocol.proto
-    .OzoneManagerProtocolProtos.BucketArgs;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketArgs;
 import org.apache.hadoop.ozone.protocolPB.OMPBHelper;
 
+import com.google.common.base.Preconditions;
+
 /**
  * A class that encapsulates Bucket Arguments.
  */
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java
index 0bff1f7026ac..994883c37f8c 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java
@@ -17,21 +17,20 @@
  */
 package org.apache.hadoop.ozone.om.helpers;
 
-import com.google.common.base.Preconditions;
+import java.util.LinkedHashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
 import org.apache.hadoop.hdds.protocol.StorageType;
-import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.audit.Auditable;
-import org.apache.hadoop.ozone.protocol.proto
-    .OzoneManagerProtocolProtos.BucketInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketInfo;
 import org.apache.hadoop.ozone.protocolPB.OMPBHelper;
 
-import java.util.LinkedHashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.stream.Collectors;
+import com.google.common.base.Preconditions;
 
 /**
  * A class that encapsulates Bucket Info.
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java
index 7ded812bab25..38610f463ba5 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java
@@ -43,6 +43,7 @@
   private final String multipartUploadID;
   private final int multipartUploadPartNumber;
 
+  @SuppressWarnings("parameternumber")
   private OmKeyArgs(String volumeName, String bucketName, String keyName,
       long dataSize, ReplicationType type, ReplicationFactor factor,
       List<OmKeyLocationInfo> locationInfoList, boolean isMultipart,
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
index 030844efff8c..cc90ee5165d9 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
@@ -45,6 +45,7 @@
   private HddsProtos.ReplicationType type;
   private HddsProtos.ReplicationFactor factor;
 
+  @SuppressWarnings("parameternumber")
   private OmKeyInfo(String volumeName, String bucketName, String keyName,
                     List<OmKeyLocationInfoGroup> versions, long dataSize,
                     long creationTime, long modificationTime,
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/OzoneVersionInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/OzoneVersionInfo.java
index d47674820840..c408b037cd64 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/OzoneVersionInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/OzoneVersionInfo.java
@@ -37,7 +37,8 @@
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public class OzoneVersionInfo {
-  private static final Logger LOG = LoggerFactory.getLogger(OzoneVersionInfo.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(OzoneVersionInfo.class);
 
   private Properties info;
 
@@ -46,8 +47,9 @@ protected OzoneVersionInfo(String component) {
     String versionInfoFile = component + "-version-info.properties";
     InputStream is = null;
     try {
-      is = ThreadUtil.getResourceAsStream(OzoneVersionInfo.class.getClassLoader(),
-          versionInfoFile);
+      is = ThreadUtil
+          .getResourceAsStream(OzoneVersionInfo.class.getClassLoader(),
+              versionInfoFile);
       info.load(is);
     } catch (IOException ex) {
       LoggerFactory.getLogger(getClass()).warn("Could not read '" +
@@ -57,130 +59,51 @@ protected OzoneVersionInfo(String component) {
     }
   }
 
-  protected String _getVersion() {
+  protected String getVersion() {
     return info.getProperty("version", "Unknown");
   }
 
-  protected String _getRelease() {
+  protected String getRelease() {
     return info.getProperty("release", "Unknown");
   }
 
-  protected String _getRevision() {
+  protected String getRevision() {
     return info.getProperty("revision", "Unknown");
   }
 
-  protected String _getBranch() {
+  protected String getBranch() {
     return info.getProperty("branch", "Unknown");
   }
 
-  protected String _getDate() {
+  protected String getDate() {
     return info.getProperty("date", "Unknown");
   }
 
-  protected String _getUser() {
+  protected String getUser() {
     return info.getProperty("user", "Unknown");
   }
 
-  protected String _getUrl() {
+  protected String getUrl() {
     return info.getProperty("url", "Unknown");
   }
 
-  protected String _getSrcChecksum() {
+  protected String getSrcChecksum() {
     return info.getProperty("srcChecksum", "Unknown");
   }
 
-  protected String _getBuildVersion(){
-    return _getVersion() +
-      " from " + _getRevision() +
-      " by " + _getUser() +
-      " source checksum " + _getSrcChecksum();
+  protected String getBuildVersion() {
+    return getVersion() +
+        " from " + getRevision() +
+        " by " + getUser() +
+        " source checksum " + getSrcChecksum();
   }
 
-  protected String _getProtocVersion() {
+  protected String getProtocVersion() {
     return info.getProperty("protocVersion", "Unknown");
   }
 
-  private static OzoneVersionInfo OZONE_VERSION_INFO = new OzoneVersionInfo("ozone");
-  /**
-   * Get the Ozone version.
-   * @return the Ozone version string, eg. "0.6.3-dev"
-   */
-  public static String getVersion() {
-    return OZONE_VERSION_INFO._getVersion();
-  }
-
-  /**
-   * Get the Ozone release name.
-   * @return the Ozone release string, eg. "Acadia"
-   */
-  public static String getRelease() {
-    return OZONE_VERSION_INFO._getRelease();
-  }
-
-  /**
-   * Get the Git commit hash of the repository when compiled.
-   * @return the commit hash, eg. "18f64065d5db6208daf50b02c1b5ed4ee3ce547a"
-   */
-  public static String getRevision() {
-    return OZONE_VERSION_INFO._getRevision();
-  }
-
-  /**
-   * Get the branch on which this originated.
-   * @return The branch name, e.g. "trunk" or "branches/branch-0.20"
-   */
-  public static String getBranch() {
-    return OZONE_VERSION_INFO._getBranch();
-  }
-
-  /**
-   * The date that Ozone was compiled.
-   * @return the compilation date in unix date format
-   */
-  public static String getDate() {
-    return OZONE_VERSION_INFO._getDate();
-  }
-
-  /**
-   * The user that compiled Ozone.
-   * @return the username of the user
-   */
-  public static String getUser() {
-    return OZONE_VERSION_INFO._getUser();
-  }
-
-  /**
-   * Get the URL for the Ozone repository.
-   * @return the URL of the Ozone repository
-   */
-  public static String getUrl() {
-    return OZONE_VERSION_INFO._getUrl();
-  }
-
-  /**
-   * Get the checksum of the source files from which Ozone was built.
-   * @return the checksum of the source files
-   */
-  public static String getSrcChecksum() {
-    return OZONE_VERSION_INFO._getSrcChecksum();
-  }
-
-  /**
-   * Returns the buildVersion which includes version,
-   * revision, user and date.
-   * @return the buildVersion
-   */
-  public static String getBuildVersion(){
-    return OZONE_VERSION_INFO._getBuildVersion();
-  }
-
-  /**
-   * Returns the protoc version used for the build.
-   * @return the protoc version
-   */
-  public static String getProtocVersion(){
-    return OZONE_VERSION_INFO._getProtocVersion();
-  }
+  private static final OzoneVersionInfo OZONE_VERSION_INFO =
+      new OzoneVersionInfo("ozone");
 
   public static void main(String[] args) {
     System.out.println(
@@ -200,12 +123,18 @@ public static void main(String[] args) {
         "             ///////////     ////////            \n" +
         "               //////  ////////////              \n" +
         "               ///   //////////                  \n" +
-        "              /    "+ getVersion() + "("+ getRelease() +")\n");
-    System.out.println("Source code repository " + getUrl() + " -r " +
-        getRevision());
-    System.out.println("Compiled by " + getUser() + " on " + getDate());
-    System.out.println("Compiled with protoc " + getProtocVersion());
-    System.out.println("From source with checksum " + getSrcChecksum() + "\n");
+            "              /    " + OZONE_VERSION_INFO.getVersion() + "("
+            + OZONE_VERSION_INFO.getRelease() + ")\n");
+    System.out.println(
+        "Source code repository " + OZONE_VERSION_INFO.getUrl() + " -r " +
+            OZONE_VERSION_INFO.getRevision());
+    System.out.println("Compiled by " + OZONE_VERSION_INFO.getUser() + " on "
+        + OZONE_VERSION_INFO.getDate());
+    System.out.println(
+        "Compiled with protoc " + OZONE_VERSION_INFO.getProtocVersion());
+    System.out.println(
+        "From source with checksum " + OZONE_VERSION_INFO.getSrcChecksum()
+            + "\n");
     LOG.debug("This command was run using " +
         ClassUtil.findContainingJar(OzoneVersionInfo.class));
     HddsVersionInfo.main(args);
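
The OzoneVersionInfo rewrite above removes a layer of public static getters that each delegated to an underscore-prefixed _get* instance method; checkstyle's default MethodName pattern rejects the leading underscore, and an upper-case static field like OZONE_VERSION_INFO is expected to be final. What remains is a plain singleton whose instance getters are called directly, roughly this shape (property name kept, class name hypothetical):

    import java.util.Properties;

    public class VersionInfoSketch {

      private static final VersionInfoSketch INSTANCE = new VersionInfoSketch();

      private final Properties info = new Properties();

      // Plain instance getter; no underscore, so MethodName is satisfied.
      protected String getVersion() {
        return info.getProperty("version", "Unknown");
      }

      public static void main(String[] args) {
        System.out.println("version: " + INSTANCE.getVersion());
      }
    }
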
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/package-info.java
new file mode 100644
index 000000000000..7bc89c17b673
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/package-info.java
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.util;
+
+/**
+ * Ozone utilities.
+ */
\ No newline at end of file
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeArgs.java
index 1d67c67f598a..6bafac1c9175 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeArgs.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeArgs.java
@@ -66,6 +66,7 @@ public String getVolumeName() {
    * @param headers - http headers
    * @param groups - list of groups allowed to access the volume
    */
+  @SuppressWarnings("parameternumber")
   public VolumeArgs(String userName, String volumeName, String requestID,
                     String hostName, Request request, UriInfo info,
                     HttpHeaders headers, String[] groups) {
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneObjInfo.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneObjInfo.java
index d2336e82a719..93dfc4dba5a7 100644
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneObjInfo.java
+++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneObjInfo.java
@@ -48,14 +48,15 @@ public void testGetVolumeName() {
     assertEquals(objInfo.getVolumeName(), volume);
   }
 
-  private OzoneObjInfo.Builder getBuilder(String volume, String bucket,
-      String key) {
+  private OzoneObjInfo.Builder getBuilder(String withVolume,
+      String withBucket,
+      String withKey) {
     return OzoneObjInfo.Builder.newBuilder()
         .setResType(ResourceType.VOLUME)
         .setStoreType(STORE)
-        .setVolumeName(volume)
-        .setBucketName(bucket)
-        .setKeyName(key);
+        .setVolumeName(withVolume)
+        .setBucketName(withBucket)
+        .setKeyName(withKey);
   }
 
   @Test
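
Parameter renames like volume -> withVolume above are the usual cure for checkstyle's HiddenField rule, which flags a parameter that shadows a field of the same name (the test class presumably declares volume, bucket and key fields). A minimal sketch:

    public class HiddenFieldSketch {

      private String volume;

      // Renamed parameter no longer shadows the 'volume' field.
      public void setVolume(String withVolume) {
        this.volume = withVolume;
      }
    }
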
diff --git a/hadoop-ozone/dev-support/checks/acceptance.sh b/hadoop-ozone/dev-support/checks/acceptance.sh
new file mode 100755
index 000000000000..0a4c5d6f6fe0
--- /dev/null
+++ b/hadoop-ozone/dev-support/checks/acceptance.sh
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+export HADOOP_VERSION=3
+hadoop-ozone/dist/target/ozone-*-SNAPSHOT/smoketest/test.sh
+exit $?
diff --git a/hadoop-ozone/dev-support/checks/author.sh b/hadoop-ozone/dev-support/checks/author.sh
new file mode 100755
index 000000000000..43caa7081f06
--- /dev/null
+++ b/hadoop-ozone/dev-support/checks/author.sh
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+mkdir -p ./target
+grep -r --include="*.java" "@author" .
+if [ $? -gt 0 ]; then
+  exit 0
+else
+  exit -1
+fi
diff --git a/hadoop-ozone/dev-support/checks/build.sh b/hadoop-ozone/dev-support/checks/build.sh
new file mode 100755
index 000000000000..6a7811ecd7c3
--- /dev/null
+++ b/hadoop-ozone/dev-support/checks/build.sh
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+export MAVEN_OPTS="-Xmx4096m"
+mvn -am -pl :hadoop-ozone-dist -P hdds -Dmaven.javadoc.skip=true -DskipTests clean install
+exit $?
diff --git a/hadoop-ozone/dev-support/checks/checkstyle.sh b/hadoop-ozone/dev-support/checks/checkstyle.sh
new file mode 100755
index 000000000000..60efef0eded1
--- /dev/null
+++ b/hadoop-ozone/dev-support/checks/checkstyle.sh
@@ -0,0 +1,23 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+mvn -fn checkstyle:check -am -pl :hadoop-ozone-dist -Phdds
+
+violations=$(grep -r error --include checkstyle-errors.xml | wc -l)
+if [[ $violations -gt 0 ]]; then
+    echo "There are $violations checkstyle violations"
+    exit -1
+fi
+exit 0
diff --git a/hadoop-ozone/dev-support/checks/findbugs.sh b/hadoop-ozone/dev-support/checks/findbugs.sh
new file mode 100755
index 000000000000..45a4ad5b7d0f
--- /dev/null
+++ b/hadoop-ozone/dev-support/checks/findbugs.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FINDBUGS_ALL_FILE=./target/findbugs-all.txt
+
+mkdir -p ./target
+rm "$FINDBUGS_ALL_FILE" || true
+touch "$FINDBUGS_ALL_FILE"
+
+mvn -fn findbugs:check -Dfindbugs.failOnError=false  -am -pl :hadoop-ozone-dist -Phdds
+
+find hadoop-ozone -name findbugsXml.xml | xargs -n1 convertXmlToText >> "${FINDBUGS_ALL_FILE}"
+find hadoop-hdds -name findbugsXml.xml | xargs -n1 convertXmlToText >> "${FINDBUGS_ALL_FILE}"
+
+
+bugs=$(cat "$FINDBUGS_ALL_FILE" | wc -l)
+
+if [[ ${bugs} -gt 0 ]]; then
+   exit -1
+else
+   exit 0
+fi
\ No newline at end of file
diff --git a/hadoop-ozone/dev-support/checks/isolation.sh b/hadoop-ozone/dev-support/checks/isolation.sh
new file mode 100755
index 000000000000..c8f2c628b0d3
--- /dev/null
+++ b/hadoop-ozone/dev-support/checks/isolation.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+hadooplines=$(git diff --name-only HEAD~1..HEAD | grep -v hadoop-ozone | grep -v hadoop-hdds | wc -l )
+if [ "$hadooplines" == "0" ]; then
+  echo "Only ozone/hdds subprojects are changed"
+  exit 0
+else
+  echo "Main hadoop projects are changed in an ozone patch."
+  echo "Please do it in a HADOOP/HDFS patch and test it with hadoop precommit tests"
+  exit -1
+fi
diff --git a/hadoop-ozone/dev-support/checks/rat.sh b/hadoop-ozone/dev-support/checks/rat.sh
new file mode 100755
index 000000000000..aadb251a2945
--- /dev/null
+++ b/hadoop-ozone/dev-support/checks/rat.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+mkdir -p target
+rm -f target/rat-aggregated.txt
+mvn -fn org.apache.rat:apache-rat-plugin:0.13:check -am -pl :hadoop-ozone-dist -Phdds
+grep -r --include=rat.txt "!????" | tee ./target/rat-aggregated.txt
+if [ "$(cat target/rat-aggregated.txt)" ]; then
+   exit -1
+fi
+
diff --git a/hadoop-ozone/dev-support/checks/unit.sh b/hadoop-ozone/dev-support/checks/unit.sh
new file mode 100755
index 000000000000..d839f227c2b9
--- /dev/null
+++ b/hadoop-ozone/dev-support/checks/unit.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+export MAVEN_OPTS="-Xmx4096m"
+mvn -fn test -am -pl :hadoop-ozone-dist -P hdds
+module_failed_tests=$(find "." -name 'TEST*.xml'\
+    | xargs "grep" -l -E "<failure|<error"\
+    | awk -F/ '{sub("'"TEST-JUNIT_TEST_OUTPUT_DIR"'",""); sub(".xml",""); print $NF}')
+if [[ -n "${module_failed_tests}" ]] ; then
+    exit -1
+fi
+exit 0
diff --git a/hadoop-ozone/dev-support/docker/Dockerfile b/hadoop-ozone/dev-support/docker/Dockerfile
new file mode 100644
index 000000000000..a84367ee00f3
--- /dev/null
+++ b/hadoop-ozone/dev-support/docker/Dockerfile
@@ -0,0 +1,66 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+FROM alpine
+RUN apk add --update --no-cache bash alpine-sdk maven grep openjdk8 py-pip rsync procps autoconf automake libtool findutils
+
+#Install real glibc
+RUN apk --no-cache add ca-certificates wget && \
+    wget -q -O /etc/apk/keys/sgerrand.rsa.pub https://alpine-pkgs.sgerrand.com/sgerrand.rsa.pub && \
+    wget https://github.com/sgerrand/alpine-pkg-glibc/releases/download/2.28-r0/glibc-2.28-r0.apk && \
+    apk add glibc-2.28-r0.apk
+
+#Install protobuf
+RUN mkdir -p /usr/local/src/ && \
+    cd /usr/local/src/ && \
+    wget https://github.com/google/protobuf/releases/download/v2.5.0/protobuf-2.5.0.tar.gz && \
+    tar xvf protobuf-2.5.0.tar.gz && \
+    cd protobuf-2.5.0 && \
+    ./autogen.sh && \
+    ./configure --prefix=/usr && \
+    make && \
+    make install && \
+    protoc --version
+
+#Findbug install
+RUN mkdir -p /opt && \
+    curl -sL https://sourceforge.net/projects/findbugs/files/findbugs/3.0.1/findbugs-3.0.1.tar.gz/download | tar -xz  && \
+     mv findbugs-* /opt/findbugs
+
+#Install apache-ant
+RUN mkdir -p /opt && \
+    curl -sL 'https://www.apache.org/dyn/mirrors/mirrors.cgi?action=download&filename=/ant/binaries/apache-ant-1.10.5-bin.tar.gz' | tar -xz  && \
+       mv apache-ant* /opt/ant
+
+#Install docker-compose
+RUN pip install docker-compose
+
+ENV PATH=$PATH:/opt/findbugs/bin
+
+RUN addgroup -g 1000 default && \
+   for i in $(seq 1 2000); do adduser jenkins$i -u $i -G default -h /tmp/ -H -D; done
+
+#This is a very large local maven cache. Usually the mvn repository is not safe to be
+#shared between builds, as concurrent installs are not handled very well.
+#A simple workaround is to provide all the required 3rd party libs in the docker image:
+#they will be cached by docker, and any additional dependency can still be downloaded
+#and artifacts can be installed.
+USER jenkins1000
+RUN cd /tmp && \
+   git clone --depth=1 https://gitbox.apache.org/repos/asf/hadoop.git -b trunk && \
+   cd /tmp/hadoop && \
+   mvn package dependency:go-offline -DskipTests -P hdds -pl :hadoop-ozone-dist -am && \
+   rm -rf /tmp/.m2/repository/org/apache/hadoop/*hdds* && \
+   rm -rf /tmp/.m2/repository/org/apache/hadoop/*ozone* && \
+   find /tmp/.m2/repository -exec chmod o+wx {} \;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java
index 8d23afd31f04..8324d3feab2c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java
@@ -41,6 +41,9 @@
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos
     .ReplicationFactor.THREE;
 
+/**
+ * Test for the Node2Pipeline map.
+ */
 public class TestNode2PipelineMap {
 
   private static MiniOzoneCluster cluster;
@@ -80,7 +83,6 @@ public void shutdown() {
     }
   }
 
-
   @Test
   public void testPipelineMap() throws IOException {
 
@@ -90,7 +92,7 @@ public void testPipelineMap() throws IOException {
     ContainerID cId = ratisContainer.getContainerInfo().containerID();
     Assert.assertEquals(1, set.size());
     set.forEach(containerID ->
-            Assert.assertEquals(containerID, cId));
+        Assert.assertEquals(containerID, cId));
 
     List<DatanodeDetails> dns = ratisContainer.getPipeline().getNodes();
     Assert.assertEquals(3, dns.size());
@@ -102,7 +104,6 @@ public void testPipelineMap() throws IOException {
     pipelines.forEach(p -> Assert.assertEquals(p,
         ratisContainer.getPipeline().getId()));
 
-
     // Now close the container and it should not show up while fetching
     // containers by pipeline
     containerManager
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
index b352e3680f46..875e63cb0db3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
@@ -211,7 +211,7 @@ void restartHddsDatanode(DatanodeDetails dn, boolean waitForDatanode)
   /**
    * Builder class for MiniOzoneCluster.
    */
-  @SuppressWarnings("CheckStyle")
+  @SuppressWarnings("visibilitymodifier")
   abstract class Builder {
 
     protected static final int DEFAULT_HB_INTERVAL_MS = 1000;
@@ -259,8 +259,8 @@ public Builder setClusterId(String id) {
       return this;
     }
 
-    public Builder setStartDataNodes(boolean startDataNodes) {
-      this.startDataNodes = startDataNodes;
+    public Builder setStartDataNodes(boolean nodes) {
+      this.startDataNodes = nodes;
       return this;
     }
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
index ee47b44cc1cb..06df73abe4bd 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
@@ -254,15 +254,15 @@ public void restartHddsDatanode(int i, boolean waitForDatanode)
     datanodeService.stop();
     datanodeService.join();
     // ensure same ports are used across restarts.
-    Configuration conf = datanodeService.getConf();
+    Configuration config = datanodeService.getConf();
     int currentPort = datanodeService.getDatanodeDetails()
         .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue();
-    conf.setInt(DFS_CONTAINER_IPC_PORT, currentPort);
-    conf.setBoolean(DFS_CONTAINER_IPC_RANDOM_PORT, false);
+    config.setInt(DFS_CONTAINER_IPC_PORT, currentPort);
+    config.setBoolean(DFS_CONTAINER_IPC_RANDOM_PORT, false);
     int ratisPort = datanodeService.getDatanodeDetails()
         .getPort(DatanodeDetails.Port.Name.RATIS).getValue();
-    conf.setInt(DFS_CONTAINER_RATIS_IPC_PORT, ratisPort);
-    conf.setBoolean(DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, false);
+    config.setInt(DFS_CONTAINER_RATIS_IPC_PORT, ratisPort);
+    config.setBoolean(DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, false);
     hddsDatanodes.remove(i);
     if (waitForDatanode) {
       // wait for node to be removed from SCM healthy node list.
@@ -270,7 +270,7 @@ public void restartHddsDatanode(int i, boolean waitForDatanode)
     }
     String[] args = new String[]{};
     HddsDatanodeService service =
-        HddsDatanodeService.createHddsDatanodeService(args, conf);
+        HddsDatanodeService.createHddsDatanodeService(args, config);
     hddsDatanodes.add(i, service);
     service.start(null);
     if (waitForDatanode) {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java
index c4e6da165f5c..7ea259da1bb0 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java
@@ -84,7 +84,8 @@ public static void performOperationOnKeyContainers(
       CheckedConsumer<BlockID, Exception> consumer,
       List<OmKeyLocationInfoGroup> omKeyLocationInfoGroups) throws Exception {
 
-    for (OmKeyLocationInfoGroup omKeyLocationInfoGroup : omKeyLocationInfoGroups) {
+    for (OmKeyLocationInfoGroup omKeyLocationInfoGroup :
+        omKeyLocationInfoGroups) {
       List<OmKeyLocationInfo> omKeyLocationInfos =
           omKeyLocationInfoGroup.getLocationList();
       for (OmKeyLocationInfo omKeyLocationInfo : omKeyLocationInfos) {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
index 695b3f1915e6..2d678ad3293d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
@@ -47,6 +47,9 @@
 import java.util.List;
 import java.util.concurrent.TimeoutException;
 
+/**
+ * Test container closing.
+ */
 public class TestCloseContainerByPipeline {
 
   private static MiniOzoneCluster cluster;
@@ -232,10 +235,12 @@ public void testCloseContainerViaRatis() throws IOException,
     }
   }
 
-  private Boolean isContainerClosed(MiniOzoneCluster cluster, long containerID,
+  private Boolean isContainerClosed(MiniOzoneCluster ozoneCluster,
+      long containerID,
       DatanodeDetails datanode) {
     ContainerData containerData;
-    for (HddsDatanodeService datanodeService : cluster.getHddsDatanodes()) {
+    for (HddsDatanodeService datanodeService : ozoneCluster
+        .getHddsDatanodes()) {
       if (datanode.equals(datanodeService.getDatanodeDetails())) {
         containerData =
             datanodeService.getDatanodeStateMachine().getContainer()
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
index d36c25358471..b4dff7c0dda9 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
@@ -149,7 +149,8 @@ static void runTestClientServer(
     XceiverClientSpi client = null;
     String containerName = OzoneUtils.getRequestID();
     try {
-      final Pipeline pipeline = ContainerTestHelper.createPipeline(numDatanodes);
+      final Pipeline pipeline =
+          ContainerTestHelper.createPipeline(numDatanodes);
       final OzoneConfiguration conf = new OzoneConfiguration();
       initConf.accept(pipeline, conf);
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneDatanodeShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneDatanodeShell.java
index 5fefc538aa59..a45dee899e1a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneDatanodeShell.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneDatanodeShell.java
@@ -67,7 +67,6 @@
  * This test class specified for testing Ozone datanode shell command.
  */
 @RunWith(value = Parameterized.class)
-
 public class TestOzoneDatanodeShell {
 
   private static final Logger LOG =
@@ -100,6 +99,7 @@
   }
 
   @Parameterized.Parameter
+  @SuppressWarnings("visibilitymodifier")
   public Class clientProtocol;
   /**
    * Create a MiniDFSCluster for testing with using distributed Ozone
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
index bd05b929ee9d..b10cc620f755 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
@@ -125,6 +125,7 @@
   }
 
   @Parameterized.Parameter
+  @SuppressWarnings("visibilitymodifier")
   public Class clientProtocol;
   /**
    * Create a MiniDFSCluster for testing with using distributed Ozone
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java
index f0672925d703..88b7c043203f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java
@@ -64,10 +64,11 @@ public static void shutdown() throws InterruptedException {
 
   @Test
   public void testAllocate() throws Exception {
-    ContainerWithPipeline container = storageContainerLocationClient.allocateContainer(
-        xceiverClientManager.getType(),
-        xceiverClientManager.getFactor(),
-        containerOwner);
+    ContainerWithPipeline container =
+        storageContainerLocationClient.allocateContainer(
+            xceiverClientManager.getType(),
+            xceiverClientManager.getFactor(),
+            containerOwner);
     Assert.assertNotNull(container);
     Assert.assertNotNull(container.getPipeline().getFirstNode());
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneRestWithMiniCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneRestWithMiniCluster.java
index 968024346a94..444ff3cf0ea1 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneRestWithMiniCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneRestWithMiniCluster.java
@@ -103,7 +103,8 @@ public void testPutAndGetKey() throws Exception {
     putKey(bucket, keyName, keyData);
   }
 
-  private void putKey(OzoneBucket bucket, String keyName, String keyData) throws IOException {
+  private void putKey(OzoneBucket bucket, String keyName, String keyData)
+      throws IOException {
     try (
         OzoneOutputStream ozoneOutputStream = bucket
             .createKey(keyName, 0, replicationType, replicationFactor);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBuckets.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBuckets.java
index fe96198636a5..917a0ad2d275 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBuckets.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBuckets.java
@@ -78,6 +78,7 @@
     return Arrays.asList(params);
   }
 
+  @SuppressWarnings("visibilitymodifier")
   @Parameterized.Parameter
   public static Class clientProtocol;
 
@@ -124,7 +125,7 @@ public void testCreateBucket() throws Exception {
     runTestCreateBucket(client);
   }
 
-  static void runTestCreateBucket(ClientProtocol client)
+  static void runTestCreateBucket(ClientProtocol protocol)
       throws IOException {
     String volumeName = OzoneUtils.getRequestID().toLowerCase();
     VolumeArgs volumeArgs = VolumeArgs.newBuilder()
@@ -132,8 +133,8 @@ static void runTestCreateBucket(ClientProtocol client)
         .setQuota("100TB")
         .setAdmin("hdfs")
         .build();
-    client.createVolume(volumeName, volumeArgs);
-    OzoneVolume vol = client.getVolumeDetails(volumeName);
+    protocol.createVolume(volumeName, volumeArgs);
+    OzoneVolume vol = protocol.getVolumeDetails(volumeName);
     String[] acls = {"user:frodo:rw", "user:samwise:rw"};
 
     // create 10 buckets under same volume
@@ -154,7 +155,7 @@ static void runTestCreateBucket(ClientProtocol client)
       // verify the bucket creation time
       assertTrue((bucket.getCreationTime() / 1000) >= (currentTime / 1000));
     }
-    client.close();
+    protocol.close();
 
     assertEquals(vol.getName(), volumeName);
     assertEquals(vol.getAdmin(), "hdfs");
@@ -179,7 +180,7 @@ public void testAddBucketAcls() throws Exception {
     runTestAddBucketAcls(client);
   }
 
-  static void runTestAddBucketAcls(ClientProtocol client)
+  static void runTestAddBucketAcls(ClientProtocol protocol)
       throws OzoneException, IOException, ParseException {
     String volumeName = OzoneUtils.getRequestID().toLowerCase();
     VolumeArgs volumeArgs = VolumeArgs.newBuilder()
@@ -187,8 +188,8 @@ static void runTestAddBucketAcls(ClientProtocol client)
         .setQuota("100TB")
         .setAdmin("hdfs")
         .build();
-    client.createVolume(volumeName, volumeArgs);
-    OzoneVolume vol = client.getVolumeDetails(volumeName);
+    protocol.createVolume(volumeName, volumeArgs);
+    OzoneVolume vol = protocol.getVolumeDetails(volumeName);
     String[] acls = {"user:frodo:rw", "user:samwise:rw"};
     String bucketName = OzoneUtils.getRequestID().toLowerCase();
     vol.createBucket(bucketName);
@@ -203,7 +204,7 @@ static void runTestAddBucketAcls(ClientProtocol client)
     // verify if the creation time is missing after update operation
     assertTrue(
         (updatedBucket.getCreationTime()) / 1000 >= 0);
-    client.close();
+    protocol.close();
   }
 
   @Test
@@ -211,7 +212,7 @@ public void testRemoveBucketAcls() throws Exception {
     runTestRemoveBucketAcls(client);
   }
 
-  static void runTestRemoveBucketAcls(ClientProtocol client)
+  static void runTestRemoveBucketAcls(ClientProtocol protocol)
       throws OzoneException, IOException, ParseException {
     String volumeName = OzoneUtils.getRequestID().toLowerCase();
     VolumeArgs volumeArgs = VolumeArgs.newBuilder()
@@ -219,8 +220,8 @@ static void runTestRemoveBucketAcls(ClientProtocol client)
         .setQuota("100TB")
         .setAdmin("hdfs")
         .build();
-    client.createVolume(volumeName, volumeArgs);
-    OzoneVolume vol = client.getVolumeDetails(volumeName);
+    protocol.createVolume(volumeName, volumeArgs);
+    OzoneVolume vol = protocol.getVolumeDetails(volumeName);
     String[] acls = {"user:frodo:rw", "user:samwise:rw"};
     String bucketName = OzoneUtils.getRequestID().toLowerCase();
     List<OzoneAcl> aclList =
@@ -239,7 +240,7 @@ static void runTestRemoveBucketAcls(ClientProtocol client)
     // verify if the creation time is missing after update operation
     assertTrue(
         (updatedBucket.getCreationTime() / 1000) >= 0);
-    client.close();
+    protocol.close();
   }
 
   @Test
@@ -247,7 +248,7 @@ public void testDeleteBucket() throws OzoneException, IOException {
     runTestDeleteBucket(client);
   }
 
-  static void runTestDeleteBucket(ClientProtocol client)
+  static void runTestDeleteBucket(ClientProtocol protocol)
       throws OzoneException, IOException {
     String volumeName = OzoneUtils.getRequestID().toLowerCase();
     VolumeArgs volumeArgs = VolumeArgs.newBuilder()
@@ -255,8 +256,8 @@ static void runTestDeleteBucket(ClientProtocol client)
         .setQuota("100TB")
         .setAdmin("hdfs")
         .build();
-    client.createVolume(volumeName, volumeArgs);
-    OzoneVolume vol = client.getVolumeDetails(volumeName);
+    protocol.createVolume(volumeName, volumeArgs);
+    OzoneVolume vol = protocol.getVolumeDetails(volumeName);
     String[] acls = {"user:frodo:rw", "user:samwise:rw"};
     String bucketName = OzoneUtils.getRequestID().toLowerCase();
     List<OzoneAcl> aclList =
@@ -274,7 +275,7 @@ static void runTestDeleteBucket(ClientProtocol client)
       // must throw
       assertNotNull(ex);
     }
-    client.close();
+    protocol.close();
   }
 
   @Test
@@ -282,7 +283,7 @@ public void testListBucket() throws Exception {
     runTestListBucket(client);
   }
 
-  static void runTestListBucket(ClientProtocol client)
+  static void runTestListBucket(ClientProtocol protocol)
       throws OzoneException, IOException, ParseException {
     String volumeName = OzoneUtils.getRequestID().toLowerCase();
     VolumeArgs volumeArgs = VolumeArgs.newBuilder()
@@ -290,11 +291,11 @@ static void runTestListBucket(ClientProtocol client)
         .setQuota("100TB")
         .setAdmin("hdfs")
         .build();
-    client.createVolume(volumeName, volumeArgs);
-    OzoneVolume vol = client.getVolumeDetails(volumeName);
+    protocol.createVolume(volumeName, volumeArgs);
+    OzoneVolume vol = protocol.getVolumeDetails(volumeName);
     String[] acls = {"user:frodo:rw", "user:samwise:rw"};
     List<OzoneAcl> aclList =
-        Arrays.stream(acls).map(acl -> OzoneAcl.parseAcl(acl))
+        Arrays.stream(acls).map(OzoneAcl::parseAcl)
             .collect(Collectors.toList());
 
     long currentTime = Time.now();
@@ -321,7 +322,7 @@ static void runTestListBucket(ClientProtocol client)
     bucketIterator = vol.listBuckets(null, "listbucket-test-3");
     assertEquals(getSize(bucketIterator), 6);
 
-    client.close();
+    protocol.close();
   }
 
   private static int getSize(Iterator<? extends OzoneBucket> bucketIterator) {
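
Besides renaming the parameter, the TestBuckets hunk above replaces the lambda
acl -> OzoneAcl.parseAcl(acl) with the equivalent method reference
OzoneAcl::parseAcl; both compile to the same function, so the change is purely
stylistic. A self-contained sketch of the equivalence (Acl here is a
hypothetical stand-in for OzoneAcl):

    import java.util.Arrays;
    import java.util.List;
    import java.util.stream.Collectors;

    public class MethodRefExample {

      // Hypothetical stand-in for OzoneAcl.parseAcl(String).
      static final class Acl {
        static Acl parse(String spec) {
          return new Acl();
        }
      }

      public static void main(String[] args) {
        String[] acls = {"user:frodo:rw", "user:samwise:rw"};
        // Lambda form:
        List<Acl> viaLambda = Arrays.stream(acls)
            .map(a -> Acl.parse(a)).collect(Collectors.toList());
        // Method-reference form, as used in the patch:
        List<Acl> viaRef = Arrays.stream(acls)
            .map(Acl::parse).collect(Collectors.toList());
        System.out.println(viaLambda.size() == viaRef.size());
      }
    }
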
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBucketsRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBucketsRatis.java
index 687e7e6a2592..efb06983137b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBucketsRatis.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBucketsRatis.java
@@ -57,6 +57,7 @@
   }
 
   @Parameterized.Parameter
+  @SuppressWarnings("visibilitymodifier")
   public static Class clientProtocol;
 
   @BeforeClass
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java
index 3765bc81b102..f4b013f296aa 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java
@@ -72,6 +72,7 @@
     return Arrays.asList(params);
   }
 
+  @SuppressWarnings("visibilitymodifier")
   @Parameterized.Parameter
   public Class clientProtocol;
 
@@ -120,7 +121,7 @@ public void testCreateVolume() throws Exception {
     runTestCreateVolume(client);
   }
 
-  static void runTestCreateVolume(ClientProtocol client)
+  static void runTestCreateVolume(ClientProtocol clientProtocol)
       throws OzoneException, IOException, ParseException {
     String volumeName = OzoneUtils.getRequestID().toLowerCase();
 
@@ -131,8 +132,8 @@ static void runTestCreateVolume(ClientProtocol client)
         .setQuota("100TB")
         .setAdmin("hdfs")
         .build();
-    client.createVolume(volumeName, volumeArgs);
-    OzoneVolume vol = client.getVolumeDetails(volumeName);
+    clientProtocol.createVolume(volumeName, volumeArgs);
+    OzoneVolume vol = clientProtocol.getVolumeDetails(volumeName);
 
     assertEquals(vol.getName(), volumeName);
     assertEquals(vol.getAdmin(), "hdfs");
@@ -147,7 +148,7 @@ static void runTestCreateVolume(ClientProtocol client)
     // not use Rule here because the test method is static.
     try {
       String invalidVolumeName = "#" + OzoneUtils.getRequestID().toLowerCase();
-      client.createVolume(invalidVolumeName);
+      clientProtocol.createVolume(invalidVolumeName);
       /*
       //TODO: RestClient and RpcClient should use HddsClientUtils to verify name
       fail("Except the volume creation be failed because the"
@@ -163,11 +164,11 @@ public void testCreateDuplicateVolume() throws OzoneException, IOException {
     runTestCreateDuplicateVolume(client);
   }
 
-  static void runTestCreateDuplicateVolume(ClientProtocol client)
+  static void runTestCreateDuplicateVolume(ClientProtocol clientProtocol)
       throws OzoneException, IOException {
     try {
-      client.createVolume("testvol");
-      client.createVolume("testvol");
+      clientProtocol.createVolume("testvol");
+      clientProtocol.createVolume("testvol");
       assertFalse(true);
     } catch (IOException ioe) {
       Assert.assertTrue(ioe.getMessage()
@@ -180,11 +181,11 @@ public void testDeleteVolume() throws OzoneException, IOException {
     runTestDeleteVolume(client);
   }
 
-  static void runTestDeleteVolume(ClientProtocol client)
+  static void runTestDeleteVolume(ClientProtocol clientProtocol)
       throws OzoneException, IOException {
     String volumeName = OzoneUtils.getRequestID().toLowerCase();
-    client.createVolume(volumeName);
-    client.deleteVolume(volumeName);
+    clientProtocol.createVolume(volumeName);
+    clientProtocol.deleteVolume(volumeName);
   }
 
   @Test
@@ -192,13 +193,13 @@ public void testChangeOwnerOnVolume() throws Exception {
     runTestChangeOwnerOnVolume(client);
   }
 
-  static void runTestChangeOwnerOnVolume(ClientProtocol client)
+  static void runTestChangeOwnerOnVolume(ClientProtocol clientProtocol)
       throws OzoneException, ParseException, IOException {
     String volumeName = OzoneUtils.getRequestID().toLowerCase();
-    client.createVolume(volumeName);
-    client.getVolumeDetails(volumeName);
-    client.setVolumeOwner(volumeName, "frodo");
-    OzoneVolume newVol = client.getVolumeDetails(volumeName);
+    clientProtocol.createVolume(volumeName);
+    clientProtocol.getVolumeDetails(volumeName);
+    clientProtocol.setVolumeOwner(volumeName, "frodo");
+    OzoneVolume newVol = clientProtocol.getVolumeDetails(volumeName);
     assertEquals(newVol.getOwner(), "frodo");
     // verify if the creation time is missing after setting owner operation
     assertTrue(newVol.getCreationTime() > 0);
@@ -209,12 +210,12 @@ public void testChangeQuotaOnVolume() throws Exception {
     runTestChangeQuotaOnVolume(client);
   }
 
-  static void runTestChangeQuotaOnVolume(ClientProtocol client)
+  static void runTestChangeQuotaOnVolume(ClientProtocol clientProtocol)
       throws OzoneException, IOException, ParseException {
     String volumeName = OzoneUtils.getRequestID().toLowerCase();
-    client.createVolume(volumeName);
-    client.setVolumeQuota(volumeName, OzoneQuota.parseQuota("1000MB"));
-    OzoneVolume newVol = client.getVolumeDetails(volumeName);
+    clientProtocol.createVolume(volumeName);
+    clientProtocol.setVolumeQuota(volumeName, OzoneQuota.parseQuota("1000MB"));
+    OzoneVolume newVol = clientProtocol.getVolumeDetails(volumeName);
     assertEquals(newVol.getQuota(),
         OzoneQuota.parseQuota("1000MB").sizeInBytes());
     // verify if the creation time is missing after setting quota operation
@@ -229,14 +230,14 @@ public void testListVolume() throws OzoneException, IOException {
     runTestListVolume(client);
   }
 
-  static void runTestListVolume(ClientProtocol client)
+  static void runTestListVolume(ClientProtocol clientProtocol)
       throws OzoneException, IOException {
     for (int x = 0; x < 10; x++) {
       String volumeName = OzoneUtils.getRequestID().toLowerCase();
-      client.createVolume(volumeName);
+      clientProtocol.createVolume(volumeName);
     }
 
-    List<OzoneVolume> ovols = client.listVolumes(null, null, 100);
+    List<OzoneVolume> ovols = clientProtocol.listVolumes(null, null, 100);
     assertTrue(ovols.size() >= 10);
   }
 
@@ -247,19 +248,19 @@ public void testListVolumePagination() throws OzoneException, IOException {
     runTestListVolumePagination(client);
   }
 
-  static void runTestListVolumePagination(ClientProtocol client)
+  static void runTestListVolumePagination(ClientProtocol clientProtocol)
       throws OzoneException, IOException {
     final int volCount = 2000;
     final int step = 100;
     for (int x = 0; x < volCount; x++) {
       String volumeName = OzoneUtils.getRequestID().toLowerCase();
-      client.createVolume(volumeName);
+      clientProtocol.createVolume(volumeName);
     }
     String prevKey = null;
     int count = 0;
     int pagecount = 0;
     while (count < volCount) {
-      List<OzoneVolume> ovols = client.listVolumes(null, prevKey, step);
+      List<OzoneVolume> ovols = clientProtocol.listVolumes(null, prevKey, step);
       count += ovols.size();
       prevKey = ovols.get(ovols.size() - 1).getName();
       pagecount++;
@@ -274,7 +275,7 @@ public void testListAllVolumes() throws OzoneException, IOException {
     runTestListAllVolumes(client);
   }
 
-  static void runTestListAllVolumes(ClientProtocol client)
+  static void runTestListAllVolumes(ClientProtocol clientProtocol)
       throws OzoneException, IOException {
     final int volCount = 200;
     final int step = 10;
@@ -288,15 +289,15 @@ static void runTestListAllVolumes(ClientProtocol client)
           .setQuota("100TB")
           .setAdmin("hdfs")
           .build();
-      client.createVolume(volumeName, volumeArgs);
-      OzoneVolume vol = client.getVolumeDetails(volumeName);
+      clientProtocol.createVolume(volumeName, volumeArgs);
+      OzoneVolume vol = clientProtocol.getVolumeDetails(volumeName);
       assertNotNull(vol);
     }
     String prevKey = null;
     int count = 0;
     int pagecount = 0;
     while (count < volCount) {
-      List<OzoneVolume> ovols = client.listVolumes(null, prevKey, step);
+      List<OzoneVolume> ovols = clientProtocol.listVolumes(null, prevKey, step);
       count += ovols.size();
       if (ovols.size() > 0) {
         prevKey = ovols.get(ovols.size() - 1).getName();
@@ -316,7 +317,7 @@ public void testListVolumes() throws Exception {
     runTestListVolumes(client);
   }
 
-  static void runTestListVolumes(ClientProtocol client)
+  static void runTestListVolumes(ClientProtocol clientProtocol)
       throws OzoneException, IOException, ParseException {
     final int volCount = 20;
     final String user1 = "test-user-a";
@@ -342,13 +343,14 @@ static void runTestListVolumes(ClientProtocol client)
           .setQuota("100TB")
           .setAdmin("hdfs")
           .build();
-      client.createVolume(volumeName, volumeArgs);
-      OzoneVolume vol = client.getVolumeDetails(volumeName);
+      clientProtocol.createVolume(volumeName, volumeArgs);
+      OzoneVolume vol = clientProtocol.getVolumeDetails(volumeName);
       assertNotNull(vol);
     }
 
     // list all the volumes belong to user1
-    List<OzoneVolume> volumeList = client.listVolumes(user1, null, null, 100);
+    List<OzoneVolume> volumeList =
+        clientProtocol.listVolumes(user1, null, null, 100);
     assertEquals(10, volumeList.size());
     // verify the owner name and creation time of volume
     for (OzoneVolume vol : volumeList) {
@@ -358,25 +360,25 @@ static void runTestListVolumes(ClientProtocol client)
     }
 
     // test max key parameter of listing volumes
-    volumeList = client.listVolumes(user1, null, null, 2);
+    volumeList = clientProtocol.listVolumes(user1, null, null, 2);
     assertEquals(2, volumeList.size());
 
     // test prefix parameter of listing volumes
-    volumeList = client.listVolumes(user1, "test-vol10", null, 10);
+    volumeList = clientProtocol.listVolumes(user1, "test-vol10", null, 10);
     assertTrue(volumeList.size() == 1
         && volumeList.get(0).getName().equals("test-vol10"));
 
-    volumeList = client.listVolumes(user1, "test-vol1", null, 10);
+    volumeList = clientProtocol.listVolumes(user1, "test-vol1", null, 10);
     assertEquals(5, volumeList.size());
 
     // test start key parameter of listing volumes
-    volumeList = client.listVolumes(user2, null, "test-vol15", 10);
+    volumeList = clientProtocol.listVolumes(user2, null, "test-vol15", 10);
     assertEquals(2, volumeList.size());
 
     String volumeName;
     for (int x = 0; x < volCount; x++) {
       volumeName = "test-vol" + x;
-      client.deleteVolume(volumeName);
+      clientProtocol.deleteVolume(volumeName);
     }
   }
 }
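
The wholesale client -> protocol/clientProtocol renames in TestBuckets and
TestVolume look mechanical, but they have a point: each static helper took a
parameter named client while the enclosing class also declares a client field,
and a parameter that hides a field is what checkstyle's HiddenField check
reports (assuming that check is enabled in this ruleset). A minimal sketch of
the problem and the fix:

    public class HiddenFieldExample {

      private static Object client;

      // Before: the parameter name hides the static field above, which a
      // HiddenField-style check flags as error-prone.
      static void runBefore(Object client) {
        // "client" here resolves to the parameter, not the field.
      }

      // After: a distinct parameter name removes the shadowing.
      static void runAfter(Object clientProtocol) {
        // The field is still unambiguously reachable as "client".
      }
    }
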
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolumeRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolumeRatis.java
index 1a05a3cab02c..186eed9ecfa8 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolumeRatis.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolumeRatis.java
@@ -60,6 +60,7 @@
   }
 
   @Parameterized.Parameter
+  @SuppressWarnings("visibilitymodifier")
   public Class clientProtocol;
 
   @BeforeClass
diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/Bucket.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/Bucket.java
index 4c8b85b4b371..fe4b1b6280a1 100644
--- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/Bucket.java
+++ b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/Bucket.java
@@ -164,6 +164,7 @@ Response deleteBucket(@PathParam("volume") String volume,
           + "04:23:30 GMT", required = true, paramType = "header"),
       @ApiImplicitParam(name = "Authorization", example = "OZONE", required =
           true, paramType = "header")})
+  @SuppressWarnings("parameternumber")
   Response listBucket(@PathParam("volume") String volume,
                       @PathParam("bucket") String bucket,
                       @DefaultValue(Header.OZONE_INFO_QUERY_KEY)
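
The parameternumber suppression on listBucket here, and on the analogous
Volume and BucketEndpoint methods further down, concedes that these JAX-RS
signatures exceed checkstyle's ParameterNumber limit (seven parameters by
default); since every argument maps to a distinct HTTP path or query
parameter, bundling them into a holder object would obscure the REST binding,
so a targeted suppression is the pragmatic fix. The methodlength suppression
later in the patch follows the same logic for one oversized dispatch method. A
sketch of the shape being exempted, with illustrative names:

    interface ExampleEndpoint {

      // Eight parameters exceeds ParameterNumber's default maximum of
      // seven; the annotation documents that this is deliberate.
      @SuppressWarnings("parameternumber")
      String list(String volume, String bucket, String prefix, String marker,
          String delimiter, String encoding, int maxKeys, String browser);
    }
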
diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/Volume.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/Volume.java
index ce370421e38d..efb6f68cdd2b 100644
--- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/Volume.java
+++ b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/Volume.java
@@ -170,6 +170,7 @@ Response deleteVolume(@PathParam("volume") String volume,
           + "04:23:30 GMT", required = true, paramType = "header"),
       @ApiImplicitParam(name = "Authorization", example = "OZONE", required =
           true, paramType = "header")})
+  @SuppressWarnings("parameternumber")
   Response getVolumeInfo(@PathParam("volume") String volume,
       @DefaultValue(Header.OZONE_INFO_QUERY_BUCKET)
       @QueryParam(Header.OZONE_INFO_QUERY_TAG) String info,
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManager.java
index ddb2b0e26d3e..e82de80dc976 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManager.java
@@ -74,6 +74,6 @@ OmBucketInfo getBucketInfo(String volumeName, String bucketName)
    * @throws IOException
    */
   List<OmBucketInfo> listBuckets(String volumeName,
-                                 String startBucket, String bucketPrefix, int maxNumOfBuckets)
+      String startBucket, String bucketPrefix, int maxNumOfBuckets)
       throws IOException;
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStorage.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStorage.java
index b7b6929f400d..3ed04f104102 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStorage.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStorage.java
@@ -22,10 +22,9 @@
 import java.util.UUID;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.server.ServerUtils;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
 import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.common.Storage;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
 
 import static org.apache.hadoop.ozone.OzoneConsts.SCM_ID;
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index 603bd184c90e..bfab48cf7449 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -28,20 +28,19 @@
 import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.common.BlockGroup;
+import org.apache.hadoop.ozone.om.codec.OmBucketInfoCodec;
+import org.apache.hadoop.ozone.om.codec.OmKeyInfoCodec;
+import org.apache.hadoop.ozone.om.codec.OmMultipartKeyInfoCodec;
+import org.apache.hadoop.ozone.om.codec.OmVolumeArgsCodec;
+import org.apache.hadoop.ozone.om.codec.VolumeListCodec;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.codec.OmBucketInfoCodec;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
-import org.apache.hadoop.ozone.om.codec.OmMultipartKeyInfoCodec;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.codec.OmKeyInfoCodec;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.om.codec.OmVolumeArgsCodec;
-import org.apache.hadoop.ozone.om.codec.VolumeListCodec;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeList;
-import org.apache.hadoop.util.Time;
 import org.apache.hadoop.utils.db.DBStore;
 import org.apache.hadoop.utils.db.DBStoreBuilder;
 import org.apache.hadoop.utils.db.Table;
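
The import churn in OMStorage and OmMetadataManagerImpl serves two checks at
once: imports that nothing in the file references (ServerUtils above,
org.apache.hadoop.util.Time in this hunk, and presumably CommandLine and
Response further down) trip checkstyle's UnusedImports check, and the
surviving imports are regrouped into a single sorted block, which is what an
import-order check in this ruleset would expect. A tiny sketch of the first
half:

    import java.util.Arrays;
    import java.util.List;
    // An import such as java.util.Map that nothing below references is
    // exactly what the UnusedImports check would report.

    public class ImportHygieneExample {
      List<String> names() {
        return Arrays.asList("a", "b");
      }
    }
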
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManager.java
index 8475dd9e4a75..f25fce4ca20c 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManager.java
@@ -96,5 +96,5 @@ boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl)
    * @throws IOException
    */
   List<OmVolumeArgs> listVolumes(String userName, String prefix,
-                                 String startKey, int maxKeys) throws IOException;
+      String startKey, int maxKeys) throws IOException;
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OMRatisHelper.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OMRatisHelper.java
index 73ee517b2c96..bc861acc0ea0 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OMRatisHelper.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OMRatisHelper.java
@@ -43,7 +43,8 @@
 /**
  * Ratis helper methods for OM Ratis server and client.
  */
-public class OMRatisHelper {
+public final class OMRatisHelper {
+
   private static final Logger LOG = LoggerFactory.getLogger(
       OMRatisHelper.class);
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java
index c7b86a0aa183..ca54bae407d8 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java
@@ -214,6 +214,7 @@ private OMResponse submitRequestToRatis(OMRequest request) {
   /**
    * Submits request directly to OM.
    */
+  @SuppressWarnings("methodlength")
   private OMResponse submitRequestToOM(OMRequest request)
       throws ServiceException {
     Type cmdType = request.getCmdType();
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/SignedChunksInputStream.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/SignedChunksInputStream.java
index b0dfce64ffae..1074ef2dfb75 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/SignedChunksInputStream.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/SignedChunksInputStream.java
@@ -66,7 +66,7 @@ public int read() throws IOException {
   }
 
   @Override
-  public int read(byte b[], int off, int len) throws IOException {
+  public int read(byte[] b, int off, int len) throws IOException {
     if (b == null) {
       throw new NullPointerException();
     } else if (off < 0 || len < 0 || len > b.length - off) {
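
The read(byte[] b, ...) change swaps a C-style array declaration (byte b[])
for the Java-style one (byte[] b); the two produce identical signatures, and
checkstyle's ArrayTypeStyle check standardizes on the latter because the
brackets belong conceptually to the type, not the variable. A minimal sketch:

    public class ArrayStyleExample {

      // Java-style declaration: the brackets attach to the type.
      // The C-style equivalent, "int values[]", is what ArrayTypeStyle flags.
      int sum(int[] values) {
        int total = 0;
        for (int v : values) {
          total += v;
        }
        return total;
      }
    }
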
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
index 1f0764b694f3..d3b36b48376e 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
@@ -74,6 +74,7 @@
    */
   @GET
   @SuppressFBWarnings
+  @SuppressWarnings("parameternumber")
   public Response list(
       @PathParam("bucket") String bucketName,
       @QueryParam("delimiter") String delimiter,
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectMultiDelete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectMultiDelete.java
index dabbd2291b40..f4c3b944459f 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectMultiDelete.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectMultiDelete.java
@@ -19,7 +19,6 @@
  */
 package org.apache.hadoop.ozone.s3.endpoint;
 
-import javax.ws.rs.core.Response;
 import javax.xml.bind.JAXBException;
 import java.io.IOException;
 import java.util.HashSet;
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/common/DatabaseHelper.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/common/DatabaseHelper.java
index c1f72755b8be..4b3154be2e56 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/common/DatabaseHelper.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/common/DatabaseHelper.java
@@ -145,7 +145,7 @@ private static boolean insertAudits(String dbName, String logs) {
       throws Exception {
     ArrayList<AuditEntry> listResult = new ArrayList<AuditEntry>();
     try(FileInputStream fis = new FileInputStream(filePath);
-        InputStreamReader isr = new InputStreamReader(fis);
+        InputStreamReader isr = new InputStreamReader(fis, "UTF-8");
         BufferedReader bReader = new BufferedReader(isr)) {
       String currentLine = null;
       String[] entry = null;
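
Pinning the DatabaseHelper reader to UTF-8 removes a dependency on the
platform default charset, so audit logs parse identically on every machine. A
sketch of the same idea; note that the StandardCharsets.UTF_8 overload also
avoids the checked UnsupportedEncodingException that the string form can
throw, though either spelling works:

    import java.io.BufferedReader;
    import java.io.FileInputStream;
    import java.io.IOException;
    import java.io.InputStreamReader;
    import java.nio.charset.StandardCharsets;

    public class Utf8ReadExample {

      static void printAll(String path) throws IOException {
        try (FileInputStream fis = new FileInputStream(path);
            // Explicit charset: the same bytes decode the same way
            // regardless of the JVM's file.encoding default.
            InputStreamReader isr =
                new InputStreamReader(fis, StandardCharsets.UTF_8);
            BufferedReader reader = new BufferedReader(isr)) {
          String line;
          while ((line = reader.readLine()) != null) {
            System.out.println(line);
          }
        }
      }
    }
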
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
index 5871b4966962..3c2708e6038e 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
@@ -62,7 +62,6 @@
 import com.fasterxml.jackson.databind.ObjectWriter;
 import com.google.common.annotations.VisibleForTesting;
 import static java.lang.Math.min;
-import org.apache.commons.cli.CommandLine;
 import org.apache.commons.lang3.ArrayUtils;
 import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.commons.lang3.time.DurationFormatUtils;
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkMetadataStoreReads.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkMetadataStoreReads.java
index 05e7920dbc44..99c8839d4f6a 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkMetadataStoreReads.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkMetadataStoreReads.java
@@ -34,6 +34,9 @@
 import static org.apache.hadoop.ozone.genesis.GenesisUtil.CLOSED_TYPE;
 import static org.apache.hadoop.ozone.genesis.GenesisUtil.DEFAULT_TYPE;
 
+/**
+ * Measure metadatastore read performance.
+ */
 @State(Scope.Thread)
 public class BenchMarkMetadataStoreReads {
 
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkMetadataStoreWrites.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkMetadataStoreWrites.java
index 4321287e1859..ebaaf3d432d8 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkMetadataStoreWrites.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkMetadataStoreWrites.java
@@ -32,10 +32,12 @@
 import static org.apache.hadoop.ozone.genesis.GenesisUtil.CACHE_1GB_TYPE;
 import static org.apache.hadoop.ozone.genesis.GenesisUtil.DEFAULT_TYPE;
 
+/**
+ * Measure default metadatastore put performance.
+ */
 @State(Scope.Thread)
 public class BenchMarkMetadataStoreWrites {
 
-
   private static final int DATA_LEN = 1024;
   private static final long MAX_KEYS = 1024 * 10;
 
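The one-line class Javadoc added to each Genesis benchmark above satisfies
checkstyle's requirement that every type carry a Javadoc summary (the
JavadocType check, assuming it is part of this ruleset); a single descriptive
sentence is all it takes:

    /**
     * Measures put throughput for the default metadata store.
     */
    public class ExampleBenchmark {
      // Benchmark methods would go here.
    }
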
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkRocksDbStore.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkRocksDbStore.java
index 5f4e035cd3ea..1e8aa96fd83b 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkRocksDbStore.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkRocksDbStore.java
@@ -31,6 +31,9 @@
 import java.nio.charset.Charset;
 import java.nio.file.Paths;
 
+/**
+ * Benchmark rocksdb store.
+ */
 @State(Scope.Thread)
 public class BenchMarkRocksDbStore {
   private static final int DATA_LEN = 1024;
@@ -86,7 +89,8 @@ public void initialize() throws IOException {
     opts.setLevel0SlowdownWritesTrigger(20);
     opts.setLevel0StopWritesTrigger(40);
     opts.setTargetFileSizeBase(
-        (long) StorageUnit.MB.toBytes(Long.parseLong(maxBytesForLevelBase)) / 10);
+        (long) StorageUnit.MB.toBytes(Long.parseLong(maxBytesForLevelBase))
+            / 10);
     opts.setMaxBackgroundCompactions(8);
     opts.setUseFsync(false);
     opts.setBytesPerSync(8388608);


 
