Posted to common-commits@hadoop.apache.org by ae...@apache.org on 2019/02/15 07:41:22 UTC

[hadoop] branch trunk updated: HDDS-1103. Fix rat/findbugs/checkstyle errors in ozone/hdds projects. Contributed by Elek, Marton.

This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
     new 75e15cc  HDDS-1103. Fix rat/findbugs/checkstyle errors in ozone/hdds projects. Contributed by Elek, Marton.
75e15cc is described below

commit 75e15cc0c4c237e7f94e8cd2ea1dde0773e954b4
Author: Anu Engineer <ae...@apache.org>
AuthorDate: Thu Feb 14 23:33:25 2019 -0800

    HDDS-1103. Fix rat/findbugs/checkstyle errors in ozone/hdds projects.
    Contributed by Elek, Marton.
---
 .../java/org/apache/hadoop/hdds/HddsUtils.java     |  1 -
 .../org/apache/hadoop/hdds/scm/ScmConfigKeys.java  |  3 +-
 .../certificate/authority/CertificateServer.java   |  5 +-
 .../client/DefaultCertificateClient.java           | 11 ++--
 .../certificate/client/OMCertificateClient.java    |  6 +-
 .../hadoop/hdds/security/x509/package-info.java    |  4 +-
 .../client/TestCertificateClientInit.java          |  1 +
 .../dev-support/findbugsExcludeFile.xml            | 12 ++++
 hadoop-hdds/container-service/pom.xml              |  6 ++
 .../common/transport/server/XceiverServer.java     |  9 +--
 .../server/ratis/ContainerStateMachine.java        |  1 +
 .../container/common/volume/AbstractFuture.java    | 45 +++++++------
 .../ozone/container/common/volume/HddsVolume.java  |  1 +
 .../container/common/volume/HddsVolumeChecker.java | 37 ++++++-----
 .../common/volume/ThrottledAsyncChecker.java       |  6 +-
 .../container/common/volume/TimeoutFuture.java     |  2 +-
 .../ozone/container/common/volume/VolumeSet.java   | 76 ++++++++++------------
 .../common/volume/TestVolumeSetDiskChecks.java     | 57 ++++++++--------
 .../TestKeyValueHandlerWithUnhealthyContainer.java | 70 ++++++++++----------
 hadoop-hdds/pom.xml                                |  1 +
 .../hdds/scm/pipeline/RatisPipelineUtils.java      | 16 ++---
 hadoop-hdfs-project/hadoop-hdfs/pom.xml            |  7 ++
 .../server/datanode/checker/AbstractFuture.java    |  1 +
 hadoop-ozone/Jenkinsfile                           | 69 +++++++++++---------
 .../ozone/client/io/BlockOutputStreamEntry.java    | 62 ++++++++----------
 .../hadoop/ozone/client/io/OzoneInputStream.java   |  2 -
 .../hadoop/ozone/om/helpers/OmBucketArgs.java      |  1 -
 .../apache/hadoop/ozone/om/helpers/OmKeyInfo.java  |  6 --
 .../hadoop/ozone/om/helpers/WithMetadata.java      |  1 +
 .../hadoop/ozone/security/OzoneSecretManager.java  |  1 +
 hadoop-ozone/dev-support/checks/findbugs.sh        |  7 +-
 .../{basic => auditparser}/auditparser.robot       |  0
 hadoop-ozone/dist/src/main/smoketest/test.sh       | 19 +++---
 .../client/rpc/TestOzoneRpcClientAbstract.java     |  6 +-
 .../hadoop/ozone/client/rpc/TestReadRetries.java   |  2 +-
 .../impl/TestContainerDeletionChoosingPolicy.java  |  3 +-
 .../server/TestSecureContainerServer.java          |  3 +-
 .../org/apache/hadoop/ozone/om/OMNodeDetails.java  |  4 ++
 .../ozone/om/ratis/OzoneManagerRatisServer.java    |  2 +
 .../protocolPB/OzoneManagerRequestHandler.java     |  2 +-
 .../web/ozShell/token/CancelTokenHandler.java      |  4 +-
 .../ozone/web/ozShell/token/PrintTokenHandler.java |  4 +-
 .../ozone/web/ozShell/token/RenewTokenHandler.java |  4 +-
 hadoop-ozone/ozonefs-lib-legacy/pom.xml            |  7 ++
 hadoop-ozone/ozonefs-lib/pom.xml                   |  7 ++
 hadoop-ozone/ozonefs/pom.xml                       |  7 ++
 .../hadoop/fs/ozone/OzoneClientAdapterFactory.java |  2 +
 .../apache/hadoop/fs/ozone/OzoneFileSystem.java    |  2 +-
 hadoop-ozone/pom.xml                               |  1 +
 hadoop-ozone/tools/pom.xml                         |  7 ++
 .../hadoop/ozone/freon/RandomKeyGenerator.java     |  2 +
 .../apache/hadoop/ozone/fsck/BlockIdDetails.java   | 27 ++++----
 52 files changed, 362 insertions(+), 280 deletions(-)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
index 4cc5a75..9bae6d8 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
@@ -35,7 +35,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.metrics2.util.MBeans;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index 14a3913..1f84ebe 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -219,7 +219,8 @@ public final class ScmConfigKeys {
       "ozone.scm.https-address";
   public static final String HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY =
       "hdds.scm.kerberos.keytab.file";
-  public static final String HDDS_SCM_KERBEROS_PRINCIPAL_KEY = "hdds.scm.kerberos.principal";
+  public static final String HDDS_SCM_KERBEROS_PRINCIPAL_KEY =
+      "hdds.scm.kerberos.principal";
   public static final String OZONE_SCM_HTTP_BIND_HOST_DEFAULT = "0.0.0.0";
   public static final int OZONE_SCM_HTTP_BIND_PORT_DEFAULT = 9876;
   public static final int OZONE_SCM_HTTPS_BIND_PORT_DEFAULT = 9877;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CertificateServer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CertificateServer.java
index 238e9b0..944883b 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CertificateServer.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CertificateServer.java
@@ -65,8 +65,9 @@ public interface CertificateServer {
    * approved.
    * @throws SCMSecurityException - on Error.
    */
-  Future<X509CertificateHolder>
-      requestCertificate(PKCS10CertificationRequest csr, CertificateApprover.ApprovalType type)
+  Future<X509CertificateHolder> requestCertificate(
+      PKCS10CertificationRequest csr,
+      CertificateApprover.ApprovalType type)
       throws SCMSecurityException;
 
 
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java
index b407d77..466e1c5 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java
@@ -34,6 +34,7 @@ import org.slf4j.Logger;
 import java.io.ByteArrayInputStream;
 import java.io.IOException;
 import java.io.InputStream;
+import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.security.InvalidKeyException;
@@ -524,10 +525,9 @@ public abstract class DefaultCertificateClient implements CertificateClient {
         return FAILURE;
       }
     default:
-      getLogger().error("Unexpected case: {}, Private key:{} , " +
-          "public key:{}, certificate:{}", init,
-          ((init.ordinal() & 1 << 2) == 1), ((init.ordinal() & 1 << 1) == 1),
-          ((init.ordinal() & 1 << 0) == 1));
+      getLogger().error("Unexpected case: {} (private/public/cert)",
+          Integer.toBinaryString(init.ordinal()));
+
       return FAILURE;
     }
   }
@@ -584,7 +584,8 @@ public abstract class DefaultCertificateClient implements CertificateClient {
    * */
   protected boolean validateKeyPair(PublicKey pubKey)
       throws CertificateException {
-    byte[] challenge = RandomStringUtils.random(1000).getBytes();
+    byte[] challenge = RandomStringUtils.random(1000).getBytes(
+        StandardCharsets.UTF_8);
     byte[]  sign = signDataStream(new ByteArrayInputStream(challenge));
     return verifySignature(challenge, sign, pubKey);
   }
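
A note on the logging change above: the removed expressions of the form
((init.ordinal() & 1 << 2) == 1) could never be true for the upper bits,
because masking with (1 << 2) yields 0 or 4, never 1. Logging the ordinal in
binary, as the new code does, carries the same private/public/cert flags in
one token. A minimal standalone sketch of decoding such flags correctly
(example value assumed, not code from the commit):

    class InitFlagsDemo {
      public static void main(String[] args) {
        int ordinal = 5;                                    // example: 0b101
        boolean hasPrivateKey = (ordinal & (1 << 2)) != 0;  // true
        boolean hasPublicKey = (ordinal & (1 << 1)) != 0;   // false
        boolean hasCert = (ordinal & 1) != 0;               // true
        System.out.println(Integer.toBinaryString(ordinal)); // prints "101"
      }
    }
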
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/OMCertificateClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/OMCertificateClient.java
index bddcb37..8938a15 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/OMCertificateClient.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/OMCertificateClient.java
@@ -90,10 +90,8 @@ public class OMCertificateClient extends DefaultCertificateClient {
         return FAILURE;
       }
     default:
-      LOG.error("Unexpected case: {}, Private key:{} , " +
-              "public key:{}, certificate:{}", init,
-          ((init.ordinal() & 1 << 2) == 1), ((init.ordinal() & 1 << 1) == 1),
-          ((init.ordinal() & 1 << 0) == 1));
+      LOG.error("Unexpected case: {} (private/public/cert)",
+          Integer.toBinaryString(init.ordinal()));
       return FAILURE;
     }
   }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/package-info.java
index 0a327ae..a6369c6 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/package-info.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/package-info.java
@@ -73,8 +73,8 @@ data node not only sends the CSR but signs the request with a shared secret
 with SCM. SCM then can issue a certificate without the intervention of a
 human administrator.
 
-The last, TESTING method which never should be used other than in development and
-testing clusters, is merely a mechanism to bypass all identity checks. If
+The last, TESTING method which never should be used other than in development
+ and testing clusters, is merely a mechanism to bypass all identity checks. If
 this flag is setup, then CA will issue a CSR if the base approves all fields.
 
  * Please do not use this mechanism(TESTING) for any purpose other than
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestCertificateClientInit.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestCertificateClientInit.java
index e076267..2bdf746 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestCertificateClientInit.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestCertificateClientInit.java
@@ -56,6 +56,7 @@ import static org.junit.Assert.assertTrue;
  * Test class for {@link DefaultCertificateClient}.
  */
 @RunWith(Parameterized.class)
+@SuppressWarnings("visibilitymodifier")
 public class TestCertificateClientInit {
 
   private CertificateClient dnCertificateClient;
diff --git a/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml b/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml
index 3571a89..18128e8 100644
--- a/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml
@@ -18,4 +18,16 @@
   <Match>
     <Package name="org.apache.hadoop.hdds.protocol.proto"/>
   </Match>
+  <Match>
+    <Class name="org.apache.hadoop.ozone.container.common.volume.AbstractFuture" />
+    <Bug pattern="DLS_DEAD_STORE_OF_CLASS_LITERAL" />
+  </Match>
+  <Match>
+    <Class name="org.apache.hadoop.ozone.container.common.volume.AbstractFuture" />
+    <Bug pattern="DLS_DEAD_LOCAL_STORE" />
+  </Match>
+  <Match>
+    <Class name="org.apache.hadoop.ozone.container.common.volume.AbstractFuture" />
+    <Bug pattern="NS_DANGEROUS_NON_SHORT_CIRCUIT" />
+  </Match>
 </FindBugsFilter>
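
The same suppressions can also be expressed in code through the findbugs
annotations, which this commit wires in as well (see the @SuppressFBWarnings
import added to AbstractFuture and the provided-scope findbugs dependency in
the pom.xml hunks below). A minimal sketch of the annotation form, not taken
from the commit:

    import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;

    public class Example {
      @SuppressFBWarnings("DLS_DEAD_LOCAL_STORE")
      static void warmUp() {
        Class<?> ensureLoaded = Example.class; // dead store kept on purpose
      }
    }
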
diff --git a/hadoop-hdds/container-service/pom.xml b/hadoop-hdds/container-service/pom.xml
index cfbdc9f..cbb3b89 100644
--- a/hadoop-hdds/container-service/pom.xml
+++ b/hadoop-hdds/container-service/pom.xml
@@ -50,6 +50,12 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>snakeyaml</artifactId>
       <version>1.8</version>
     </dependency>
+    <dependency>
+      <groupId>com.google.code.findbugs</groupId>
+      <artifactId>findbugs</artifactId>
+      <version>3.0.1</version>
+      <scope>provided</scope>
+    </dependency>
 
     <dependency>
       <groupId>io.dropwizard.metrics</groupId>
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServer.java
index 7691bdd..ea9f5cd 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServer.java
@@ -18,7 +18,8 @@
 
 package org.apache.hadoop.ozone.container.common.transport.server;
 
-import com.google.common.annotations.VisibleForTesting;
+import java.io.IOException;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -28,8 +29,8 @@ import org.apache.hadoop.hdds.security.token.TokenVerifier;
 import org.apache.hadoop.hdds.security.x509.SecurityConfig;
 import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
 
-import java.io.IOException;
-import java.util.Objects;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
 
 /**
  * A server endpoint that acts as the communication layer for Ozone containers.
@@ -40,7 +41,7 @@ public abstract class XceiverServer implements XceiverServerSpi {
   private final TokenVerifier tokenVerifier;
 
   public XceiverServer(Configuration conf) {
-    Objects.nonNull(conf);
+    Preconditions.checkNotNull(conf);
     this.secConfig = new SecurityConfig(conf);
     tokenVerifier = new BlockTokenVerifier(secConfig, getCaClient());
   }
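
The replaced Objects.nonNull call is the substantive fix in this file: it is
the java.util.function-style predicate, returns a boolean, and never throws,
so the old line validated nothing. A short sketch of the three spellings
(hypothetical demo class, not code from the commit):

    import java.util.Objects;
    import com.google.common.base.Preconditions;
    import org.apache.hadoop.conf.Configuration;

    class NullCheckDemo {
      static void demo(Configuration conf) {
        boolean ok = Objects.nonNull(conf); // predicate only; never throws
        Objects.requireNonNull(conf);       // JDK check; throws NullPointerException
        Preconditions.checkNotNull(conf);   // Guava form adopted by the patch
      }
    }
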
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index 127f15b..5587488 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -138,6 +138,7 @@ public class ContainerStateMachine extends BaseStateMachine {
    */
   private final CSMMetrics metrics;
 
+  @SuppressWarnings("parameternumber")
   public ContainerStateMachine(RaftGroupId gid, ContainerDispatcher dispatcher,
       ThreadPoolExecutor chunkExecutor, XceiverServerRatis ratisServer,
       List<ExecutorService> executors, long expiryInterval,
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java
index 438692c..c0c719b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java
@@ -29,6 +29,7 @@ import com.google.common.util.concurrent.ListeningExecutorService;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.SettableFuture;
 import com.google.common.util.concurrent.Uninterruptibles;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import static java.util.concurrent.atomic.AtomicReferenceFieldUpdater
     .newUpdater;
 
@@ -116,7 +117,7 @@ public abstract class AbstractFuture<V> implements ListenableFuture<V> {
   }
 
   // Logger to log exceptions caught when running listeners.
-  private static final Logger log = Logger
+  private static final Logger LOG = Logger
       .getLogger(AbstractFuture.class.getName());
 
   // A heuristic for timed gets. If the remaining timeout is less than this,
@@ -150,8 +151,8 @@ public abstract class AbstractFuture<V> implements ListenableFuture<V> {
         // the field is definitely there.
         // For these users fallback to a suboptimal implementation, based on
         // synchronized. This will be a definite performance hit to those users.
-        log.log(Level.SEVERE, "UnsafeAtomicHelper is broken!", unsafeFailure);
-        log.log(
+        LOG.log(Level.SEVERE, "UnsafeAtomicHelper is broken!", unsafeFailure);
+        LOG.log(
             Level.SEVERE, "SafeAtomicHelper is broken!",
             atomicReferenceFieldUpdaterFailure);
         helper = new SynchronizedHelper();
@@ -162,12 +163,14 @@ public abstract class AbstractFuture<V> implements ListenableFuture<V> {
     // Prevent rare disastrous classloading in first call to LockSupport.park.
     // See: https://bugs.openjdk.java.net/browse/JDK-8074773
     @SuppressWarnings("unused")
+    @SuppressFBWarnings
     Class<?> ensureLoaded = LockSupport.class;
   }
 
   /**
    * Waiter links form a Treiber stack, in the {@link #waiters} field.
    */
+  @SuppressWarnings("visibilitymodifier")
   private static final class Waiter {
     static final Waiter TOMBSTONE = new Waiter(false /* ignored param */);
 
@@ -252,6 +255,7 @@ public abstract class AbstractFuture<V> implements ListenableFuture<V> {
   /**
    * Listeners also form a stack through the {@link #listeners} field.
    */
+  @SuppressWarnings("visibilitymodifier")
   private static final class Listener {
     static final Listener TOMBSTONE = new Listener(null, null);
     final Runnable task;
@@ -276,16 +280,17 @@ public abstract class AbstractFuture<V> implements ListenableFuture<V> {
    * A special value to represent failure, when {@link #setException} is
    * called successfully.
    */
+  @SuppressWarnings("visibilitymodifier")
   private static final class Failure {
     static final Failure FALLBACK_INSTANCE =
         new Failure(
-            new Throwable("Failure occurred while trying to finish a future" +
-                ".") {
-              @Override
-              public synchronized Throwable fillInStackTrace() {
-                return this; // no stack trace
-              }
-            });
+          new Throwable("Failure occurred while trying to finish a future" +
+              ".") {
+            @Override
+            public synchronized Throwable fillInStackTrace() {
+              return this; // no stack trace
+            }
+          });
     final Throwable exception;
 
     Failure(Throwable exception) {
@@ -296,6 +301,7 @@ public abstract class AbstractFuture<V> implements ListenableFuture<V> {
   /**
    * A special value to represent cancellation and the 'wasInterrupted' bit.
    */
+  @SuppressWarnings("visibilitymodifier")
   private static final class Cancellation {
     final boolean wasInterrupted;
     @Nullable final Throwable cause;
@@ -309,6 +315,7 @@ public abstract class AbstractFuture<V> implements ListenableFuture<V> {
   /**
    * A special value that encodes the 'setFuture' state.
    */
+  @SuppressWarnings("visibilitymodifier")
   private static final class SetFuture<V> implements Runnable {
     final AbstractFuture<V> owner;
     final ListenableFuture<? extends V> future;
@@ -711,8 +718,8 @@ public abstract class AbstractFuture<V> implements ListenableFuture<V> {
    * @param value the value to be used as the result
    * @return true if the attempt was accepted, completing the {@code Future}
    */
-  protected boolean set(@Nullable V value) {
-    Object valueToSet = value == null ? NULL : value;
+  protected boolean set(@Nullable V val) {
+    Object valueToSet = val == null ? NULL : val;
     if (ATOMIC_HELPER.casValue(this, null, valueToSet)) {
       complete(this);
       return true;
@@ -769,13 +776,14 @@ public abstract class AbstractFuture<V> implements ListenableFuture<V> {
    * @since 19.0
    */
   @Beta
+  @SuppressWarnings("deadstore")
   protected boolean setFuture(ListenableFuture<? extends V> future) {
     checkNotNull(future);
     Object localValue = value;
     if (localValue == null) {
       if (future.isDone()) {
-        Object value = getFutureValue(future);
-        if (ATOMIC_HELPER.casValue(this, null, value)) {
+        Object val = getFutureValue(future);
+        if (ATOMIC_HELPER.casValue(this, null, val)) {
           complete(this);
           return true;
         }
@@ -950,10 +958,8 @@ public abstract class AbstractFuture<V> implements ListenableFuture<V> {
     do {
       head = waiters;
     } while (!ATOMIC_HELPER.casWaiters(this, head, Waiter.TOMBSTONE));
-    for (
-        Waiter currentWaiter = head;
-        currentWaiter != null;
-        currentWaiter = currentWaiter.next) {
+    for (Waiter currentWaiter = head;
+         currentWaiter != null; currentWaiter = currentWaiter.next) {
       currentWaiter.unpark();
     }
   }
@@ -995,7 +1001,7 @@ public abstract class AbstractFuture<V> implements ListenableFuture<V> {
       // Log it and keep going -- bad runnable and/or executor. Don't punish
       // the other runnables if we're given a bad one. We only catch
       // RuntimeException because we want Errors to propagate up.
-      log.log(
+      LOG.log(
           Level.SEVERE,
           "RuntimeException while executing runnable " + runnable + " with " +
               "executor " + executor,
@@ -1147,6 +1153,7 @@ public abstract class AbstractFuture<V> implements ListenableFuture<V> {
   /**
    * {@link AtomicHelper} based on {@link AtomicReferenceFieldUpdater}.
    */
+  @SuppressWarnings("visibilitymodifier")
   private static final class SafeAtomicHelper extends AtomicHelper {
     final AtomicReferenceFieldUpdater<Waiter, Thread> waiterThreadUpdater;
     final AtomicReferenceFieldUpdater<Waiter, Waiter> waiterNextUpdater;
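
The set(...) parameter rename in this file shows why checkstyle objects to
parameters that shadow fields (the HiddenField check): once the parameter is
no longer named value, any leftover reference to that name silently resolves
to the volatile field. A condensed sketch of the hazard (simplified types,
not the commit's code):

    class Holder<V> {
      private volatile Object value;               // field
      boolean set(V val) {                         // parameter, was "value"
        Object toSet = val == null ? "NULL" : val; // must read the parameter;
        // "value == null ? ..." would compile but would test the field
        this.value = toSet;
        return true;
      }
    }
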
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
index 4cf6c3d..ab18273 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
@@ -66,6 +66,7 @@ import java.util.UUID;
  */
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
+@SuppressWarnings("finalclass")
 public class HddsVolume
     implements Checkable<Boolean, VolumeCheckResult> {
 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolumeChecker.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolumeChecker.java
index 6df81df..cd08cd2 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolumeChecker.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolumeChecker.java
@@ -32,6 +32,8 @@ import org.apache.hadoop.hdfs.server.datanode.checker.AsyncChecker;
 import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.apache.hadoop.util.Timer;
+
+import static org.apache.hadoop.hdfs.server.datanode.DataNode.MAX_VOLUME_FAILURE_TOLERATED_LIMIT;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -141,7 +143,7 @@ public class HddsVolumeChecker {
 
     lastAllVolumesCheck = timer.monotonicNow() - minDiskCheckGapMs;
 
-    if (maxVolumeFailuresTolerated < DataNode.MAX_VOLUME_FAILURE_TOLERATED_LIMIT) {
+    if (maxVolumeFailuresTolerated < MAX_VOLUME_FAILURE_TOLERATED_LIMIT) {
       throw new DiskErrorException("Invalid value configured for "
           + DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY + " - "
           + maxVolumeFailuresTolerated + " "
@@ -310,21 +312,21 @@ public class HddsVolumeChecker {
 
     @Override
     public void onSuccess(@Nonnull VolumeCheckResult result) {
-      switch(result) {
-        case HEALTHY:
-        case DEGRADED:
-          LOG.debug("Volume {} is {}.", volume, result);
-          markHealthy();
-          break;
-        case FAILED:
-          LOG.warn("Volume {} detected as being unhealthy", volume);
-          markFailed();
-          break;
-        default:
-          LOG.error("Unexpected health check result {} for volume {}",
-              result, volume);
-          markHealthy();
-          break;
+      switch (result) {
+      case HEALTHY:
+      case DEGRADED:
+        LOG.debug("Volume {} is {}.", volume, result);
+        markHealthy();
+        break;
+      case FAILED:
+        LOG.warn("Volume {} detected as being unhealthy", volume);
+        markFailed();
+        break;
+      default:
+        LOG.error("Unexpected health check result {} for volume {}",
+            result, volume);
+        markHealthy();
+        break;
       }
       cleanup();
     }
@@ -378,7 +380,8 @@ public class HddsVolumeChecker {
     try {
       delegateChecker.shutdownAndWait(gracePeriod, timeUnit);
     } catch (InterruptedException e) {
-      LOG.warn("{} interrupted during shutdown.", this.getClass().getSimpleName());
+      LOG.warn("{} interrupted during shutdown.",
+          this.getClass().getSimpleName());
       Thread.currentThread().interrupt();
     }
   }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/ThrottledAsyncChecker.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/ThrottledAsyncChecker.java
index 3be24e4..d1b2569 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/ThrottledAsyncChecker.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/ThrottledAsyncChecker.java
@@ -87,7 +87,8 @@ public class ThrottledAsyncChecker<K, V> implements AsyncChecker<K, V> {
    * the results of the operation.
    * Protected by the object lock.
    */
-  private final Map<Checkable, ThrottledAsyncChecker.LastCheckResult<V>> completedChecks;
+  private final Map<Checkable, ThrottledAsyncChecker.LastCheckResult<V>>
+      completedChecks;
 
   public ThrottledAsyncChecker(final Timer timer,
                                final long minMsBetweenChecks,
@@ -125,7 +126,8 @@ public class ThrottledAsyncChecker<K, V> implements AsyncChecker<K, V> {
     }
 
     if (completedChecks.containsKey(target)) {
-      final ThrottledAsyncChecker.LastCheckResult<V> result = completedChecks.get(target);
+      final ThrottledAsyncChecker.LastCheckResult<V> result =
+          completedChecks.get(target);
       final long msSinceLastCheck = timer.monotonicNow() - result.completedAt;
       if (msSinceLastCheck < minMsBetweenChecks) {
         LOG.debug("Skipped checking {}. Time since last check {}ms " +
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/TimeoutFuture.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/TimeoutFuture.java
index a7a492a..626814e 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/TimeoutFuture.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/TimeoutFuture.java
@@ -94,7 +94,7 @@ final class TimeoutFuture<V> extends AbstractFuture.TrustedFuture<V> {
    */
   private static final class Fire<V> implements Runnable {
     @Nullable
-    TimeoutFuture<V> timeoutFutureRef;
+    private TimeoutFuture<V> timeoutFutureRef;
 
     Fire(
         TimeoutFuture<V> timeoutFuture) {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
index 7addd63..6fba4fb 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
@@ -18,33 +18,6 @@
 
 package org.apache.hadoop.ozone.container.common.volume;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-import org.apache.curator.shaded.com.google.common.collect.ImmutableSet;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.StorageType;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
-import static org.apache.hadoop.util.RunJar.SHUTDOWN_HOOK_PRIORITY;
-
-import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.apache.hadoop.ozone.common.InconsistentStorageStateException;
-import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport;
-import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
-import org.apache.hadoop.ozone.container.common.volume.HddsVolume.VolumeState;
-import org.apache.hadoop.util.DiskChecker;
-import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
-import org.apache.hadoop.util.ShutdownHookManager;
-import org.apache.hadoop.util.Timer;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -53,14 +26,35 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.ScheduledFuture;
-import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
+import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
+import org.apache.hadoop.ozone.common.InconsistentStorageStateException;
+import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport;
+import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
+import org.apache.hadoop.ozone.container.common.volume.HddsVolume.VolumeState;
+import org.apache.hadoop.util.DiskChecker;
+import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
+import org.apache.hadoop.util.ShutdownHookManager;
+import org.apache.hadoop.util.Timer;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
+import static org.apache.hadoop.util.RunJar.SHUTDOWN_HOOK_PRIORITY;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 /**
  * VolumeSet to manage HDDS volumes in a DataNode.
  */
@@ -91,8 +85,8 @@ public class VolumeSet {
   /**
    * An executor for periodic disk checks.
    */
-  final ScheduledExecutorService diskCheckerservice;
-  final ScheduledFuture<?> periodicDiskChecker;
+  private final ScheduledExecutorService diskCheckerservice;
+  private final ScheduledFuture<?> periodicDiskChecker;
 
   private static final long DISK_CHECK_INTERVAL_MINUTES = 15;
 
@@ -125,21 +119,21 @@ public class VolumeSet {
     this.diskCheckerservice = Executors.newScheduledThreadPool(
         1, r -> new Thread(r, "Periodic HDDS volume checker"));
     this.periodicDiskChecker =
-        diskCheckerservice.scheduleWithFixedDelay(() -> {
-            try {
-              checkAllVolumes();
-            } catch (IOException e) {
-              LOG.warn("Exception while checking disks", e);
-            }
-          }, DISK_CHECK_INTERVAL_MINUTES, DISK_CHECK_INTERVAL_MINUTES,
-              TimeUnit.MINUTES);
+      diskCheckerservice.scheduleWithFixedDelay(() -> {
+        try {
+          checkAllVolumes();
+        } catch (IOException e) {
+          LOG.warn("Exception while checking disks", e);
+        }
+      }, DISK_CHECK_INTERVAL_MINUTES, DISK_CHECK_INTERVAL_MINUTES,
+        TimeUnit.MINUTES);
     initializeVolumeSet();
   }
 
   @VisibleForTesting
-  HddsVolumeChecker getVolumeChecker(Configuration conf)
+  HddsVolumeChecker getVolumeChecker(Configuration configuration)
       throws DiskChecker.DiskErrorException {
-    return new HddsVolumeChecker(conf, new Timer());
+    return new HddsVolumeChecker(configuration, new Timer());
   }
 
   /**
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
index 687a12d..5cb218c 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
@@ -18,38 +18,34 @@
 
 package org.apache.hadoop.ozone.container.common.volume;
 
-import com.google.common.collect.Iterables;
-import org.apache.commons.io.FileUtils;
-import org.apache.curator.shaded.com.google.common.collect.ImmutableSet;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdfs.server.datanode.checker.AsyncChecker;
-import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.util.DiskChecker.DiskErrorException;
-import org.apache.hadoop.util.Timer;
-import org.junit.After;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.junit.rules.Timeout;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import java.io.File;
 import java.io.IOException;
-import java.net.BindException;
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.Collections;
 import java.util.List;
 import java.util.Set;
 import java.util.UUID;
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.DiskChecker.DiskErrorException;
+import org.apache.hadoop.util.Timer;
+
+import com.google.common.collect.Iterables;
+import org.apache.commons.io.FileUtils;
+import org.apache.curator.shaded.com.google.common.collect.ImmutableSet;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
 import static org.hamcrest.CoreMatchers.is;
+import org.junit.After;
 import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 
 /**
@@ -66,7 +62,7 @@ public class TestVolumeSetDiskChecks {
   @Rule
   public ExpectedException thrown = ExpectedException.none();
 
-  Configuration conf = null;
+  private Configuration conf = null;
 
   /**
    * Cleanup volume directories.
@@ -117,14 +113,15 @@ public class TestVolumeSetDiskChecks {
     final VolumeSet volumeSet = new VolumeSet(
         UUID.randomUUID().toString(), conf) {
       @Override
-      HddsVolumeChecker getVolumeChecker(Configuration conf)
+      HddsVolumeChecker getVolumeChecker(Configuration configuration)
           throws DiskErrorException {
-        return new DummyChecker(conf, new Timer(), numBadVolumes);
+        return new DummyChecker(configuration, new Timer(), numBadVolumes);
       }
     };
 
     assertThat(volumeSet.getFailedVolumesList().size(), is(numBadVolumes));
-    assertThat(volumeSet.getVolumesList().size(), is(numVolumes - numBadVolumes));
+    assertThat(volumeSet.getVolumesList().size(),
+        is(numVolumes - numBadVolumes));
   }
 
   /**
@@ -139,9 +136,9 @@ public class TestVolumeSetDiskChecks {
     final VolumeSet volumeSet = new VolumeSet(
         UUID.randomUUID().toString(), conf) {
       @Override
-      HddsVolumeChecker getVolumeChecker(Configuration conf)
+      HddsVolumeChecker getVolumeChecker(Configuration configuration)
           throws DiskErrorException {
-        return new DummyChecker(conf, new Timer(), numVolumes);
+        return new DummyChecker(configuration, new Timer(), numVolumes);
       }
     };
   }
@@ -153,13 +150,13 @@ public class TestVolumeSetDiskChecks {
    * @param numDirs
    */
   private Configuration getConfWithDataNodeDirs(int numDirs) {
-    final Configuration conf = new OzoneConfiguration();
+    final Configuration ozoneConf = new OzoneConfiguration();
     final List<String> dirs = new ArrayList<>();
     for (int i = 0; i < numDirs; ++i) {
       dirs.add(GenericTestUtils.getRandomizedTestDir().getPath());
     }
-    conf.set(DFS_DATANODE_DATA_DIR_KEY, String.join(",", dirs));
-    return conf;
+    ozoneConf.set(DFS_DATANODE_DATA_DIR_KEY, String.join(",", dirs));
+    return ozoneConf;
   }
 
   /**
@@ -169,7 +166,7 @@ public class TestVolumeSetDiskChecks {
   static class DummyChecker extends HddsVolumeChecker {
     private final int numBadVolumes;
 
-    public DummyChecker(Configuration conf, Timer timer, int numBadVolumes)
+    DummyChecker(Configuration conf, Timer timer, int numBadVolumes)
         throws DiskErrorException {
       super(conf, timer);
       this.numBadVolumes = numBadVolumes;
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java
index e9443b1..e3ae56a 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.ozone.container.keyvalue;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
@@ -167,10 +168,10 @@ public class TestKeyValueHandlerWithUnhealthyContainer {
    * @param cmdType type of the container command.
    * @return
    */
-  private ContainerProtos.ContainerCommandRequestProto getDummyCommandRequestProto(
+  private ContainerCommandRequestProto getDummyCommandRequestProto(
       ContainerProtos.Type cmdType) {
-    final ContainerProtos.ContainerCommandRequestProto.Builder builder =
-        ContainerProtos.ContainerCommandRequestProto.newBuilder()
+    final ContainerCommandRequestProto.Builder builder =
+        ContainerCommandRequestProto.newBuilder()
             .setCmdType(cmdType)
             .setContainerID(DUMMY_CONTAINER_ID)
             .setDatanodeUuid(DATANODE_UUID);
@@ -190,36 +191,39 @@ public class TestKeyValueHandlerWithUnhealthyContainer {
                 .build())
             .build();
 
-    switch(cmdType) {
-      case ReadContainer:
-        builder.setReadContainer(ContainerProtos.ReadContainerRequestProto.newBuilder().build());
-        break;
-      case GetBlock:
-        builder.setGetBlock(ContainerProtos.GetBlockRequestProto.newBuilder()
-            .setBlockID(fakeBlockId).build());
-        break;
-      case GetCommittedBlockLength:
-        builder.setGetCommittedBlockLength(
-            ContainerProtos.GetCommittedBlockLengthRequestProto.newBuilder()
-                .setBlockID(fakeBlockId).build());
-      case ReadChunk:
-        builder.setReadChunk(ContainerProtos.ReadChunkRequestProto.newBuilder()
-            .setBlockID(fakeBlockId).setChunkData(fakeChunkInfo).build());
-        break;
-      case DeleteChunk:
-        builder.setDeleteChunk(ContainerProtos.DeleteChunkRequestProto.newBuilder()
-            .setBlockID(fakeBlockId).setChunkData(fakeChunkInfo).build());
-        break;
-      case GetSmallFile:
-        builder.setGetSmallFile(ContainerProtos.GetSmallFileRequestProto.newBuilder()
-            .setBlock(ContainerProtos.GetBlockRequestProto.newBuilder()
-                .setBlockID(fakeBlockId)
-                .build())
-            .build());
-        break;
-
-      default:
-        Assert.fail("Unhandled request type " + cmdType + " in unit test");
+    switch (cmdType) {
+    case ReadContainer:
+      builder.setReadContainer(
+          ContainerProtos.ReadContainerRequestProto.newBuilder().build());
+      break;
+    case GetBlock:
+      builder.setGetBlock(ContainerProtos.GetBlockRequestProto.newBuilder()
+          .setBlockID(fakeBlockId).build());
+      break;
+    case GetCommittedBlockLength:
+      builder.setGetCommittedBlockLength(
+          ContainerProtos.GetCommittedBlockLengthRequestProto.newBuilder()
+              .setBlockID(fakeBlockId).build());
+    case ReadChunk:
+      builder.setReadChunk(ContainerProtos.ReadChunkRequestProto.newBuilder()
+          .setBlockID(fakeBlockId).setChunkData(fakeChunkInfo).build());
+      break;
+    case DeleteChunk:
+      builder
+          .setDeleteChunk(ContainerProtos.DeleteChunkRequestProto.newBuilder()
+              .setBlockID(fakeBlockId).setChunkData(fakeChunkInfo).build());
+      break;
+    case GetSmallFile:
+      builder
+          .setGetSmallFile(ContainerProtos.GetSmallFileRequestProto.newBuilder()
+              .setBlock(ContainerProtos.GetBlockRequestProto.newBuilder()
+                  .setBlockID(fakeBlockId)
+                  .build())
+              .build());
+      break;
+
+    default:
+      Assert.fail("Unhandled request type " + cmdType + " in unit test");
     }
 
     return builder.build();
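
One detail the reformatted switch preserves from the original: the
GetCommittedBlockLength case has no break and falls through into ReadChunk,
so that request type also receives a ReadChunk payload. If the fall-through
is unintended, the fix is a single statement (sketch, not part of the
commit):

    case GetCommittedBlockLength:
      builder.setGetCommittedBlockLength(
          ContainerProtos.GetCommittedBlockLengthRequestProto.newBuilder()
              .setBlockID(fakeBlockId).build());
      break; // absent in both versions; without it, ReadChunk also runs
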
diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml
index 1d39476..a4d77ac 100644
--- a/hadoop-hdds/pom.xml
+++ b/hadoop-hdds/pom.xml
@@ -290,6 +290,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <plugin>
         <groupId>org.codehaus.mojo</groupId>
         <artifactId>findbugs-maven-plugin</artifactId>
+        <version>3.0.4</version>
         <configuration>
           <excludeFilterFile combine.self="override"></excludeFilterFile>
         </configuration>
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java
index 5e596b5..3029f70 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java
@@ -261,15 +261,13 @@ public final class RatisPipelineUtils {
 
     for (HddsProtos.ReplicationFactor factor : HddsProtos.ReplicationFactor
         .values()) {
-      while (true) {
-        try {
-          pipelineManager.createPipeline(type, factor);
-        } catch (IOException ioe) {
-          break;
-        } catch (Throwable t) {
-          LOG.error("Error while creating pipelines {}", t);
-          break;
-        }
+      try {
+        pipelineManager.createPipeline(type, factor);
+      } catch (IOException ioe) {
+        break;
+      } catch (Throwable t) {
+        LOG.error("Error while creating pipelines {}", t);
+        break;
       }
     }
     isPipelineCreatorRunning.set(false);
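
The LOG.error call kept by this hunk pairs a {} placeholder with a lone
Throwable argument, which SLF4J handles inconsistently across versions:
1.7.x formats the throwable's toString() into the placeholder and drops the
stack trace, while later releases keep the stack trace but leave a literal
{} in the message. Either way, the conventional form omits the placeholder
(sketch, not part of the commit):

    } catch (Throwable t) {
      LOG.error("Error while creating pipelines", t); // stack trace attached
      break;
    }
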
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index c35475f..0d4d610 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -139,6 +139,13 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <scope>compile</scope>
     </dependency>
     <dependency>
+      <groupId>com.google.code.findbugs</groupId>
+      <artifactId>findbugs</artifactId>
+      <version>3.0.1</version>
+      <scope>provided</scope>
+    </dependency>
+
+    <dependency>
       <groupId>junit</groupId>
       <artifactId>junit</artifactId>
       <scope>test</scope>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AbstractFuture.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AbstractFuture.java
index ec2b656..06867fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AbstractFuture.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AbstractFuture.java
@@ -29,6 +29,7 @@ import com.google.common.util.concurrent.ListeningExecutorService;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.SettableFuture;
 import com.google.common.util.concurrent.Uninterruptibles;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import static java.util.concurrent.atomic.AtomicReferenceFieldUpdater
     .newUpdater;
 
diff --git a/hadoop-ozone/Jenkinsfile b/hadoop-ozone/Jenkinsfile
index 3ecd6f2..0055486 100644
--- a/hadoop-ozone/Jenkinsfile
+++ b/hadoop-ozone/Jenkinsfile
@@ -17,30 +17,26 @@
  */
 node("ubuntu") {
     docker.image('elek/ozone-build').pull()
-    docker.image('elek/ozone-build').inside {
+    docker.image('elek/ozone-build').inside("--privileged") {
 
         stage('Checkout') {
             checkout scm
+            //use this for external Jenkinsfile builds
+            //checkout poll: false, scm: [$class: 'GitSCM', branches: [[name: env.branch]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: 'github-token', url: "https://github.com/${organization}/${repository}.git"]]]
+
         }
 
         stage('Clean') {
-            status = sh returnStatus: true, script: 'mvn clean'
+            status = sh returnStatus: true, script: 'mvn clean -P hdds -am -pl :hadoop-ozone-dist '
         }
 
         stageRunner('Author', "author", {})
 
-        stageRunner('Isolation', "isolation", {})
-
-
-        stageRunner('Build', "build", {})
-
         stageRunner('Licence', "rat", {
             archiveArtifacts 'target/rat-aggregated.txt'
         }, 'artifact/target/rat-aggregated.txt/*view*/')
 
-        stageRunner('Unit test', "unit", {
-            junit '**/target/surefire-reports/*.xml'
-        }, 'testReport/')
+        stageRunner('Build', "build", {})
 
         stageRunner('Findbugs', "findbugs", {
             archiveArtifacts 'target/findbugs-all.txt'
@@ -48,9 +44,17 @@ node("ubuntu") {
         }, 'artifact/target/findbugs-all.txt/*view*/')
 
         stageRunner('Checkstyle', "checkstyle", {
-            checkstyle canComputeNew: false, canRunOnFailed: true, defaultEncoding: '', healthy: '', pattern: '**/checkstyle-result.xml', unHealthy: ''
+            checkstyle canComputeNew: false, canRunOnFailed: true, defaultEncoding: '', healthy: '', pattern: '**/checkstyle-errors.xml', unHealthy: ''
         }, 'checkstyleResult')
 
+        stageRunner('Acceptance', "acceptance", {
+             archiveArtifacts 'hadoop-ozone/dist/target/ozone-0.4.0-SNAPSHOT/smoketest/result/**'
+        })
+
+        stageRunner('Unit test', "unit", {
+            junit '**/target/surefire-reports/*.xml'
+        }, 'testReport/')
+
     }
 
 }
@@ -70,35 +74,42 @@ def stageRunner(name, type, processResult, url = '') {
     }
 }
 
+def githubStatus(name, status, description, url='') {
+  commitId = sh(returnStdout: true, script: 'git rev-parse HEAD')
+  context = 'ci/ozone/' + name
+  if (url) {
+    githubNotify account: 'apache', context: context, credentialsId: 'github-pr-ozone', description: description, repo: 'hadoop', sha: commitId, status: status, targetUrl: url
+  } else {
+    githubNotify account: 'apache', context: context, credentialsId: 'github-pr-ozone', description: description, repo: 'hadoop', sha: commitId, status: status
+  }
+}
 def prStatusStart(name) {
-    if (env.CHANGE_ID) {
-        pullRequest.createStatus(status: "pending",
-                context: 'continuous-integration/jenkins/pr-merge/' + name,
-                description: name + " is started")
-    }
+       githubStatus(name,
+                     "PENDING",
+                     name + " is started")
+
+
 }
 
 def prStatusResult(responseCode, name, url = '') {
-    status = "error"
+    status = "ERROR"
     desc = "failed"
     if (responseCode == 0) {
-        status = "success"
+        status = "SUCCESS"
         desc = "passed"
     }
-    message = name + " is " + desc
-    //System.out.println(responseCode)
-    if (env.CHANGE_ID) {
+    message = name + " check is " + desc
         if (url) {
-            pullRequest.createStatus(status: status,
-                    context: 'continuous-integration/jenkins/pr-merge/' + name,
-                    description: message,
-                    targetUrl: env.BUILD_URL + url)
+            githubStatus(name,
+                          status,
+                          message,
+                          env.BUILD_URL + url)
         } else {
-            pullRequest.createStatus(status: status,
-                    context: 'continuous-integration/jenkins/pr-merge/' + name,
-                    description: message)
+            githubStatus(name,
+                          status,
+                          message)
         }
-    }
+
     if (responseCode != 0) {
         throw new RuntimeException(message)
     }
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java
index 9c07cf6..7ba1766 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java
@@ -17,9 +17,13 @@
  */
 package org.apache.hadoop.ozone.client.io;
 
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.util.List;
+
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.XceiverClientSpi;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.storage.BlockOutputStream;
 import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier;
@@ -27,15 +31,10 @@ import org.apache.hadoop.ozone.common.Checksum;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 
-import java.io.IOException;
-import java.io.OutputStream;
-import java.nio.ByteBuffer;
-import java.util.List;
-
 /**
  * Helper class used inside {@link BlockOutputStream}.
  * */
-public class BlockOutputStreamEntry extends OutputStream {
+public final class BlockOutputStreamEntry extends OutputStream {
 
   private OutputStream outputStream;
   private BlockID blockID;
@@ -56,6 +55,7 @@ public class BlockOutputStreamEntry extends OutputStream {
   private final long watchTimeout;
   private List<ByteBuffer> bufferList;
 
+  @SuppressWarnings("parameternumber")
   private BlockOutputStreamEntry(BlockID blockID, String key,
       XceiverClientManager xceiverClientManager,
       Pipeline pipeline, String requestId, int chunkSize,
@@ -137,56 +137,48 @@ public class BlockOutputStreamEntry extends OutputStream {
       this.outputStream.close();
       // after closing the chunkOutPutStream, blockId would have been
       // reconstructed with updated bcsId
-      if (this.outputStream instanceof BlockOutputStream) {
-        this.blockID = ((BlockOutputStream) outputStream).getBlockID();
-      }
+      this.blockID = ((BlockOutputStream) outputStream).getBlockID();
     }
   }
 
   long getTotalSuccessfulFlushedData() throws IOException {
-    if (this.outputStream instanceof BlockOutputStream) {
+    if (outputStream != null) {
       BlockOutputStream out = (BlockOutputStream) this.outputStream;
       blockID = out.getBlockID();
       return out.getTotalSuccessfulFlushedData();
-    } else if (outputStream == null) {
-        // For a pre allocated block for which no write has been initiated,
-        // the OutputStream will be null here.
-        // In such cases, the default blockCommitSequenceId will be 0
-        return 0;
+    } else {
+      // For a pre allocated block for which no write has been initiated,
+      // the OutputStream will be null here.
+      // In such cases, the default blockCommitSequenceId will be 0
+      return 0;
     }
-    throw new IOException("Invalid Output Stream for Key: " + key);
   }
 
   long getWrittenDataLength() throws IOException {
-    if (this.outputStream instanceof BlockOutputStream) {
+    if (outputStream != null) {
       BlockOutputStream out = (BlockOutputStream) this.outputStream;
       return out.getWrittenDataLength();
-    } else if (outputStream == null) {
+    } else {
       // For a pre allocated block for which no write has been initiated,
       // the OutputStream will be null here.
       // In such cases, the default blockCommitSequenceId will be 0
       return 0;
     }
-    throw new IOException("Invalid Output Stream for Key: " + key);
   }
 
   void cleanup(boolean invalidateClient) throws IOException {
     checkStream();
-    if (this.outputStream instanceof BlockOutputStream) {
-      BlockOutputStream out = (BlockOutputStream) this.outputStream;
-      out.cleanup(invalidateClient);
-    }
+    BlockOutputStream out = (BlockOutputStream) this.outputStream;
+    out.cleanup(invalidateClient);
+
   }
 
   void writeOnRetry(long len) throws IOException {
     checkStream();
-    if (this.outputStream instanceof BlockOutputStream) {
-      BlockOutputStream out = (BlockOutputStream) this.outputStream;
-      out.writeOnRetry(len);
-      this.currentPosition += len;
-    } else {
-      throw new IOException("Invalid Output Stream for Key: " + key);
-    }
+    BlockOutputStream out = (BlockOutputStream) this.outputStream;
+    out.writeOnRetry(len);
+    this.currentPosition += len;
+
   }
 
   /**
@@ -229,8 +221,8 @@ public class BlockOutputStreamEntry extends OutputStream {
       return this;
     }
 
-    public Builder setPipeline(Pipeline pipeline) {
-      this.pipeline = pipeline;
+    public Builder setPipeline(Pipeline ppln) {
+      this.pipeline = ppln;
       return this;
     }
 
@@ -264,8 +256,8 @@ public class BlockOutputStreamEntry extends OutputStream {
       return this;
     }
 
-    public Builder setBufferList(List<ByteBuffer> bufferList) {
-      this.bufferList = bufferList;
+    public Builder setBufferList(List<ByteBuffer> bffrLst) {
+      this.bufferList = bffrLst;
       return this;
     }
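
Note on the hunks above: they drop the instanceof checks because, once checkStream() has passed, the only stream this entry ever holds is a BlockOutputStream; a null field simply means a pre-allocated block that nothing has been written to yet. A minimal, self-contained sketch of that null-guard pattern, using hypothetical names rather than the real Ozone classes:

    // Sketch of the invariant the refactor relies on: the field is either
    // null (block pre-allocated, no write yet) or one concrete stream type,
    // so a null check plus a plain cast replaces the instanceof check.
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.OutputStream;

    final class EntrySketch {
      private OutputStream outputStream; // stays null until the first write

      void write(int b) throws IOException {
        if (outputStream == null) {
          outputStream = new ByteArrayOutputStream(); // stand-in allocation
        }
        outputStream.write(b);
      }

      long getWrittenDataLength() {
        if (outputStream != null) {
          // safe cast: only one concrete type is ever assigned above
          return ((ByteArrayOutputStream) outputStream).size();
        }
        // pre-allocated block for which no write has been initiated
        return 0;
      }
    }
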
 
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneInputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneInputStream.java
index 92eb150..a69740f 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneInputStream.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneInputStream.java
@@ -17,8 +17,6 @@
 
 package org.apache.hadoop.ozone.client.io;
 
-import org.apache.hadoop.hdds.scm.storage.BlockInputStream;
-
 import java.io.IOException;
 import java.io.InputStream;
 
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java
index 19a2541..a6e77e2 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.ozone.om.helpers;
 
-import java.util.HashMap;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
index 7aec0e2..27a85d7 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
@@ -221,7 +221,6 @@ public final class OmKeyInfo extends WithMetadata {
     private long modificationTime;
     private HddsProtos.ReplicationType type;
     private HddsProtos.ReplicationFactor factor;
-    private boolean isMultipartKey;
     private Map<String, String> metadata;
 
     public Builder() {
@@ -275,11 +274,6 @@ public final class OmKeyInfo extends WithMetadata {
       return this;
     }
 
-    public Builder setIsMultipartKey(boolean isMultipart) {
-      this.isMultipartKey = isMultipart;
-      return this;
-    }
-
     public Builder addMetadata(String key, String value) {
       metadata.put(key, value);
       return this;
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithMetadata.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithMetadata.java
index 597e535..5c49a15 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithMetadata.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithMetadata.java
@@ -25,6 +25,7 @@ import java.util.Map;
  */
 public class WithMetadata {
 
+  @SuppressWarnings("visibilitymodifier")
   protected Map<String, String> metadata = new HashMap<>();
 
   /**
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretManager.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretManager.java
index 12be4c9..1beb7fa 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretManager.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretManager.java
@@ -60,6 +60,7 @@ public abstract class OzoneSecretManager<T extends TokenIdentifier>
   private OzoneSecretKey currentKey;
   private AtomicInteger currentKeyId;
   private AtomicInteger tokenSequenceNumber;
+  @SuppressWarnings("visibilitymodifier")
   protected final Map<Integer, OzoneSecretKey> allKeys;
 
   /**
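
The two @SuppressWarnings("visibilitymodifier") additions above target checkstyle, not javac: assuming the build wires up checkstyle's SuppressWarningsHolder and SuppressWarningsFilter modules (not shown in this patch), the annotation silences only the VisibilityModifier check on the annotated field, with check names matched ignoring case. A hedged sketch:

    // Hypothetical example: suppress only checkstyle's VisibilityModifier
    // check on one deliberately non-private field; all other checks still run.
    import java.util.HashMap;
    import java.util.Map;

    public class WithMetadataSketch {
      @SuppressWarnings("visibilitymodifier")
      protected Map<String, String> metadata = new HashMap<>();
    }
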
diff --git a/hadoop-ozone/dev-support/checks/findbugs.sh b/hadoop-ozone/dev-support/checks/findbugs.sh
index 45a4ad5..1328492 100755
--- a/hadoop-ozone/dev-support/checks/findbugs.sh
+++ b/hadoop-ozone/dev-support/checks/findbugs.sh
@@ -22,9 +22,8 @@ touch "$FINDBUGS_ALL_FILE"
 
 mvn -fn findbugs:check -Dfindbugs.failOnError=false  -am -pl :hadoop-ozone-dist -Phdds
 
-find hadoop-ozone -name findbugsXml.xml | xargs -n1 convertXmlToText >> "${FINDBUGS_ALL_FILE}"
-find hadoop-hdds -name findbugsXml.xml | xargs -n1 convertXmlToText >> "${FINDBUGS_ALL_FILE}"
-
+find hadoop-ozone -name findbugsXml.xml | xargs -n1 convertXmlToText | tee -a "${FINDBUGS_ALL_FILE}"
+find hadoop-hdds -name findbugsXml.xml | xargs -n1 convertXmlToText | tee -a "${FINDBUGS_ALL_FILE}"
 
 bugs=$(cat "$FINDBUGS_ALL_FILE" | wc -l)
 
@@ -32,4 +31,4 @@ if [[ ${bugs} -gt 0 ]]; then
    exit -1
 else
    exit 0
-fi
\ No newline at end of file
+fi
diff --git a/hadoop-ozone/dist/src/main/smoketest/basic/auditparser.robot b/hadoop-ozone/dist/src/main/smoketest/auditparser/auditparser.robot
similarity index 100%
rename from hadoop-ozone/dist/src/main/smoketest/basic/auditparser.robot
rename to hadoop-ozone/dist/src/main/smoketest/auditparser/auditparser.robot
diff --git a/hadoop-ozone/dist/src/main/smoketest/test.sh b/hadoop-ozone/dist/src/main/smoketest/test.sh
index 5e7462a..e06deac 100755
--- a/hadoop-ozone/dist/src/main/smoketest/test.sh
+++ b/hadoop-ozone/dist/src/main/smoketest/test.sh
@@ -35,7 +35,7 @@ wait_for_datanodes(){
 
      #This line checks the number of HEALTHY datanodes registered in scm over the
      # jmx HTTP servlet
-     datanodes=$(docker-compose -f "$1" exec scm curl -s 'http://localhost:9876/jmx?qry=Hadoop:service=SCMNodeManager,name=SCMNodeManagerInfo' | jq -r '.beans[0].NodeCount[] | select(.key=="HEALTHY") | .value')
+     datanodes=$(docker-compose -f "$1" exec -T scm curl -s 'http://localhost:9876/jmx?qry=Hadoop:service=SCMNodeManager,name=SCMNodeManagerInfo' | jq -r '.beans[0].NodeCount[] | select(.key=="HEALTHY") | .value')
       if [[ "$datanodes" == "3" ]]; then
 
         #It's up and running. Let's return from the function.
@@ -51,7 +51,6 @@ wait_for_datanodes(){
 
       sleep 2
    done
-
    echo "WARNING! Datanodes are not started successfully. Please check the docker-compose files"
 }
 
@@ -73,11 +72,13 @@ execute_tests(){
   docker-compose -f "$COMPOSE_FILE" down
   docker-compose -f "$COMPOSE_FILE" up -d --scale datanode=3
   wait_for_datanodes "$COMPOSE_FILE"
+  #TODO: we need to wait for the OM here
+  sleep 10
   for TEST in "${TESTS[@]}"; do
      TITLE="Ozone $TEST tests with $COMPOSE_DIR cluster"
      set +e
      OUTPUT_NAME="$COMPOSE_DIR-${TEST//\//_}"
-	  docker-compose -f "$COMPOSE_FILE" exec ozoneManager python -m robot --log NONE --report NONE "${OZONE_ROBOT_OPTS[@]}" --output "smoketest/$RESULT_DIR/robot-$OUTPUT_NAME.xml" --logtitle "$TITLE" --reporttitle "$TITLE" "smoketest/$TEST"
+	  docker-compose -f "$COMPOSE_FILE" exec -T ozoneManager python -m robot --log NONE --report NONE "${OZONE_ROBOT_OPTS[@]}" --output "smoketest/$RESULT_DIR/robot-$OUTPUT_NAME.xml" --logtitle "$TITLE" --reporttitle "$TITLE" "smoketest/$TEST"
      set -e
      docker-compose -f "$COMPOSE_FILE" logs > "$DIR/$RESULT_DIR/docker-$OUTPUT_NAME.log"
   done
@@ -140,12 +141,14 @@ if [ "$RUN_ALL" = true ]; then
 #
 # We select the test suites and execute them on multiple type of clusters
 #
-   DEFAULT_TESTS=("basic")
-   execute_tests ozone "${DEFAULT_TESTS[@]}"
+   TESTS=("basic")
+   execute_tests ozone "${TESTS[@]}"
+   TESTS=("audiparser")
+   execute_tests ozone "${TESTS[@]}"
    TESTS=("ozonefs")
    execute_tests ozonefs "${TESTS[@]}"
-   TESTS=("ozone-hdfs")
-   execute_tests ozone-hdfs "${DEFAULT_TESTS[@]}"
+   TESTS=("basic")
+   execute_tests ozone-hdfs "${TESTS[@]}"
    TESTS=("s3")
    execute_tests ozones3 "${TESTS[@]}"
 else
@@ -153,4 +156,4 @@ else
 fi
 
 #Generate the combined output and return with the right exit code (note: robot = execute test, rebot = generate output)
-docker run --rm -it -v "$DIR/..:/opt/hadoop" apache/hadoop-runner rebot -d "smoketest/$RESULT_DIR" "smoketest/$RESULT_DIR/robot-*.xml"
+docker run --rm -v "$DIR/..:/opt/hadoop" apache/hadoop-runner rebot -d "smoketest/$RESULT_DIR" "smoketest/$RESULT_DIR/robot-*.xml"
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
index fe04b56..4e77bfd 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
@@ -28,9 +28,6 @@ import java.util.Map;
 import java.util.TreeMap;
 import java.util.UUID;
 
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.hdds.client.OzoneQuota;
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
@@ -83,6 +80,9 @@ import org.apache.hadoop.test.LambdaTestUtils;
 import org.apache.hadoop.util.Time;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.commons.lang3.RandomUtils;
 import static org.hamcrest.CoreMatchers.containsString;
 import static org.hamcrest.CoreMatchers.either;
 import org.junit.Assert;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java
index 4827b02..da284f5 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java
@@ -76,7 +76,7 @@ public class TestReadRetries {
   private static StorageContainerLocationProtocolClientSideTranslatorPB
       storageContainerLocationClient;
 
-  private static String SCM_ID = UUID.randomUUID().toString();
+  private static final String SCM_ID = UUID.randomUUID().toString();
 
 
   /**
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java
index f4b089b..4ebbf1c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java
@@ -78,7 +78,8 @@ public class TestContainerDeletionChoosingPolicy {
       KeyValueContainer container = new KeyValueContainer(data, conf);
       containerSet.addContainer(container);
       Assert.assertTrue(
-          containerSet.getContainerMapCopy().containsKey(data.getContainerID()));
+          containerSet.getContainerMapCopy()
+              .containsKey(data.getContainerID()));
     }
 
     ContainerDeletionChoosingPolicy deletionPolicy =
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java
index 140ca24..887c35a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java
@@ -154,7 +154,8 @@ public class TestSecureContainerServer {
     XceiverClientSpi client = null;
     String containerName = OzoneUtils.getRequestID();
     try {
-      final Pipeline pipeline = ContainerTestHelper.createPipeline(numDatanodes);
+      final Pipeline pipeline =
+          ContainerTestHelper.createPipeline(numDatanodes);
 
       initConf.accept(pipeline, CONF);
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMNodeDetails.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMNodeDetails.java
index caa7674..630d98a 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMNodeDetails.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMNodeDetails.java
@@ -101,6 +101,10 @@ public final class OMNodeDetails {
     return ratisPort;
   }
 
+  public int getRpcPort() {
+    return rpcPort;
+  }
+
   public String getRpcAddressString() {
     return NetUtils.getHostPortString(rpcAddress);
   }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java
index be4bf59..2cac258 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java
@@ -176,6 +176,8 @@ public final class OzoneManagerRatisServer {
     }
   }
 
+  //TODO simplify it to make it shorter
+  @SuppressWarnings("methodlength")
   private RaftProperties newRaftProperties(Configuration conf) {
     final RaftProperties properties = new RaftProperties();
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java
index 35010ab..831873d 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java
@@ -131,7 +131,7 @@ public class OzoneManagerRequestHandler {
     this.impl = om;
   }
 
-  //TODO: use map to make shorted methods
+  //TODO simplify it to make it shorter
   @SuppressWarnings("methodlength")
   public OMResponse handle(OMRequest request) {
     LOG.debug("Received OMRequest: {}, ", request);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/CancelTokenHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/CancelTokenHandler.java
index 8478c58..a025e24 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/CancelTokenHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/CancelTokenHandler.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.security.token.Token;
 import picocli.CommandLine;
 import picocli.CommandLine.Command;
 
+import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
 import java.nio.file.Paths;
 
@@ -63,7 +64,8 @@ public class CancelTokenHandler extends Handler {
     }
     Token token = new Token();
     token.decodeFromUrlString(
-        new String(Files.readAllBytes(Paths.get(tokenFile))));
+        new String(Files.readAllBytes(Paths.get(tokenFile)),
+            StandardCharsets.UTF_8));
     client.getObjectStore().cancelDelegationToken(token);
     return null;
   }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/PrintTokenHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/PrintTokenHandler.java
index 3b25ad8..93e4c24 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/PrintTokenHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/PrintTokenHandler.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.security.token.Token;
 import picocli.CommandLine;
 import picocli.CommandLine.Command;
 
+import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
 import java.nio.file.Paths;
 
@@ -58,7 +59,8 @@ public class PrintTokenHandler extends Handler {
       return null;
     }
 
-    String encodedToken = new String(Files.readAllBytes(Paths.get(tokenFile)));
+    String encodedToken = new String(Files.readAllBytes(Paths.get(tokenFile)),
+        StandardCharsets.UTF_8);
     Token token = new Token();
     token.decodeFromUrlString(encodedToken);
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/RenewTokenHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/RenewTokenHandler.java
index c5ff24c..faf74ae 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/RenewTokenHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/RenewTokenHandler.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.security.token.Token;
 import picocli.CommandLine;
 import picocli.CommandLine.Command;
 
+import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
 import java.nio.file.Paths;
 
@@ -63,7 +64,8 @@ public class RenewTokenHandler extends Handler {
     }
     Token token = new Token();
     token.decodeFromUrlString(
-        new String(Files.readAllBytes(Paths.get(tokenFile))));
+        new String(Files.readAllBytes(Paths.get(tokenFile)),
+            StandardCharsets.UTF_8));
     long expiryTime = client.getObjectStore().renewDelegationToken(token);
 
     System.out.printf("Token renewed successfully, expiry time: %s",
diff --git a/hadoop-ozone/ozonefs-lib-legacy/pom.xml b/hadoop-ozone/ozonefs-lib-legacy/pom.xml
index b4b7636..51e9192 100644
--- a/hadoop-ozone/ozonefs-lib-legacy/pom.xml
+++ b/hadoop-ozone/ozonefs-lib-legacy/pom.xml
@@ -91,6 +91,13 @@
           </execution>
         </executions>
       </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>findbugs-maven-plugin</artifactId>
+        <configuration>
+          <skip>true</skip>
+        </configuration>
+      </plugin>
     </plugins>
   </build>
 
diff --git a/hadoop-ozone/ozonefs-lib/pom.xml b/hadoop-ozone/ozonefs-lib/pom.xml
index c8c5c75..b1cbaf8 100644
--- a/hadoop-ozone/ozonefs-lib/pom.xml
+++ b/hadoop-ozone/ozonefs-lib/pom.xml
@@ -76,6 +76,13 @@
           </execution>
         </executions>
       </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>findbugs-maven-plugin</artifactId>
+        <configuration>
+          <skip>true</skip>
+        </configuration>
+      </plugin>
     </plugins>
   </build>
 
diff --git a/hadoop-ozone/ozonefs/pom.xml b/hadoop-ozone/ozonefs/pom.xml
index 7da5efe..a3681d6 100644
--- a/hadoop-ozone/ozonefs/pom.xml
+++ b/hadoop-ozone/ozonefs/pom.xml
@@ -133,6 +133,13 @@
       <scope>provided</scope>
     </dependency>
     <dependency>
+      <groupId>com.google.code.findbugs</groupId>
+      <artifactId>findbugs</artifactId>
+      <version>3.0.1</version>
+      <scope>provided</scope>
+    </dependency>
+
+    <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-common</artifactId>
       <scope>test</scope>
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapterFactory.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapterFactory.java
index cf1c127..ca051dc 100644
--- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapterFactory.java
+++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapterFactory.java
@@ -24,6 +24,7 @@ import java.util.ArrayList;
 import java.util.Enumeration;
 import java.util.List;
 
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -38,6 +39,7 @@ public final class OzoneClientAdapterFactory {
   private OzoneClientAdapterFactory() {
   }
 
+  @SuppressFBWarnings("DP_CREATE_CLASSLOADER_INSIDE_DO_PRIVILEGED")
   public static OzoneClientAdapter createAdapter(
       String volumeStr,
       String bucketStr, OzoneFSStorageStatistics storageStatistics)
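
@SuppressFBWarnings lives in the findbugs annotations package, which is why the neighboring pom changes add com.google.code.findbugs with provided scope: the class is needed at compile and analysis time but never at runtime. A sketch of how the annotation mutes a single named bug pattern (class and method names here are hypothetical):

    // Mute exactly one findbugs pattern on the annotated element; every
    // other pattern is still reported for this class.
    import java.net.URL;
    import java.net.URLClassLoader;

    import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;

    public final class LoaderSketch {
      @SuppressFBWarnings("DP_CREATE_CLASSLOADER_INSIDE_DO_PRIVILEGED")
      ClassLoader newLoader(URL[] urls) {
        // findbugs would otherwise flag creating a class loader outside
        // AccessController.doPrivileged(...)
        return new URLClassLoader(urls);
      }
    }
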
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
index 6330d8f..5337f2e 100644
--- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
+++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
@@ -207,7 +207,7 @@ public class OzoneFileSystem extends FileSystem {
                                    short replication, long blockSize,
                                    Progressable progress) throws IOException {
     LOG.trace("create() path:{}", f);
-     storageStatistics.incrementCounter(Statistic.INVOCATION_CREATE, 1);
+    storageStatistics.incrementCounter(Statistic.INVOCATION_CREATE, 1);
     statistics.incrementWriteOps(1);
     final String key = pathToKey(f);
     final FileStatus status;
diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml
index a3f761a..264b405 100644
--- a/hadoop-ozone/pom.xml
+++ b/hadoop-ozone/pom.xml
@@ -308,6 +308,7 @@
       <plugin>
         <groupId>org.codehaus.mojo</groupId>
         <artifactId>findbugs-maven-plugin</artifactId>
+        <version>3.0.4</version>
         <configuration>
           <excludeFilterFile combine.self="override"/>
         </configuration>
diff --git a/hadoop-ozone/tools/pom.xml b/hadoop-ozone/tools/pom.xml
index aeff0f7..6838add 100644
--- a/hadoop-ozone/tools/pom.xml
+++ b/hadoop-ozone/tools/pom.xml
@@ -71,6 +71,13 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <version>1.19</version>
     </dependency>
     <dependency>
+      <groupId>com.google.code.findbugs</groupId>
+      <artifactId>findbugs</artifactId>
+      <version>3.0.1</version>
+      <scope>provided</scope>
+    </dependency>
+
+    <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-ozone-integration-test</artifactId>
       <scope>test</scope>
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
index 67df0f9..87029fa 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
@@ -36,6 +36,7 @@ import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.function.Supplier;
 
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import io.opentracing.Scope;
 import io.opentracing.util.GlobalTracer;
 import org.apache.commons.codec.digest.DigestUtils;
@@ -552,6 +553,7 @@ public final class RandomKeyGenerator implements Callable<Void> {
     }
 
     @Override
+    @SuppressFBWarnings("REC_CATCH_EXCEPTION")
     public void run() {
       LOG.trace("Creating volume: {}", volumeName);
       long start = System.nanoTime();
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/fsck/BlockIdDetails.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/fsck/BlockIdDetails.java
index 83bbc83..cf15e1f 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/fsck/BlockIdDetails.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/fsck/BlockIdDetails.java
@@ -1,18 +1,19 @@
 /**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
  * http://www.apache.org/licenses/LICENSE-2.0
- *
+ * <p>
  * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  */
 package org.apache.hadoop.ozone.fsck;
 
@@ -79,4 +80,4 @@ public class BlockIdDetails {
   public int hashCode() {
     return Objects.hash(bucketName, blockVol, keyName);
   }
-}
\ No newline at end of file
+}

