You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by aa...@apache.org on 2019/03/04 08:00:15 UTC

[hadoop] branch trunk updated (9e53088 -> bd8d299)

This is an automated email from the ASF dual-hosted git repository.

aajisaka pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


    from 9e53088  Revert "HDDS-1183. Override getDelegationToken API for OzoneFileSystem. Contr…" (#544)
     new fc17ba1  HDFS-14272. [SBN read] Make ObserverReadProxyProvider initialize its state ID against the active NN on startup. Contributed by Erik Krogen.
     new b18c1c2  Revert "HDDS-1072. Implement RetryProxy and FailoverProxy for OM client."
     new e20b5ef  YARN-9332. RackResolver tool should accept multiple hosts. Contributed by Lantao Jin.
     new 6c4d566  Revert "HDFS-14261. Kerberize JournalNodeSyncer unit test. Contributed by Siyao Meng."
     new bd8d299  YARN-7477. Moving logging APIs over to slf4j in hadoop-yarn-common. Contributed by Prabhu Joseph.

The 5 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../org/apache/hadoop/ozone/OzoneConfigKeys.java   |  17 --
 .../java/org/apache/hadoop/ozone/OzoneConsts.java  |   3 -
 .../common/src/main/resources/ozone-default.xml    |  43 +---
 .../namenode/ha/ObserverReadProxyProvider.java     |  34 +++
 .../hdfs/server/namenode/NameNodeRpcServer.java    |   3 +-
 .../hdfs/qjournal/server/TestJournalNodeSync.java  |  90 +------
 .../namenode/ha/TestConsistentReadsObserver.java   |  73 +++++-
 .../ozone/client/protocol/ClientProtocol.java      |   4 +-
 .../hadoop/ozone/client/rest/RestClient.java       |   4 +-
 .../apache/hadoop/ozone/client/rpc/RpcClient.java  |  18 +-
 .../hadoop/ozone/client/rpc/ha/OMProxyInfo.java    |  38 ++-
 .../ozone/client/rpc/ha/OMProxyProvider.java       | 177 ++++++++++++++
 .../hadoop/ozone/client/rpc}/ha/package-info.java  |   2 +-
 .../ozone/om/ha/OMFailoverProxyProvider.java       | 266 ---------------------
 .../ozone/om/protocol/OzoneManagerProtocol.java    |   7 -
 ...OzoneManagerProtocolClientSideTranslatorPB.java | 105 +-------
 .../src/main/proto/OzoneManagerProtocol.proto      |   2 -
 .../hadoop/ozone/MiniOzoneHAClusterImpl.java       |  38 ++-
 .../client/rpc/TestOzoneRpcClientAbstract.java     |   8 +-
 .../apache/hadoop/ozone/om/TestOzoneManagerHA.java | 184 +++-----------
 .../org/apache/hadoop/ozone/om/OzoneManager.java   |   6 -
 .../hadoop/ozone/om/ratis/OMRatisHelper.java       |   9 +-
 .../ozone/om/ratis/OzoneManagerRatisClient.java    |  27 +--
 ...OzoneManagerProtocolServerSideTranslatorPB.java |   3 +-
 .../om/ratis/TestOzoneManagerRatisServer.java      |  23 ++
 .../yarn/FileSystemBasedConfigurationProvider.java |   8 +-
 .../hadoop/yarn/YarnUncaughtExceptionHandler.java  |  14 +-
 .../yarn/api/records/impl/pb/ResourcePBImpl.java   |   7 +-
 .../org/apache/hadoop/yarn/client/AHSProxy.java    |   7 +-
 .../apache/hadoop/yarn/client/ClientRMProxy.java   |   7 +-
 .../client/ConfiguredRMFailoverProxyProvider.java  |   8 +-
 .../org/apache/hadoop/yarn/client/RMProxy.java     |   7 +-
 .../RequestHedgingRMFailoverProxyProvider.java     |   8 +-
 .../yarn/client/api/impl/DirectTimelineWriter.java |   8 +-
 .../client/api/impl/FileSystemTimelineWriter.java  |  18 +-
 .../yarn/client/api/impl/TimelineClientImpl.java   |   7 +-
 .../yarn/client/api/impl/TimelineConnector.java    |   7 +-
 .../client/api/impl/TimelineReaderClientImpl.java  |   8 +-
 .../yarn/client/api/impl/TimelineV2ClientImpl.java |   7 +-
 .../yarn/client/api/impl/TimelineWriter.java       |   8 +-
 .../apache/hadoop/yarn/event/AsyncDispatcher.java  |  13 +-
 .../apache/hadoop/yarn/event/EventDispatcher.java  |  13 +-
 .../factories/impl/pb/RpcClientFactoryPBImpl.java  |   8 +-
 .../factories/impl/pb/RpcServerFactoryPBImpl.java  |   7 +-
 .../apache/hadoop/yarn/ipc/HadoopYarnProtoRPC.java |   7 +-
 .../java/org/apache/hadoop/yarn/ipc/YarnRPC.java   |   7 +-
 .../AggregatedLogDeletionService.java              |   7 +-
 .../LogAggregationFileControllerFactory.java       |   6 +-
 .../tfile/LogAggregationTFileController.java       |   6 +-
 .../yarn/nodelabels/CommonNodeLabelsManager.java   |   7 +-
 .../yarn/nodelabels/FileSystemNodeLabelsStore.java |   8 +-
 .../nodelabels/NonAppendableFSNodeLabelStore.java  |   8 +-
 .../yarn/nodelabels/store/AbstractFSNodeStore.java |   7 +-
 .../hadoop/yarn/security/AMRMTokenIdentifier.java  |   7 +-
 .../hadoop/yarn/security/AMRMTokenSelector.java    |   8 +-
 .../hadoop/yarn/security/AdminACLsManager.java     |   7 +-
 .../yarn/security/ContainerTokenIdentifier.java    |   7 +-
 .../yarn/security/ContainerTokenSelector.java      |   8 +-
 .../hadoop/yarn/security/NMTokenIdentifier.java    |   7 +-
 .../hadoop/yarn/security/NMTokenSelector.java      |   8 +-
 .../yarn/security/YarnAuthorizationProvider.java   |   7 +-
 .../security/client/ClientToAMTokenSelector.java   |   8 +-
 .../security/client/RMDelegationTokenSelector.java |   8 +-
 .../client/TimelineDelegationTokenSelector.java    |   8 +-
 .../server/security/ApplicationACLsManager.java    |   8 +-
 .../yarn/util/AbstractLivelinessMonitor.java       |   7 +-
 .../apache/hadoop/yarn/util/AdHocLogDumper.java    | 123 +++++-----
 .../org/apache/hadoop/yarn/util/FSDownload.java    |   7 +-
 .../hadoop/yarn/util/ProcfsBasedProcessTree.java   |  18 +-
 .../org/apache/hadoop/yarn/util/RackResolver.java  |  74 +++++-
 .../hadoop/yarn/util/ResourceCalculatorPlugin.java |   8 +-
 .../yarn/util/ResourceCalculatorProcessTree.java   |   8 +-
 .../java/org/apache/hadoop/yarn/util/Times.java    |   7 +-
 .../hadoop/yarn/util/WindowsBasedProcessTree.java  |   8 +-
 .../apache/hadoop/yarn/util/YarnVersionInfo.java   |   7 +-
 .../util/resource/DefaultResourceCalculator.java   |   8 +-
 .../util/resource/DominantResourceCalculator.java  |   7 +-
 .../hadoop/yarn/util/resource/Resources.java       |   8 +-
 .../yarn/webapp/GenericExceptionHandler.java       |   8 +-
 .../apache/hadoop/yarn/TestContainerLaunchRPC.java |   9 +-
 .../yarn/TestContainerResourceIncreaseRPC.java     |   8 +-
 .../hadoop/yarn/api/BasePBImplRecordsTest.java     |   7 +-
 .../api/records/timeline/TestTimelineRecords.java  |   8 +-
 .../TestTimelineServiceRecords.java                |   8 +-
 .../api/impl/TestTimelineClientForATS1_5.java      |   8 +-
 .../client/api/impl/TestTimelineClientV2Impl.java  |   8 +-
 .../apache/hadoop/yarn/event/InlineDispatcher.java |   7 +-
 .../logaggregation/TestAggregatedLogFormat.java    |   8 +-
 .../hadoop/yarn/util/TestAdHocLogDumper.java       |  31 +--
 .../apache/hadoop/yarn/util/TestFSDownload.java    |   7 +-
 .../util/TestLog4jWarningErrorMetricsAppender.java |  32 ++-
 .../yarn/util/TestProcfsBasedProcessTree.java      |   8 +-
 .../apache/hadoop/yarn/util/TestRackResolver.java  |  67 +++++-
 .../yarn/util/TestWindowsBasedProcessTree.java     |   8 +-
 94 files changed, 936 insertions(+), 1094 deletions(-)
 copy hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestHealthMonitorWithDedicatedHealthAddress.java => hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/ha/OMProxyInfo.java (56%)
 create mode 100644 hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/ha/OMProxyProvider.java
 rename hadoop-ozone/{common/src/main/java/org/apache/hadoop/ozone/om => client/src/main/java/org/apache/hadoop/ozone/client/rpc}/ha/package-info.java (94%)
 delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[hadoop] 01/05: HDFS-14272. [SBN read] Make ObserverReadProxyProvider initialize its state ID against the active NN on startup. Contributed by Erik Krogen.

Posted by aa...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

aajisaka pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit fc17ba172bde2aeea98a84f9a8cd104a2fada673
Author: Erik Krogen <xk...@apache.org>
AuthorDate: Fri Mar 1 12:58:55 2019 -0800

    HDFS-14272. [SBN read] Make ObserverReadProxyProvider initialize its state ID against the active NN on startup. Contributed by Erik Krogen.
---
 .../namenode/ha/ObserverReadProxyProvider.java     | 34 ++++++++++
 .../hdfs/server/namenode/NameNodeRpcServer.java    |  3 +-
 .../namenode/ha/TestConsistentReadsObserver.java   | 73 +++++++++++++++++++++-
 3 files changed, 107 insertions(+), 3 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
index 3cf14cb..a17c640 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
@@ -88,6 +88,15 @@ public class ObserverReadProxyProvider<T extends ClientProtocol>
   private boolean observerReadEnabled;
 
   /**
+   * A client using an ObserverReadProxyProvider should first sync with the
+   * active NameNode on startup. This ensures that the client reads data which
+   * is consistent with the state of the world as of the time of its
+   * instantiation. This variable will be true after this initial sync has
+   * been performed.
+   */
+  private volatile boolean msynced = false;
+
+  /**
    * The index into the nameNodeProxies list currently being used. Should only
    * be accessed in synchronized methods.
    */
@@ -225,6 +234,22 @@ public class ObserverReadProxyProvider<T extends ClientProtocol>
   }
 
   /**
+   * This will call {@link ClientProtocol#msync()} on the active NameNode
+   * (via the {@link #failoverProxy}) to initialize the state of this client.
+   * Calling it multiple times is a no-op; only the first will perform an
+   * msync.
+   *
+   * @see #msynced
+   */
+  private synchronized void initializeMsync() throws IOException {
+    if (msynced) {
+      return; // No need for an msync
+    }
+    failoverProxy.getProxy().proxy.msync();
+    msynced = true;
+  }
+
+  /**
    * An InvocationHandler to handle incoming requests. This class's invoke
    * method contains the primary logic for redirecting to observers.
    *
@@ -244,6 +269,12 @@ public class ObserverReadProxyProvider<T extends ClientProtocol>
       Object retVal;
 
       if (observerReadEnabled && isRead(method)) {
+        if (!msynced) {
+          // An msync() must first be performed to ensure that this client is
+          // up-to-date with the active's state. This will only be done once.
+          initializeMsync();
+        }
+
         int failedObserverCount = 0;
         int activeCount = 0;
         int standbyCount = 0;
@@ -315,6 +346,9 @@ public class ObserverReadProxyProvider<T extends ClientProtocol>
         // This exception will be handled by higher layers
         throw e.getCause();
       }
+      // If this was reached, the request reached the active, so the
+      // state is up-to-date with active and no further msync is needed.
+      msynced = true;
       lastProxy = activeProxy;
       return retVal;
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index f50648d..525d9c8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -1409,7 +1409,8 @@ public class NameNodeRpcServer implements NamenodeProtocols {
 
   @Override // ClientProtocol
   public void msync() throws IOException {
-    // TODO : need to be filled up if needed. May be a no-op here.
+    // Check for write access to ensure that msync only happens on active
+    namesystem.checkOperation(OperationCategory.WRITE);
   }
 
   @Override // ClientProtocol
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java
index 2845670..2bed37c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java
@@ -178,8 +178,12 @@ public class TestConsistentReadsObserver {
         // Therefore, the subsequent getFileStatus call should succeed.
         dfs2.getClient().msync();
         dfs2.getFileStatus(testPath);
-        readStatus.set(1);
-      } catch (IOException e) {
+        if (HATestUtil.isSentToAnyOfNameNodes(dfs2, dfsCluster, 2)) {
+          readStatus.set(1);
+        } else {
+          readStatus.set(-1);
+        }
+      } catch (Exception e) {
         e.printStackTrace();
         readStatus.set(-1);
       }
@@ -196,6 +200,71 @@ public class TestConsistentReadsObserver {
     assertEquals(1, readStatus.get());
   }
 
+  // A new client should first contact the active, before using an observer,
+  // to ensure that it is up-to-date with the current state
+  @Test
+  public void testCallFromNewClient() throws Exception {
+    // Set the order of nodes: Observer, Standby, Active
+    // This is to ensure that test doesn't pass trivially because the active is
+    // the first node contacted
+    dfsCluster.transitionToStandby(0);
+    dfsCluster.transitionToObserver(0);
+    dfsCluster.transitionToStandby(2);
+    dfsCluster.transitionToActive(2);
+    try {
+      // 0 == not completed, 1 == succeeded, -1 == failed
+      AtomicInteger readStatus = new AtomicInteger(0);
+
+      // Initialize the proxies for Observer Node.
+      dfs.getClient().getHAServiceState();
+
+      // Advance Observer's state ID so it is ahead of client's.
+      dfs.mkdir(new Path("/test"), FsPermission.getDefault());
+      dfsCluster.getNameNode(2).getRpcServer().rollEditLog();
+      dfsCluster.getNameNode(0)
+          .getNamesystem().getEditLogTailer().doTailEdits();
+
+      dfs.mkdir(testPath, FsPermission.getDefault());
+      assertSentTo(2);
+
+      Configuration conf2 = new Configuration(conf);
+
+      // Disable FS cache so two different DFS clients will be used.
+      conf2.setBoolean("fs.hdfs.impl.disable.cache", true);
+      DistributedFileSystem dfs2 =
+          (DistributedFileSystem) FileSystem.get(conf2);
+      dfs2.getClient().getHAServiceState();
+
+      Thread reader = new Thread(() -> {
+        try {
+          dfs2.getFileStatus(testPath);
+          readStatus.set(1);
+        } catch (Exception e) {
+          e.printStackTrace();
+          readStatus.set(-1);
+        }
+      });
+
+      reader.start();
+
+      Thread.sleep(100);
+      assertEquals(0, readStatus.get());
+
+      dfsCluster.getNameNode(2).getRpcServer().rollEditLog();
+      dfsCluster.getNameNode(0)
+          .getNamesystem().getEditLogTailer().doTailEdits();
+
+      GenericTestUtils.waitFor(() -> readStatus.get() != 0, 100, 10000);
+      assertEquals(1, readStatus.get());
+    } finally {
+      // Put the cluster back the way it was when the test started
+      dfsCluster.transitionToStandby(2);
+      dfsCluster.transitionToObserver(2);
+      dfsCluster.transitionToStandby(0);
+      dfsCluster.transitionToActive(0);
+    }
+  }
+
   @Test
   public void testUncoordinatedCall() throws Exception {
     // make a write call so that client will be ahead of


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[hadoop] 05/05: YARN-7477. Moving logging APIs over to slf4j in hadoop-yarn-common. Contributed by Prabhu Joseph.

Posted by aa...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

aajisaka pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit bd8d299ded742813cabd4b4e7ce1e33e0d1f9509
Author: Akira Ajisaka <aa...@apache.org>
AuthorDate: Mon Mar 4 15:09:20 2019 +0900

    YARN-7477. Moving logging APIs over to slf4j in hadoop-yarn-common. Contributed by Prabhu Joseph.
---
 .../yarn/FileSystemBasedConfigurationProvider.java |   8 +-
 .../hadoop/yarn/YarnUncaughtExceptionHandler.java  |  14 ++-
 .../yarn/api/records/impl/pb/ResourcePBImpl.java   |   7 +-
 .../org/apache/hadoop/yarn/client/AHSProxy.java    |   7 +-
 .../apache/hadoop/yarn/client/ClientRMProxy.java   |   7 +-
 .../client/ConfiguredRMFailoverProxyProvider.java  |   8 +-
 .../org/apache/hadoop/yarn/client/RMProxy.java     |   7 +-
 .../RequestHedgingRMFailoverProxyProvider.java     |   8 +-
 .../yarn/client/api/impl/DirectTimelineWriter.java |   8 +-
 .../client/api/impl/FileSystemTimelineWriter.java  |  18 +--
 .../yarn/client/api/impl/TimelineClientImpl.java   |   7 +-
 .../yarn/client/api/impl/TimelineConnector.java    |   7 +-
 .../client/api/impl/TimelineReaderClientImpl.java  |   8 +-
 .../yarn/client/api/impl/TimelineV2ClientImpl.java |   7 +-
 .../yarn/client/api/impl/TimelineWriter.java       |   8 +-
 .../apache/hadoop/yarn/event/AsyncDispatcher.java  |  13 ++-
 .../apache/hadoop/yarn/event/EventDispatcher.java  |  13 ++-
 .../factories/impl/pb/RpcClientFactoryPBImpl.java  |   8 +-
 .../factories/impl/pb/RpcServerFactoryPBImpl.java  |   7 +-
 .../apache/hadoop/yarn/ipc/HadoopYarnProtoRPC.java |   7 +-
 .../java/org/apache/hadoop/yarn/ipc/YarnRPC.java   |   7 +-
 .../AggregatedLogDeletionService.java              |   7 +-
 .../LogAggregationFileControllerFactory.java       |   6 +-
 .../tfile/LogAggregationTFileController.java       |   6 +-
 .../yarn/nodelabels/CommonNodeLabelsManager.java   |   7 +-
 .../yarn/nodelabels/FileSystemNodeLabelsStore.java |   8 +-
 .../nodelabels/NonAppendableFSNodeLabelStore.java  |   8 +-
 .../yarn/nodelabels/store/AbstractFSNodeStore.java |   7 +-
 .../hadoop/yarn/security/AMRMTokenIdentifier.java  |   7 +-
 .../hadoop/yarn/security/AMRMTokenSelector.java    |   8 +-
 .../hadoop/yarn/security/AdminACLsManager.java     |   7 +-
 .../yarn/security/ContainerTokenIdentifier.java    |   7 +-
 .../yarn/security/ContainerTokenSelector.java      |   8 +-
 .../hadoop/yarn/security/NMTokenIdentifier.java    |   7 +-
 .../hadoop/yarn/security/NMTokenSelector.java      |   8 +-
 .../yarn/security/YarnAuthorizationProvider.java   |   7 +-
 .../security/client/ClientToAMTokenSelector.java   |   8 +-
 .../security/client/RMDelegationTokenSelector.java |   8 +-
 .../client/TimelineDelegationTokenSelector.java    |   8 +-
 .../server/security/ApplicationACLsManager.java    |   8 +-
 .../yarn/util/AbstractLivelinessMonitor.java       |   7 +-
 .../apache/hadoop/yarn/util/AdHocLogDumper.java    | 123 +++++++++++----------
 .../org/apache/hadoop/yarn/util/FSDownload.java    |   7 +-
 .../hadoop/yarn/util/ProcfsBasedProcessTree.java   |  18 +--
 .../hadoop/yarn/util/ResourceCalculatorPlugin.java |   8 +-
 .../yarn/util/ResourceCalculatorProcessTree.java   |   8 +-
 .../java/org/apache/hadoop/yarn/util/Times.java    |   7 +-
 .../hadoop/yarn/util/WindowsBasedProcessTree.java  |   8 +-
 .../apache/hadoop/yarn/util/YarnVersionInfo.java   |   7 +-
 .../util/resource/DefaultResourceCalculator.java   |   8 +-
 .../util/resource/DominantResourceCalculator.java  |   7 +-
 .../hadoop/yarn/util/resource/Resources.java       |   8 +-
 .../yarn/webapp/GenericExceptionHandler.java       |   8 +-
 .../apache/hadoop/yarn/TestContainerLaunchRPC.java |   9 +-
 .../yarn/TestContainerResourceIncreaseRPC.java     |   8 +-
 .../hadoop/yarn/api/BasePBImplRecordsTest.java     |   7 +-
 .../api/records/timeline/TestTimelineRecords.java  |   8 +-
 .../TestTimelineServiceRecords.java                |   8 +-
 .../api/impl/TestTimelineClientForATS1_5.java      |   8 +-
 .../client/api/impl/TestTimelineClientV2Impl.java  |   8 +-
 .../apache/hadoop/yarn/event/InlineDispatcher.java |   7 +-
 .../logaggregation/TestAggregatedLogFormat.java    |   8 +-
 .../hadoop/yarn/util/TestAdHocLogDumper.java       |  31 +++---
 .../apache/hadoop/yarn/util/TestFSDownload.java    |   7 +-
 .../util/TestLog4jWarningErrorMetricsAppender.java |  32 +++---
 .../yarn/util/TestProcfsBasedProcessTree.java      |   8 +-
 .../apache/hadoop/yarn/util/TestRackResolver.java  |   7 +-
 .../yarn/util/TestWindowsBasedProcessTree.java     |   8 +-
 68 files changed, 383 insertions(+), 329 deletions(-)

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/FileSystemBasedConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/FileSystemBasedConfigurationProvider.java
index 73c4990..b6ba660 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/FileSystemBasedConfigurationProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/FileSystemBasedConfigurationProvider.java
@@ -21,8 +21,8 @@ package org.apache.hadoop.yarn;
 import java.io.IOException;
 import java.io.InputStream;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
@@ -37,8 +37,8 @@ import org.apache.hadoop.yarn.exceptions.YarnException;
 public class FileSystemBasedConfigurationProvider
     extends ConfigurationProvider {
 
-  private static final Log LOG = LogFactory
-      .getLog(FileSystemBasedConfigurationProvider.class);
+  private static final Logger LOG = LoggerFactory
+      .getLogger(FileSystemBasedConfigurationProvider.class);
   private FileSystem fs;
   private Path configDir;
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/YarnUncaughtExceptionHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/YarnUncaughtExceptionHandler.java
index 7b4b774..d2ca6f3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/YarnUncaughtExceptionHandler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/YarnUncaughtExceptionHandler.java
@@ -20,8 +20,10 @@ package org.apache.hadoop.yarn;
 
 import java.lang.Thread.UncaughtExceptionHandler;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.slf4j.Marker;
+import org.slf4j.MarkerFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.util.ExitUtil;
@@ -39,7 +41,10 @@ import org.apache.hadoop.util.ShutdownHookManager;
 @Public
 @Evolving
 public class YarnUncaughtExceptionHandler implements UncaughtExceptionHandler {
-  private static final Log LOG = LogFactory.getLog(YarnUncaughtExceptionHandler.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(YarnUncaughtExceptionHandler.class);
+  private static final Marker FATAL =
+      MarkerFactory.getMarker("FATAL");
   
   @Override
   public void uncaughtException(Thread t, Throwable e) {
@@ -48,7 +53,8 @@ public class YarnUncaughtExceptionHandler implements UncaughtExceptionHandler {
       		"down, so ignoring this", e);
     } else if(e instanceof Error) {
       try {
-        LOG.fatal("Thread " + t + " threw an Error.  Shutting down now...", e);
+        LOG.error(FATAL,
+            "Thread " + t + " threw an Error.  Shutting down now...", e);
       } catch (Throwable err) {
         //We don't want to not exit because of an issue with logging
       }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
index 076a49d..36fc959 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
@@ -20,8 +20,8 @@ package org.apache.hadoop.yarn.api.records.impl.pb;
 
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.ImmutableSet;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
@@ -41,7 +41,8 @@ import java.util.Map;
 @Unstable
 public class ResourcePBImpl extends Resource {
 
-  private static final Log LOG = LogFactory.getLog(ResourcePBImpl.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ResourcePBImpl.class);
 
   ResourceProto proto = ResourceProto.getDefaultInstance();
   ResourceProto.Builder builder = null;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/AHSProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/AHSProxy.java
index d523487..e2978b7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/AHSProxy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/AHSProxy.java
@@ -22,8 +22,8 @@ import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.security.PrivilegedAction;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -35,7 +35,8 @@ import org.apache.hadoop.yarn.ipc.YarnRPC;
 @SuppressWarnings("unchecked")
 public class AHSProxy<T> {
 
-  private static final Log LOG = LogFactory.getLog(AHSProxy.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(AHSProxy.class);
 
   public static <T> T createAHSProxy(final Configuration conf,
       final Class<T> protocol, InetSocketAddress ahsAddress) throws IOException {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ClientRMProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ClientRMProxy.java
index 5b028e1..496b984 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ClientRMProxy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ClientRMProxy.java
@@ -22,8 +22,8 @@ import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -47,7 +47,8 @@ import com.google.common.base.Preconditions;
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public class ClientRMProxy<T> extends RMProxy<T>  {
-  private static final Log LOG = LogFactory.getLog(ClientRMProxy.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ClientRMProxy.class);
 
   private interface ClientRMProtocols extends ApplicationClientProtocol,
       ApplicationMasterProtocol, ResourceManagerAdministrationProtocol {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ConfiguredRMFailoverProxyProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ConfiguredRMFailoverProxyProvider.java
index d6b6cce..89c8753 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ConfiguredRMFailoverProxyProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ConfiguredRMFailoverProxyProvider.java
@@ -25,8 +25,8 @@ import java.util.Collection;
 import java.util.HashMap;
 import java.util.Map;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -39,8 +39,8 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 @InterfaceStability.Unstable
 public class ConfiguredRMFailoverProxyProvider<T>
     implements RMFailoverProxyProvider<T> {
-  private static final Log LOG =
-      LogFactory.getLog(ConfiguredRMFailoverProxyProvider.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ConfiguredRMFailoverProxyProvider.class);
 
   private int currentProxyIndex = 0;
   Map<String, T> proxies = new HashMap<String, T>();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java
index f7cb47a..d385d3e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java
@@ -30,8 +30,8 @@ import java.util.HashMap;
 import java.util.Map;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -56,7 +56,8 @@ import com.google.common.annotations.VisibleForTesting;
 @SuppressWarnings("unchecked")
 public class RMProxy<T> {
 
-  private static final Log LOG = LogFactory.getLog(RMProxy.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(RMProxy.class);
   private UserGroupInformation user;
 
   protected RMProxy() {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RequestHedgingRMFailoverProxyProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RequestHedgingRMFailoverProxyProvider.java
index c1e9da1..d4c7f6f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RequestHedgingRMFailoverProxyProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RequestHedgingRMFailoverProxyProvider.java
@@ -34,8 +34,8 @@ import java.util.concurrent.ExecutorCompletionService;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Future;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.io.retry.RetryProxy;
@@ -55,8 +55,8 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 public class RequestHedgingRMFailoverProxyProvider<T>
     extends ConfiguredRMFailoverProxyProvider<T> {
 
-  private static final Log LOG =
-      LogFactory.getLog(RequestHedgingRMFailoverProxyProvider.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(RequestHedgingRMFailoverProxyProvider.class);
 
   private volatile String successfulProxy = null;
   private ProxyInfo<T> wrappedProxy = null;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/DirectTimelineWriter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/DirectTimelineWriter.java
index abc2a28..7fea1db 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/DirectTimelineWriter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/DirectTimelineWriter.java
@@ -21,8 +21,8 @@ package org.apache.hadoop.yarn.client.api.impl;
 import java.io.IOException;
 import java.net.URI;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -42,8 +42,8 @@ import com.sun.jersey.api.client.Client;
 @Unstable
 public class DirectTimelineWriter extends TimelineWriter{
 
-  private static final Log LOG = LogFactory
-      .getLog(DirectTimelineWriter.class);
+  private static final Logger LOG = LoggerFactory
+      .getLogger(DirectTimelineWriter.class);
 
   public DirectTimelineWriter(UserGroupInformation authUgi,
       Client client, URI resURI) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java
index c00a0b8..bc5e987 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java
@@ -38,8 +38,8 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
@@ -78,8 +78,8 @@ import com.sun.jersey.api.client.Client;
 @Unstable
 public class FileSystemTimelineWriter extends TimelineWriter{
 
-  private static final Log LOG = LogFactory
-      .getLog(FileSystemTimelineWriter.class);
+  private static final Logger LOG = LoggerFactory
+      .getLogger(FileSystemTimelineWriter.class);
 
   // App log directory must be readable by group so server can access logs
   // and writable by group so it can be deleted by server
@@ -267,7 +267,7 @@ public class FileSystemTimelineWriter extends TimelineWriter{
       LOG.debug("Closing cache");
       logFDsCache.flush();
     }
-    IOUtils.cleanup(LOG, logFDsCache, fs);
+    IOUtils.cleanupWithLogger(LOG, logFDsCache, fs);
   }
 
   @Override
@@ -355,8 +355,8 @@ public class FileSystemTimelineWriter extends TimelineWriter{
 
     public void close() {
       if (stream != null) {
-        IOUtils.cleanup(LOG, jsonGenerator);
-        IOUtils.cleanup(LOG, stream);
+        IOUtils.cleanupWithLogger(LOG, jsonGenerator);
+        IOUtils.cleanupWithLogger(LOG, stream);
         stream = null;
         jsonGenerator = null;
       }
@@ -559,7 +559,7 @@ public class FileSystemTimelineWriter extends TimelineWriter{
           flush();
         } catch (Exception e) {
           if (LOG.isDebugEnabled()) {
-            LOG.debug(e);
+            LOG.debug(e.toString());
           }
         }
       }
@@ -636,7 +636,7 @@ public class FileSystemTimelineWriter extends TimelineWriter{
         try {
           cleanInActiveFDs();
         } catch (Exception e) {
-          LOG.warn(e);
+          LOG.warn(e.toString());
         }
       }
     }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
index 88fccd9..7eb4ec1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
@@ -28,8 +28,8 @@ import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.GnuParser;
 import org.apache.commons.cli.HelpFormatter;
 import org.apache.commons.cli.Options;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.conf.Configuration;
@@ -58,7 +58,8 @@ import com.sun.jersey.api.client.Client;
 @Evolving
 public class TimelineClientImpl extends TimelineClient {
 
-  private static final Log LOG = LogFactory.getLog(TimelineClientImpl.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TimelineClientImpl.class);
   private static final ObjectMapper MAPPER = new ObjectMapper();
   private static final String RESOURCE_URI_STR_V1 = "/ws/v1/timeline/";
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineConnector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineConnector.java
index 9d084d7..ca0f307 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineConnector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineConnector.java
@@ -34,8 +34,8 @@ import javax.net.ssl.HostnameVerifier;
 import javax.net.ssl.HttpsURLConnection;
 import javax.net.ssl.SSLSocketFactory;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -73,7 +73,8 @@ import com.sun.jersey.client.urlconnection.URLConnectionClientHandler;
 public class TimelineConnector extends AbstractService {
 
   private static final Joiner JOINER = Joiner.on("");
-  private static final Log LOG = LogFactory.getLog(TimelineConnector.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TimelineConnector.class);
   public final static int DEFAULT_SOCKET_TIMEOUT = 1 * 60 * 1000; // 1 minute
 
   private SSLFactory sslFactory;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineReaderClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineReaderClientImpl.java
index db53f93..475864e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineReaderClientImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineReaderClientImpl.java
@@ -19,8 +19,8 @@ package org.apache.hadoop.yarn.client.api.impl;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.sun.jersey.core.util.MultivaluedMapImpl;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -53,8 +53,8 @@ import static org.apache.hadoop.yarn.util.StringHelper.PATH_JOINER;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class TimelineReaderClientImpl extends TimelineReaderClient {
-  private static final Log LOG =
-      LogFactory.getLog(TimelineReaderClientImpl.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TimelineReaderClientImpl.class);
 
   private static final String RESOURCE_URI_STR_V2 = "/ws/v2/timeline/";
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineV2ClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineV2ClientImpl.java
index c7c6587..e086e27 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineV2ClientImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineV2ClientImpl.java
@@ -36,8 +36,8 @@ import java.util.concurrent.TimeUnit;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.MultivaluedMap;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.Text;
@@ -64,7 +64,8 @@ import com.sun.jersey.core.util.MultivaluedMapImpl;
  *
  */
 public class TimelineV2ClientImpl extends TimelineV2Client {
-  private static final Log LOG = LogFactory.getLog(TimelineV2ClientImpl.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TimelineV2ClientImpl.class);
 
   private static final String RESOURCE_URI_STR_V2 = "/ws/v2/timeline/";
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineWriter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineWriter.java
index b3a886b..f52479d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineWriter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineWriter.java
@@ -26,8 +26,8 @@ import java.net.URI;
 import java.security.PrivilegedExceptionAction;
 import javax.ws.rs.core.MediaType;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -52,8 +52,8 @@ import com.sun.jersey.api.client.WebResource;
 @Unstable
 public abstract class TimelineWriter implements Flushable {
 
-  private static final Log LOG = LogFactory
-      .getLog(TimelineWriter.class);
+  private static final Logger LOG = LoggerFactory
+      .getLogger(TimelineWriter.class);
 
   private UserGroupInformation authUgi;
   private Client client;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
index 4a78a22..333faa5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
@@ -25,8 +25,10 @@ import java.util.Map;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.LinkedBlockingQueue;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.slf4j.Marker;
+import org.slf4j.MarkerFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.service.AbstractService;
@@ -46,7 +48,10 @@ import com.google.common.annotations.VisibleForTesting;
 @Evolving
 public class AsyncDispatcher extends AbstractService implements Dispatcher {
 
-  private static final Log LOG = LogFactory.getLog(AsyncDispatcher.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(AsyncDispatcher.class);
+  private static final Marker FATAL =
+      MarkerFactory.getMarker("FATAL");
 
   private final BlockingQueue<Event> eventQueue;
   private volatile int lastEventQueueSizeLogged = 0;
@@ -200,7 +205,7 @@ public class AsyncDispatcher extends AbstractService implements Dispatcher {
       }
     } catch (Throwable t) {
       //TODO Maybe log the state of the queue
-      LOG.fatal("Error in dispatcher thread", t);
+      LOG.error(FATAL, "Error in dispatcher thread", t);
       // If serviceStop is called, we should exit this thread gracefully.
       if (exitOnDispatchException
           && (ShutdownHookManager.get().isShutdownInProgress()) == false
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/EventDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/EventDispatcher.java
index 7c7a87b..ccd8e2e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/EventDispatcher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/EventDispatcher.java
@@ -19,8 +19,10 @@
 package org.apache.hadoop.yarn.event;
 
 import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.slf4j.Marker;
+import org.slf4j.MarkerFactory;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.util.ShutdownHookManager;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
@@ -46,7 +48,10 @@ public class EventDispatcher<T extends Event> extends
   private volatile boolean stopped = false;
   private boolean shouldExitOnError = true;
 
-  private static final Log LOG = LogFactory.getLog(EventDispatcher.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(EventDispatcher.class);
+  private static final Marker FATAL =
+      MarkerFactory.getMarker("FATAL");
 
   private final class EventProcessor implements Runnable {
     @Override
@@ -72,7 +77,7 @@ public class EventDispatcher<T extends Event> extends
             LOG.warn("Exception during shutdown: ", t);
             break;
           }
-          LOG.fatal("Error in handling event type " + event.getType()
+          LOG.error(FATAL, "Error in handling event type " + event.getType()
               + " to the Event Dispatcher", t);
           if (shouldExitOnError
               && !ShutdownHookManager.get().isShutdownInProgress()) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
index 07c5e23..c28ea33 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
@@ -27,8 +27,8 @@ import java.net.InetSocketAddress;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
@@ -38,8 +38,8 @@ import org.apache.hadoop.yarn.factories.RpcClientFactory;
 @Private
 public class RpcClientFactoryPBImpl implements RpcClientFactory {
 
-  private static final Log LOG = LogFactory
-      .getLog(RpcClientFactoryPBImpl.class);
+  private static final Logger LOG = LoggerFactory
+      .getLogger(RpcClientFactoryPBImpl.class);
 
   private static final String PB_IMPL_PACKAGE_SUFFIX = "impl.pb.client";
   private static final String PB_IMPL_CLASS_SUFFIX = "PBClientImpl";
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
index ec9a5f2..677850c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
@@ -26,8 +26,8 @@ import java.net.InetSocketAddress;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
@@ -43,7 +43,8 @@ import com.google.protobuf.BlockingService;
 @Private
 public class RpcServerFactoryPBImpl implements RpcServerFactory {
 
-  private static final Log LOG = LogFactory.getLog(RpcServerFactoryPBImpl.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(RpcServerFactoryPBImpl.class);
   private static final String PROTO_GEN_PACKAGE_NAME = "org.apache.hadoop.yarn.proto";
   private static final String PROTO_GEN_CLASS_SUFFIX = "Service";
   private static final String PB_IMPL_PACKAGE_SUFFIX = "impl.pb.service";
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/HadoopYarnProtoRPC.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/HadoopYarnProtoRPC.java
index 040917d..d9d999f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/HadoopYarnProtoRPC.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/HadoopYarnProtoRPC.java
@@ -20,8 +20,8 @@ package org.apache.hadoop.yarn.ipc;
 
 import java.net.InetSocketAddress;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ipc.Server;
@@ -39,7 +39,8 @@ import org.apache.hadoop.yarn.factory.providers.RpcFactoryProvider;
 @InterfaceAudience.LimitedPrivate({ "MapReduce", "YARN" })
 public class HadoopYarnProtoRPC extends YarnRPC {
 
-  private static final Log LOG = LogFactory.getLog(HadoopYarnProtoRPC.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(HadoopYarnProtoRPC.class);
 
   @Override
   public Object getProxy(Class protocol, InetSocketAddress addr,
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/YarnRPC.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/YarnRPC.java
index 7f9fb77..436445f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/YarnRPC.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/YarnRPC.java
@@ -20,8 +20,8 @@ package org.apache.hadoop.yarn.ipc;
 
 import java.net.InetSocketAddress;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ipc.Server;
@@ -35,7 +35,8 @@ import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
  */
 @InterfaceAudience.LimitedPrivate({ "MapReduce", "YARN" })
 public abstract class YarnRPC {
-  private static final Log LOG = LogFactory.getLog(YarnRPC.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(YarnRPC.class);
   
   public abstract Object getProxy(Class protocol, InetSocketAddress addr,
       Configuration conf);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogDeletionService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogDeletionService.java
index 841b870..90395aa 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogDeletionService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogDeletionService.java
@@ -22,8 +22,8 @@ import java.io.IOException;
 import java.util.Timer;
 import java.util.TimerTask;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
@@ -50,7 +50,8 @@ import com.google.common.annotations.VisibleForTesting;
  */
 @InterfaceAudience.LimitedPrivate({"yarn", "mapreduce"})
 public class AggregatedLogDeletionService extends AbstractService {
-  private static final Log LOG = LogFactory.getLog(AggregatedLogDeletionService.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(AggregatedLogDeletionService.class);
   
   private Timer timer = null;
   private long checkIntervalMsecs;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileControllerFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileControllerFactory.java
index cf40209..8339c1a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileControllerFactory.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileControllerFactory.java
@@ -29,8 +29,8 @@ import java.util.List;
 import java.util.Map;
 import java.util.regex.Pattern;
 import org.apache.commons.lang3.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
@@ -51,7 +51,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 @Unstable
 public class LogAggregationFileControllerFactory {
 
-  private static final Log LOG = LogFactory.getLog(
+  private static final Logger LOG = LoggerFactory.getLogger(
       LogAggregationFileControllerFactory.class);
   private final Pattern p = Pattern.compile(
       "^[A-Za-z_]+[A-Za-z0-9_]*$");
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/tfile/LogAggregationTFileController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/tfile/LogAggregationTFileController.java
index b3103d2..108595b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/tfile/LogAggregationTFileController.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/tfile/LogAggregationTFileController.java
@@ -27,8 +27,8 @@ import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.commons.math3.util.Pair;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
@@ -67,7 +67,7 @@ import org.apache.hadoop.yarn.webapp.view.HtmlBlock.Block;
 public class LogAggregationTFileController
     extends LogAggregationFileController {
 
-  private static final Log LOG = LogFactory.getLog(
+  private static final Logger LOG = LoggerFactory.getLogger(
       LogAggregationTFileController.class);
 
   private LogWriter writer;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
index 19254c1..e5b3d63 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
@@ -37,8 +37,8 @@ import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
 
 import org.apache.commons.lang3.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.service.AbstractService;
@@ -63,7 +63,8 @@ import com.google.common.collect.ImmutableSet;
 
 @Private
 public class CommonNodeLabelsManager extends AbstractService {
-  protected static final Log LOG = LogFactory.getLog(CommonNodeLabelsManager.class);
+  protected static final Logger LOG =
+      LoggerFactory.getLogger(CommonNodeLabelsManager.class);
   public static final Set<String> EMPTY_STRING_SET = Collections
       .unmodifiableSet(new HashSet<String>(0));
   public static final Set<NodeLabel> EMPTY_NODELABEL_SET = Collections
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/FileSystemNodeLabelsStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/FileSystemNodeLabelsStore.java
index 6c459c2..32e2268 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/FileSystemNodeLabelsStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/FileSystemNodeLabelsStore.java
@@ -18,8 +18,8 @@
 
 package org.apache.hadoop.yarn.nodelabels;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -46,8 +46,8 @@ import java.util.Set;
 public class FileSystemNodeLabelsStore
     extends AbstractFSNodeStore<CommonNodeLabelsManager>
     implements NodeLabelsStore {
-  protected static final Log LOG =
-      LogFactory.getLog(FileSystemNodeLabelsStore.class);
+  protected static final Logger LOG =
+      LoggerFactory.getLogger(FileSystemNodeLabelsStore.class);
 
   protected static final String DEFAULT_DIR_NAME = "node-labels";
   protected static final String MIRROR_FILENAME = "nodelabel.mirror";
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NonAppendableFSNodeLabelStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NonAppendableFSNodeLabelStore.java
index 6747037..9e90f33 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NonAppendableFSNodeLabelStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NonAppendableFSNodeLabelStore.java
@@ -18,8 +18,8 @@
 
 package org.apache.hadoop.yarn.nodelabels;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.yarn.api.records.NodeId;
@@ -39,8 +39,8 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
  * Store implementation for Non Appendable File Store.
  */
 public class NonAppendableFSNodeLabelStore extends FileSystemNodeLabelsStore {
-  protected static final Log
-      LOG = LogFactory.getLog(NonAppendableFSNodeLabelStore.class);
+  protected static final Logger LOG =
+      LoggerFactory.getLogger(NonAppendableFSNodeLabelStore.class);
 
   @Override
   public void close() throws IOException {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/AbstractFSNodeStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/AbstractFSNodeStore.java
index 7127d11..d0fb837 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/AbstractFSNodeStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/AbstractFSNodeStore.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.yarn.nodelabels.store;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -40,7 +40,8 @@ import java.io.IOException;
  */
 public abstract class AbstractFSNodeStore<M> {
 
-  protected static final Log LOG = LogFactory.getLog(AbstractFSNodeStore.class);
+  protected static final Logger LOG =
+      LoggerFactory.getLogger(AbstractFSNodeStore.class);
 
   private StoreType storeType;
   private FSDataOutputStream editlogOs;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/AMRMTokenIdentifier.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/AMRMTokenIdentifier.java
index ed83b06..591c09f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/AMRMTokenIdentifier.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/AMRMTokenIdentifier.java
@@ -25,8 +25,8 @@ import java.io.DataOutput;
 import java.io.IOException;
 
 import com.google.protobuf.InvalidProtocolBufferException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
@@ -51,7 +51,8 @@ import com.google.protobuf.TextFormat;
 @Evolving
 public class AMRMTokenIdentifier extends TokenIdentifier {
 
-  private static final Log LOG = LogFactory.getLog(AMRMTokenIdentifier.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(AMRMTokenIdentifier.class);
 
   public static final Text KIND_NAME = new Text("YARN_AM_RM_TOKEN");
   private AMRMTokenIdentifierProto proto;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/AMRMTokenSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/AMRMTokenSelector.java
index be3701d..a041334 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/AMRMTokenSelector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/AMRMTokenSelector.java
@@ -20,8 +20,8 @@ package org.apache.hadoop.yarn.security;
 
 import java.util.Collection;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.io.Text;
@@ -34,8 +34,8 @@ import org.apache.hadoop.security.token.TokenSelector;
 public class AMRMTokenSelector implements
     TokenSelector<AMRMTokenIdentifier> {
 
-  private static final Log LOG = LogFactory
-      .getLog(AMRMTokenSelector.class);
+  private static final Logger LOG = LoggerFactory
+      .getLogger(AMRMTokenSelector.class);
 
   @SuppressWarnings("unchecked")
   public Token<AMRMTokenIdentifier> selectToken(Text service,
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/AdminACLsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/AdminACLsManager.java
index a386123..949c6a2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/AdminACLsManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/AdminACLsManager.java
@@ -20,8 +20,8 @@ package org.apache.hadoop.yarn.security;
 
 import java.io.IOException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
@@ -36,7 +36,8 @@ public class AdminACLsManager {
   /**
    * Log object for this class
    */
-  static Log LOG = LogFactory.getLog(AdminACLsManager.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(AdminACLsManager.class);
 
   /**
    * The current user at the time of object creation
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java
index 8dea65f..7852b9c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java
@@ -28,8 +28,8 @@ import java.util.HashSet;
 import java.util.Set;
 
 import com.google.protobuf.InvalidProtocolBufferException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
@@ -68,7 +68,8 @@ import com.google.protobuf.TextFormat;
 @Evolving
 public class ContainerTokenIdentifier extends TokenIdentifier {
 
-  private static Log LOG = LogFactory.getLog(ContainerTokenIdentifier.class);
+  private final static Logger LOG =
+      LoggerFactory.getLogger(ContainerTokenIdentifier.class);
 
   public static final Text KIND = new Text("ContainerToken");
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenSelector.java
index a1c0a91..65c59ac 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenSelector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenSelector.java
@@ -20,8 +20,8 @@ package org.apache.hadoop.yarn.security;
 
 import java.util.Collection;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Stable;
 import org.apache.hadoop.io.Text;
@@ -34,8 +34,8 @@ import org.apache.hadoop.security.token.TokenSelector;
 public class ContainerTokenSelector implements
     TokenSelector<ContainerTokenIdentifier> {
 
-  private static final Log LOG = LogFactory
-      .getLog(ContainerTokenSelector.class);
+  private static final Logger LOG = LoggerFactory
+      .getLogger(ContainerTokenSelector.class);
 
   @SuppressWarnings("unchecked")
   @Override
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/NMTokenIdentifier.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/NMTokenIdentifier.java
index cd1ad03..1a7323f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/NMTokenIdentifier.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/NMTokenIdentifier.java
@@ -25,8 +25,8 @@ import java.io.DataOutput;
 import java.io.IOException;
 
 import com.google.protobuf.InvalidProtocolBufferException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.io.IOUtils;
@@ -46,7 +46,8 @@ import com.google.protobuf.TextFormat;
 @Evolving
 public class NMTokenIdentifier extends TokenIdentifier {
 
-  private static Log LOG = LogFactory.getLog(NMTokenIdentifier.class);
+  private final static Logger LOG =
+      LoggerFactory.getLogger(NMTokenIdentifier.class);
 
   public static final Text KIND = new Text("NMToken");
   
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/NMTokenSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/NMTokenSelector.java
index 43899b5..ecc65c5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/NMTokenSelector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/NMTokenSelector.java
@@ -20,8 +20,8 @@ package org.apache.hadoop.yarn.security;
 
 import java.util.Collection;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
@@ -30,8 +30,8 @@ import org.apache.hadoop.security.token.TokenSelector;
 public class NMTokenSelector implements
     TokenSelector<NMTokenIdentifier> {
 
-  private static final Log LOG = LogFactory
-      .getLog(NMTokenSelector.class);
+  private static final Logger LOG = LoggerFactory
+      .getLogger(NMTokenSelector.class);
 
   @SuppressWarnings("unchecked")
   @Override
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/YarnAuthorizationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/YarnAuthorizationProvider.java
index 9ae4bd7..7f39fa4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/YarnAuthorizationProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/YarnAuthorizationProvider.java
@@ -18,8 +18,8 @@
 
 package org.apache.hadoop.yarn.security;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
@@ -41,7 +41,8 @@ import java.util.List;
 @Unstable
 public abstract class YarnAuthorizationProvider {
 
-  private static final Log LOG = LogFactory.getLog(YarnAuthorizationProvider.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(YarnAuthorizationProvider.class);
 
   private static YarnAuthorizationProvider authorizer = null;
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientToAMTokenSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientToAMTokenSelector.java
index e102fb2..5718965 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientToAMTokenSelector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientToAMTokenSelector.java
@@ -20,8 +20,8 @@ package org.apache.hadoop.yarn.security.client;
 
 import java.util.Collection;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
@@ -30,8 +30,8 @@ import org.apache.hadoop.security.token.TokenSelector;
 public class ClientToAMTokenSelector implements
     TokenSelector<ClientToAMTokenIdentifier> {
 
-  private static final Log LOG = LogFactory
-      .getLog(ClientToAMTokenSelector.class);
+  private static final Logger LOG = LoggerFactory
+      .getLogger(ClientToAMTokenSelector.class);
 
   @SuppressWarnings("unchecked")
   public Token<ClientToAMTokenIdentifier> selectToken(Text service,
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/RMDelegationTokenSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/RMDelegationTokenSelector.java
index 9ab2d76..cfeb62f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/RMDelegationTokenSelector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/RMDelegationTokenSelector.java
@@ -20,8 +20,8 @@ package org.apache.hadoop.yarn.security.client;
 
 import java.util.Collection;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Stable;
 import org.apache.hadoop.io.Text;
@@ -34,8 +34,8 @@ import org.apache.hadoop.security.token.TokenSelector;
 public class RMDelegationTokenSelector implements
     TokenSelector<RMDelegationTokenIdentifier> {
 
-  private static final Log LOG = LogFactory
-      .getLog(RMDelegationTokenSelector.class);
+  private static final Logger LOG = LoggerFactory
+      .getLogger(RMDelegationTokenSelector.class);
 
   private boolean checkService(Text service,
       Token<? extends TokenIdentifier> token) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/TimelineDelegationTokenSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/TimelineDelegationTokenSelector.java
index 252dfa1..b75f288 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/TimelineDelegationTokenSelector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/TimelineDelegationTokenSelector.java
@@ -20,8 +20,8 @@ package org.apache.hadoop.yarn.security.client;
 
 import java.util.Collection;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.io.Text;
@@ -34,8 +34,8 @@ import org.apache.hadoop.security.token.TokenSelector;
 public class TimelineDelegationTokenSelector
     implements TokenSelector<TimelineDelegationTokenIdentifier> {
 
-  private static final Log LOG = LogFactory
-      .getLog(TimelineDelegationTokenSelector.class);
+  private static final Logger LOG = LoggerFactory
+      .getLogger(TimelineDelegationTokenSelector.class);
 
   @SuppressWarnings("unchecked")
   public Token<TimelineDelegationTokenIdentifier> selectToken(Text service,
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/security/ApplicationACLsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/security/ApplicationACLsManager.java
index 97b4163..8cf34e9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/security/ApplicationACLsManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/security/ApplicationACLsManager.java
@@ -23,8 +23,8 @@ import java.util.Map.Entry;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.AccessControlException;
@@ -40,8 +40,8 @@ import com.google.common.annotations.VisibleForTesting;
 @InterfaceAudience.Private
 public class ApplicationACLsManager {
 
-  private static final Log LOG = LogFactory
-      .getLog(ApplicationACLsManager.class);
+  private static final Logger LOG = LoggerFactory
+      .getLogger(ApplicationACLsManager.class);
 
   private static AccessControlList DEFAULT_YARN_APP_ACL 
     = new AccessControlList(YarnConfiguration.DEFAULT_YARN_APP_ACL);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AbstractLivelinessMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AbstractLivelinessMonitor.java
index 638128e..688c7c8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AbstractLivelinessMonitor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AbstractLivelinessMonitor.java
@@ -22,8 +22,8 @@ import java.util.HashMap;
 import java.util.Iterator;
 import java.util.Map;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.service.AbstractService;
@@ -37,7 +37,8 @@ import org.apache.hadoop.service.AbstractService;
 @Evolving
 public abstract class AbstractLivelinessMonitor<O> extends AbstractService {
 
-  private static final Log LOG = LogFactory.getLog(AbstractLivelinessMonitor.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(AbstractLivelinessMonitor.class);
 
   //thread which runs periodically to see the last time since a heartbeat is
   //received.
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AdHocLogDumper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AdHocLogDumper.java
index f33ddcd..74ee5fc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AdHocLogDumper.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AdHocLogDumper.java
@@ -18,25 +18,34 @@
 
 package org.apache.hadoop.yarn.util;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.log4j.AppenderSkeleton;
+import org.apache.log4j.FileAppender;
+import org.apache.log4j.Layout;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+import org.apache.log4j.LogManager;
+import org.apache.log4j.PatternLayout;
+import org.apache.log4j.Priority;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
-import org.apache.log4j.*;
 
 import com.google.common.annotations.VisibleForTesting;
 
 import java.io.File;
 import java.io.IOException;
-import java.util.*;
+import java.util.Enumeration;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.Timer;
+import java.util.TimerTask;
 
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 public class AdHocLogDumper {
 
-  private static final Log LOG = LogFactory.getLog(AdHocLogDumper.class);
+  private static final Logger LOG =
+      LogManager.getLogger(AdHocLogDumper.class);
 
   private String name;
   private String targetFilename;
@@ -54,58 +63,53 @@ public class AdHocLogDumper {
 
   public void dumpLogs(String level, int timePeriod)
       throws YarnRuntimeException, IOException {
-    synchronized (lock) {
+    synchronized (lock){
       if (logFlag) {
         LOG.info("Attempt to dump logs when appender is already running");
         throw new YarnRuntimeException("Appender is already dumping logs");
       }
       Level targetLevel = Level.toLevel(level);
-      Log log = LogFactory.getLog(name);
+      Logger logger = LogManager.getLogger(name);
       appenderLevels.clear();
-      if (log instanceof Log4JLogger) {
-        Logger packageLogger = ((Log4JLogger) log).getLogger();
-        currentLogLevel = packageLogger.getLevel();
-        Level currentEffectiveLevel = packageLogger.getEffectiveLevel();
+      currentLogLevel = logger.getLevel();
+      Level currentEffectiveLevel = logger.getEffectiveLevel();
 
-        // make sure we can create the appender first
-        Layout layout = new PatternLayout("%d{ISO8601} %p %c: %m%n");
-        FileAppender fApp;
-        File file =
-            new File(System.getProperty("yarn.log.dir"), targetFilename);
-        try {
-          fApp = new FileAppender(layout, file.getAbsolutePath(), false);
-        } catch (IOException ie) {
-          LOG
-            .warn(
-              "Error creating file, can't dump logs to "
-                  + file.getAbsolutePath(), ie);
-          throw ie;
-        }
-        fApp.setName(AdHocLogDumper.AD_HOC_DUMPER_APPENDER);
-        fApp.setThreshold(targetLevel);
+      // make sure we can create the appender first
+      Layout layout = new PatternLayout("%d{ISO8601} %p %c: %m%n");
+      FileAppender fApp;
+      File file =
+          new File(System.getProperty("yarn.log.dir"), targetFilename);
+      try {
+        fApp = new FileAppender(layout, file.getAbsolutePath(), false);
+      } catch (IOException ie) {
+        LOG.warn("Error creating file, can't dump logs to "
+            + file.getAbsolutePath(), ie);
+        throw ie;
+      }
+      fApp.setName(AdHocLogDumper.AD_HOC_DUMPER_APPENDER);
+      fApp.setThreshold(targetLevel);
 
-        // get current threshold of all appenders and set it to the effective
-        // level
-        for (Enumeration appenders = Logger.getRootLogger().getAllAppenders(); appenders
-          .hasMoreElements();) {
-          Object obj = appenders.nextElement();
-          if (obj instanceof AppenderSkeleton) {
-            AppenderSkeleton appender = (AppenderSkeleton) obj;
-            appenderLevels.put(appender.getName(), appender.getThreshold());
-            appender.setThreshold(currentEffectiveLevel);
-          }
+      // get current threshold of all appenders and set it to the effective
+      // level
+      for (Enumeration appenders = Logger.getRootLogger().getAllAppenders();
+          appenders.hasMoreElements();) {
+        Object obj = appenders.nextElement();
+        if (obj instanceof AppenderSkeleton) {
+          AppenderSkeleton appender = (AppenderSkeleton) obj;
+          appenderLevels.put(appender.getName(), appender.getThreshold());
+          appender.setThreshold(currentEffectiveLevel);
         }
+      }
 
-        packageLogger.addAppender(fApp);
-        LOG.info("Dumping adhoc logs for " + name + " to "
-            + file.getAbsolutePath() + " for " + timePeriod + " milliseconds");
-        packageLogger.setLevel(targetLevel);
-        logFlag = true;
+      logger.addAppender(fApp);
+      LOG.info("Dumping adhoc logs for " + name + " to "
+          + file.getAbsolutePath() + " for " + timePeriod + " milliseconds");
+      logger.setLevel(targetLevel);
+      logFlag = true;
 
-        TimerTask restoreLogLevel = new RestoreLogLevel();
-        Timer restoreLogLevelTimer = new Timer();
-        restoreLogLevelTimer.schedule(restoreLogLevel, timePeriod);
-      }
+      TimerTask restoreLogLevel = new RestoreLogLevel();
+      Timer restoreLogLevelTimer = new Timer();
+      restoreLogLevelTimer.schedule(restoreLogLevel, timePeriod);
     }
   }
 
@@ -117,22 +121,19 @@ public class AdHocLogDumper {
   class RestoreLogLevel extends TimerTask {
     @Override
     public void run() {
-      Log log = LogFactory.getLog(name);
-      if (log instanceof Log4JLogger) {
-        Logger logger = ((Log4JLogger) log).getLogger();
-        logger.removeAppender(AD_HOC_DUMPER_APPENDER);
-        logger.setLevel(currentLogLevel);
-        for (Enumeration appenders = Logger.getRootLogger().getAllAppenders(); appenders
-          .hasMoreElements();) {
-          Object obj = appenders.nextElement();
-          if (obj instanceof AppenderSkeleton) {
-            AppenderSkeleton appender = (AppenderSkeleton) obj;
-            appender.setThreshold(appenderLevels.get(appender.getName()));
-          }
+      Logger logger = LogManager.getLogger(name);
+      logger.removeAppender(AD_HOC_DUMPER_APPENDER);
+      logger.setLevel(currentLogLevel);
+      for (Enumeration appenders = Logger.getRootLogger().getAllAppenders();
+          appenders.hasMoreElements();) {
+        Object obj = appenders.nextElement();
+        if (obj instanceof AppenderSkeleton) {
+          AppenderSkeleton appender = (AppenderSkeleton) obj;
+          appender.setThreshold(appenderLevels.get(appender.getName()));
         }
-        logFlag = false;
-        LOG.info("Done dumping adhoc logs for " + name);
       }
+      logFlag = false;
+      LOG.info("Done dumping adhoc logs for " + name);
     }
   }
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/FSDownload.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/FSDownload.java
index d203f65..08a5724 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/FSDownload.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/FSDownload.java
@@ -32,8 +32,8 @@ import java.util.regex.Pattern;
 
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.io.IOUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
@@ -66,7 +66,8 @@ import org.apache.hadoop.yarn.exceptions.YarnException;
 @LimitedPrivate({"YARN", "MapReduce"})
 public class FSDownload implements Callable<Path> {
 
-  private static final Log LOG = LogFactory.getLog(FSDownload.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(FSDownload.class);
 
   private FileContext files;
   private final UserGroupInformation userUgi;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
index e8b6533..0bfd40b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
@@ -44,8 +44,8 @@ import org.apache.commons.io.filefilter.DirectoryFileFilter;
 import org.apache.commons.io.filefilter.RegexFileFilter;
 import org.apache.commons.lang3.ArrayUtils;
 import org.apache.commons.lang3.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -61,8 +61,8 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 @InterfaceStability.Unstable
 public class ProcfsBasedProcessTree extends ResourceCalculatorProcessTree {
 
-  static final Log LOG = LogFactory
-      .getLog(ProcfsBasedProcessTree.class);
+  private static final Logger LOG = LoggerFactory
+      .getLogger(ProcfsBasedProcessTree.class);
 
   private static final String PROCFS = "/proc/";
 
@@ -264,7 +264,7 @@ public class ProcfsBasedProcessTree extends ResourceCalculatorProcessTree {
         }
       }
 
-      LOG.debug(this);
+      LOG.debug(this.toString());
 
       if (smapsEnabled) {
         // Update smaps info
@@ -409,8 +409,8 @@ public class ProcfsBasedProcessTree extends ResourceCalculatorProcessTree {
                     + ", total : " + (total * KB_TO_BYTES));
               }
             }
+            LOG.debug(procMemInfo.toString());
           }
-          LOG.debug(procMemInfo);
         }
       }
     }
@@ -807,11 +807,11 @@ public class ProcfsBasedProcessTree extends ResourceCalculatorProcessTree {
         }
       }
     } catch (FileNotFoundException f) {
-      LOG.error(f);
+      LOG.error(f.toString());
     } catch (IOException e) {
-      LOG.error(e);
+      LOG.error(e.toString());
     } catch (Throwable t) {
-      LOG.error(t);
+      LOG.error(t.toString());
     } finally {
       IOUtils.closeQuietly(in);
     }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorPlugin.java
index fd63d98..5fcc474 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorPlugin.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorPlugin.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.yarn.util;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -33,8 +33,8 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 @InterfaceAudience.LimitedPrivate({"YARN", "MAPREDUCE"})
 @InterfaceStability.Unstable
 public class ResourceCalculatorPlugin extends Configured {
-  private static final Log LOG =
-      LogFactory.getLog(ResourceCalculatorPlugin.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ResourceCalculatorPlugin.class);
 
   private final SysInfo sys;
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorProcessTree.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorProcessTree.java
index c581b83..9bbec5f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorProcessTree.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorProcessTree.java
@@ -20,8 +20,8 @@ package org.apache.hadoop.yarn.util;
 
 import java.lang.reflect.Constructor;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
@@ -38,8 +38,8 @@ import org.apache.hadoop.yarn.exceptions.YarnException;
 @Public
 @Evolving
 public abstract class ResourceCalculatorProcessTree extends Configured {
-  static final Log LOG = LogFactory
-      .getLog(ResourceCalculatorProcessTree.class);
+  static final Logger LOG = LoggerFactory
+      .getLogger(ResourceCalculatorProcessTree.class);
   public static final int UNAVAILABLE = -1;
 
   /**
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Times.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Times.java
index 3c41558..b6604ee 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Times.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Times.java
@@ -22,13 +22,14 @@ import java.text.ParseException;
 import java.text.SimpleDateFormat;
 import java.util.Date;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 
 @Private
 public class Times {
-  private static final Log LOG = LogFactory.getLog(Times.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(Times.class);
 
   static final String ISO8601DATEFORMAT = "yyyy-MM-dd'T'HH:mm:ss.SSSZ";
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/WindowsBasedProcessTree.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/WindowsBasedProcessTree.java
index b4e5456..d7a92f0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/WindowsBasedProcessTree.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/WindowsBasedProcessTree.java
@@ -23,8 +23,8 @@ import java.math.BigInteger;
 import java.util.HashMap;
 import java.util.Map;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.util.CpuTimeTracker;
 import org.apache.hadoop.util.Shell;
@@ -34,8 +34,8 @@ import org.apache.hadoop.util.StringUtils;
 @Private
 public class WindowsBasedProcessTree extends ResourceCalculatorProcessTree {
 
-  static final Log LOG = LogFactory
-      .getLog(WindowsBasedProcessTree.class);
+  private static final Logger LOG = LoggerFactory
+      .getLogger(WindowsBasedProcessTree.class);
 
   static class ProcessInfo {
     String pid; // process pid
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/YarnVersionInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/YarnVersionInfo.java
index e515321..c6399d9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/YarnVersionInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/YarnVersionInfo.java
@@ -18,8 +18,8 @@
 
 package org.apache.hadoop.yarn.util;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.util.VersionInfo;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -30,7 +30,8 @@ import org.apache.hadoop.classification.InterfaceStability;
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 public class YarnVersionInfo extends VersionInfo {
-  private static final Log LOG = LogFactory.getLog(YarnVersionInfo.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(YarnVersionInfo.class);
 
   private static YarnVersionInfo YARN_VERSION_INFO = new YarnVersionInfo();
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
index 9a3f703..61b1a87 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
@@ -17,8 +17,8 @@
 */
 package org.apache.hadoop.yarn.util.resource;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.records.Resource;
@@ -26,8 +26,8 @@ import org.apache.hadoop.yarn.api.records.Resource;
 @Private
 @Unstable
 public class DefaultResourceCalculator extends ResourceCalculator {
-  private static final Log LOG =
-      LogFactory.getLog(DefaultResourceCalculator.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(DefaultResourceCalculator.class);
 
   @Override
   public int compare(Resource unused, Resource lhs, Resource rhs,
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
index 17244e9..16176ef 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
@@ -17,8 +17,8 @@
 */
 package org.apache.hadoop.yarn.util.resource;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.records.Resource;
@@ -53,7 +53,8 @@ import java.util.Arrays;
 @Private
 @Unstable
 public class DominantResourceCalculator extends ResourceCalculator {
-  static final Log LOG = LogFactory.getLog(DominantResourceCalculator.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(DominantResourceCalculator.class);
 
   public DominantResourceCalculator() {
   }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
index 40d8d38..9b96fd7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
@@ -18,8 +18,8 @@
 
 package org.apache.hadoop.yarn.util.resource;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
@@ -37,8 +37,8 @@ public class Resources {
 
   private enum RoundingDirection { UP, DOWN }
 
-  private static final Log LOG =
-      LogFactory.getLog(Resources.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(Resources.class);
 
   /**
    * Helper class to create a resource with a fixed value for all resource
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/GenericExceptionHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/GenericExceptionHandler.java
index 8946e2d..0ada51a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/GenericExceptionHandler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/GenericExceptionHandler.java
@@ -28,8 +28,8 @@ import javax.ws.rs.ext.ExceptionMapper;
 import javax.ws.rs.ext.Provider;
 import javax.xml.bind.UnmarshalException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.authorize.AuthorizationException;
@@ -44,8 +44,8 @@ import com.google.inject.Singleton;
 @Singleton
 @Provider
 public class GenericExceptionHandler implements ExceptionMapper<Exception> {
-  public static final Log LOG = LogFactory
-      .getLog(GenericExceptionHandler.class);
+  public static final Logger LOG = LoggerFactory
+      .getLogger(GenericExceptionHandler.class);
 
   private @Context
   HttpServletResponse response;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerLaunchRPC.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerLaunchRPC.java
index 34e2198..279a37b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerLaunchRPC.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerLaunchRPC.java
@@ -24,8 +24,8 @@ import java.net.SocketTimeoutException;
 import java.util.ArrayList;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.net.NetUtils;
@@ -79,7 +79,8 @@ import org.junit.Test;
  */
 public class TestContainerLaunchRPC {
 
-  static final Log LOG = LogFactory.getLog(TestContainerLaunchRPC.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestContainerLaunchRPC.class);
 
   private static final RecordFactory recordFactory = RecordFactoryProvider
       .getRecordFactory(null);
@@ -171,7 +172,7 @@ public class TestContainerLaunchRPC {
         // make the thread sleep to look like its not going to respond
         Thread.sleep(10000);
       } catch (Exception e) {
-        LOG.error(e);
+        LOG.error(e.toString());
         throw new YarnException(e);
       }
       throw new YarnException("Shouldn't happen!!");
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerResourceIncreaseRPC.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerResourceIncreaseRPC.java
index 1690b81..c3dac91 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerResourceIncreaseRPC.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerResourceIncreaseRPC.java
@@ -18,8 +18,8 @@
 
 package org.apache.hadoop.yarn;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.net.NetUtils;
@@ -74,7 +74,7 @@ import java.util.List;
  */
 public class TestContainerResourceIncreaseRPC {
 
-  static final Log LOG = LogFactory.getLog(
+  private static final Logger LOG = LoggerFactory.getLogger(
       TestContainerResourceIncreaseRPC.class);
 
   @Test
@@ -188,7 +188,7 @@ public class TestContainerResourceIncreaseRPC {
         // make the thread sleep to look like its not going to respond
         Thread.sleep(10000);
       } catch (Exception e) {
-        LOG.error(e);
+        LOG.error(e.toString());
         throw new YarnException(e);
       }
       throw new YarnException("Shouldn't happen!!");
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java
index bbb7840..25eb9e1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java
@@ -21,8 +21,8 @@ import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
 import org.apache.commons.lang3.Range;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
 import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
 import org.junit.Assert;
@@ -40,7 +40,8 @@ import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetIn;
  * Generic helper class to validate protocol records.
  */
 public class BasePBImplRecordsTest {
-  static final Log LOG = LogFactory.getLog(BasePBImplRecordsTest.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(BasePBImplRecordsTest.class);
 
   @SuppressWarnings("checkstyle:visibilitymodifier")
   protected static HashMap<Type, Object> typeValueCache =
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/timeline/TestTimelineRecords.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/timeline/TestTimelineRecords.java
index 9d16edb..0de8200 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/timeline/TestTimelineRecords.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/timeline/TestTimelineRecords.java
@@ -27,8 +27,8 @@ import java.util.Set;
 import java.util.TreeMap;
 import java.util.WeakHashMap;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse.TimelinePutError;
 import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
 import org.junit.Assert;
@@ -36,8 +36,8 @@ import org.junit.Test;
 
 public class TestTimelineRecords {
 
-  private static final Log LOG =
-      LogFactory.getLog(TestTimelineRecords.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestTimelineRecords.class);
 
   @Test
   public void testEntities() throws Exception {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/timelineservice/TestTimelineServiceRecords.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/timelineservice/TestTimelineServiceRecords.java
index 221969b..b488a65 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/timelineservice/TestTimelineServiceRecords.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/timelineservice/TestTimelineServiceRecords.java
@@ -18,8 +18,8 @@
 package org.apache.hadoop.yarn.api.records.timelineservice;
 
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
@@ -36,8 +36,8 @@ import java.util.Map;
 
 
 public class TestTimelineServiceRecords {
-  private static final Log LOG =
-      LogFactory.getLog(TestTimelineServiceRecords.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestTimelineServiceRecords.class);
 
   @Test
   public void testTimelineEntities() throws Exception {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClientForATS1_5.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClientForATS1_5.java
index 76e5714..26dd7f4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClientForATS1_5.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClientForATS1_5.java
@@ -30,8 +30,8 @@ import java.io.File;
 import java.io.IOException;
 import java.net.URI;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.Path;
@@ -52,8 +52,8 @@ import com.sun.jersey.api.client.ClientResponse;
 
 public class TestTimelineClientForATS1_5 {
 
-  protected static Log LOG = LogFactory
-    .getLog(TestTimelineClientForATS1_5.class);
+  private final static Logger LOG = LoggerFactory
+      .getLogger(TestTimelineClientForATS1_5.class);
 
   private TimelineClientImpl client;
   private static FileContext localFS;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClientV2Impl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClientV2Impl.java
index 95595a9..6770c07 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClientV2Impl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClientV2Impl.java
@@ -30,8 +30,8 @@ import java.util.List;
 
 import javax.ws.rs.core.MultivaluedMap;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -50,8 +50,8 @@ import org.junit.Test;
 import org.junit.rules.TestName;
 
 public class TestTimelineClientV2Impl {
-  private static final Log LOG =
-      LogFactory.getLog(TestTimelineClientV2Impl.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestTimelineClientV2Impl.class);
   private TestV2TimelineClient client;
   private static final long TIME_TO_SLEEP = 150L;
   private static final String EXCEPTION_MSG = "Exception in the content";
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/InlineDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/InlineDispatcher.java
index 6aa56d8..cd6274a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/InlineDispatcher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/InlineDispatcher.java
@@ -18,15 +18,16 @@
 
 package org.apache.hadoop.yarn.event;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.yarn.event.AsyncDispatcher;
 import org.apache.hadoop.yarn.event.Event;
 import org.apache.hadoop.yarn.event.EventHandler;
 
 @SuppressWarnings({"unchecked", "rawtypes"})
 public class InlineDispatcher extends AsyncDispatcher {
-  private static final Log LOG = LogFactory.getLog(InlineDispatcher.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(InlineDispatcher.class);
 
   private class TestEventHandler implements EventHandler {
     @Override
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java
index f85445e..6c26c40 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java
@@ -38,8 +38,8 @@ import java.util.Collections;
 import java.util.concurrent.CountDownLatch;
 
 import org.junit.Assert;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileStatus;
@@ -70,8 +70,8 @@ public class TestAggregatedLogFormat {
   private static final Configuration conf = new Configuration();
   private static final FileSystem fs;
   private static final char filler = 'x';
-  private static final Log LOG = LogFactory
-      .getLog(TestAggregatedLogFormat.class);
+  private static final Logger LOG = LoggerFactory
+      .getLogger(TestAggregatedLogFormat.class);
 
   static {
     try {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestAdHocLogDumper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestAdHocLogDumper.java
index 046c94e..4b2545e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestAdHocLogDumper.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestAdHocLogDumper.java
@@ -18,14 +18,14 @@
 
 package org.apache.hadoop.yarn.util;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.util.Time;
 import org.apache.log4j.Appender;
 import org.apache.log4j.AppenderSkeleton;
-import org.apache.log4j.Logger;
 import org.apache.log4j.Priority;
+import org.apache.log4j.LogManager;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -34,20 +34,22 @@ import java.util.Enumeration;
 import java.util.HashMap;
 import java.util.Map;
 
+import static org.apache.hadoop.util.GenericsUtil.isLog4jLogger;
+
 public class TestAdHocLogDumper {
 
-  private static final Log LOG = LogFactory.getLog(TestAdHocLogDumper.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestAdHocLogDumper.class);
 
   @Test
   public void testDumpingSchedulerLogs() throws Exception {
 
     Map<Appender, Priority> levels = new HashMap<>();
-    String logHierarchy = TestAdHocLogDumper.class.getName();
     String logFilename = "test.log";
-    Log log = LogFactory.getLog(logHierarchy);
-    if (log instanceof Log4JLogger) {
-      for (Enumeration appenders = Logger.getRootLogger().getAllAppenders(); appenders
-        .hasMoreElements();) {
+    Logger logger = LoggerFactory.getLogger(TestAdHocLogDumper.class);
+    if (isLog4jLogger(this.getClass())) {
+      for (Enumeration appenders = LogManager.getRootLogger().
+          getAllAppenders(); appenders.hasMoreElements();) {
         Object obj = appenders.nextElement();
         if (obj instanceof AppenderSkeleton) {
           AppenderSkeleton appender = (AppenderSkeleton) obj;
@@ -56,7 +58,8 @@ public class TestAdHocLogDumper {
       }
     }
 
-    AdHocLogDumper dumper = new AdHocLogDumper(logHierarchy, logFilename);
+    AdHocLogDumper dumper = new AdHocLogDumper(this.getClass().getName(),
+        logFilename);
     dumper.dumpLogs("DEBUG", 1000);
     LOG.debug("test message 1");
     LOG.info("test message 2");
@@ -68,9 +71,9 @@ public class TestAdHocLogDumper {
     Assert.assertTrue(logFile.length() != 0);
 
     // make sure levels are set back to their original values
-    if (log instanceof Log4JLogger) {
-      for (Enumeration appenders = Logger.getRootLogger().getAllAppenders(); appenders
-        .hasMoreElements();) {
+    if (isLog4jLogger(this.getClass())) {
+      for (Enumeration appenders = LogManager.getRootLogger().
+          getAllAppenders(); appenders.hasMoreElements();) {
         Object obj = appenders.nextElement();
         if (obj instanceof AppenderSkeleton) {
           AppenderSkeleton appender = (AppenderSkeleton) obj;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestFSDownload.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestFSDownload.java
index 08d6189..678687f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestFSDownload.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestFSDownload.java
@@ -58,8 +58,8 @@ import org.junit.Assert;
 
 import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
 import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -87,7 +87,8 @@ import com.google.common.cache.LoadingCache;
  */
 public class TestFSDownload {
 
-  private static final Log LOG = LogFactory.getLog(TestFSDownload.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestFSDownload.class);
   private static AtomicLong uniqueNumberGenerator =
     new AtomicLong(System.currentTimeMillis());
   private enum TEST_FILE_TYPE {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLog4jWarningErrorMetricsAppender.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLog4jWarningErrorMetricsAppender.java
index e788e80..46c891a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLog4jWarningErrorMetricsAppender.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLog4jWarningErrorMetricsAppender.java
@@ -18,14 +18,17 @@
 
 package org.apache.hadoop.yarn.util;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.slf4j.Marker;
+import org.slf4j.MarkerFactory;
+import org.apache.log4j.LogManager;
 import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
+import org.apache.hadoop.util.Time;
 import org.junit.Assert;
 import org.junit.Test;
 
+
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
@@ -33,7 +36,10 @@ import java.util.Map;
 public class TestLog4jWarningErrorMetricsAppender {
 
   Log4jWarningErrorMetricsAppender appender;
-  Log logger = LogFactory.getLog(TestLog4jWarningErrorMetricsAppender.class);
+  private static final Logger LOG = LoggerFactory.
+      getLogger(TestLog4jWarningErrorMetricsAppender.class);
+  private static final Marker FATAL =
+      MarkerFactory.getMarker("FATAL");
   List<Long> cutoff = new ArrayList<>();
 
   void setupAppender(int cleanupIntervalSeconds, long messageAgeLimitSeconds,
@@ -42,33 +48,33 @@ public class TestLog4jWarningErrorMetricsAppender {
     appender =
         new Log4jWarningErrorMetricsAppender(cleanupIntervalSeconds,
           messageAgeLimitSeconds, maxUniqueMessages);
-    Logger.getRootLogger().addAppender(appender);
+    LogManager.getRootLogger().addAppender(appender);
   }
 
   void removeAppender() {
-    Logger.getRootLogger().removeAppender(appender);
+    LogManager.getRootLogger().removeAppender(appender);
   }
 
   void logMessages(Level level, String message, int count) {
     for (int i = 0; i < count; ++i) {
       switch (level.toInt()) {
       case Level.FATAL_INT:
-        logger.fatal(message);
+        LOG.error(FATAL, message);
         break;
       case Level.ERROR_INT:
-        logger.error(message);
+        LOG.error(message);
         break;
       case Level.WARN_INT:
-        logger.warn(message);
+        LOG.warn(message);
         break;
       case Level.INFO_INT:
-        logger.info(message);
+        LOG.info(message);
         break;
       case Level.DEBUG_INT:
-        logger.debug(message);
+        LOG.debug(message);
         break;
       case Level.TRACE_INT:
-        logger.trace(message);
+        LOG.trace(message);
         break;
       }
     }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
index 7349d22..56baa89 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
@@ -38,8 +38,8 @@ import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
 import org.apache.commons.io.FileUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileUtil;
@@ -61,8 +61,8 @@ import org.junit.Test;
  */
 public class TestProcfsBasedProcessTree {
 
-  private static final Log LOG = LogFactory
-    .getLog(TestProcfsBasedProcessTree.class);
+  private static final Logger LOG = LoggerFactory
+      .getLogger(TestProcfsBasedProcessTree.class);
   protected static File TEST_ROOT_DIR = new File("target",
     TestProcfsBasedProcessTree.class.getName() + "-localDir");
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestRackResolver.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestRackResolver.java
index 629578f..4b1d792 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestRackResolver.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestRackResolver.java
@@ -24,8 +24,8 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.net.DNSToSwitchMapping;
@@ -37,7 +37,8 @@ import org.junit.Test;
 
 public class TestRackResolver {
 
-  private static Log LOG = LogFactory.getLog(TestRackResolver.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestRackResolver.class);
   private static final String invalidHost = "invalidHost";
 
   @Before
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestWindowsBasedProcessTree.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestWindowsBasedProcessTree.java
index d3cba64..db5d4be 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestWindowsBasedProcessTree.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestWindowsBasedProcessTree.java
@@ -18,8 +18,8 @@
 
 package org.apache.hadoop.yarn.util;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -27,8 +27,8 @@ import static org.apache.hadoop.test.PlatformAssumptions.assumeWindows;
 import static org.junit.Assert.assertTrue;
 
 public class TestWindowsBasedProcessTree {
-  private static final Log LOG = LogFactory
-      .getLog(TestWindowsBasedProcessTree.class);
+  private static final Logger LOG = LoggerFactory
+      .getLogger(TestWindowsBasedProcessTree.class);
 
   class WindowsBasedProcessTreeTester extends WindowsBasedProcessTree {
     String infoStr = null;


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[hadoop] 02/05: Revert "HDDS-1072. Implement RetryProxy and FailoverProxy for OM client."

Posted by aa...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

aajisaka pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit b18c1c22ea238c4b783031402496164f0351b531
Author: Hanisha Koneru <ha...@apache.org>
AuthorDate: Fri Mar 1 20:05:12 2019 -0800

    Revert "HDDS-1072. Implement RetryProxy and FailoverProxy for OM client."
    
    This reverts commit 8e1225991d8da7d6801fc3753319139873f23bc9.
---
 .../org/apache/hadoop/ozone/OzoneConfigKeys.java   |  17 --
 .../java/org/apache/hadoop/ozone/OzoneConsts.java  |   3 -
 .../common/src/main/resources/ozone-default.xml    |  43 +---
 .../ozone/client/protocol/ClientProtocol.java      |   4 +-
 .../hadoop/ozone/client/rest/RestClient.java       |   4 +-
 .../apache/hadoop/ozone/client/rpc/RpcClient.java  |  18 +-
 .../hadoop/ozone/client/rpc/ha/OMProxyInfo.java}   |  32 ++-
 .../ozone/client/rpc/ha/OMProxyProvider.java       | 177 ++++++++++++++
 .../hadoop/ozone/client/rpc}/ha/package-info.java  |   2 +-
 .../ozone/om/ha/OMFailoverProxyProvider.java       | 266 ---------------------
 .../ozone/om/protocol/OzoneManagerProtocol.java    |   7 -
 ...OzoneManagerProtocolClientSideTranslatorPB.java | 105 +-------
 .../src/main/proto/OzoneManagerProtocol.proto      |   2 -
 .../hadoop/ozone/MiniOzoneHAClusterImpl.java       |  38 ++-
 .../client/rpc/TestOzoneRpcClientAbstract.java     |   8 +-
 .../apache/hadoop/ozone/om/TestOzoneManagerHA.java | 184 +++-----------
 .../org/apache/hadoop/ozone/om/OzoneManager.java   |   6 -
 .../hadoop/ozone/om/ratis/OMRatisHelper.java       |   9 +-
 .../ozone/om/ratis/OzoneManagerRatisClient.java    |  27 +--
 ...OzoneManagerProtocolServerSideTranslatorPB.java |   3 +-
 .../om/ratis/TestOzoneManagerRatisServer.java      |  23 ++
 21 files changed, 322 insertions(+), 656 deletions(-)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index 0d73905..cd40f7c 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -379,23 +379,6 @@ public final class OzoneConfigKeys {
   public static final String OZONE_FS_ISOLATED_CLASSLOADER =
       "ozone.fs.isolated-classloader";
 
-  // Ozone Client Retry and Failover configurations
-  public static final String OZONE_CLIENT_RETRY_MAX_ATTEMPTS_KEY =
-      "ozone.client.retry.max.attempts";
-  public static final int OZONE_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT =
-      10;
-  public static final String OZONE_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY =
-      "ozone.client.failover.max.attempts";
-  public static final int OZONE_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT =
-      15;
-  public static final String OZONE_CLIENT_FAILOVER_SLEEP_BASE_MILLIS_KEY =
-      "ozone.client.failover.sleep.base.millis";
-  public static final int OZONE_CLIENT_FAILOVER_SLEEP_BASE_MILLIS_DEFAULT =
-      500;
-  public static final String OZONE_CLIENT_FAILOVER_SLEEP_MAX_MILLIS_KEY =
-      "ozone.client.failover.sleep.max.millis";
-  public static final int OZONE_CLIENT_FAILOVER_SLEEP_MAX_MILLIS_DEFAULT =
-      15000;
 
   public static final String OZONE_FREON_HTTP_ENABLED_KEY =
       "ozone.freon.http.enabled";
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 8e3b02a..45b46b8 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -276,7 +276,4 @@ public final class OzoneConsts {
 
   // Default OMServiceID for OM Ratis servers to use as RaftGroupId
   public static final String OM_SERVICE_ID_DEFAULT = "omServiceIdDefault";
-
-  // Dummy OMNodeID for OM Clients to use for a non-HA OM setup
-  public static final String OM_NODE_ID_DUMMY = "omNodeIdDummy";
 }
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index f7fecb7..8469fdc 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -2029,45 +2029,4 @@
     </description>
   </property>
 
-  <property>
-    <name>ozone.client.retry.max.attempts</name>
-    <value>10</value>
-    <description>
-      Max retry attempts for Ozone RpcClient talking to OzoneManagers.
-    </description>
-  </property>
-  <property>
-    <name>ozone.client.failover.max.attempts</name>
-    <value>15</value>
-    <description>
-      Expert only. The number of client failover attempts that should be
-      made before the failover is considered failed.
-    </description>
-  </property>
-  <property>
-    <name>ozone.client.failover.sleep.base.millis</name>
-    <value>500</value>
-    <description>
-      Expert only. The time to wait, in milliseconds, between failover
-      attempts increases exponentially as a function of the number of
-      attempts made so far, with a random factor of +/- 50%. This option
-      specifies the base value used in the failover calculation. The
-      first failover will retry immediately. The 2nd failover attempt
-      will delay at least ozone.client.failover.sleep.base.millis
-      milliseconds. And so on.
-    </description>
-  </property>
-  <property>
-    <name>ozone.client.failover.sleep.max.millis</name>
-    <value>15000</value>
-    <description>
-      Expert only. The time to wait, in milliseconds, between failover
-      attempts increases exponentially as a function of the number of
-      attempts made so far, with a random factor of +/- 50%. This option
-      specifies the maximum value to wait between failovers.
-      Specifically, the time between two failover attempts will not
-      exceed +/- 50% of ozone.client.failover.sleep.max.millis
-      milliseconds.
-    </description>
-  </property>
-</configuration>
\ No newline at end of file
+</configuration>
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
index 2bf9089..494afae 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
@@ -29,7 +29,7 @@ import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.om.ha.OMFailoverProxyProvider;
+import org.apache.hadoop.ozone.client.rpc.ha.OMProxyProvider;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;
 
@@ -510,5 +510,5 @@ public interface ClientProtocol {
   S3SecretValue getS3Secret(String kerberosID) throws IOException;
 
   @VisibleForTesting
-  OMFailoverProxyProvider getOMProxyProvider();
+  OMProxyProvider getOMProxyProvider();
 }
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
index eea2809..b69d972 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
@@ -42,8 +42,8 @@ import org.apache.hadoop.ozone.client.rest.headers.Header;
 import org.apache.hadoop.ozone.client.rest.response.BucketInfo;
 import org.apache.hadoop.ozone.client.rest.response.KeyInfoDetails;
 import org.apache.hadoop.ozone.client.rest.response.VolumeInfo;
+import org.apache.hadoop.ozone.client.rpc.ha.OMProxyProvider;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.ha.OMFailoverProxyProvider;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;
 import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
@@ -725,7 +725,7 @@ public class RestClient implements ClientProtocol {
   }
 
   @Override
-  public OMFailoverProxyProvider getOMProxyProvider() {
+  public OMProxyProvider getOMProxyProvider() {
     return null;
   }
 
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index 4b44770..0875046 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -50,8 +50,8 @@ import org.apache.hadoop.ozone.client.io.LengthInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
-import org.apache.hadoop.ozone.om.ha.OMFailoverProxyProvider;
 import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo;
+import org.apache.hadoop.ozone.client.rpc.ha.OMProxyProvider;
 import org.apache.hadoop.ozone.om.helpers.OmBucketArgs;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
@@ -66,8 +66,6 @@ import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
 import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
 import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
 import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
-import org.apache.hadoop.ozone.om.protocolPB
-    .OzoneManagerProtocolClientSideTranslatorPB;
 import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.ozone.OzoneAcl;
@@ -87,7 +85,6 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.io.Text;
 import org.apache.logging.log4j.util.Strings;
-import org.apache.ratis.protocol.ClientId;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -110,6 +107,7 @@ public class RpcClient implements ClientProtocol {
   private final OzoneConfiguration conf;
   private final StorageContainerLocationProtocol
       storageContainerLocationClient;
+  private final OMProxyProvider omProxyProvider;
   private final OzoneManagerProtocol ozoneManagerClient;
   private final XceiverClientManager xceiverClientManager;
   private final int chunkSize;
@@ -123,7 +121,6 @@ public class RpcClient implements ClientProtocol {
   private final long streamBufferMaxSize;
   private final long blockSize;
   private final long watchTimeout;
-  private final ClientId clientId = ClientId.randomId();
 
    /**
     * Creates RpcClient instance with the given configuration.
@@ -140,8 +137,11 @@ public class RpcClient implements ClientProtocol {
         OMConfigKeys.OZONE_OM_GROUP_RIGHTS_DEFAULT);
     RPC.setProtocolEngine(conf, OzoneManagerProtocolPB.class,
         ProtobufRpcEngine.class);
-    this.ozoneManagerClient = new OzoneManagerProtocolClientSideTranslatorPB(
-        this.conf, clientId.toString(), ugi);
+    this.omProxyProvider = new OMProxyProvider(conf, ugi);
+    this.ozoneManagerClient =
+        TracingUtil.createProxy(
+            this.omProxyProvider.getProxy(),
+            OzoneManagerProtocol.class);
 
     long scmVersion =
         RPC.getProtocolVersion(StorageContainerLocationProtocolPB.class);
@@ -492,8 +492,8 @@ public class RpcClient implements ClientProtocol {
 
   @Override
   @VisibleForTesting
-  public OMFailoverProxyProvider getOMProxyProvider() {
-    return ozoneManagerClient.getOMFailoverProxyProvider();
+  public OMProxyProvider getOMProxyProvider() {
+    return omProxyProvider;
   }
 
   @Override
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/package-info.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/ha/OMProxyInfo.java
similarity index 53%
copy from hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/package-info.java
copy to hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/ha/OMProxyInfo.java
index a95f09f..01e5562 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/package-info.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/ha/OMProxyInfo.java
@@ -16,8 +16,34 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.ozone.om.ha;
+package org.apache.hadoop.ozone.client.rpc.ha;
+
+import org.apache.hadoop.ozone.om.protocolPB
+    .OzoneManagerProtocolClientSideTranslatorPB;
+
+import java.net.InetSocketAddress;
 
 /**
- * This package contains Ozone Client's OM Proxy classes.
- */
\ No newline at end of file
+ * Proxy information of OM.
+ */
+public final class OMProxyInfo {
+  private InetSocketAddress address;
+  private OzoneManagerProtocolClientSideTranslatorPB omClient;
+
+  public OMProxyInfo(InetSocketAddress addr) {
+    this.address = addr;
+  }
+
+  public InetSocketAddress getAddress() {
+    return address;
+  }
+
+  public OzoneManagerProtocolClientSideTranslatorPB getOMProxy() {
+    return omClient;
+  }
+
+  public void setOMProxy(
+      OzoneManagerProtocolClientSideTranslatorPB clientProxy) {
+    this.omClient = clientProxy;
+  }
+}
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/ha/OMProxyProvider.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/ha/OMProxyProvider.java
new file mode 100644
index 0000000..574cb5f
--- /dev/null
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/ha/OMProxyProvider.java
@@ -0,0 +1,177 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.client.rpc.ha;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.Client;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.ozone.OmUtils;
+import org.apache.hadoop.ozone.om.protocolPB
+    .OzoneManagerProtocolClientSideTranslatorPB;
+import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.ratis.protocol.ClientId;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY;
+
+/**
+ * A failover proxy provider implementation which allows clients to configure
+ * multiple OMs to connect to. In case of OM failover, client can try
+ * connecting to another OM node from the list of proxies.
+ */
+public class OMProxyProvider implements Closeable {
+
+  public static final Logger LOG =
+      LoggerFactory.getLogger(OMProxyProvider.class);
+
+  private List<OMProxyInfo> omProxies;
+
+  private int currentProxyIndex = 0;
+
+  private final Configuration conf;
+  private final long omVersion;
+  private final UserGroupInformation ugi;
+  private ClientId clientId = ClientId.randomId();
+
+  public OMProxyProvider(Configuration configuration,
+      UserGroupInformation ugi) {
+    this.conf = configuration;
+    this.omVersion = RPC.getProtocolVersion(OzoneManagerProtocolPB.class);
+    this.ugi = ugi;
+    loadOMClientConfigs(conf);
+  }
+
+  private void loadOMClientConfigs(Configuration config) {
+    this.omProxies = new ArrayList<>();
+
+    Collection<String> omServiceIds = config.getTrimmedStringCollection(
+        OZONE_OM_SERVICE_IDS_KEY);
+
+    if (omServiceIds.size() > 1) {
+      throw new IllegalArgumentException("Multi-OM Services is not supported." +
+          " Please configure only one OM Service ID in " +
+          OZONE_OM_SERVICE_IDS_KEY);
+    }
+
+    for (String serviceId : OmUtils.emptyAsSingletonNull(omServiceIds)) {
+      Collection<String> omNodeIds = OmUtils.getOMNodeIds(config, serviceId);
+
+      for (String nodeId : OmUtils.emptyAsSingletonNull(omNodeIds)) {
+
+        String rpcAddrKey = OmUtils.addKeySuffixes(OZONE_OM_ADDRESS_KEY,
+            serviceId, nodeId);
+        String rpcAddrStr = OmUtils.getOmRpcAddress(config, rpcAddrKey);
+        if (rpcAddrStr == null) {
+          continue;
+        }
+
+        InetSocketAddress addr = NetUtils.createSocketAddr(rpcAddrStr);
+
+        // Add the OM client proxy info to list of proxies
+        if (addr != null) {
+          OMProxyInfo omProxyInfo = new OMProxyInfo(addr);
+          omProxies.add(omProxyInfo);
+        } else {
+          LOG.error("Failed to create OM proxy at address {}", rpcAddrStr);
+        }
+      }
+    }
+
+    if (omProxies.isEmpty()) {
+      throw new IllegalArgumentException("Could not find any configured " +
+          "addresses for OM. Please configure the system with "
+          + OZONE_OM_ADDRESS_KEY);
+    }
+  }
+
+  private OzoneManagerProtocolClientSideTranslatorPB getOMClient(
+      InetSocketAddress omAddress) throws IOException {
+    return new OzoneManagerProtocolClientSideTranslatorPB(
+        RPC.getProxy(OzoneManagerProtocolPB.class, omVersion, omAddress, ugi,
+            conf, NetUtils.getDefaultSocketFactory(conf),
+            Client.getRpcTimeout(conf)), clientId.toString());
+  }
+
+  /**
+   * Get the proxy object which should be used until the next failover event
+   * occurs. RPC proxy object is initialized lazily.
+   * @return the OM proxy object to invoke methods upon
+   */
+  public synchronized OzoneManagerProtocolClientSideTranslatorPB getProxy() {
+    OMProxyInfo currentOMProxyInfo = omProxies.get(currentProxyIndex);
+    return createOMClientIfNeeded(currentOMProxyInfo);
+  }
+
+  private OzoneManagerProtocolClientSideTranslatorPB createOMClientIfNeeded(
+      OMProxyInfo proxyInfo) {
+    if (proxyInfo.getOMProxy() == null) {
+      try {
+        proxyInfo.setOMProxy(getOMClient(proxyInfo.getAddress()));
+      } catch (IOException ioe) {
+        LOG.error("{} Failed to create RPC proxy to OM at {}",
+            this.getClass().getSimpleName(), proxyInfo.getAddress(), ioe);
+        throw new RuntimeException(ioe);
+      }
+    }
+    return proxyInfo.getOMProxy();
+  }
+
+  /**
+   * Called whenever an error warrants failing over. It is determined by the
+   * retry policy.
+   */
+  public void performFailover() {
+    incrementProxyIndex();
+  }
+
+  synchronized void incrementProxyIndex() {
+    currentProxyIndex = (currentProxyIndex + 1) % omProxies.size();
+  }
+
+  /**
+   * Close all the proxy objects which have been opened over the lifetime of
+   * the proxy provider.
+   */
+  @Override
+  public synchronized void close() throws IOException {
+    for (OMProxyInfo proxy : omProxies) {
+      OzoneManagerProtocolClientSideTranslatorPB omProxy = proxy.getOMProxy();
+      if (omProxy != null) {
+        RPC.stopProxy(omProxy);
+      }
+    }
+  }
+
+  @VisibleForTesting
+  public List<OMProxyInfo> getOMProxies() {
+    return omProxies;
+  }
+}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/package-info.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/ha/package-info.java
similarity index 94%
rename from hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/package-info.java
rename to hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/ha/package-info.java
index a95f09f..df0e69c 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/package-info.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/ha/package-info.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.ozone.om.ha;
+package org.apache.hadoop.ozone.client.rpc.ha;
 
 /**
  * This package contains Ozone Client's OM Proxy classes.
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java
deleted file mode 100644
index f5fdf6f..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java
+++ /dev/null
@@ -1,266 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.ha;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.io.retry.FailoverProxyProvider;
-import org.apache.hadoop.ipc.Client;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.ozone.OmUtils;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY;
-
-/**
- * A failover proxy provider implementation which allows clients to configure
- * multiple OMs to connect to. In case of OM failover, client can try
- * connecting to another OM node from the list of proxies.
- */
-public class OMFailoverProxyProvider implements
-    FailoverProxyProvider<OzoneManagerProtocolPB>, Closeable {
-
-  public static final Logger LOG =
-      LoggerFactory.getLogger(OMFailoverProxyProvider.class);
-
-  // Map of OMNodeID to its proxy
-  private Map<String, OMProxyInfo> omProxies;
-  private List<String> omNodeIDList;
-
-  private String currentProxyOMNodeId;
-  private int currentProxyIndex;
-
-  private final Configuration conf;
-  private final long omVersion;
-  private final UserGroupInformation ugi;
-
-  public OMFailoverProxyProvider(OzoneConfiguration configuration,
-      UserGroupInformation ugi) throws IOException {
-    this.conf = configuration;
-    this.omVersion = RPC.getProtocolVersion(OzoneManagerProtocolPB.class);
-    this.ugi = ugi;
-    loadOMClientConfigs(conf);
-
-    currentProxyIndex = 0;
-    currentProxyOMNodeId = omNodeIDList.get(currentProxyIndex);
-  }
-
-  /**
-   * Class to store proxy information.
-   */
-  public final class OMProxyInfo
-      extends FailoverProxyProvider.ProxyInfo<OzoneManagerProtocolPB> {
-    private InetSocketAddress address;
-
-    OMProxyInfo(OzoneManagerProtocolPB proxy, String proxyInfoStr,
-        InetSocketAddress addr) {
-      super(proxy, proxyInfoStr);
-      this.address = addr;
-    }
-
-    public InetSocketAddress getAddress() {
-      return address;
-    }
-  }
-
-  private void loadOMClientConfigs(Configuration config) throws IOException {
-    this.omProxies = new HashMap<>();
-    this.omNodeIDList = new ArrayList<>();
-
-    Collection<String> omServiceIds = config.getTrimmedStringCollection(
-        OZONE_OM_SERVICE_IDS_KEY);
-
-    if (omServiceIds.size() > 1) {
-      throw new IllegalArgumentException("Multi-OM Services is not supported." +
-          " Please configure only one OM Service ID in " +
-          OZONE_OM_SERVICE_IDS_KEY);
-    }
-
-    for (String serviceId : OmUtils.emptyAsSingletonNull(omServiceIds)) {
-      Collection<String> omNodeIds = OmUtils.getOMNodeIds(config, serviceId);
-
-      for (String nodeId : OmUtils.emptyAsSingletonNull(omNodeIds)) {
-
-        String rpcAddrKey = OmUtils.addKeySuffixes(OZONE_OM_ADDRESS_KEY,
-            serviceId, nodeId);
-        String rpcAddrStr = OmUtils.getOmRpcAddress(config, rpcAddrKey);
-        if (rpcAddrStr == null) {
-          continue;
-        }
-
-        InetSocketAddress addr = NetUtils.createSocketAddr(rpcAddrStr);
-
-        // Add the OM client proxy info to list of proxies
-        if (addr != null) {
-          StringBuilder proxyInfo = new StringBuilder()
-              .append(nodeId).append("(")
-              .append(NetUtils.getHostPortString(addr)).append(")");
-          OMProxyInfo omProxyInfo = new OMProxyInfo(null,
-              proxyInfo.toString(), addr);
-
-          // For a non-HA OM setup, nodeId might be null. If so, we assign it
-          // a dummy value
-          if (nodeId == null) {
-            nodeId = OzoneConsts.OM_NODE_ID_DUMMY;
-          }
-          omProxies.put(nodeId, omProxyInfo);
-          omNodeIDList.add(nodeId);
-
-        } else {
-          LOG.error("Failed to create OM proxy for {} at address {}",
-              nodeId, rpcAddrStr);
-        }
-      }
-    }
-
-    if (omProxies.isEmpty()) {
-      throw new IllegalArgumentException("Could not find any configured " +
-          "addresses for OM. Please configure the system with "
-          + OZONE_OM_ADDRESS_KEY);
-    }
-  }
-
-  @VisibleForTesting
-  public synchronized String getCurrentProxyOMNodeId() {
-    return currentProxyOMNodeId;
-  }
-
-  private OzoneManagerProtocolPB createOMProxy(InetSocketAddress omAddress)
-      throws IOException {
-    return RPC.getProxy(OzoneManagerProtocolPB.class, omVersion, omAddress, ugi,
-        conf, NetUtils.getDefaultSocketFactory(conf),
-        Client.getRpcTimeout(conf));
-  }
-
-  /**
-   * Get the proxy object which should be used until the next failover event
-   * occurs. RPC proxy object is intialized lazily.
-   * @return the OM proxy object to invoke methods upon
-   */
-  @Override
-  public synchronized OMProxyInfo getProxy() {
-    OMProxyInfo currentOMProxyInfo = omProxies.get(currentProxyOMNodeId);
-    createOMProxyIfNeeded(currentOMProxyInfo);
-    return currentOMProxyInfo;
-  }
-
-  /**
-   * Creates OM proxy object if it does not already exist.
-   */
-  private OMProxyInfo createOMProxyIfNeeded(OMProxyInfo proxyInfo) {
-    if (proxyInfo.proxy == null) {
-      try {
-        proxyInfo.proxy = createOMProxy(proxyInfo.address);
-      } catch (IOException ioe) {
-        LOG.error("{} Failed to create RPC proxy to OM at {}",
-            this.getClass().getSimpleName(), proxyInfo.address, ioe);
-        throw new RuntimeException(ioe);
-      }
-    }
-    return proxyInfo;
-  }
-
-  /**
-   * Called whenever an error warrants failing over. It is determined by the
-   * retry policy.
-   */
-  @Override
-  public void performFailover(OzoneManagerProtocolPB currentProxy) {
-    int newProxyIndex = incrementProxyIndex();
-    LOG.debug("Failing over OM proxy to index: {}, nodeId: {}",
-        newProxyIndex, omNodeIDList.get(newProxyIndex));
-  }
-
-  /**
-   * Update the proxy index to the next proxy in the list.
-   * @return the new proxy index
-   */
-  private synchronized int incrementProxyIndex() {
-    currentProxyIndex = (currentProxyIndex + 1) % omProxies.size();
-    currentProxyOMNodeId = omNodeIDList.get(currentProxyIndex);
-    return currentProxyIndex;
-  }
-
-  @Override
-  public Class<OzoneManagerProtocolPB> getInterface() {
-    return OzoneManagerProtocolPB.class;
-  }
-
-  /**
-   * Performs failover if the leaderOMNodeId returned through OMReponse does
-   * not match the current leaderOMNodeId cached by the proxy provider.
-   */
-  public void performFailoverIfRequired(String newLeaderOMNodeId) {
-    if (updateLeaderOMNodeId(newLeaderOMNodeId)) {
-      LOG.debug("Failing over OM proxy to nodeId: {}", newLeaderOMNodeId);
-    }
-  }
-
-  /**
-   * Failover to the OM proxy specified by the new leader OMNodeId.
-   * @param newLeaderOMNodeId OMNodeId to failover to.
-   * @return true if failover is successful, false otherwise.
-   */
-  synchronized boolean updateLeaderOMNodeId(String newLeaderOMNodeId) {
-    if (!currentProxyOMNodeId.equals(newLeaderOMNodeId)) {
-      if (omProxies.containsKey(newLeaderOMNodeId)) {
-        currentProxyOMNodeId = newLeaderOMNodeId;
-        currentProxyIndex = omNodeIDList.indexOf(currentProxyOMNodeId);
-        return true;
-      }
-    }
-    return false;
-  }
-
-  /**
-   * Close all the proxy objects which have been opened over the lifetime of
-   * the proxy provider.
-   */
-  @Override
-  public synchronized void close() throws IOException {
-    for (OMProxyInfo proxy : omProxies.values()) {
-      OzoneManagerProtocolPB omProxy = proxy.proxy;
-      if (omProxy != null) {
-        RPC.stopProxy(omProxy);
-      }
-    }
-  }
-
-  @VisibleForTesting
-  public List<OMProxyInfo> getOMProxies() {
-    return new ArrayList<>(omProxies.values());
-  }
-}
-
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
index 54f4e82..1573682 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
@@ -16,7 +16,6 @@
  * limitations under the License.
  */
 package org.apache.hadoop.ozone.om.protocol;
-import org.apache.hadoop.ozone.om.ha.OMFailoverProxyProvider;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
@@ -385,11 +384,5 @@ public interface OzoneManagerProtocol
    * @throws IOException
    */
   S3SecretValue getS3Secret(String kerberosID) throws IOException;
-
-  /**
-   * Get the OM Client's Retry and Failover Proxy provider.
-   * @return OMFailoverProxyProvider
-   */
-  OMFailoverProxyProvider getOMFailoverProxyProvider();
 }
 
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
index ff7a1d8..51ce94f 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
@@ -17,26 +17,18 @@
  */
 package org.apache.hadoop.ozone.om.protocolPB;
 
-import java.io.EOFException;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.stream.Collectors;
 
-import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
 import org.apache.hadoop.hdds.tracing.TracingUtil;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.retry.RetryPolicies;
-import org.apache.hadoop.io.retry.RetryPolicy;
-import org.apache.hadoop.io.retry.RetryProxy;
 import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtocolTranslator;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.ha.OMFailoverProxyProvider;
 import org.apache.hadoop.ozone.om.helpers.KeyValueUtil;
 import org.apache.hadoop.ozone.om.helpers.OmBucketArgs;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
@@ -111,7 +103,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeInfo;
 import org.apache.hadoop.ozone.protocolPB.OMPBHelper;
 import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
-import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto;
 import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto;
 import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto;
@@ -121,9 +112,6 @@ import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TOKEN_ERROR_OTHER;
 import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.ACCESS_DENIED;
@@ -144,89 +132,20 @@ public final class OzoneManagerProtocolClientSideTranslatorPB
    */
   private static final RpcController NULL_RPC_CONTROLLER = null;
 
-  private final OMFailoverProxyProvider omFailoverProxyProvider;
   private final OzoneManagerProtocolPB rpcProxy;
   private final String clientID;
-  private static final Logger FAILOVER_PROXY_PROVIDER_LOG =
-      LoggerFactory.getLogger(OMFailoverProxyProvider.class);
-
-  public OzoneManagerProtocolClientSideTranslatorPB(
-      OzoneManagerProtocolPB proxy, String clientId) {
-    this.rpcProxy = proxy;
-    this.clientID = clientId;
-    this.omFailoverProxyProvider = null;
-  }
 
   /**
-   * Constructor for OM Protocol Client. This creates a {@link RetryProxy}
-   * over {@link OMFailoverProxyProvider} proxy. OMFailoverProxyProvider has
-   * one {@link OzoneManagerProtocolPB} proxy pointing to each OM node in the
-   * cluster.
+   * Constructor for KeySpaceManager Client.
+   * @param rpcProxy
    */
-  public OzoneManagerProtocolClientSideTranslatorPB(OzoneConfiguration conf,
-      String clientId, UserGroupInformation ugi) throws IOException {
-    this.omFailoverProxyProvider = new OMFailoverProxyProvider(conf, ugi);
-
-    int maxRetries = conf.getInt(
-        OzoneConfigKeys.OZONE_CLIENT_RETRY_MAX_ATTEMPTS_KEY,
-        OzoneConfigKeys.OZONE_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT);
-    int maxFailovers = conf.getInt(
-        OzoneConfigKeys.OZONE_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY,
-        OzoneConfigKeys.OZONE_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT);
-    int sleepBase = conf.getInt(
-        OzoneConfigKeys.OZONE_CLIENT_FAILOVER_SLEEP_BASE_MILLIS_KEY,
-        OzoneConfigKeys.OZONE_CLIENT_FAILOVER_SLEEP_BASE_MILLIS_DEFAULT);
-    int sleepMax = conf.getInt(
-        OzoneConfigKeys.OZONE_CLIENT_FAILOVER_SLEEP_MAX_MILLIS_KEY,
-        OzoneConfigKeys.OZONE_CLIENT_FAILOVER_SLEEP_MAX_MILLIS_DEFAULT);
-
-    this.rpcProxy = createRetryProxy(omFailoverProxyProvider,
-        maxRetries, maxFailovers, sleepBase, sleepMax);
+  public OzoneManagerProtocolClientSideTranslatorPB(
+      OzoneManagerProtocolPB rpcProxy, String clientId) {
+    this.rpcProxy = rpcProxy;
     this.clientID = clientId;
   }
 
   /**
-   * Creates a {@link RetryProxy} encapsulating the
-   * {@link OMFailoverProxyProvider}. The retry proxy fails over on network
-   * exception or if the current proxy is not the leader OM.
-   */
-  private OzoneManagerProtocolPB createRetryProxy(
-      OMFailoverProxyProvider failoverProxyProvider,
-      int maxRetries, int maxFailovers, int delayMillis, int maxDelayBase) {
-    RetryPolicy retryPolicyOnNetworkException = RetryPolicies
-        .failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,
-            maxFailovers, maxRetries, delayMillis, maxDelayBase);
-    RetryPolicy retryPolicy = new RetryPolicy() {
-      @Override
-      public RetryAction shouldRetry(Exception exception, int retries,
-          int failovers, boolean isIdempotentOrAtMostOnce)
-          throws Exception {
-        if (exception instanceof EOFException ||
-            exception instanceof  ServiceException) {
-          if (retries < maxRetries && failovers < maxFailovers) {
-            return RetryAction.FAILOVER_AND_RETRY;
-          } else {
-            FAILOVER_PROXY_PROVIDER_LOG.error("Failed to connect to OM. " +
-                "Attempted {} retries and {} failovers", retries, failovers);
-            return RetryAction.FAIL;
-          }
-        } else {
-          return retryPolicyOnNetworkException.shouldRetry(
-                  exception, retries, failovers, isIdempotentOrAtMostOnce);
-        }
-      }
-    };
-    OzoneManagerProtocolPB proxy = (OzoneManagerProtocolPB) RetryProxy.create(
-        OzoneManagerProtocolPB.class, failoverProxyProvider, retryPolicy);
-    return proxy;
-  }
-
-  @VisibleForTesting
-  public OMFailoverProxyProvider getOMFailoverProxyProvider() {
-    return omFailoverProxyProvider;
-  }
-
-  /**
    * Closes this stream and releases any system resources associated
    * with it. If the stream is already closed then invoking this
    * method has no effect.
@@ -277,19 +196,7 @@ public final class OzoneManagerProtocolClientSideTranslatorPB
       OMRequest payload = OMRequest.newBuilder(omRequest)
           .setTraceID(TracingUtil.exportCurrentSpan())
           .build();
-
-      OMResponse omResponse =
-          rpcProxy.submitRequest(NULL_RPC_CONTROLLER, payload);
-
-      if (omResponse.hasLeaderOMNodeId() && omFailoverProxyProvider != null) {
-        String leaderOmId = omResponse.getLeaderOMNodeId();
-
-        // Failover to the OM node returned by OMReponse leaderOMNodeId if
-        // current proxy is not pointing to that node.
-        omFailoverProxyProvider.performFailoverIfRequired(leaderOmId);
-      }
-
-      return omResponse;
+      return rpcProxy.submitRequest(NULL_RPC_CONTROLLER, payload);
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
diff --git a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto
index b116826..aaf3c85 100644
--- a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto
+++ b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto
@@ -140,8 +140,6 @@ message OMResponse {
 
   required Status status = 5;
 
-  optional string leaderOMNodeId = 6;
-
   optional CreateVolumeResponse              createVolumeResponse          = 11;
   optional SetVolumePropertyResponse         setVolumePropertyResponse     = 12;
   optional CheckVolumeAccessResponse         checkVolumeAccessResponse     = 13;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java
index f84f95e..a1ef1f6 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java
@@ -32,9 +32,7 @@ import org.slf4j.LoggerFactory;
 import java.io.IOException;
 import java.net.BindException;
 import java.util.ArrayList;
-import java.util.HashMap;
 import java.util.List;
-import java.util.Map;
 import java.util.Random;
 import java.util.concurrent.TimeUnit;
 
@@ -50,7 +48,6 @@ public final class MiniOzoneHAClusterImpl extends MiniOzoneClusterImpl {
   private static final Logger LOG =
       LoggerFactory.getLogger(MiniOzoneHAClusterImpl.class);
 
-  private Map<String, OzoneManager> ozoneManagerMap;
   private List<OzoneManager> ozoneManagers;
 
   private static final Random RANDOM = new Random();
@@ -66,12 +63,11 @@ public final class MiniOzoneHAClusterImpl extends MiniOzoneClusterImpl {
 
   private MiniOzoneHAClusterImpl(
       OzoneConfiguration conf,
-      Map<String, OzoneManager> omMap,
+      List<OzoneManager> omList,
       StorageContainerManager scm,
       List<HddsDatanodeService> hddsDatanodes) {
     super(conf, scm, hddsDatanodes);
-    this.ozoneManagerMap = omMap;
-    this.ozoneManagers = new ArrayList<>(omMap.values());
+    this.ozoneManagers = omList;
   }
 
   /**
@@ -111,10 +107,6 @@ public final class MiniOzoneHAClusterImpl extends MiniOzoneClusterImpl {
     ozoneManagers.get(index).stop();
   }
 
-  public void stopOzoneManager(String omNodeId) {
-    ozoneManagerMap.get(omNodeId).stop();
-  }
-
   /**
    * Builder for configuring the MiniOzoneCluster to run.
    */
@@ -136,17 +128,17 @@ public final class MiniOzoneHAClusterImpl extends MiniOzoneClusterImpl {
       DefaultMetricsSystem.setMiniClusterMode(true);
       initializeConfiguration();
       StorageContainerManager scm;
-      Map<String, OzoneManager> omMap;
+      List<OzoneManager> omList;
       try {
         scm = createSCM();
         scm.start();
-        omMap = createOMService();
+        omList = createOMService();
       } catch (AuthenticationException ex) {
         throw new IOException("Unable to build MiniOzoneCluster. ", ex);
       }
 
       final List<HddsDatanodeService> hddsDatanodes = createHddsDatanodes(scm);
-      MiniOzoneHAClusterImpl cluster = new MiniOzoneHAClusterImpl(conf, omMap,
+      MiniOzoneHAClusterImpl cluster = new MiniOzoneHAClusterImpl(conf, omList,
           scm, hddsDatanodes);
       if (startDataNodes) {
         cluster.startHddsDatanodes();
@@ -179,10 +171,10 @@ public final class MiniOzoneHAClusterImpl extends MiniOzoneClusterImpl {
      * @throws IOException
      * @throws AuthenticationException
      */
-    private Map<String, OzoneManager> createOMService() throws IOException,
+    private List<OzoneManager> createOMService() throws IOException,
         AuthenticationException {
 
-      Map<String, OzoneManager> omMap = new HashMap<>();
+      List<OzoneManager> omList = new ArrayList<>(numOfOMs);
 
       int retryCount = 0;
       int basePort = 10000;
@@ -194,11 +186,10 @@ public final class MiniOzoneHAClusterImpl extends MiniOzoneClusterImpl {
 
           for (int i = 1; i<= numOfOMs; i++) {
             // Set nodeId
-            String nodeId = nodeIdBaseStr + i;
-            conf.set(OMConfigKeys.OZONE_OM_NODE_ID_KEY, nodeId);
+            conf.set(OMConfigKeys.OZONE_OM_NODE_ID_KEY, nodeIdBaseStr + i);
 
             // Set metadata/DB dir base path
-            String metaDirPath = path + "/" + nodeId;
+            String metaDirPath = path + "/" + nodeIdBaseStr + i;
             conf.set(OZONE_METADATA_DIRS, metaDirPath);
             OMStorage omStore = new OMStorage(conf);
             initializeOmStorage(omStore);
@@ -210,7 +201,7 @@ public final class MiniOzoneHAClusterImpl extends MiniOzoneClusterImpl {
 
             OzoneManager om = OzoneManager.createOm(null, conf);
             om.setCertClient(certClient);
-            omMap.put(nodeId, om);
+            omList.add(om);
 
             om.start();
             LOG.info("Started OzoneManager RPC server at " +
@@ -220,24 +211,23 @@ public final class MiniOzoneHAClusterImpl extends MiniOzoneClusterImpl {
           // Set default OM address to point to the first OM. Clients would
           // try connecting to this address by default
           conf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY,
-              NetUtils.getHostPortString(omMap.get(nodeIdBaseStr + 1)
-                  .getOmRpcServerAddr()));
+              NetUtils.getHostPortString(omList.get(0).getOmRpcServerAddr()));
 
           break;
         } catch (BindException e) {
-          for (OzoneManager om : omMap.values()) {
+          for (OzoneManager om : omList) {
             om.stop();
             om.join();
             LOG.info("Stopping OzoneManager server at " +
                 om.getOmRpcServerAddr());
           }
-          omMap.clear();
+          omList.clear();
           ++retryCount;
           LOG.info("MiniOzoneHACluster port conflicts, retried " +
               retryCount + " times");
         }
       }
-      return omMap;
+      return omList;
     }
 
     /**
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
index 0828fe8..32792ae 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
@@ -65,6 +65,8 @@ import org.apache.hadoop.ozone.client.VolumeArgs;
 import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
+import org.apache.hadoop.ozone.client.rpc.ha.OMProxyInfo;
+import org.apache.hadoop.ozone.client.rpc.ha.OMProxyProvider;
 import org.apache.hadoop.ozone.common.OzoneChecksumException;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
@@ -74,7 +76,6 @@ import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerLocat
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
-import org.apache.hadoop.ozone.om.ha.OMFailoverProxyProvider;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
@@ -188,10 +189,9 @@ public abstract class TestOzoneRpcClientAbstract {
    */
   @Test
   public void testOMClientProxyProvider() {
-    OMFailoverProxyProvider omFailoverProxyProvider = store.getClientProxy()
+    OMProxyProvider omProxyProvider = store.getClientProxy()
         .getOMProxyProvider();
-    List<OMFailoverProxyProvider.OMProxyInfo> omProxies =
-        omFailoverProxyProvider.getOMProxies();
+    List<OMProxyInfo> omProxies = omProxyProvider.getOMProxies();
 
     // For a non-HA OM service, there should be only one OM proxy.
     Assert.assertEquals(1, omProxies.size());
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java
index da8f870..62cda91 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java
@@ -18,29 +18,30 @@ package org.apache.hadoop.ozone.om;
 
 
 import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdfs.LogVerificationAppender;
-import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl;
-import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.om.ha.OMFailoverProxyProvider;
+import org.apache.hadoop.ozone.client.rpc.ha.OMProxyInfo;
+import org.apache.hadoop.ozone.client.rpc.ha.OMProxyProvider;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.web.handlers.UserArgs;
+import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
+import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
+import org.apache.hadoop.ozone.web.response.VolumeInfo;
+import org.apache.hadoop.ozone.web.utils.OzoneUtils;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.client.VolumeArgs;
-import org.apache.log4j.Logger;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
+import org.junit.Ignore;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
 import org.junit.rules.Timeout;
 
 import java.io.IOException;
-import java.net.ConnectException;
 import java.net.InetSocketAddress;
 import java.util.List;
 import java.util.UUID;
@@ -49,14 +50,6 @@ import static org.apache.hadoop.ozone.MiniOzoneHAClusterImpl
     .NODE_FAILURE_TIMEOUT;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
 import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_CLIENT_FAILOVER_SLEEP_BASE_MILLIS_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_CLIENT_FAILOVER_SLEEP_BASE_MILLIS_KEY;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_CLIENT_RETRY_MAX_ATTEMPTS_KEY;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
     .OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS;
 
 /**
@@ -65,7 +58,8 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys
 public class TestOzoneManagerHA {
 
   private MiniOzoneHAClusterImpl cluster = null;
-  private ObjectStore objectStore;
+  private StorageHandler storageHandler;
+  private UserArgs userArgs;
   private OzoneConfiguration conf;
   private String clusterId;
   private String scmId;
@@ -75,7 +69,7 @@ public class TestOzoneManagerHA {
   public ExpectedException exception = ExpectedException.none();
 
   @Rule
-  public Timeout timeout = new Timeout(120_000);
+  public Timeout timeout = new Timeout(60_000);
 
   /**
    * Create a MiniDFSCluster for testing.
@@ -91,9 +85,6 @@ public class TestOzoneManagerHA {
     scmId = UUID.randomUUID().toString();
     conf.setBoolean(OZONE_ACL_ENABLED, true);
     conf.setInt(OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS, 2);
-    conf.setInt(OZONE_CLIENT_RETRY_MAX_ATTEMPTS_KEY, 3);
-    conf.setInt(OZONE_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY, 3);
-    conf.setInt(OZONE_CLIENT_FAILOVER_SLEEP_BASE_MILLIS_KEY, 50);
 
     cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf)
         .setClusterId(clusterId)
@@ -102,7 +93,9 @@ public class TestOzoneManagerHA {
         .setNumOfOzoneManagers(numOfOMs)
         .build();
     cluster.waitForClusterToBeReady();
-    objectStore = OzoneClientFactory.getRpcClient(conf).getObjectStore();
+    storageHandler = new ObjectStoreHandler(conf).getStorageHandler();
+    userArgs = new UserArgs(null, OzoneUtils.getRequestID(),
+        null, null, null, null);
   }
 
   /**
@@ -122,7 +115,7 @@ public class TestOzoneManagerHA {
    */
   @Test
   public void testAllOMNodesRunning() throws Exception {
-    createVolumeTest(true);
+    testCreateVolume(true);
   }
 
   /**
@@ -133,56 +126,52 @@ public class TestOzoneManagerHA {
     cluster.stopOzoneManager(1);
     Thread.sleep(NODE_FAILURE_TIMEOUT * 2);
 
-    createVolumeTest(true);
+    testCreateVolume(true);
   }
 
   /**
    * Test client request fails when 2 OMs are down.
    */
   @Test
+  @Ignore("TODO:HDDS-1158")
   public void testTwoOMNodesDown() throws Exception {
     cluster.stopOzoneManager(1);
     cluster.stopOzoneManager(2);
     Thread.sleep(NODE_FAILURE_TIMEOUT * 2);
 
-    createVolumeTest(false);
+    testCreateVolume(false);
   }
 
   /**
    * Create a volume and test its attribute.
    */
-  private void createVolumeTest(boolean checkSuccess) throws Exception {
+  private void testCreateVolume(boolean checkSuccess) throws Exception {
     String userName = "user" + RandomStringUtils.randomNumeric(5);
     String adminName = "admin" + RandomStringUtils.randomNumeric(5);
     String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
 
-    VolumeArgs createVolumeArgs = VolumeArgs.newBuilder()
-        .setOwner(userName)
-        .setAdmin(adminName)
-        .build();
+    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    createVolumeArgs.setUserName(userName);
+    createVolumeArgs.setAdminName(adminName);
 
     try {
-      objectStore.createVolume(volumeName, createVolumeArgs);
+      storageHandler.createVolume(createVolumeArgs);
 
-      OzoneVolume retVolumeinfo = objectStore.getVolume(volumeName);
+      VolumeArgs getVolumeArgs = new VolumeArgs(volumeName, userArgs);
+      VolumeInfo retVolumeinfo = storageHandler.getVolumeInfo(getVolumeArgs);
 
       if (checkSuccess) {
-        Assert.assertTrue(retVolumeinfo.getName().equals(volumeName));
-        Assert.assertTrue(retVolumeinfo.getOwner().equals(userName));
-        Assert.assertTrue(retVolumeinfo.getAdmin().equals(adminName));
+        Assert.assertTrue(retVolumeinfo.getVolumeName().equals(volumeName));
+        Assert.assertTrue(retVolumeinfo.getOwner().getName().equals(userName));
       } else {
         // Verify that the request failed
+        Assert.assertTrue(retVolumeinfo.getVolumeName().isEmpty());
         Assert.fail("There is no quorum. Request should have failed");
       }
-    } catch (ConnectException | RemoteException e) {
+    } catch (OMException e) {
       if (!checkSuccess) {
-        // If the last OM to be tried by the RetryProxy is down, we would get
-        // ConnectException. Otherwise, we would get a RemoteException from the
-        // last running OM as it would fail to get a quorum.
-        if (e instanceof RemoteException) {
-          GenericTestUtils.assertExceptionContains(
-              "RaftRetryFailureException", e);
-        }
+        GenericTestUtils.assertExceptionContains(
+            "RaftRetryFailureException", e);
       } else {
         throw e;
       }
@@ -190,16 +179,14 @@ public class TestOzoneManagerHA {
   }
 
   /**
-   * Test that OMFailoverProxyProvider creates an OM proxy for each OM in the
-   * cluster.
+   * Test that OMProxyProvider creates an OM proxy for each OM in the cluster.
    */
   @Test
-  public void testOMProxyProviderInitialization() throws Exception {
+  public void testOMClientProxyProvide() throws Exception {
     OzoneClient rpcClient = cluster.getRpcClient();
-    OMFailoverProxyProvider omFailoverProxyProvider =
+    OMProxyProvider omProxyProvider =
         rpcClient.getObjectStore().getClientProxy().getOMProxyProvider();
-    List<OMFailoverProxyProvider.OMProxyInfo> omProxies =
-        omFailoverProxyProvider.getOMProxies();
+    List<OMProxyInfo> omProxies = omProxyProvider.getOMProxies();
 
     Assert.assertEquals(numOfOMs, omProxies.size());
 
@@ -207,7 +194,7 @@ public class TestOzoneManagerHA {
       InetSocketAddress omRpcServerAddr =
           cluster.getOzoneManager(i).getOmRpcServerAddr();
       boolean omClientProxyExists = false;
-      for (OMFailoverProxyProvider.OMProxyInfo omProxyInfo : omProxies) {
+      for (OMProxyInfo omProxyInfo : omProxies) {
         if (omProxyInfo.getAddress().equals(omRpcServerAddr)) {
           omClientProxyExists = true;
           break;
@@ -218,99 +205,4 @@ public class TestOzoneManagerHA {
           omClientProxyExists);
     }
   }
-
-  /**
-   * Test OMFailoverProxyProvider failover on connection exception to OM client.
-   */
-  @Test
-  public void testOMProxyProviderFailoverOnConnectionFailure()
-      throws Exception {
-    OMFailoverProxyProvider omFailoverProxyProvider =
-        objectStore.getClientProxy().getOMProxyProvider();
-    String firstProxyNodeId = omFailoverProxyProvider.getCurrentProxyOMNodeId();
-
-    createVolumeTest(true);
-
-    // On stopping the current OM Proxy, the next connection attempt should
-    // failover to a another OM proxy.
-    cluster.stopOzoneManager(firstProxyNodeId);
-    Thread.sleep(OZONE_CLIENT_FAILOVER_SLEEP_BASE_MILLIS_DEFAULT * 4);
-
-    // Next request to the proxy provider should result in a failover
-    createVolumeTest(true);
-    Thread.sleep(OZONE_CLIENT_FAILOVER_SLEEP_BASE_MILLIS_DEFAULT);
-
-    // Get the new OM Proxy NodeId
-    String newProxyNodeId = omFailoverProxyProvider.getCurrentProxyOMNodeId();
-
-    // Verify that a failover occured. the new proxy nodeId should be
-    // different from the old proxy nodeId.
-    Assert.assertNotEquals("Failover did not occur as expected",
-        firstProxyNodeId, newProxyNodeId);
-  }
-
-  /**
-   * Test OMFailoverProxyProvider failover when current OM proxy is not
-   * the current OM Leader.
-   */
-  @Test
-  public void testOMProxyProviderFailoverToCurrentLeader() throws Exception {
-    OMFailoverProxyProvider omFailoverProxyProvider =
-        objectStore.getClientProxy().getOMProxyProvider();
-
-    // Run couple of createVolume tests to discover the current Leader OM
-    createVolumeTest(true);
-    createVolumeTest(true);
-
-    // The OMFailoverProxyProvider will point to the current leader OM node.
-    String leaderOMNodeId = omFailoverProxyProvider.getCurrentProxyOMNodeId();
-
-    // Perform a manual failover of the proxy provider to move the
-    // currentProxyIndex to a node other than the leader OM.
-    omFailoverProxyProvider.performFailover(
-        omFailoverProxyProvider.getProxy().proxy);
-
-    String newProxyNodeId = omFailoverProxyProvider.getCurrentProxyOMNodeId();
-    Assert.assertNotEquals(leaderOMNodeId, newProxyNodeId);
-
-    // Once another request is sent to this new proxy node, the leader
-    // information must be returned via the response and a failover must
-    // happen to the leader proxy node.
-    createVolumeTest(true);
-    Thread.sleep(2000);
-
-    String newLeaderOMNodeId =
-        omFailoverProxyProvider.getCurrentProxyOMNodeId();
-
-    // The old and new Leader OM NodeId must match since there was no new
-    // election in the Ratis ring.
-    Assert.assertEquals(leaderOMNodeId, newLeaderOMNodeId);
-  }
-
-  @Test
-  public void testOMRetryProxy() throws Exception {
-    // Stop all the OMs. After making 5 (set maxRetries value) attempts at
-    // connection, the RpcClient should give up.
-    for (int i = 0; i < numOfOMs; i++) {
-      cluster.stopOzoneManager(i);
-    }
-
-    final LogVerificationAppender appender = new LogVerificationAppender();
-    final org.apache.log4j.Logger logger = Logger.getRootLogger();
-    logger.addAppender(appender);
-
-    try {
-      createVolumeTest(true);
-      Assert.fail("TestOMRetryProxy should fail when there are no OMs running");
-    } catch (ConnectException e) {
-      // Each retry attempt tries upto 10 times to connect. So there should be
-      // 3*10 "Retrying connect to server" messages
-      Assert.assertEquals(30,
-          appender.countLinesWithMessage("Retrying connect to server:"));
-
-      Assert.assertEquals(1,
-          appender.countLinesWithMessage("Failed to connect to OM. Attempted " +
-              "3 retries and 3 failovers"));
-    }
-  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index cacdca8..ff94935 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -70,7 +70,6 @@ import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.ozone.OzoneIllegalArgumentException;
 import org.apache.hadoop.ozone.OzoneSecurityUtil;
-import org.apache.hadoop.ozone.om.ha.OMFailoverProxyProvider;
 import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
 import org.apache.hadoop.ozone.security.OzoneSecurityException;
 import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
@@ -2604,11 +2603,6 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
     return peerNodes;
   }
 
-  @Override
-  public OMFailoverProxyProvider getOMFailoverProxyProvider() {
-    return null;
-  }
-
   @VisibleForTesting
   public CertificateClient getCertificateClient() {
     return certClient;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OMRatisHelper.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OMRatisHelper.java
index 8e4582d..9115421 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OMRatisHelper.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OMRatisHelper.java
@@ -32,7 +32,6 @@ import org.apache.ratis.client.RaftClient;
 import org.apache.ratis.conf.RaftProperties;
 import org.apache.ratis.grpc.GrpcConfigKeys;
 import org.apache.ratis.protocol.Message;
-import org.apache.ratis.protocol.RaftClientReply;
 import org.apache.ratis.protocol.RaftGroup;
 import org.apache.ratis.protocol.RaftPeerId;
 import org.apache.ratis.retry.RetryPolicy;
@@ -101,12 +100,10 @@ public final class OMRatisHelper {
     return Message.valueOf(ByteString.copyFrom(requestBytes));
   }
 
-  static OMResponse getOMResponseFromRaftClientReply(RaftClientReply reply)
+  static OMResponse convertByteStringToOMResponse(ByteString byteString)
       throws InvalidProtocolBufferException {
-    byte[] bytes = reply.getMessage().getContent().toByteArray();
-    return OMResponse.newBuilder(OMResponse.parseFrom(bytes))
-        .setLeaderOMNodeId(reply.getReplierId())
-        .build();
+    byte[] bytes = byteString.toByteArray();
+    return OMResponse.parseFrom(bytes);
   }
 
   static OMResponse getErrorResponse(Type cmdType, Exception e) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisClient.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisClient.java
index 1b4c634..9e1cafc 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisClient.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisClient.java
@@ -23,10 +23,7 @@ import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.CompletionException;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
-
 import com.google.protobuf.InvalidProtocolBufferException;
-import com.google.protobuf.ServiceException;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
@@ -56,24 +53,24 @@ public final class OzoneManagerRatisClient implements Closeable {
       OzoneManagerRatisClient.class);
 
   private final RaftGroup raftGroup;
-  private final String omNodeID;
+  private final String omID;
   private final RpcType rpcType;
   private RaftClient raftClient;
   private final RetryPolicy retryPolicy;
   private final Configuration conf;
 
-  private OzoneManagerRatisClient(String omNodeId, RaftGroup raftGroup,
+  private OzoneManagerRatisClient(String omId, RaftGroup raftGroup,
       RpcType rpcType, RetryPolicy retryPolicy,
       Configuration config) {
     this.raftGroup = raftGroup;
-    this.omNodeID = omNodeId;
+    this.omID = omId;
     this.rpcType = rpcType;
     this.retryPolicy = retryPolicy;
     this.conf = config;
   }
 
   public static OzoneManagerRatisClient newOzoneManagerRatisClient(
-      String omNodeId, RaftGroup raftGroup, Configuration conf) {
+      String omId, RaftGroup raftGroup, Configuration conf) {
     final String rpcType = conf.get(
         OMConfigKeys.OZONE_OM_RATIS_RPC_TYPE_KEY,
         OMConfigKeys.OZONE_OM_RATIS_RPC_TYPE_DEFAULT);
@@ -90,19 +87,19 @@ public final class OzoneManagerRatisClient implements Closeable {
     final RetryPolicy retryPolicy = RetryPolicies
         .retryUpToMaximumCountWithFixedSleep(maxRetryCount, sleepDuration);
 
-    return new OzoneManagerRatisClient(omNodeId, raftGroup,
+    return new OzoneManagerRatisClient(omId, raftGroup,
         SupportedRpcType.valueOfIgnoreCase(rpcType), retryPolicy, conf);
   }
 
   public void connect() {
     LOG.debug("Connecting to OM Ratis Server GroupId:{} OM:{}",
-        raftGroup.getGroupId().getUuid().toString(), omNodeID);
+        raftGroup.getGroupId().getUuid().toString(), omID);
 
     // TODO : XceiverClient ratis should pass the config value of
     // maxOutstandingRequests so as to set the upper bound on max no of async
     // requests to be handled by raft client
 
-    raftClient = OMRatisHelper.newRaftClient(rpcType, omNodeID, raftGroup,
+    raftClient = OMRatisHelper.newRaftClient(rpcType, omID, raftGroup,
         retryPolicy, conf);
   }
 
@@ -122,12 +119,13 @@ public final class OzoneManagerRatisClient implements Closeable {
    * @param request Request
    * @return Response to the command
    */
-  public OMResponse sendCommand(OMRequest request) throws ServiceException {
+  public OMResponse sendCommand(OMRequest request) {
     try {
       CompletableFuture<OMResponse> reply = sendCommandAsync(request);
       return reply.get();
     } catch (ExecutionException | InterruptedException e) {
-      throw new ServiceException(e);
+      LOG.error("Failed to execute command: " + request, e);
+      return OMRatisHelper.getErrorResponse(request.getCmdType(), e);
     }
   }
 
@@ -154,10 +152,9 @@ public final class OzoneManagerRatisClient implements Closeable {
                 if (raftRetryFailureException != null) {
                   throw new CompletionException(raftRetryFailureException);
                 }
-
                 OMResponse response = OMRatisHelper
-                    .getOMResponseFromRaftClientReply(reply);
-
+                    .convertByteStringToOMResponse(reply.getMessage()
+                        .getContent());
                 return response;
               } catch (InvalidProtocolBufferException e) {
                 throw new CompletionException(e);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java
index 2f1d64d8..5684fa5 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java
@@ -80,8 +80,7 @@ public class OzoneManagerProtocolServerSideTranslatorPB implements
   /**
    * Submits request to OM's Ratis server.
    */
-  private OMResponse submitRequestToRatis(OMRequest request)
-      throws ServiceException {
+  private OMResponse submitRequestToRatis(OMRequest request) {
     return omRatisClient.sendCommand(request);
   }
 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java
index 8a8be35..83d2245 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java
@@ -34,6 +34,8 @@ import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.OMNodeDetails;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .OMResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.ratis.protocol.RaftGroupId;
@@ -108,6 +110,27 @@ public class TestOzoneManagerRatisServer {
   }
 
   /**
+   * Submit any request to OM Ratis server and check that the dummy response
+   * message is received.
+   */
+  @Test
+  public void testSubmitRatisRequest() throws Exception {
+    // Wait for leader election
+    Thread.sleep(LEADER_ELECTION_TIMEOUT * 2);
+    OMRequest request = OMRequest.newBuilder()
+        .setCmdType(OzoneManagerProtocolProtos.Type.CreateVolume)
+        .setClientId(clientId)
+        .build();
+
+    OMResponse response = omRatisClient.sendCommand(request);
+
+    Assert.assertEquals(OzoneManagerProtocolProtos.Type.CreateVolume,
+        response.getCmdType());
+    Assert.assertEquals(false, response.getSuccess());
+    Assert.assertEquals(false, response.hasCreateVolumeResponse());
+  }
+
+  /**
    * Test that all of {@link OzoneManagerProtocolProtos.Type} enum values are
    * categorized in {@link OmUtils#isReadOnly(OMRequest)}.
    */


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[hadoop] 03/05: YARN-9332. RackResolver tool should accept multiple hosts. Contributed by Lantao Jin.

Posted by aa...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

aajisaka pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit e20b5ef52cbc4f9f3bd6cdc36cb359eece63bfbc
Author: Weiwei Yang <ww...@apache.org>
AuthorDate: Sat Mar 2 23:19:17 2019 +0800

    YARN-9332. RackResolver tool should accept multiple hosts. Contributed by Lantao Jin.
---
 .../org/apache/hadoop/yarn/util/RackResolver.java  | 74 +++++++++++++++++++---
 .../apache/hadoop/yarn/util/TestRackResolver.java  | 60 ++++++++++++++++++
 2 files changed, 126 insertions(+), 8 deletions(-)

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/RackResolver.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/RackResolver.java
index a854701..07ddbe9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/RackResolver.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/RackResolver.java
@@ -18,9 +18,11 @@
 
 package org.apache.hadoop.yarn.util;
 
+import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 
+import com.google.common.base.Strings;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
@@ -87,6 +89,20 @@ public final class RackResolver {
   }
 
   /**
+   * Utility method for getting a list of hostname resolved to a list of node
+   *  in the network topology. This method initializes the class with the
+   * right resolver implementation.
+   * @param conf
+   * @param hostNames
+   * @return nodes {@link Node} after resolving the hostnames
+   */
+  public static List<Node> resolve(
+      Configuration conf, List<String> hostNames) {
+    init(conf);
+    return coreResolve(hostNames);
+  }
+
+  /**
    * Utility method for getting a hostname resolved to a node in the
    * network topology. This method doesn't initialize the class.
    * Call {@link #init(Configuration)} explicitly.
@@ -100,18 +116,50 @@ public final class RackResolver {
     return coreResolve(hostName);
   }
 
+  /**
+   * Utility method for getting a list of hostname resolved to a list of node
+   *  in the network topology. This method doesn't initialize the class.
+   * Call {@link #init(Configuration)} explicitly.
+   * @param hostNames
+   * @return nodes {@link Node} after resolving the hostnames
+   */
+  public static List<Node> resolve(List<String> hostNames) {
+    if (!initCalled) {
+      throw new IllegalStateException("RackResolver class " +
+          "not yet initialized");
+    }
+    return coreResolve(hostNames);
+  }
+
   private static Node coreResolve(String hostName) {
     List <String> tmpList = Collections.singletonList(hostName);
-    List <String> rNameList = dnsToSwitchMapping.resolve(tmpList);
-    String rName = NetworkTopology.DEFAULT_RACK;
-    if (rNameList == null || rNameList.get(0) == null) {
-      LOG.debug("Could not resolve {}. Falling back to {}", hostName,
-            NetworkTopology.DEFAULT_RACK);
+    return coreResolve(tmpList).get(0);
+  }
+
+  private static List<Node> coreResolve(List<String> hostNames) {
+    List<Node> nodes = new ArrayList<Node>(hostNames.size());
+    List<String> rNameList = dnsToSwitchMapping.resolve(hostNames);
+    if (rNameList == null || rNameList.isEmpty()) {
+      for (String hostName : hostNames) {
+        nodes.add(new NodeBase(hostName, NetworkTopology.DEFAULT_RACK));
+      }
+      LOG.info("Got an error when resolve hostNames. Falling back to "
+          + NetworkTopology.DEFAULT_RACK + " for all.");
     } else {
-      rName = rNameList.get(0);
-      LOG.debug("Resolved {} to {}", hostName, rName);
+      for (int i = 0; i < hostNames.size(); i++) {
+        if (Strings.isNullOrEmpty(rNameList.get(i))) {
+          // fallback to use default rack
+          nodes.add(new NodeBase(hostNames.get(i),
+              NetworkTopology.DEFAULT_RACK));
+          LOG.debug("Could not resolve {}. Falling back to {}",
+              hostNames.get(i), NetworkTopology.DEFAULT_RACK);
+        } else {
+          nodes.add(new NodeBase(hostNames.get(i), rNameList.get(i)));
+          LOG.debug("Resolved {} to {}", hostNames.get(i), rNameList.get(i));
+        }
+      }
     }
-    return new NodeBase(hostName, rName);
+    return nodes;
   }
 
   /**
@@ -122,4 +170,14 @@ public final class RackResolver {
   static DNSToSwitchMapping getDnsToSwitchMapping() {
     return dnsToSwitchMapping;
   }
+
+  /**
+   * Only used by tests.
+   */
+  @Private
+  @VisibleForTesting
+  static void reset() {
+    initCalled = false;
+    dnsToSwitchMapping = null;
+  }
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestRackResolver.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestRackResolver.java
index 70ca23c..629578f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestRackResolver.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestRackResolver.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.util;
 import java.net.InetAddress;
 import java.net.UnknownHostException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
 
 import org.apache.commons.logging.Log;
@@ -31,6 +32,7 @@ import org.apache.hadoop.net.DNSToSwitchMapping;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
 import org.junit.Assert;
+import org.junit.Before;
 import org.junit.Test;
 
 public class TestRackResolver {
@@ -38,6 +40,10 @@ public class TestRackResolver {
   private static Log LOG = LogFactory.getLog(TestRackResolver.class);
   private static final String invalidHost = "invalidHost";
 
+  @Before
+  public void setUp() {
+    RackResolver.reset();
+  }
 
   public static final class MyResolver implements DNSToSwitchMapping {
 
@@ -81,6 +87,44 @@ public class TestRackResolver {
     }
   }
 
+  /**
+   * This class is to test the resolve method which accepts a list of hosts
+   * in RackResolver.
+   */
+  public static final class MultipleResolver implements DNSToSwitchMapping {
+
+    @Override
+    public List<String> resolve(List<String> hostList) {
+      List<String> returnList = new ArrayList<String>();
+      if (hostList.isEmpty()) {
+        return returnList;
+      }
+      for (String host : hostList) {
+        if (host.equals(invalidHost)) {
+          // Simulate condition where resolving host returns empty string
+          returnList.add("");
+        }
+        LOG.info("Received resolve request for " + host);
+        if (host.startsWith("host")) {
+          returnList.add("/" + host.replace("host", "rack"));
+        }
+        // I should not be reached again as RackResolver is supposed to do
+        // caching.
+      }
+      Assert.assertEquals(returnList.size(), hostList.size());
+      return returnList;
+    }
+
+    @Override
+    public void reloadCachedMappings() {
+      // nothing to do here, since RawScriptBasedMapping has no cache.
+    }
+
+    @Override
+    public void reloadCachedMappings(List<String> names) {
+    }
+  }
+
   @Test
   public void testCaching() {
     Configuration conf = new Configuration();
@@ -102,4 +146,20 @@ public class TestRackResolver {
     Assert.assertEquals(NetworkTopology.DEFAULT_RACK, node.getNetworkLocation());
   }
 
+  @Test
+  public void testMultipleHosts() {
+    Configuration conf = new Configuration();
+    conf.setClass(
+        CommonConfigurationKeysPublic
+            .NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
+        MultipleResolver.class,
+        DNSToSwitchMapping.class);
+    RackResolver.init(conf);
+    List<Node> nodes = RackResolver.resolve(
+        Arrays.asList("host1", invalidHost, "host2"));
+    Assert.assertEquals("/rack1", nodes.get(0).getNetworkLocation());
+    Assert.assertEquals(NetworkTopology.DEFAULT_RACK,
+        nodes.get(1).getNetworkLocation());
+    Assert.assertEquals("/rack2", nodes.get(2).getNetworkLocation());
+  }
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[hadoop] 04/05: Revert "HDFS-14261. Kerberize JournalNodeSyncer unit test. Contributed by Siyao Meng."

Posted by aa...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

aajisaka pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 6c4d566955084f7ea5e8b0208db7d54c6ae52ef1
Author: Wei-Chiu Chuang <we...@apache.org>
AuthorDate: Sat Mar 2 17:02:26 2019 -0800

    Revert "HDFS-14261. Kerberize JournalNodeSyncer unit test. Contributed by Siyao Meng."
    
    This reverts commit 5c10630ad8c976380491adec8e2d9f3e49ea8fa9.
---
 .../hdfs/qjournal/server/TestJournalNodeSync.java  | 90 +---------------------
 1 file changed, 2 insertions(+), 88 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeSync.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeSync.java
index 2f145e8..c23604b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeSync.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeSync.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.qjournal.server;
 import com.google.common.base.Supplier;
 import com.google.common.collect.Lists;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -30,25 +29,16 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
 import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster;
 import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster;
-import org.apache.hadoop.hdfs.qjournal.TestSecureNNWithQJM;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 import static org.apache.hadoop.hdfs.server.namenode.FileJournalManager
     .getLogFile;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
-import org.apache.hadoop.http.HttpConfig;
-import org.apache.hadoop.minikdc.MiniKdc;
-import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
-import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TestName;
@@ -56,7 +46,6 @@ import org.junit.rules.TestName;
 import java.io.File;
 import java.io.IOException;
 import java.util.List;
-import java.util.Properties;
 import java.util.Random;
 
 /**
@@ -73,85 +62,12 @@ public class TestJournalNodeSync {
   private int activeNNindex=0;
   private static final int DFS_HA_TAILEDITS_PERIOD_SECONDS=1;
 
-  private static HdfsConfiguration baseConf;
-  private static File baseDir;
-  private static String keystoresDir;
-  private static String sslConfDir;
-  private static MiniKdc kdc;
-
   @Rule
   public TestName testName = new TestName();
 
-  @BeforeClass
-  public static void init() throws Exception {
-    // Init Kerberos
-    baseDir =
-        GenericTestUtils.getTestDir(TestSecureNNWithQJM.class.getSimpleName());
-    FileUtil.fullyDelete(baseDir);
-    Assert.assertTrue(baseDir.mkdirs());
-
-    Properties kdcConf = MiniKdc.createConf();
-    kdc = new MiniKdc(kdcConf, baseDir);
-    kdc.start();
-
-    baseConf = new HdfsConfiguration();
-    SecurityUtil.setAuthenticationMethod(
-        UserGroupInformation.AuthenticationMethod.KERBEROS, baseConf);
-    UserGroupInformation.setConfiguration(baseConf);
-    Assert.assertTrue("Expected configuration to enable security",
-        UserGroupInformation.isSecurityEnabled());
-
-    String userName = UserGroupInformation.getLoginUser().getShortUserName();
-    File keytabFile = new File(baseDir, userName + ".keytab");
-    String keytab = keytabFile.getAbsolutePath();
-    // Windows will not reverse name lookup "127.0.0.1" to "localhost".
-    String krbInstance = Path.WINDOWS ? "127.0.0.1" : "localhost";
-    kdc.createPrincipal(keytabFile,
-        userName + "/" + krbInstance,
-        "HTTP/" + krbInstance);
-    String hdfsPrincipal = userName + "/" + krbInstance + "@" + kdc.getRealm();
-    String spnegoPrincipal = "HTTP/" + krbInstance + "@" + kdc.getRealm();
-
-    baseConf.set(DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
-    baseConf.set(DFS_NAMENODE_KEYTAB_FILE_KEY, keytab);
-    baseConf.set(DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
-    baseConf.set(DFS_DATANODE_KEYTAB_FILE_KEY, keytab);
-    baseConf.set(DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
-        spnegoPrincipal);
-    baseConf.set(DFS_JOURNALNODE_KEYTAB_FILE_KEY, keytab);
-    baseConf.set(DFS_JOURNALNODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
-    baseConf.set(DFS_JOURNALNODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,
-        spnegoPrincipal);
-    baseConf.setBoolean(DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
-    baseConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "authentication");
-    baseConf.set(DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
-    baseConf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
-    baseConf.set(DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
-    baseConf.set(DFS_JOURNALNODE_HTTPS_ADDRESS_KEY, "localhost:0");
-    baseConf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY, 10);
-
-    keystoresDir = baseDir.getAbsolutePath();
-    sslConfDir = KeyStoreTestUtil.getClasspathDir(
-        TestSecureNNWithQJM.class);
-    KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, baseConf, false);
-    baseConf.set(DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY,
-        KeyStoreTestUtil.getClientSSLConfigFileName());
-    baseConf.set(DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
-        KeyStoreTestUtil.getServerSSLConfigFileName());
-  }
-
-  @AfterClass
-  public static void destroy() throws Exception {
-    if (kdc != null) {
-      kdc.stop();
-    }
-    FileUtil.fullyDelete(baseDir);
-    KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
-  }
-
   @Before
-  public void setUpMiniCluster() throws Exception {
-    conf = new HdfsConfiguration(baseConf);
+  public void setUpMiniCluster() throws IOException {
+    conf = new HdfsConfiguration();
     conf.setBoolean(DFSConfigKeys.DFS_JOURNALNODE_ENABLE_SYNC_KEY, true);
     conf.setLong(DFSConfigKeys.DFS_JOURNALNODE_SYNC_INTERVAL_KEY, 1000L);
     if (testName.getMethodName().equals(
@@ -602,8 +518,6 @@ public class TestJournalNodeSync {
    * @return the startTxId of next segment after rolling edits.
    */
   private long generateEditLog(int numEdits) throws IOException {
-    // rollEditLog first due to OP_UPDATE_MASTER_KEY
-    dfsCluster.getNameNode(activeNNindex).getRpcServer().rollEditLog();
     long lastWrittenTxId = dfsCluster.getNameNode(activeNNindex).getFSImage()
         .getEditLog().getLastWrittenTxId();
     for (int i = 1; i <= numEdits; i++) {


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org