Posted to commits@phoenix.apache.org by mi...@apache.org on 2019/06/11 12:38:39 UTC

[phoenix] branch 4.x-HBase-1.5 updated: [PHOENIX-5228] use slf4j for logging in phoenix project

This is an automated email from the ASF dual-hosted git repository.

mihir6692 pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.5 by this push:
     new 8210033  [PHOENIX-5228] use slf4j for logging in phoenix project
8210033 is described below

commit 82100338a9ad4c3854c07b4b56bc3c1904045cd2
Author: Xinyi <xy...@salesforce.com>
AuthorDate: Tue Jun 11 18:05:35 2019 +0530

    [PHOENIX-5228] use slf4j for logging in phoenix project
---
 .../wal/WALRecoveryRegionPostOpenIT.java           | 12 ++--
 ...WALReplayWithIndexWritesAndCompressedWALIT.java | 12 ++--
 .../src/it/java/org/apache/phoenix/Sandbox.java    |  4 +-
 .../apache/phoenix/end2end/BasePermissionsIT.java  | 14 ++--
 .../apache/phoenix/end2end/End2EndTestDriver.java  |  4 +-
 .../apache/phoenix/end2end/OrphanViewToolIT.java   |  4 +-
 .../index/IndexRebuildIncrementDisableCountIT.java | 12 ++--
 .../index/InvalidIndexStateClientSideIT.java       |  9 ++-
 .../phoenix/end2end/index/MutableIndexIT.java      |  4 +-
 .../end2end/index/MutableIndexReplicationIT.java   | 24 +++----
 .../end2end/index/PartialIndexRebuilderIT.java     |  4 +-
 .../index/FailForUnsupportedHBaseVersionsIT.java   |  8 +--
 .../phoenix/jdbc/SecureUserConnectionsIT.java      |  8 +--
 .../monitoring/GlobalPhoenixMetricsTestSink.java   |  3 +-
 .../phoenix/monitoring/PhoenixMetricsIT.java       | 12 ++--
 .../apache/phoenix/query/ConnectionCachingIT.java  |  6 +-
 .../phoenix/schema/stats/BaseStatsCollectorIT.java |  8 +--
 .../phoenix/schema/stats/NoOpStatsCollectorIT.java | 10 +--
 .../apache/phoenix/trace/BaseTracingTestIT.java    | 10 +--
 .../phoenix/trace/PhoenixTracingEndToEndIT.java    | 36 +++++-----
 .../hbase/ipc/PhoenixRpcSchedulerFactory.java      | 10 +--
 .../IndexHalfStoreFileReaderGenerator.java         | 10 +--
 .../wal/BinaryCompatibleBaseDecoder.java           | 12 ++--
 .../apache/phoenix/cache/ServerCacheClient.java    | 20 +++---
 .../java/org/apache/phoenix/call/CallRunner.java   |  8 +--
 .../coprocessor/MetaDataRegionObserver.java        | 77 ++++++++++------------
 .../coprocessor/PhoenixAccessController.java       | 22 +++----
 .../phoenix/coprocessor/ScanRegionObserver.java    | 14 ++--
 .../phoenix/coprocessor/TaskRegionObserver.java    | 23 ++-----
 .../coprocessor/tasks/DropChildViewsTask.java      | 14 ++--
 .../coprocessor/tasks/IndexRebuildTask.java        | 15 +++--
 .../org/apache/phoenix/execute/BaseQueryPlan.java  | 14 ++--
 .../org/apache/phoenix/execute/HashJoinPlan.java   | 12 ++--
 .../expression/function/CollationKeyFunction.java  | 26 ++++----
 .../org/apache/phoenix/hbase/index/Indexer.java    | 42 ++++++------
 .../apache/phoenix/hbase/index/LockManager.java    |  8 +--
 .../hbase/index/builder/BaseIndexBuilder.java      |  8 +--
 .../hbase/index/builder/IndexBuildManager.java     |  6 +-
 .../hbase/index/covered/NonTxIndexBuilder.java     | 10 +--
 .../hbase/index/covered/data/IndexMemStore.java    | 24 +++----
 .../hbase/index/parallel/BaseTaskRunner.java       | 10 +--
 .../index/parallel/QuickFailingTaskRunner.java     |  6 +-
 .../phoenix/hbase/index/parallel/TaskBatch.java    |  8 +--
 .../hbase/index/parallel/ThreadPoolBuilder.java    | 10 +--
 .../hbase/index/parallel/ThreadPoolManager.java    | 14 ++--
 .../hbase/index/util/IndexManagementUtil.java      | 10 +--
 .../phoenix/hbase/index/write/IndexWriter.java     | 12 ++--
 .../hbase/index/write/IndexWriterUtils.java        |  6 +-
 .../index/write/KillServerOnFailurePolicy.java     | 10 +--
 .../index/write/ParallelWriterIndexCommitter.java  | 18 ++---
 .../hbase/index/write/RecoveryIndexWriter.java     | 12 ++--
 .../TrackingParallelWriterIndexCommitter.java      | 18 ++---
 .../phoenix/index/PhoenixIndexFailurePolicy.java   | 28 ++++----
 .../phoenix/index/PhoenixTransactionalIndexer.java | 10 +--
 .../apache/phoenix/iterate/SnapshotScanner.java    | 12 ++--
 .../iterate/TableSnapshotResultIterator.java       |  8 +--
 .../apache/phoenix/jdbc/PhoenixEmbeddedDriver.java | 26 ++++----
 .../org/apache/phoenix/jdbc/PhoenixResultSet.java  |  8 +--
 .../java/org/apache/phoenix/log/QueryLogger.java   | 12 ++--
 .../apache/phoenix/log/QueryLoggerDisruptor.java   | 10 +--
 .../org/apache/phoenix/log/TableLogWriter.java     |  8 +--
 .../phoenix/mapreduce/AbstractBulkLoadTool.java    | 26 ++++----
 .../mapreduce/FormatToBytesWritableMapper.java     |  4 +-
 .../phoenix/mapreduce/FormatToKeyValueReducer.java |  2 +-
 .../phoenix/mapreduce/MultiHfileOutputFormat.java  | 10 +--
 .../apache/phoenix/mapreduce/OrphanViewTool.java   |  8 +--
 .../phoenix/mapreduce/PhoenixInputFormat.java      | 20 +++---
 .../phoenix/mapreduce/PhoenixOutputFormat.java     |  8 +--
 .../phoenix/mapreduce/PhoenixRecordReader.java     | 18 ++---
 .../phoenix/mapreduce/PhoenixRecordWriter.java     | 12 ++--
 .../PhoenixServerBuildIndexInputFormat.java        |  8 +--
 .../phoenix/mapreduce/PhoenixTextInputFormat.java  |  6 +-
 .../phoenix/mapreduce/RegexToKeyValueMapper.java   |  4 +-
 .../mapreduce/index/DirectHTableWriter.java        |  8 +--
 .../mapreduce/index/IndexScrutinyMapper.java       | 10 +--
 .../phoenix/mapreduce/index/IndexScrutinyTool.java | 22 +++----
 .../apache/phoenix/mapreduce/index/IndexTool.java  | 24 +++----
 .../phoenix/mapreduce/index/IndexToolUtil.java     |  4 +-
 .../index/PhoenixIndexImportDirectMapper.java      | 10 +--
 .../index/PhoenixIndexImportDirectReducer.java     |  4 +-
 .../mapreduce/index/PhoenixIndexImportMapper.java  |  6 +-
 .../index/PhoenixIndexPartialBuildMapper.java      | 10 +--
 .../index/PhoenixServerBuildIndexMapper.java       |  2 +-
 .../index/automation/PhoenixMRJobSubmitter.java    | 32 ++++-----
 .../mapreduce/util/PhoenixConfigurationUtil.java   | 16 ++---
 .../java/org/apache/phoenix/metrics/Metrics.java   | 12 ++--
 .../phoenix/monitoring/GlobalClientMetrics.java    |  4 +-
 .../monitoring/GlobalMetricRegistriesAdapter.java  | 16 ++---
 .../schema/stats/DefaultStatisticsCollector.java   | 26 ++++----
 .../phoenix/schema/stats/StatisticsScanner.java    | 28 ++++----
 .../phoenix/schema/stats/UpdateStatisticsTool.java | 14 ++--
 .../org/apache/phoenix/tool/PhoenixCanaryTool.java | 28 ++++----
 .../apache/phoenix/trace/PhoenixMetricsSink.java   | 22 +++----
 .../java/org/apache/phoenix/trace/TraceReader.java | 14 ++--
 .../apache/phoenix/trace/TraceSpanReceiver.java    | 14 ++--
 .../java/org/apache/phoenix/trace/TraceWriter.java | 32 ++++-----
 .../org/apache/phoenix/trace/util/Tracing.java     | 10 +--
 .../org/apache/phoenix/util/CSVCommonsLoader.java  |  8 +--
 .../phoenix/util/EquiDepthStreamHistogram.java     | 14 ++--
 .../org/apache/phoenix/util/PhoenixMRJobUtil.java  | 24 +++----
 .../java/org/apache/phoenix/util/QueryUtil.java    | 10 +--
 .../java/org/apache/phoenix/util/ServerUtil.java   |  8 +--
 .../org/apache/phoenix/util/UpsertExecutor.java    |  4 +-
 .../phoenix/util/ZKBasedMasterElectionUtil.java    | 16 ++---
 .../apache/phoenix/util/csv/CsvUpsertExecutor.java |  6 +-
 .../phoenix/util/json/JsonUpsertExecutor.java      |  6 +-
 .../phoenix/util/regex/RegexUpsertExecutor.java    |  6 +-
 .../phoenix/hbase/index/IndexTestingUtils.java     | 12 ++--
 .../apache/phoenix/hbase/index/StubAbortable.java  |  8 +--
 .../phoenix/hbase/index/write/TestIndexWriter.java | 14 ++--
 .../hbase/index/write/TestParalleIndexWriter.java  | 10 +--
 .../write/TestParalleWriterIndexCommitter.java     | 10 +--
 .../hbase/index/write/TestWALRecoveryCaching.java  | 34 +++++-----
 .../org/apache/phoenix/metrics/LoggingSink.java    | 12 ++--
 .../tool/ParameterizedPhoenixCanaryToolIT.java     |  9 +--
 .../CoprocessorHConnectionTableFactoryTest.java    |  8 +--
 .../java/org/apache/phoenix/util/TestUtil.java     | 10 +--
 .../pherf/workload/MultithreadedDiffer.java        |  7 +-
 .../apache/phoenix/pherf/XMLConfigParserTest.java  |  4 +-
 .../pherf/result/impl/XMLResultHandlerTest.java    |  4 +-
 .../apache/phoenix/tracingwebapp/http/Main.java    |  6 +-
 121 files changed, 780 insertions(+), 799 deletions(-)
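
The change is mechanical across all 121 files: the commons-logging Log/LogFactory pair is swapped for the slf4j Logger/LoggerFactory pair, and the logger field is renamed from LOG to LOGGER. A minimal sketch of the before/after pattern (the class name ExampleClass is illustrative, not taken from the patch):

    // Before: commons-logging
    // import org.apache.commons.logging.Log;
    // import org.apache.commons.logging.LogFactory;
    // private static final Log LOG = LogFactory.getLog(ExampleClass.class);

    // After: slf4j
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class ExampleClass {
        private static final Logger LOGGER = LoggerFactory.getLogger(ExampleClass.class);

        void doWork() {
            LOGGER.info("work started");
        }
    }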

diff --git a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALRecoveryRegionPostOpenIT.java b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALRecoveryRegionPostOpenIT.java
index d74ddb2..f63d03c 100644
--- a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALRecoveryRegionPostOpenIT.java
+++ b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALRecoveryRegionPostOpenIT.java
@@ -37,8 +37,6 @@ import java.util.Properties;
 import java.util.Set;
 import java.util.concurrent.CountDownLatch;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
@@ -73,6 +71,8 @@ import org.apache.phoenix.util.ReadOnlyProps;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Maps;
 import com.google.common.collect.Multimap;
@@ -80,7 +80,7 @@ import com.google.common.collect.Multimap;
 @Category(NeedsOwnMiniClusterTest.class)
 public class WALRecoveryRegionPostOpenIT extends BaseTest {
 
-    private static final Log LOG = LogFactory.getLog(WALRecoveryRegionPostOpenIT.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(WALRecoveryRegionPostOpenIT.class);
 
     private static final String DATA_TABLE_NAME="DATA_POST_OPEN";
 
@@ -143,10 +143,10 @@ public class WALRecoveryRegionPostOpenIT extends BaseTest {
         @Override
         public void handleFailure(Multimap<HTableInterfaceReference, Mutation> attempted, Exception cause) throws IOException
         {
-            LOG.info("Found index update failure!");
+            LOGGER.info("Found index update failure!");
             handleFailureCalledCount++;
             tableReferenceToMutation=attempted;
-            LOG.info("failed index update on WAL recovery - allowing index table can be write.");
+            LOGGER.info("failed index update on WAL recovery - allowing index table can be write.");
             failIndexTableWrite=false;
             super.handleFailure(attempted, cause);
 
@@ -261,7 +261,7 @@ public class WALRecoveryRegionPostOpenIT extends BaseTest {
             resultScanner = primaryTable.getScanner(scan);
             count = 0;
             for (Result result : resultScanner) {
-                LOG.info("Got data table result:" + result);
+                LOGGER.info("Got data table result:" + result);
                 count++;
             }
             assertEquals("Got an unexpected found of data rows", 1, count);
diff --git a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
index 49933b2..eec8f0c 100644
--- a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
+++ b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
@@ -26,8 +26,6 @@ import static org.mockito.Mockito.when;
 import java.io.IOException;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -68,6 +66,8 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.mockito.Mockito;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * For pre-0.94.9 instances, this class tests correctly deserializing WALEdits w/o compression. Post
@@ -84,7 +84,7 @@ import org.mockito.Mockito;
 @Category(NeedsOwnMiniClusterTest.class)
 public class WALReplayWithIndexWritesAndCompressedWALIT {
 
-  public static final Log LOG = LogFactory.getLog(WALReplayWithIndexWritesAndCompressedWALIT.class);
+  public static final Logger LOGGER = LoggerFactory.getLogger(WALReplayWithIndexWritesAndCompressedWALIT.class);
   @Rule
   public TableName table = new TableName();
   private String INDEX_TABLE_NAME = table.getTableNameString() + "_INDEX";
@@ -141,7 +141,7 @@ public class WALReplayWithIndexWritesAndCompressedWALIT {
     UTIL.startMiniZKCluster();
 
     Path hbaseRootDir = UTIL.getDFSCluster().getFileSystem().makeQualified(new Path("/hbase"));
-    LOG.info("hbase.rootdir=" + hbaseRootDir);
+    LOGGER.info("hbase.rootdir=" + hbaseRootDir);
     UTIL.getConfiguration().set(HConstants.HBASE_DIR, hbaseRootDir.toString());
     UTIL.startMiniHBaseCluster(1, 1);
   }
@@ -289,7 +289,7 @@ public class WALReplayWithIndexWritesAndCompressedWALIT {
     assertEquals("splits=" + splits, 1, splits.size());
     // Make sure the file exists
     assertTrue(fs.exists(splits.get(0)));
-    LOG.info("Split file=" + splits.get(0));
+    LOGGER.info("Split file=" + splits.get(0));
     return splits.get(0);
   }
 
@@ -302,7 +302,7 @@ private int getKeyValueCount(HTable table) throws IOException {
     int count = 0;
     for (Result res : results) {
       count += res.list().size();
-      LOG.debug(count + ") " + res);
+      LOGGER.debug(count + ") " + res);
     }
     results.close();
 
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/Sandbox.java b/phoenix-core/src/it/java/org/apache/phoenix/Sandbox.java
index b7bc107..ec4e920 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/Sandbox.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/Sandbox.java
@@ -32,7 +32,7 @@ import org.slf4j.LoggerFactory;
  */
 public class Sandbox {
 
-    private static final Logger LOG = LoggerFactory.getLogger(Sandbox.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(Sandbox.class);
 
     public static void main(String[] args) throws Exception {
         System.out.println("Starting Phoenix sandbox");
@@ -50,7 +50,7 @@ public class Sandbox {
                         testUtil.shutdownMiniCluster();
                     }
                 } catch (Exception e) {
-                    LOG.error("Exception caught when shutting down mini cluster", e);
+                    LOGGER.error("Exception caught when shutting down mini cluster", e);
                 }
             }
         });
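
As the Sandbox hunk above shows, call sites that pass a caught exception as the trailing argument carry over unchanged: slf4j, like commons-logging, treats a final Throwable parameter specially and prints its stack trace. A minimal sketch with hypothetical names, not code from the patch:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class ShutdownExample {
        private static final Logger LOGGER = LoggerFactory.getLogger(ShutdownExample.class);

        void shutdown(AutoCloseable cluster) {
            try {
                cluster.close();
            } catch (Exception e) {
                // A Throwable in the last position is logged with its full stack trace.
                LOGGER.error("Exception caught when shutting down mini cluster", e);
            }
        }
    }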
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java
index cc2549d..c4e3907 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java
@@ -18,8 +18,6 @@ package org.apache.phoenix.end2end;
 
 import com.google.common.base.Joiner;
 import com.google.common.base.Throwables;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.AuthUtil;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -47,6 +45,8 @@ import org.junit.FixMethodOrder;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.runners.MethodSorters;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.lang.reflect.UndeclaredThrowableException;
@@ -74,7 +74,7 @@ import static org.junit.Assert.fail;
 @FixMethodOrder(MethodSorters.NAME_ASCENDING)
 public abstract class BasePermissionsIT extends BaseTest {
 
-    private static final Log LOG = LogFactory.getLog(BasePermissionsIT.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(BasePermissionsIT.class);
 
     static String SUPER_USER = System.getProperty("user.name");
 
@@ -280,7 +280,7 @@ public abstract class BasePermissionsIT extends BaseTest {
                     for(String tableOrSchema : tableOrSchemaList) {
                         String grantStmtSQL = "GRANT '" + actions + "' ON " + (isSchema ? " SCHEMA " : " TABLE ") + tableOrSchema + " TO "
                                 + ((ug instanceof String) ? (" GROUP " + "'" + ug + "'") : ("'" + ((User)ug).getShortName() + "'"));
-                        LOG.info("Grant Permissions SQL: " + grantStmtSQL);
+                        LOGGER.info("Grant Permissions SQL: " + grantStmtSQL);
                         assertFalse(stmt.execute(grantStmtSQL));
                     }
                 }
@@ -295,7 +295,7 @@ public abstract class BasePermissionsIT extends BaseTest {
             public Object run() throws Exception {
                 try (Connection conn = getConnection(); Statement stmt = conn.createStatement();) {
                     String grantStmtSQL = "GRANT '" + actions + "' TO " + " '" + user.getShortName() + "'";
-                    LOG.info("Grant Permissions SQL: " + grantStmtSQL);
+                    LOGGER.info("Grant Permissions SQL: " + grantStmtSQL);
                     assertFalse(stmt.execute(grantStmtSQL));
                 }
                 return null;
@@ -317,7 +317,7 @@ public abstract class BasePermissionsIT extends BaseTest {
                     for(String tableOrSchema : tableOrSchemaList) {
                         String revokeStmtSQL = "REVOKE ON " + (isSchema ? " SCHEMA " : " TABLE ") + tableOrSchema + " FROM "
                                 + ((ug instanceof String) ? (" GROUP " + "'" + ug + "'") : ("'" + ((User)ug).getShortName() + "'"));
-                        LOG.info("Revoke Permissions SQL: " + revokeStmtSQL);
+                        LOGGER.info("Revoke Permissions SQL: " + revokeStmtSQL);
                         assertFalse(stmt.execute(revokeStmtSQL));
                     }
                 }
@@ -333,7 +333,7 @@ public abstract class BasePermissionsIT extends BaseTest {
                 try (Connection conn = getConnection(); Statement stmt = conn.createStatement();) {
                     String revokeStmtSQL = "REVOKE FROM " +
                             ((ug instanceof String) ? (" GROUP " + "'" + ug + "'") : ("'" + ((User)ug).getShortName() + "'"));
-                    LOG.info("Revoke Permissions SQL: " + revokeStmtSQL);
+                    LOGGER.info("Revoke Permissions SQL: " + revokeStmtSQL);
                     assertFalse(stmt.execute(revokeStmtSQL));
                 }
                 return null;
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/End2EndTestDriver.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/End2EndTestDriver.java
index feb506f..07c12be 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/End2EndTestDriver.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/End2EndTestDriver.java
@@ -48,7 +48,7 @@ import org.slf4j.LoggerFactory;
  */
 public class End2EndTestDriver extends AbstractHBaseTool {
     
-    private static final Logger LOG = LoggerFactory.getLogger(End2EndTestDriver.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(End2EndTestDriver.class);
     private static final String SHORT_REGEX_ARG = "r";
     private static final String SKIP_TESTS = "n";
     
@@ -80,7 +80,7 @@ public class End2EndTestDriver extends AbstractHBaseTool {
         try {
           testFilterRe = Pattern.compile(pattern);
         } catch (PatternSyntaxException e) {
-          LOG.error("Failed to find tests using pattern '" + pattern
+          LOGGER.error("Failed to find tests using pattern '" + pattern
               + "'. Is it a valid Java regular expression?", e);
           throw e;
         }
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrphanViewToolIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrphanViewToolIT.java
index ab78ecd..44a8f67 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrphanViewToolIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrphanViewToolIT.java
@@ -54,7 +54,7 @@ import org.slf4j.LoggerFactory;
 
 @RunWith(Parameterized.class)
 public class OrphanViewToolIT extends ParallelStatsDisabledIT {
-    private static final Logger LOG = LoggerFactory.getLogger(OrphanViewToolIT.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(OrphanViewToolIT.class);
 
     private final boolean isMultiTenant;
     private final boolean columnEncoded;
@@ -211,7 +211,7 @@ public class OrphanViewToolIT extends ParallelStatsDisabledIT {
         }
         int count = reader.getLineNumber();
         if (count != lineCount)
-            LOG.debug(count + " != " + lineCount);
+            LOGGER.debug(count + " != " + lineCount);
         assertTrue(count == lineCount);
     }
 
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexRebuildIncrementDisableCountIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexRebuildIncrementDisableCountIT.java
index 694f359..cf48f5f 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexRebuildIncrementDisableCountIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexRebuildIncrementDisableCountIT.java
@@ -29,8 +29,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Random;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HTableInterface;
@@ -52,11 +50,13 @@ import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.TestUtil;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Maps;
 
 public class IndexRebuildIncrementDisableCountIT extends BaseUniqueNamesOwnClusterIT {
-    private static final Log LOG = LogFactory.getLog(IndexRebuildIncrementDisableCountIT.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(IndexRebuildIncrementDisableCountIT.class);
     private static long pendingDisableCount = 0;
     private static String ORG_PREFIX = "ORG";
     private static Result pendingDisableCountResult = null;
@@ -124,7 +124,7 @@ public class IndexRebuildIncrementDisableCountIT extends BaseUniqueNamesOwnClust
             return Bytes.toLong(pendingDisableCountResult.getValue(TABLE_FAMILY_BYTES,
                 PhoenixDatabaseMetaData.PENDING_DISABLE_COUNT_BYTES));
         } catch (Exception e) {
-            LOG.error("Exception in getPendingDisableCount: " + e);
+            LOGGER.error("Exception in getPendingDisableCount: " + e);
             return 0;
         }
     }
@@ -148,7 +148,7 @@ public class IndexRebuildIncrementDisableCountIT extends BaseUniqueNamesOwnClust
                         Thread.sleep(100);
                     }
                 } catch (Exception e) {
-                    LOG.error("Error in checkPendingDisableCount : " + e);
+                    LOGGER.error("Error in checkPendingDisableCount : " + e);
                 }
             }
         };
@@ -175,7 +175,7 @@ public class IndexRebuildIncrementDisableCountIT extends BaseUniqueNamesOwnClust
             }
             conn.commit();
         } catch (Exception e) {
-            LOG.error("Client side exception:" + e);
+            LOGGER.error("Client side exception:" + e);
         }
     }
 
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/InvalidIndexStateClientSideIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/InvalidIndexStateClientSideIT.java
index 7052ade..2b39acb 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/InvalidIndexStateClientSideIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/InvalidIndexStateClientSideIT.java
@@ -50,12 +50,11 @@ import org.apache.phoenix.util.EnvironmentEdgeManager;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.SchemaUtil;
 import org.junit.Test;
-
-import com.sun.org.apache.commons.logging.Log;
-import com.sun.org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class InvalidIndexStateClientSideIT extends ParallelStatsDisabledIT {
-    private static final Log LOG = LogFactory.getLog(InvalidIndexStateClientSideIT.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(InvalidIndexStateClientSideIT.class);
 
     @Test
     public void testCachedConnections() throws Throwable {
@@ -121,7 +120,7 @@ public class InvalidIndexStateClientSideIT extends ParallelStatsDisabledIT {
                     }
                 };
         int version = VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, 13, PHOENIX_PATCH_NUMBER);
-        LOG.info("Client version: " + version);
+        LOGGER.info("Client version: " + version);
         HTableInterface ht =
                 queryServices.getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
         try {
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
index 9c88406..48243ba 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
@@ -695,13 +695,13 @@ public class MutableIndexIT extends ParallelStatsDisabledIT {
                 Threads.sleep(10000);
             }
           } catch (Exception ex) {
-            Log.info(ex);
+              Log.info(ex);
           }
           long waitStartTime = System.currentTimeMillis();
           // wait until merge happened
           while (System.currentTimeMillis() - waitStartTime < 10000) {
             List<HRegionInfo> regions = admin.getTableRegions(indexTable);
-            Log.info("Waiting:" + regions.size());
+              Log.info("Waiting:" + regions.size());
             if (regions.size() < numRegions) {
               break;
             }
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexReplicationIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexReplicationIT.java
index 48265ed..36d35a8 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexReplicationIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexReplicationIT.java
@@ -33,8 +33,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Properties;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -62,6 +60,8 @@ import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Maps;
 
@@ -76,7 +76,7 @@ import com.google.common.collect.Maps;
 @Category(NeedsOwnMiniClusterTest.class)
 public class MutableIndexReplicationIT extends BaseTest {
 
-    private static final Log LOG = LogFactory.getLog(MutableIndexReplicationIT.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(MutableIndexReplicationIT.class);
 
     public static final String SCHEMA_NAME = "";
     public static final String DATA_TABLE_NAME = "T";
@@ -136,7 +136,7 @@ public class MutableIndexReplicationIT extends BaseTest {
         conf1 = utility1.getConfiguration();
         zkw1 = new ZooKeeperWatcher(conf1, "cluster1", null, true);
         admin = new ReplicationAdmin(conf1);
-        LOG.info("Setup first Zk");
+        LOGGER.info("Setup first Zk");
 
         // Base conf2 on conf1 so it gets the right zk cluster, and general cluster configs
         conf2 = HBaseConfiguration.create(conf1);
@@ -153,20 +153,20 @@ public class MutableIndexReplicationIT extends BaseTest {
         //replicate from cluster 1 -> cluster 2, but not back again
         admin.addPeer("1", utility2.getClusterKey());
 
-        LOG.info("Setup second Zk");
+        LOGGER.info("Setup second Zk");
         utility1.startMiniCluster(2);
         utility2.startMiniCluster(2);
     }
 
     private static void setupDriver() throws Exception {
-        LOG.info("Setting up phoenix driver");
+        LOGGER.info("Setting up phoenix driver");
         Map<String, String> props = Maps.newHashMapWithExpectedSize(3);
         // Forces server cache to be used
         props.put(QueryServices.INDEX_MUTATE_BATCH_SIZE_THRESHOLD_ATTRIB, Integer.toString(2));
         props.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.toString(true));
         // Must update config before starting server
         URL = getLocalClusterUrl(utility1);
-        LOG.info("Connecting driver to "+URL);
+        LOGGER.info("Connecting driver to "+URL);
         driver = initAndRegisterTestDriver(URL, new ReadOnlyProps(props.entrySet().iterator()));
     }
 
@@ -205,7 +205,7 @@ public class MutableIndexReplicationIT extends BaseTest {
             //create it as-is on the remote cluster
             admin2.createTable(desc);
 
-            LOG.info("Enabling replication on source table: "+tableName);
+            LOGGER.info("Enabling replication on source table: "+tableName);
             HColumnDescriptor[] cols = desc.getColumnFamilies();
             assertEquals(1, cols.length);
             // add the replication scope to the column
@@ -216,7 +216,7 @@ public class MutableIndexReplicationIT extends BaseTest {
             admin.disableTable(desc.getTableName());
             admin.modifyTable(tableName, desc);
             admin.enableTable(desc.getTableName());
-            LOG.info("Replication enabled on source table: "+tableName);
+            LOGGER.info("Replication enabled on source table: "+tableName);
         }
 
 
@@ -243,7 +243,7 @@ public class MutableIndexReplicationIT extends BaseTest {
 
         // other table can't be reached through Phoenix right now - would need to change how we
         // lookup tables. For right now, we just go through an HTable
-        LOG.info("Looking up tables in replication target");
+        LOGGER.info("Looking up tables in replication target");
         TableName[] tables = admin2.listTableNames();
         HTable remoteTable = new HTable(utility2.getConfiguration(), tables[0]);
         for (int i = 0; i < REPLICATION_RETRIES; i++) {
@@ -254,7 +254,7 @@ public class MutableIndexReplicationIT extends BaseTest {
             if (ensureAnyRows(remoteTable)) {
                 break;
             }
-            LOG.info("Sleeping for " + REPLICATION_WAIT_TIME_MILLIS
+            LOGGER.info("Sleeping for " + REPLICATION_WAIT_TIME_MILLIS
                     + " for edits to get replicated");
             Thread.sleep(REPLICATION_WAIT_TIME_MILLIS);
         }
@@ -267,7 +267,7 @@ public class MutableIndexReplicationIT extends BaseTest {
         ResultScanner scanner = remoteTable.getScanner(scan);
         boolean found = false;
         for (Result r : scanner) {
-            LOG.info("got row: " + r);
+            LOGGER.info("got row: " + r);
             found = true;
         }
         scanner.close();
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java
index cda282b..6b21815 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java
@@ -79,7 +79,7 @@ import com.google.common.collect.Maps;
 @SuppressWarnings("deprecation")
 @RunWith(RunUntilFailure.class)
 public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
-    private static final Logger LOG = LoggerFactory.getLogger(PartialIndexRebuilderIT.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PartialIndexRebuilderIT.class);
     private static final Random RAND = new Random(5);
     private static final int WAIT_AFTER_DISABLED = 5000;
     private static final long REBUILD_PERIOD = 50000;
@@ -137,7 +137,7 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
                         Thread.interrupted();
                         throw new RuntimeException(e);
                     } catch (SQLException e) {
-                        LOG.error(e.getMessage(),e);
+                        LOGGER.error(e.getMessage(),e);
                     } finally {
                         runRebuildOnce = false;
                     }
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/FailForUnsupportedHBaseVersionsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/FailForUnsupportedHBaseVersionsIT.java
index 5916c43..b920bf4 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/FailForUnsupportedHBaseVersionsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/FailForUnsupportedHBaseVersionsIT.java
@@ -22,8 +22,6 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -40,13 +38,15 @@ import org.apache.phoenix.hbase.index.covered.CoveredColumn;
 import org.apache.phoenix.hbase.index.covered.CoveredColumnIndexSpecifierBuilder;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Test that we correctly fail for versions of HBase that don't support current properties
  */
 @Category(NeedsOwnMiniClusterTest.class)
 public class FailForUnsupportedHBaseVersionsIT {
-    private static final Log LOG = LogFactory.getLog(FailForUnsupportedHBaseVersionsIT.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(FailForUnsupportedHBaseVersionsIT.class);
 
     /**
      * We don't support WAL Compression for HBase &lt; 0.94.9, so we shouldn't even allow the server
@@ -151,7 +151,7 @@ public class FailForUnsupportedHBaseVersionsIT {
                 // wait for the regionserver to abort - if this doesn't occur in the timeout, assume its
                 // broken.
                 while (!server.isAborted()) {
-                    LOG.debug("Waiting on regionserver to abort..");
+                    LOGGER.debug("Waiting on regionserver to abort..");
                 }
             }
 
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/jdbc/SecureUserConnectionsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/jdbc/SecureUserConnectionsIT.java
index 1ab54d2..2557ba3 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/jdbc/SecureUserConnectionsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/jdbc/SecureUserConnectionsIT.java
@@ -31,8 +31,6 @@ import java.util.List;
 import java.util.Properties;
 
 import org.apache.commons.io.FileUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.hbase.security.User;
@@ -49,6 +47,8 @@ import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Tests ConnectionQueryServices caching when Kerberos authentication is enabled. It's not
@@ -58,7 +58,7 @@ import org.junit.experimental.categories.Category;
  */
 @Category(NeedsOwnMiniClusterTest.class)
 public class SecureUserConnectionsIT {
-    private static final Log LOG = LogFactory.getLog(SecureUserConnectionsIT.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(SecureUserConnectionsIT.class);
     private static final int KDC_START_ATTEMPTS = 10;
 
     private static final File TEMP_DIR = new File(getClassTempDir());
@@ -87,7 +87,7 @@ public class SecureUserConnectionsIT {
                 KDC.start();
                 started = true;
             } catch (Exception e) {
-                LOG.warn("PHOENIX-3287: Failed to start KDC, retrying..", e);
+                LOGGER.warn("PHOENIX-3287: Failed to start KDC, retrying..", e);
             }
         }
         assertTrue("The embedded KDC failed to start successfully after " + KDC_START_ATTEMPTS
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/GlobalPhoenixMetricsTestSink.java b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/GlobalPhoenixMetricsTestSink.java
index c4e677a..10ff2e1 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/GlobalPhoenixMetricsTestSink.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/GlobalPhoenixMetricsTestSink.java
@@ -18,8 +18,7 @@
 package org.apache.phoenix.monitoring;
 
 import org.apache.commons.configuration.SubsetConfiguration;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+
 import org.apache.hadoop.metrics2.AbstractMetric;
 import org.apache.hadoop.metrics2.MetricsRecord;
 import org.apache.hadoop.metrics2.MetricsSink;
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
index 8f1abf0..8d5754f 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
@@ -61,8 +61,6 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.metrics2.AbstractMetric;
 import org.apache.phoenix.compile.StatementContext;
 import org.apache.phoenix.exception.SQLExceptionCode;
@@ -75,6 +73,8 @@ import org.apache.phoenix.util.PhoenixRuntime;
 import org.hamcrest.CoreMatchers;
 import org.junit.Test;
 import org.mockito.internal.util.reflection.Whitebox;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Joiner;
 import com.google.common.collect.Lists;
@@ -89,7 +89,7 @@ import com.google.common.collect.Sets;
  */
 public class PhoenixMetricsIT extends BasePhoenixMetricsIT {
 
-    private static final Log LOG = LogFactory.getLog(PhoenixMetricsIT.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixMetricsIT.class);
 
     @Test
     public void testResetGlobalPhoenixMetrics() throws Exception {
@@ -207,9 +207,9 @@ public class PhoenixMetricsIT extends BasePhoenixMetricsIT {
         }
 
         for (int i = 0; i < MAX_RETRIES; i++) {
-            LOG.info("Verifying Global Metrics from Hadoop Sink, Retry: " + (i + 1));
+            LOGGER.info("Verifying Global Metrics from Hadoop Sink, Retry: " + (i + 1));
             if (verifyMetricsFromSinkOnce(expectedMetrics)) {
-                LOG.info("Values from Hadoop Metrics Sink match actual values");
+                LOGGER.info("Values from Hadoop Metrics Sink match actual values");
                 return true;
             }
             try {
@@ -231,7 +231,7 @@ public class PhoenixMetricsIT extends BasePhoenixMetricsIT {
                         long expectedValue = value;
                         long actualValue = metric.value().longValue();
                         if (expectedValue != actualValue) {
-                            LOG.warn("Metric from Hadoop Sink: " + metric.name() + " didn't match expected.");
+                            LOGGER.warn("Metric from Hadoop Sink: " + metric.name() + " didn't match expected.");
                             return false;
                         }
                         expectedMetrics.remove(metric.name());
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/query/ConnectionCachingIT.java b/phoenix-core/src/it/java/org/apache/phoenix/query/ConnectionCachingIT.java
index d1dda04..ec62a42 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/query/ConnectionCachingIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/query/ConnectionCachingIT.java
@@ -44,7 +44,7 @@ import org.slf4j.LoggerFactory;
 
 @RunWith(Parameterized.class)
 public class ConnectionCachingIT extends ParallelStatsEnabledIT {
-  private static final Logger LOG = LoggerFactory.getLogger(ConnectionCachingIT.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(ConnectionCachingIT.class);
 
   @Parameters(name= "phoenix.scanner.lease.renew.enabled={0}")
   public static Iterable<String> data() {
@@ -65,7 +65,7 @@ public class ConnectionCachingIT extends ParallelStatsEnabledIT {
     // The test driver works correctly, the real one doesn't.
     String url = getUrl();
     url = url.replace(";" + PhoenixRuntime.PHOENIX_TEST_DRIVER_URL_PARAM, "");
-    LOG.info("URL to use is: {}", url);
+    LOGGER.info("URL to use is: {}", url);
 
     Connection conn = DriverManager.getConnection(url, props);
     long before = getNumCachedConnections(conn);
@@ -76,7 +76,7 @@ public class ConnectionCachingIT extends ParallelStatsEnabledIT {
     Thread.sleep(QueryServicesOptions.DEFAULT_RUN_RENEW_LEASE_FREQUENCY_INTERVAL_MILLISECONDS / 2);
     long after = getNumCachedConnections(conn);
     for (int i = 0; i < 6; i++) {
-      LOG.info("Found {} connections cached", after);
+      LOGGER.info("Found {} connections cached", after);
       if (after <= before) {
         break;
       }
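
The ConnectionCachingIT hunks above already use slf4j's {} placeholders. Many of the migrated call sites keep string concatenation, which slf4j accepts, but the placeholder form avoids building the message when the level is disabled. A minimal sketch with hypothetical values, not code from the patch:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class PlaceholderExample {
        private static final Logger LOGGER = LoggerFactory.getLogger(PlaceholderExample.class);

        void report(String url, long cached) {
            // Concatenation builds the String even if INFO is disabled.
            LOGGER.info("URL to use is: " + url);
            // Placeholders defer formatting until the logger knows INFO is enabled.
            LOGGER.info("Found {} connections cached for {}", cached, url);
        }
    }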
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/BaseStatsCollectorIT.java b/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/BaseStatsCollectorIT.java
index fbd264b..cc92a2c 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/BaseStatsCollectorIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/BaseStatsCollectorIT.java
@@ -42,8 +42,6 @@ import java.util.Properties;
 import java.util.Random;
 
 import com.google.common.collect.Lists;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionLocation;
@@ -85,6 +83,8 @@ import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Maps;
 
@@ -99,7 +99,7 @@ import com.google.common.collect.Maps;
 @RunWith(Parameterized.class)
 public abstract class BaseStatsCollectorIT extends BaseUniqueNamesOwnClusterIT {
 
-    private static final Log LOG = LogFactory.getLog(BaseStatsCollectorIT.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(BaseStatsCollectorIT.class);
 
     private final String tableDDLOptions;
     private final boolean columnEncoded;
@@ -200,7 +200,7 @@ public abstract class BaseStatsCollectorIT extends BaseUniqueNamesOwnClusterIT {
             if (guidePostWidth != null) {
                 updateStatisticsSql += " SET \"" + QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB + "\" = " + guidePostWidth;
             }
-            LOG.info("Running SQL to collect stats: " + updateStatisticsSql);
+            LOGGER.info("Running SQL to collect stats: " + updateStatisticsSql);
             conn.createStatement().execute(updateStatisticsSql);
         }
     }
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/NoOpStatsCollectorIT.java b/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/NoOpStatsCollectorIT.java
index 87f58d7..a6852f4 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/NoOpStatsCollectorIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/NoOpStatsCollectorIT.java
@@ -18,8 +18,6 @@
 package org.apache.phoenix.schema.stats;
 
 import com.google.common.collect.Maps;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
 import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
 import org.apache.phoenix.query.QueryServices;
@@ -32,6 +30,8 @@ import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.sql.Array;
 import java.sql.Connection;
@@ -52,7 +52,7 @@ import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
 @Category(NeedsOwnMiniClusterTest.class)
 public class NoOpStatsCollectorIT extends ParallelStatsDisabledIT {
 
-    private static final Log LOG = LogFactory.getLog(NoOpStatsCollectorIT.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(NoOpStatsCollectorIT.class);
 
     private String fullTableName;
     private String physicalTableName;
@@ -89,7 +89,7 @@ public class NoOpStatsCollectorIT extends ParallelStatsDisabledIT {
     @Test
     public void testStatsCollectionViaSql() throws SQLException {
         String updateStatisticsSql = "UPDATE STATISTICS " + fullTableName;
-        LOG.info("Running SQL to collect stats: " + updateStatisticsSql);
+        LOGGER.info("Running SQL to collect stats: " + updateStatisticsSql);
         Statement stmt = conn.createStatement();
         try {
             stmt.execute(updateStatisticsSql);
@@ -107,7 +107,7 @@ public class NoOpStatsCollectorIT extends ParallelStatsDisabledIT {
      */
     @Test
     public void testStatsCollectionDuringMajorCompaction() throws Exception {
-        LOG.info("Running major compaction on table: " + physicalTableName);
+        LOGGER.info("Running major compaction on table: " + physicalTableName);
         TestUtil.doMajorCompaction(conn, physicalTableName);
 
         String q1 = "SELECT SUM(GUIDE_POSTS_ROW_COUNT) FROM SYSTEM.STATS WHERE PHYSICAL_NAME = '" + physicalTableName + "'";
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/trace/BaseTracingTestIT.java b/phoenix-core/src/it/java/org/apache/phoenix/trace/BaseTracingTestIT.java
index 708ecad..8a9f4e0 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/trace/BaseTracingTestIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/trace/BaseTracingTestIT.java
@@ -29,8 +29,6 @@ import java.util.Properties;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.htrace.Span;
 import org.apache.htrace.Trace;
 import org.apache.htrace.impl.MilliSpan;
@@ -42,6 +40,8 @@ import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.junit.After;
 import org.junit.Before;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Base test for tracing tests - helps manage getting tracing/non-tracing connections, as well as
@@ -50,7 +50,7 @@ import org.junit.Before;
 
 public class BaseTracingTestIT extends ParallelStatsDisabledIT {
 
-    private static final Log LOG = LogFactory.getLog(BaseTracingTestIT.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(BaseTracingTestIT.class);
 
     protected CountDownLatch latch;
     protected int defaultTracingThreadPoolForTest = 1;
@@ -154,7 +154,7 @@ public class BaseTracingTestIT extends ParallelStatsDisabledIT {
                 }
                 return connection;
             } catch (SQLException e) {
-                LOG.error("New connection failed for tracing Table: " + tableName, e);
+                LOGGER.error("New connection failed for tracing Table: " + tableName, e);
                 return null;
             }
         }
@@ -170,7 +170,7 @@ public class BaseTracingTestIT extends ParallelStatsDisabledIT {
                 executor.shutdownNow();
                 executor.awaitTermination(5, TimeUnit.SECONDS);
             } catch (InterruptedException e) {
-                LOG.error("Failed to stop the thread. ", e);
+                LOGGER.error("Failed to stop the thread. ", e);
             }
         }
 
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTracingEndToEndIT.java b/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTracingEndToEndIT.java
index 610195a..6557cec 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTracingEndToEndIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTracingEndToEndIT.java
@@ -31,8 +31,6 @@ import java.util.*;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.htrace.*;
 import org.apache.htrace.impl.ProbabilitySampler;
@@ -43,6 +41,8 @@ import org.apache.phoenix.trace.TraceReader.TraceHolder;
 import org.junit.Before;
 import org.junit.Ignore;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.ImmutableMap;
 
@@ -52,7 +52,7 @@ import com.google.common.collect.ImmutableMap;
 @Ignore("Will need to revisit for new HDFS/HBase/HTrace, broken on 5.x")
 public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
 
-    private static final Log LOG = LogFactory.getLog(PhoenixTracingEndToEndIT.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixTracingEndToEndIT.class);
     private static final int MAX_RETRIES = 10;
     private String enabledForLoggingTable;
     private String enableForLoggingIndex;
@@ -70,7 +70,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
     @Test
     public void testWriteSpans() throws Exception {
 
-        LOG.info("testWriteSpans TableName: " + tracingTableName);
+        LOGGER.info("testWriteSpans TableName: " + tracingTableName);
         // watch our sink so we know when commits happen
         latch = new CountDownLatch(1);
 
@@ -134,7 +134,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
     @Test
     public void testClientServerIndexingTracing() throws Exception {
 
-        LOG.info("testClientServerIndexingTracing TableName: " + tracingTableName);
+        LOGGER.info("testClientServerIndexingTracing TableName: " + tracingTableName);
         // one call for client side, one call for server side
         latch = new CountDownLatch(2);
         testTraceWriter.start();
@@ -145,7 +145,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
 
         // trace the requests we send
         Connection traceable = getTracingConnection();
-        LOG.debug("Doing dummy the writes to the tracked table");
+        LOGGER.debug("Doing dummy the writes to the tracked table");
         String insert = "UPSERT INTO " + enabledForLoggingTable + " VALUES (?, ?)";
         PreparedStatement stmt = traceable.prepareStatement(insert);
         stmt.setString(1, "key1");
@@ -160,7 +160,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
         traceable.commit();
 
         // wait for the latch to countdown, as the metrics system is time-based
-        LOG.debug("Waiting for latch to complete!");
+        LOGGER.debug("Waiting for latch to complete!");
         latch.await(200, TimeUnit.SECONDS);// should be way more than GC pauses
 
         // read the traces back out
@@ -213,7 +213,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
     @Test
     public void testScanTracing() throws Exception {
 
-        LOG.info("testScanTracing TableName: " + tracingTableName);
+        LOGGER.info("testScanTracing TableName: " + tracingTableName);
 
         // separate connections to minimize amount of traces that are generated
         Connection traceable = getTracingConnection();
@@ -227,7 +227,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
         createTestTable(conn, false);
 
         // update the table, but don't trace these, to simplify the traces we read
-        LOG.debug("Doing dummy the writes to the tracked table");
+        LOGGER.debug("Doing dummy the writes to the tracked table");
         String insert = "UPSERT INTO " + enabledForLoggingTable + " VALUES (?, ?)";
         PreparedStatement stmt = conn.prepareStatement(insert);
         stmt.setString(1, "key1");
@@ -266,7 +266,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
     @Test
     public void testScanTracingOnServer() throws Exception {
 
-        LOG.info("testScanTracingOnServer TableName: " + tracingTableName);
+        LOGGER.info("testScanTracingOnServer TableName: " + tracingTableName);
 
         // separate connections to minimize amount of traces that are generated
         Connection traceable = getTracingConnection();
@@ -280,7 +280,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
         createTestTable(conn, false);
 
         // update the table, but don't trace these, to simplify the traces we read
-        LOG.debug("Doing dummy the writes to the tracked table");
+        LOGGER.debug("Doing dummy the writes to the tracked table");
         String insert = "UPSERT INTO " + enabledForLoggingTable + " VALUES (?, ?)";
         PreparedStatement stmt = conn.prepareStatement(insert);
         stmt.setString(1, "key1");
@@ -318,7 +318,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
     @Test
     public void testCustomAnnotationTracing() throws Exception {
 
-        LOG.info("testCustomAnnotationTracing TableName: " + tracingTableName);
+        LOGGER.info("testCustomAnnotationTracing TableName: " + tracingTableName);
 
     	final String customAnnotationKey = "myannot";
     	final String customAnnotationValue = "a1";
@@ -335,7 +335,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
         createTestTable(conn, false);
 
         // update the table, but don't trace these, to simplify the traces we read
-        LOG.debug("Doing dummy the writes to the tracked table");
+        LOGGER.debug("Doing dummy the writes to the tracked table");
         String insert = "UPSERT INTO " + enabledForLoggingTable + " VALUES (?, ?)";
         PreparedStatement stmt = conn.prepareStatement(insert);
         stmt.setString(1, "key1");
@@ -421,7 +421,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
     @Test
     public void testSingleSpan() throws Exception {
 
-        LOG.info("testSingleSpan TableName: " + tracingTableName);
+        LOGGER.info("testSingleSpan TableName: " + tracingTableName);
 
         Properties props = new Properties(TEST_PROPERTIES);
         Connection conn = DriverManager.getConnection(getUrl(), props);
@@ -447,7 +447,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
     @Test
     public void testMultipleSpans() throws Exception {
 
-        LOG.info("testMultipleSpans TableName: " + tracingTableName);
+        LOGGER.info("testMultipleSpans TableName: " + tracingTableName);
 
         Connection conn = getConnectionWithoutTracing();
         latch = new CountDownLatch(4);
@@ -511,7 +511,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
         Iterator<SpanInfo> spanIter = trace.spans.iterator();
         for (Span span : spans) {
             SpanInfo spanInfo = spanIter.next();
-            LOG.info("Checking span:\n" + spanInfo);
+            LOGGER.info("Checking span:\n" + spanInfo);
 
             long parentId = span.getParentId();
             if(parentId == Span.ROOT_SPAN_ID) {
@@ -552,7 +552,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
         outer: while (retries < MAX_RETRIES) {
             Collection<TraceHolder> traces = reader.readAll(100);
             for (TraceHolder trace : traces) {
-                LOG.info("Got trace: " + trace);
+                LOGGER.info("Got trace: " + trace);
                 found = checker.foundTrace(trace);
                 if (found) {
                     break outer;
@@ -564,7 +564,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
                     }
                 }
             }
-            LOG.info("======  Waiting for tracing updates to be propagated ========");
+            LOGGER.info("======  Waiting for tracing updates to be propagated ========");
             Thread.sleep(1000);
             retries++;
         }
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcSchedulerFactory.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcSchedulerFactory.java
index a697382..bf33992 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcSchedulerFactory.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcSchedulerFactory.java
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hbase.ipc;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HConstants;
@@ -29,6 +27,8 @@ import org.apache.hadoop.hbase.regionserver.RpcSchedulerFactory;
 import org.apache.hadoop.hbase.regionserver.SimpleRpcSchedulerFactory;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Preconditions;
 
@@ -38,7 +38,7 @@ import com.google.common.base.Preconditions;
  */
 public class PhoenixRpcSchedulerFactory implements RpcSchedulerFactory {
 
-    private static final Log LOG = LogFactory.getLog(PhoenixRpcSchedulerFactory.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixRpcSchedulerFactory.class);
 
     private static final String VERSION_TOO_OLD_FOR_INDEX_RPC =
             "Running an older version of HBase (less than 0.98.4), Phoenix index RPC handling cannot be enabled.";
@@ -51,7 +51,7 @@ public class PhoenixRpcSchedulerFactory implements RpcSchedulerFactory {
             // happens in <=0.98.4 where the scheduler factory is not visible
             delegate = new SimpleRpcSchedulerFactory().create(conf, priorityFunction, abortable);
         } catch (IllegalAccessError e) {
-            LOG.fatal(VERSION_TOO_OLD_FOR_INDEX_RPC);
+            LOGGER.error(VERSION_TOO_OLD_FOR_INDEX_RPC);
             throw e;
         }
 
@@ -64,7 +64,7 @@ public class PhoenixRpcSchedulerFactory implements RpcSchedulerFactory {
 
         // validate index and metadata priorities are not the same
         Preconditions.checkArgument(indexPriority != metadataPriority, "Index and Metadata priority must not be same "+ indexPriority);
-        LOG.info("Using custom Phoenix Index RPC Handling with index rpc priority " + indexPriority + " and metadata rpc priority " + metadataPriority);
+        LOGGER.info("Using custom Phoenix Index RPC Handling with index rpc priority " + indexPriority + " and metadata rpc priority " + metadataPriority);
 
         PhoenixRpcScheduler scheduler =
                 new PhoenixRpcScheduler(conf, delegate, indexPriority, metadataPriority);
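
    For reference, a minimal sketch of the slf4j idiom this change standardizes on; the class
    and messages below are illustrative only and not part of the patch. Note that slf4j defines
    no FATAL level, which is why the former LOG.fatal call above is mapped to LOGGER.error:

        import org.slf4j.Logger;
        import org.slf4j.LoggerFactory;

        public class RpcLoggingSketch {
            // one static logger per class, keyed by the class literal
            private static final Logger LOGGER = LoggerFactory.getLogger(RpcLoggingSketch.class);

            void report(int indexPriority, int metadataPriority) {
                // {} placeholders defer message formatting until the level is enabled
                LOGGER.info("Using index rpc priority {} and metadata rpc priority {}",
                        indexPriority, metadataPriority);
                // highest severity available; slf4j has no fatal()
                LOGGER.error("Running an unsupported HBase version");
            }
        }
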
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
index 8b9fd15..6f97cba 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
@@ -23,8 +23,6 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CellUtil;
@@ -61,13 +59,15 @@ import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.RepairUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Lists;
 
 public class IndexHalfStoreFileReaderGenerator extends BaseRegionObserver {
     
     private static final String LOCAL_INDEX_AUTOMATIC_REPAIR = "local.index.automatic.repair";
-    public static final Log LOG = LogFactory.getLog(IndexHalfStoreFileReaderGenerator.class);
+    public static final Logger LOGGER = LoggerFactory.getLogger(IndexHalfStoreFileReaderGenerator.class);
 
     @Override
     public Reader preStoreFileReaderOpen(ObserverContext<RegionCoprocessorEnvironment> ctx,
@@ -194,10 +194,10 @@ public class IndexHalfStoreFileReaderGenerator extends BaseRegionObserver {
         if (!store.hasReferences()) {
             InternalScanner repairScanner = null;
             if (request.isMajor() && (!RepairUtil.isLocalIndexStoreFilesConsistent(c.getEnvironment(), store))) {
-                LOG.info("we have found inconsistent data for local index for region:"
+                LOGGER.info("we have found inconsistent data for local index for region:"
                         + c.getEnvironment().getRegion().getRegionInfo());
                 if (c.getEnvironment().getConfiguration().getBoolean(LOCAL_INDEX_AUTOMATIC_REPAIR, true)) {
-                    LOG.info("Starting automatic repair of local Index for region:"
+                    LOGGER.info("Starting automatic repair of local Index for region:"
                             + c.getEnvironment().getRegion().getRegionInfo());
                     repairScanner = getRepairScanner(c.getEnvironment(), store);
                 }
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/BinaryCompatibleBaseDecoder.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/BinaryCompatibleBaseDecoder.java
index 80f2dd2..e30370f 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/BinaryCompatibleBaseDecoder.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/BinaryCompatibleBaseDecoder.java
@@ -24,10 +24,10 @@ import java.io.PushbackInputStream;
 
 import javax.annotation.Nonnull;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.codec.Codec;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This class is a copy paste version of org.apache.hadoop.hbase.codec.BaseDecoder class. 
@@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.codec.Codec;
  * HBASE-14501. See PHOENIX-2629 and PHOENIX-2636 for details.
  */
 public abstract class BinaryCompatibleBaseDecoder implements Codec.Decoder {
-  protected static final Log LOG = LogFactory.getLog(BinaryCompatibleBaseDecoder.class);
+  protected static final Logger LOGGER = LoggerFactory.getLogger(BinaryCompatibleBaseDecoder.class);
 
   protected final InputStream in;
   private Cell current = null;
@@ -79,11 +79,11 @@ public abstract class BinaryCompatibleBaseDecoder implements Codec.Decoder {
     try {
       isEof = this.in.available() == 0;
     } catch (Throwable t) {
-      LOG.trace("Error getting available for error message - ignoring", t);
+      LOGGER.trace("Error getting available for error message - ignoring", t);
     }
     if (!isEof) throw ioEx;
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("Partial cell read caused by EOF", ioEx);
+    if (LOGGER.isTraceEnabled()) {
+      LOGGER.trace("Partial cell read caused by EOF", ioEx);
     }
     EOFException eofEx = new EOFException("Partial cell read");
     eofEx.initCause(ioEx);
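
    The isTraceEnabled()/isDebugEnabled() guards retained above still work with slf4j, but they
    are only strictly needed when computing a log argument is itself expensive; with {} placeholders
    the message is not built unless the level is enabled. A hedged sketch, with illustrative names:

        import java.io.IOException;

        import org.slf4j.Logger;
        import org.slf4j.LoggerFactory;

        class DecoderLoggingSketch {
            private static final Logger LOGGER = LoggerFactory.getLogger(DecoderLoggingSketch.class);

            void onPartialRead(IOException ioEx, int available) {
                // message plus Throwable: the stack trace is emitted only if TRACE is on
                LOGGER.trace("Partial cell read caused by EOF", ioEx);
                // cheap arguments need no guard; the placeholder is resolved lazily
                LOGGER.trace("{} bytes still available in the stream", available);
                // an explicit guard still pays off when building the argument is costly
                if (LOGGER.isTraceEnabled()) {
                    LOGGER.trace("Decoder state: {}", expensiveStateDump());
                }
            }

            private String expensiveStateDump() {
                return "...";
            }
        }
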
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java b/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
index b6b53df..2a0ecf6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
@@ -40,8 +40,6 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.commons.codec.binary.Hex;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionLocation;
@@ -78,6 +76,8 @@ import org.apache.phoenix.util.Closeables;
 import org.apache.phoenix.util.SQLCloseable;
 import org.apache.phoenix.util.SQLCloseables;
 import org.apache.phoenix.util.ScanUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.protobuf.ByteString;
 
@@ -91,7 +91,7 @@ import com.google.protobuf.ByteString;
 public class ServerCacheClient {
     public static final int UUID_LENGTH = Bytes.SIZEOF_LONG;
     public static final byte[] KEY_IN_FIRST_REGION = new byte[]{0};
-    private static final Log LOG = LogFactory.getLog(ServerCacheClient.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(ServerCacheClient.class);
     private static final Random RANDOM = new Random();
 	public static final String HASH_JOIN_SERVER_CACHE_RESEND_PER_SERVER = "hash.join.server.cache.resend.per.server";
     private final PhoenixConnection connection;
@@ -284,7 +284,7 @@ public class ServerCacheClient {
                                 cacheUsingTable.getIndexType() == IndexType.LOCAL)) {
                     // Call RPC once per server
                     servers.add(entry);
-                    if (LOG.isDebugEnabled()) {LOG.debug(addCustomAnnotations("Adding cache entry to be sent for " + entry, connection));}
+                    if (LOGGER.isDebugEnabled()) {LOGGER.debug(addCustomAnnotations("Adding cache entry to be sent for " + entry, connection));}
                     final byte[] key = getKeyInRegion(entry.getRegionInfo().getStartKey());
                     final HTableInterface htable = services.getTable(cacheUsingTable.getPhysicalName().getBytes());
                     closeables.add(htable);
@@ -311,7 +311,7 @@ public class ServerCacheClient {
                         }
                     }));
                 } else {
-                    if (LOG.isDebugEnabled()) {LOG.debug(addCustomAnnotations("NOT adding cache entry to be sent for " + entry + " since one already exists for that entry", connection));}
+                    if (LOGGER.isDebugEnabled()) {LOGGER.debug(addCustomAnnotations("NOT adding cache entry to be sent for " + entry + " since one already exists for that entry", connection));}
                 }
             }
             
@@ -350,7 +350,7 @@ public class ServerCacheClient {
                 }
             }
         }
-        if (LOG.isDebugEnabled()) {LOG.debug(addCustomAnnotations("Cache " + cacheId + " successfully added to servers.", connection));}
+        if (LOGGER.isDebugEnabled()) {LOGGER.debug(addCustomAnnotations("Cache " + cacheId + " successfully added to servers.", connection));}
         return hashCacheSpec;
     }
     
@@ -376,8 +376,8 @@ public class ServerCacheClient {
              * through the current metadata boundaries and remove the cache once for each server that we originally sent
              * to.
              */
-            if (LOG.isDebugEnabled()) {
-                LOG.debug(addCustomAnnotations("Removing Cache " + cacheId + " from servers.", connection));
+            if (LOGGER.isDebugEnabled()) {
+                LOGGER.debug(addCustomAnnotations("Removing Cache " + cacheId + " from servers.", connection));
             }
             for (HRegionLocation entry : locations) {
              // Call once per server
@@ -420,13 +420,13 @@ public class ServerCacheClient {
                         remainingOnServers.remove(entry);
                     } catch (Throwable t) {
                         lastThrowable = t;
-                        LOG.error(addCustomAnnotations("Error trying to remove hash cache for " + entry, connection),
+                        LOGGER.error(addCustomAnnotations("Error trying to remove hash cache for " + entry, connection),
                                 t);
                     }
                 }
             }
             if (!remainingOnServers.isEmpty()) {
-                LOG.warn(addCustomAnnotations("Unable to remove hash cache for " + remainingOnServers, connection),
+                LOGGER.warn(addCustomAnnotations("Unable to remove hash cache for " + remainingOnServers, connection),
                         lastThrowable);
             }
         } finally {
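
    The warn/error calls above rely on the slf4j convention that a Throwable passed as the last
    argument is rendered with its full stack trace rather than being treated as a format argument.
    A minimal sketch under that assumption (slf4j 1.6+; names are illustrative):

        import org.slf4j.Logger;
        import org.slf4j.LoggerFactory;

        class CacheRemovalLoggingSketch {
            private static final Logger LOGGER = LoggerFactory.getLogger(CacheRemovalLoggingSketch.class);

            void reportFailure(Object entry, Throwable lastThrowable) {
                // the single {} consumes 'entry'; the trailing Throwable is logged with its stack trace
                LOGGER.error("Error trying to remove hash cache for {}", entry, lastThrowable);
            }
        }
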
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/call/CallRunner.java b/phoenix-core/src/main/java/org/apache/phoenix/call/CallRunner.java
index 7dc90f8..face677 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/call/CallRunner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/call/CallRunner.java
@@ -19,8 +19,8 @@ package org.apache.phoenix.call;
 
 import java.util.concurrent.Callable;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Helper class to run a Call with a set of {@link CallWrapper}
@@ -38,7 +38,7 @@ public class CallRunner {
         public V call() throws E;
     }
 
-    private static final Log LOG = LogFactory.getLog(CallRunner.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(CallRunner.class);
 
     private CallRunner() {
         // no ctor for util class
@@ -57,7 +57,7 @@ public class CallRunner {
                 try {
                     wrappers[i].after();
                 } catch (Exception e) {
-                    LOG.error("Failed to complete wrapper " + wrappers[i], e);
+                    LOGGER.error("Failed to complete wrapper " + wrappers[i], e);
                 }
             }
         }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
index 8c58213..6340f73 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
@@ -34,8 +34,6 @@ import java.util.concurrent.TimeUnit;
 
 import javax.annotation.concurrent.GuardedBy;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
@@ -53,8 +51,7 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Pair;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
+
 import org.apache.phoenix.cache.GlobalCache;
 import org.apache.phoenix.compile.MutationPlan;
 import org.apache.phoenix.compile.PostDDLCompiler;
@@ -86,6 +83,8 @@ import org.apache.phoenix.util.ScanUtil;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.ServerUtil;
 import org.apache.phoenix.util.UpgradeUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.ImmutableList;
@@ -99,7 +98,7 @@ import com.google.common.collect.Maps;
  */
 @SuppressWarnings("deprecation")
 public class MetaDataRegionObserver extends BaseRegionObserver {
-    public static final Log LOG = LogFactory.getLog(MetaDataRegionObserver.class);
+    public static final Logger LOGGER = LoggerFactory.getLogger(MetaDataRegionObserver.class);
     public static final String REBUILD_INDEX_APPEND_TO_URL_STRING = "REBUILDINDEX";
     // PHOENIX-5094 To differentiate the increment in PENDING_DISABLE_COUNT made by client or index
     // rebuilder, we are using large value for index rebuilder
@@ -175,16 +174,16 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
                         @Override
                         public Void run() throws Exception {
                             if (UpgradeUtil.truncateStats(mTable, sTable)) {
-                                LOG.info("Stats are successfully truncated for upgrade 4.7!!");
+                                LOGGER.info("Stats are successfully truncated for upgrade 4.7!!");
                             }
                             return null;
                         }
                     });
 
                 } catch (Exception exception) {
-                    LOG.warn("Exception while truncate stats..,"
+                    LOGGER.warn("Exception while truncate stats..,"
                             + " please check and delete stats manually inorder to get proper result with old client!!");
-                    LOG.warn(exception.getStackTrace());
+                    LOGGER.warn("Exception while truncating stats", exception);
                 } finally {
                     try {
                         if (metaTable != null) {
@@ -202,14 +201,10 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
         t.start();
 
         if (!enableRebuildIndex) {
-            LOG.info("Failure Index Rebuild is skipped by configuration.");
+            LOGGER.info("Failure Index Rebuild is skipped by configuration.");
             return;
         }
-        // turn off verbose deprecation logging
-        Logger deprecationLogger = Logger.getLogger("org.apache.hadoop.conf.Configuration.deprecation");
-        if (deprecationLogger != null) {
-            deprecationLogger.setLevel(Level.WARN);
-        }
+
         // Ensure we only run one of the index rebuilder tasks
         if (ServerUtil.isKeyInRegion(SYSTEM_CATALOG_KEY, e.getEnvironment().getRegion())) {
             try {
@@ -219,7 +214,7 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
                 BuildIndexScheduleTask task = new BuildIndexScheduleTask(e.getEnvironment());
                 executor.scheduleWithFixedDelay(task, initialRebuildTaskDelay, rebuildIndexTimeInterval, TimeUnit.MILLISECONDS);
             } catch (ClassNotFoundException ex) {
-                LOG.error("BuildIndexScheduleTask cannot start!", ex);
+                LOGGER.error("BuildIndexScheduleTask cannot start!", ex);
             }
         }
     }
@@ -266,7 +261,7 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
                     IndexUtil.incrementCounterForIndex(conn, indexName, -PENDING_DISABLE_INACTIVE_STATE_COUNT);
                     indexesIncremented.add(index);
                 }catch(Exception e) {
-                    LOG.warn("Decrement  of -" + PENDING_DISABLE_INACTIVE_STATE_COUNT +" for index :" + index.getName().getString() + "of table: " + dataPTable.getName().getString(), e);
+                    LOGGER.warn("Decrement  of -" + PENDING_DISABLE_INACTIVE_STATE_COUNT +" for index :" + index.getName().getString() + "of table: " + dataPTable.getName().getString(), e);
                 }
             }
             return indexesIncremented;
@@ -304,7 +299,7 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
                     results.clear();
                     hasMore = scanner.next(results);
                     if (results.isEmpty()) {
-                        LOG.debug("Found no indexes with non zero INDEX_DISABLE_TIMESTAMP");
+                        LOGGER.debug("Found no indexes with non zero INDEX_DISABLE_TIMESTAMP");
                         break;
                     }
 
@@ -314,7 +309,7 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
                     Cell indexStateCell = r.getColumnLatestCell(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.INDEX_STATE_BYTES);
 
                     if (disabledTimeStamp == null || disabledTimeStamp.length == 0) {
-                        LOG.debug("Null or empty INDEX_DISABLE_TIMESTAMP");
+                        LOGGER.debug("Null or empty INDEX_DISABLE_TIMESTAMP");
                         continue;
                     }
 
@@ -325,7 +320,7 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
                         PhoenixDatabaseMetaData.DATA_TABLE_NAME_BYTES);
                     if ((dataTable == null || dataTable.length == 0) || indexStateCell == null) {
                         // data table name can't be empty
-                        LOG.debug("Null or data table name or index state");
+                        LOGGER.debug("Null or data table name or index state");
                         continue;
                     }
 
@@ -337,14 +332,14 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
 
                     // validity check
                     if (indexTable == null || indexTable.length == 0) {
-                        LOG.debug("We find IndexTable empty during rebuild scan:" + scan
+                        LOGGER.debug("We find IndexTable empty during rebuild scan:" + scan
                                 + "so, Index rebuild has been skipped for row=" + r);
                         continue;
                     }
                     
                     String dataTableFullName = SchemaUtil.getTableName(schemaName, dataTable);
                     if (onlyTheseTables != null && !onlyTheseTables.contains(dataTableFullName)) {
-                        LOG.debug("Could not find " + dataTableFullName + " in " + onlyTheseTables);
+                        LOGGER.debug("Could not find " + dataTableFullName + " in " + onlyTheseTables);
                         continue;
                     }
 
@@ -358,7 +353,7 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
                     PTable indexPTable = PhoenixRuntime.getTableNoCache(conn, indexTableFullName);
                     // Sanity check in case index was removed from table
                     if (!dataPTable.getIndexes().contains(indexPTable)) {
-                        LOG.debug(dataTableFullName + " does not contain " + indexPTable.getName().getString());
+                        LOGGER.debug(dataTableFullName + " does not contain " + indexPTable.getName().getString());
                         continue;
                     }
                     
@@ -380,7 +375,7 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
                     // an index write fails.
                     if ((indexState == PIndexState.DISABLE || indexState == PIndexState.PENDING_ACTIVE)
                             && !MetaDataUtil.tableRegionsOnline(this.env.getConfiguration(), indexPTable)) {
-                        LOG.debug("Index rebuild has been skipped because not all regions of index table="
+                        LOGGER.debug("Index rebuild has been skipped because not all regions of index table="
                                 + indexPTable.getName() + " are online.");
                         continue;
                     }
@@ -394,12 +389,12 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
                          */
                         try {
                             IndexUtil.updateIndexState(conn, indexTableFullName, PIndexState.DISABLE, 0l);
-                            LOG.error("Unable to rebuild index " + indexTableFullName
+                            LOGGER.error("Unable to rebuild index " + indexTableFullName
                                     + ". Won't attempt again since index disable timestamp is older than current time by "
                                     + indexDisableTimestampThreshold
                                     + " milliseconds. Manual intervention needed to re-build the index");
                         } catch (Throwable ex) {
-                            LOG.error(
+                            LOGGER.error(
                                 "Unable to mark index " + indexTableFullName + " as disabled.", ex);
                         }
                         continue; // don't attempt another rebuild irrespective of whether
@@ -418,7 +413,7 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
                         IndexUtil.updateIndexState(conn, indexTableFullName, PIndexState.ACTIVE, null);
                         continue; // Must wait until clients start to do index maintenance again
                     } else if (indexState != PIndexState.INACTIVE && indexState != PIndexState.ACTIVE) {
-                        LOG.warn("Unexpected index state of " + indexTableFullName + "=" + indexState + ". Skipping partial rebuild attempt.");
+                        LOGGER.warn("Unexpected index state of " + indexTableFullName + "=" + indexState + ". Skipping partial rebuild attempt.");
                         continue;
                     }
                     long currentTime = EnvironmentEdgeManager.currentTimeMillis();
@@ -427,7 +422,7 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
                                     QueryServicesOptions.DEFAULT_INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_FORWARD_TIME);
                     // Wait until no failures have occurred in at least forwardOverlapDurationMs
                     if (indexStateCell.getTimestamp() + forwardOverlapDurationMs > currentTime) {
-                        LOG.debug("Still must wait " + (indexStateCell.getTimestamp() + forwardOverlapDurationMs - currentTime) + " before starting rebuild for " + indexTableFullName);
+                        LOGGER.debug("Still must wait " + (indexStateCell.getTimestamp() + forwardOverlapDurationMs - currentTime) + " before starting rebuild for " + indexTableFullName);
                         continue; // Haven't waited long enough yet
                     }
                     Long upperBoundOfRebuild = indexStateCell.getTimestamp() + forwardOverlapDurationMs;
@@ -438,7 +433,7 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
                         indexesToPartiallyRebuild = Lists.newArrayListWithExpectedSize(dataPTable.getIndexes().size());
                         dataTableToIndexesMap.put(dataPTable, indexesToPartiallyRebuild);
                     }
-                    LOG.debug("We have found " + indexPTable.getIndexState() + " Index:" + indexPTable.getName()
+                    LOGGER.debug("We have found " + indexPTable.getIndexState() + " Index:" + indexPTable.getName()
                             + " on data table:" + dataPTable.getName() + " which failed to be updated at "
                             + indexPTable.getIndexDisableTimestamp());
                     indexesToPartiallyRebuild.add(new Pair<PTable,Long>(indexPTable,upperBoundOfRebuild));
@@ -474,7 +469,7 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
 								long disabledTimeStampVal = index.getIndexDisableTimestamp();
 								if (disabledTimeStampVal != 0) {
                                     if (signOfDisableTimeStamp != 0 && signOfDisableTimeStamp != Long.signum(disabledTimeStampVal)) {
-                                        LOG.warn("Found unexpected mix of signs with INDEX_DISABLE_TIMESTAMP for " + dataPTable.getName().getString() + " with " + indexesToPartiallyRebuild); 
+                                        LOGGER.warn("Found unexpected mix of signs with INDEX_DISABLE_TIMESTAMP for " + dataPTable.getName().getString() + " with " + indexesToPartiallyRebuild);
                                     }
 								    signOfDisableTimeStamp = Long.signum(disabledTimeStampVal);
 	                                disabledTimeStampVal = Math.abs(disabledTimeStampVal);
@@ -491,13 +486,13 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
 							}
 							// No indexes are disabled, so skip this table
 							if (earliestDisableTimestamp == Long.MAX_VALUE) {
-		                        LOG.debug("No indexes are disabled so continuing");
+		                        LOGGER.debug("No indexes are disabled so continuing");
 								continue;
 							}
 							long scanBeginTime = Math.max(0, earliestDisableTimestamp - backwardOverlapDurationMs);
                             long scanEndTime = Math.min(latestUpperBoundTimestamp,
                                     getTimestampForBatch(scanBeginTime,batchExecutedPerTableMap.get(dataPTable.getName())));
-							LOG.info("Starting to build " + dataPTable + " indexes " + indexesToPartiallyRebuild
+							LOGGER.info("Starting to build " + dataPTable + " indexes " + indexesToPartiallyRebuild
 									+ " from timestamp=" + scanBeginTime + " until " + scanEndTime);
 							
 							TableRef tableRef = new TableRef(null, dataPTable, HConstants.LATEST_TIMESTAMP, false);
@@ -517,7 +512,7 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
 							byte[] attribValue = ByteUtil.copyKeyBytesIfNecessary(indexMetaDataPtr);
 							dataTableScan.setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD, attribValue);
 							ScanUtil.setClientVersion(dataTableScan, MetaDataProtocol.PHOENIX_VERSION);
-                            LOG.info("Starting to partially build indexes:" + indexesToPartiallyRebuild
+                            LOGGER.info("Starting to partially build indexes:" + indexesToPartiallyRebuild
                                     + " on data table:" + dataPTable.getName() + " with the earliest disable timestamp:"
                                     + earliestDisableTimestamp + " till "
                                     + (scanEndTime == HConstants.LATEST_TIMESTAMP ? "LATEST_TIMESTAMP" : scanEndTime));
@@ -525,10 +520,10 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
 							long rowCount = mutationState.getUpdateCount();
 							decrementIndexesPendingDisableCount(conn, dataPTable, indexesToPartiallyRebuild);
 							if (scanEndTime == latestUpperBoundTimestamp) {
-                                LOG.info("Rebuild completed for all inactive/disabled indexes in data table:"
+                                LOGGER.info("Rebuild completed for all inactive/disabled indexes in data table:"
                                         + dataPTable.getName());
                             }
-                            LOG.info(" no. of datatable rows read in rebuilding process is " + rowCount);
+                            LOGGER.info(" no. of datatable rows read in rebuilding process is " + rowCount);
 							for (PTable indexPTable : indexesToPartiallyRebuild) {
 								String indexTableFullName = SchemaUtil.getTableName(
 										indexPTable.getSchemaName().getString(),
@@ -538,7 +533,7 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
 								        IndexUtil.updateIndexState(conn, indexTableFullName, PIndexState.ACTIVE, 0L,
 								            latestUpperBoundTimestamp);
 								        batchExecutedPerTableMap.remove(dataPTable.getName());
-								        LOG.info("Making Index:" + indexPTable.getTableName() + " active after rebuilding");
+								        LOGGER.info("Making Index:" + indexPTable.getTableName() + " active after rebuilding");
 								    } else {
 								        // Increment timestamp so that client sees updated disable timestamp
 								        IndexUtil.updateIndexState(conn, indexTableFullName, indexPTable.getIndexState(),
@@ -548,34 +543,34 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
 								            noOfBatches = 0l;
 								        }
 								        batchExecutedPerTableMap.put(dataPTable.getName(), ++noOfBatches);
-								        LOG.info(
+								        LOGGER.info(
 								            "During Round-robin build: Successfully updated index disabled timestamp  for "
 								                + indexTableFullName + " to " + scanEndTime);
 								    }
 								} catch (SQLException e) {
-								    LOG.error("Unable to rebuild " + dataPTable + " index " + indexTableFullName, e);
+								    LOGGER.error("Unable to rebuild " + dataPTable + " index " + indexTableFullName, e);
 								}
 							}
 						} catch (Exception e) {
-							LOG.error("Unable to rebuild " + dataPTable + " indexes " + indexesToPartiallyRebuild, e);
+							LOGGER.error("Unable to rebuild " + dataPTable + " indexes " + indexesToPartiallyRebuild, e);
 						}
 					}
 				}
 			} catch (Throwable t) {
-				LOG.warn("ScheduledBuildIndexTask failed!", t);
+				LOGGER.warn("ScheduledBuildIndexTask failed!", t);
 			} finally {
 				if (scanner != null) {
 					try {
 						scanner.close();
 					} catch (IOException ignored) {
-						LOG.debug("ScheduledBuildIndexTask can't close scanner.", ignored);
+						LOGGER.debug("ScheduledBuildIndexTask can't close scanner.", ignored);
 					}
 				}
 				if (conn != null) {
 					try {
 						conn.close();
 					} catch (SQLException ignored) {
-						LOG.debug("ScheduledBuildIndexTask can't close connection", ignored);
+						LOGGER.debug("ScheduledBuildIndexTask can't close connection", ignored);
 					}
 				}
 			}
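
    The programmatic quieting of the org.apache.hadoop.conf.Configuration.deprecation logger that
    this file used to do (removed above) can be handled in the logging backend instead. With a
    log4j 1.x backend, for example, the equivalent configuration entry would be along these lines
    (illustrative, not part of the patch):

        # log4j.properties: keep Hadoop configuration-deprecation chatter at WARN and above
        log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN
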
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
index 9db11b0..c80070c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
@@ -27,8 +27,6 @@ import java.util.List;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicReference;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.AuthUtil;
 import org.apache.hadoop.hbase.CompoundConfiguration;
@@ -71,6 +69,8 @@ import org.apache.phoenix.schema.PIndexState;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.util.MetaDataUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.protobuf.ByteString;
 import com.google.protobuf.RpcCallback;
@@ -83,9 +83,9 @@ public class PhoenixAccessController extends BaseMetaDataEndpointObserver {
     private boolean hbaseAccessControllerEnabled;
     private UserProvider userProvider;
     private AccessChecker accessChecker;
-    public static final Log LOG = LogFactory.getLog(PhoenixAccessController.class);
-    private static final Log AUDITLOG =
-            LogFactory.getLog("SecurityLogger."+PhoenixAccessController.class.getName());
+    public static final Logger LOGGER = LoggerFactory.getLogger(PhoenixAccessController.class);
+    private static final Logger AUDITLOG =
+            LoggerFactory.getLogger("SecurityLogger."+PhoenixAccessController.class.getName());
 
     private List<BaseMasterAndRegionObserver> getAccessControllers() throws IOException {
         ArrayList<BaseMasterAndRegionObserver> oldAccessControllers = accessControllers.get();
@@ -120,7 +120,7 @@ public class PhoenixAccessController extends BaseMetaDataEndpointObserver {
         this.accessCheckEnabled = conf.getBoolean(QueryServices.PHOENIX_ACLS_ENABLED,
                 QueryServicesOptions.DEFAULT_PHOENIX_ACLS_ENABLED);
         if (!this.accessCheckEnabled) {
-            LOG.warn("PhoenixAccessController has been loaded with authorization checks disabled.");
+            LOGGER.warn("PhoenixAccessController has been loaded with authorization checks disabled.");
         }
         if (env instanceof PhoenixMetaDataControllerEnvironment) {
             this.env = (PhoenixMetaDataControllerEnvironment)env;
@@ -594,8 +594,8 @@ public class PhoenixAccessController extends BaseMetaDataEndpointObserver {
                 }
               }
             }
-        } else if (LOG.isDebugEnabled()) {
-            LOG.debug("No permissions found for table=" + table + " or namespace=" + table.getNamespaceAsString());
+        } else if (LOGGER.isDebugEnabled()) {
+            LOGGER.debug("No permissions found for table=" + table + " or namespace=" + table.getNamespaceAsString());
         }
         return false;
     }
@@ -620,7 +620,7 @@ public class PhoenixAccessController extends BaseMetaDataEndpointObserver {
     }
 
     private static final class Superusers {
-        private static final Log LOG = LogFactory.getLog(Superusers.class);
+        private static final Logger LOGGER = LoggerFactory.getLogger(Superusers.class);
 
         /** Configuration key for superusers */
         public static final String SUPERUSER_CONF_KEY = org.apache.hadoop.hbase.security.Superusers.SUPERUSER_CONF_KEY; // Not getting a name
@@ -648,8 +648,8 @@ public class PhoenixAccessController extends BaseMetaDataEndpointObserver {
                     + "authorization checks for internal operations will not work correctly!");
             }
 
-            if (LOG.isTraceEnabled()) {
-                LOG.trace("Current user name is " + systemUser.getShortName());
+            if (LOGGER.isTraceEnabled()) {
+                LOGGER.trace("Current user name is " + systemUser.getShortName());
             }
             String currentUser = systemUser.getShortName();
             String[] superUserList = conf.getStrings(SUPERUSER_CONF_KEY, new String[0]);
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
index e58fbca..df4986b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
@@ -23,8 +23,6 @@ import java.io.IOException;
 import java.util.List;
 import java.util.NavigableMap;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
@@ -43,6 +41,8 @@ import org.apache.phoenix.schema.PColumn;
 import org.apache.phoenix.schema.PColumnImpl;
 import org.apache.phoenix.util.ScanUtil;
 import org.apache.phoenix.util.ServerUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static org.apache.phoenix.schema.types.PDataType.TRUE_BYTES;
 
@@ -58,7 +58,7 @@ import static org.apache.phoenix.schema.types.PDataType.TRUE_BYTES;
  */
 public class ScanRegionObserver extends BaseScannerRegionObserver {
 
-    private static final Log LOG = LogFactory.getLog(ScanRegionObserver.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(ScanRegionObserver.class);
     public static final byte[] DYN_COLS_METADATA_CELL_QUALIFIER = Bytes.toBytes("D#");
     public static final String DYNAMIC_COLUMN_METADATA_STORED_FOR_MUTATION =
             "_DynColsMetadataStoredForMutation";
@@ -125,8 +125,8 @@ public class ScanRegionObserver extends BaseScannerRegionObserver {
             Put dynColShadowCellsPut = null;
             if (m instanceof Put && Bytes.equals(m.getAttribute(
                     DYNAMIC_COLUMN_METADATA_STORED_FOR_MUTATION), TRUE_BYTES)) {
-                if (LOG.isDebugEnabled()) {
-                    LOG.debug("Adding dynamic column metadata for table: " + tableName + ". Put :" +
+                if (LOGGER.isDebugEnabled()) {
+                    LOGGER.debug("Adding dynamic column metadata for table: " + tableName + ". Put :" +
                             m.toString());
                 }
                 NavigableMap<byte[], List<Cell>> famCellMap = m.getFamilyCellMap();
@@ -174,8 +174,8 @@ public class ScanRegionObserver extends BaseScannerRegionObserver {
         ByteArrayOutputStream qual = new ByteArrayOutputStream();
         qual.write(DYN_COLS_METADATA_CELL_QUALIFIER);
         qual.write(dynCol.getColumnQualifierBytes());
-        if (LOG.isTraceEnabled()) {
-            LOG.trace("Storing shadow cell for dynamic column metadata for dynamic column : " +
+        if (LOGGER.isTraceEnabled()) {
+            LOGGER.trace("Storing shadow cell for dynamic column metadata for dynamic column : " +
                     dynCol.getFamilyName().getString() + "." + dynCol.getName().getString());
         }
         return qual.toByteArray();
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/TaskRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/TaskRegionObserver.java
index 7d00f46..671d888 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/TaskRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/TaskRegionObserver.java
@@ -36,16 +36,12 @@ import com.google.common.base.Strings;
 import com.google.common.collect.ImmutableMap;
 import com.google.gson.JsonObject;
 import com.google.gson.JsonParser;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
@@ -55,7 +51,8 @@ import org.apache.phoenix.schema.PTable.TaskType;
 import org.apache.phoenix.schema.task.Task;
 import org.apache.phoenix.util.EnvironmentEdgeManager;
 import org.apache.phoenix.util.QueryUtil;
-
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Coprocessor for task related operations. This coprocessor would only be registered
@@ -63,7 +60,7 @@ import org.apache.phoenix.util.QueryUtil;
  */
 
 public class TaskRegionObserver extends BaseRegionObserver {
-    public static final Log LOG = LogFactory.getLog(TaskRegionObserver.class);
+    public static final Logger LOGGER = LoggerFactory.getLogger(TaskRegionObserver.class);
 
     protected ScheduledThreadPoolExecutor executor = new ScheduledThreadPoolExecutor(TaskType.values().length);
     private long timeInterval = QueryServicesOptions.DEFAULT_TASK_HANDLING_INTERVAL_MS;
@@ -136,12 +133,6 @@ public class TaskRegionObserver extends BaseRegionObserver {
     public void postOpen(ObserverContext<RegionCoprocessorEnvironment> e) {
         final RegionCoprocessorEnvironment env = e.getEnvironment();
 
-        // turn off verbose deprecation logging
-        Logger deprecationLogger = Logger.getLogger("org.apache.hadoop.conf.Configuration.deprecation");
-        if (deprecationLogger != null) {
-            deprecationLogger.setLevel(Level.WARN);
-        }
-
         SelfHealingTask task = new SelfHealingTask(e.getEnvironment(), timeMaxInterval);
         executor.scheduleWithFixedDelay(task, initialDelay, timeInterval, TimeUnit.MILLISECONDS);
     }
@@ -170,7 +161,7 @@ public class TaskRegionObserver extends BaseRegionObserver {
                     try {
                         TaskType taskType = taskRecord.getTaskType();
                         if (!classMap.containsKey(taskType)) {
-                            LOG.warn("Don't know how to execute task type: " + taskType.name());
+                            LOGGER.warn("Don't know how to execute task type: " + taskType.name());
                             continue;
                         }
 
@@ -224,7 +215,7 @@ public class TaskRegionObserver extends BaseRegionObserver {
 
                     }
                     catch (Throwable t) {
-                        LOG.warn("Exception while running self healingtask. " +
+                        LOGGER.warn("Exception while running self healingtask. " +
                                 "It will be retried in the next system task table scan : " +
                                 " taskType : " + taskRecord.getTaskType().name() +
                                 taskRecord.getSchemaName()  + "." + taskRecord.getTableName() +
@@ -233,13 +224,13 @@ public class TaskRegionObserver extends BaseRegionObserver {
                     }
                 }
             } catch (Throwable t) {
-                LOG.error("SelfHealingTask failed!", t);
+                LOGGER.error("SelfHealingTask failed!", t);
             } finally {
                 if (connForTask != null) {
                     try {
                         connForTask.close();
                     } catch (SQLException ignored) {
-                        LOG.debug("SelfHealingTask can't close connection", ignored);
+                        LOGGER.debug("SelfHealingTask can't close connection", ignored);
                     }
                 }
             }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/tasks/DropChildViewsTask.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/tasks/DropChildViewsTask.java
index f00e1f6..121efd4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/tasks/DropChildViewsTask.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/tasks/DropChildViewsTask.java
@@ -1,7 +1,5 @@
 package org.apache.phoenix.coprocessor.tasks;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.phoenix.coprocessor.MetaDataEndpointImpl;
 import org.apache.phoenix.coprocessor.MetaDataProtocol;
 import org.apache.phoenix.coprocessor.TaskRegionObserver;
@@ -10,6 +8,8 @@ import org.apache.phoenix.schema.MetaDataClient;
 import org.apache.phoenix.schema.task.Task;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.QueryUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.sql.SQLException;
 import java.sql.Timestamp;
@@ -20,7 +20,7 @@ import java.util.Properties;
  *
  */
 public class DropChildViewsTask extends BaseTask {
-    public static final Log LOG = LogFactory.getLog(DropChildViewsTask.class);
+    public static final Logger LOGGER = LoggerFactory.getLogger(DropChildViewsTask.class);
 
     public TaskRegionObserver.TaskResult run(Task.TaskRecord taskRecord) {
         PhoenixConnection pconn = null;
@@ -44,14 +44,14 @@ public class DropChildViewsTask extends BaseTask {
                 return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.SUCCESS, "");
             } else if (System.currentTimeMillis() < timeMaxInterval + timestamp.getTime()) {
                 // skip this task as it has not been expired and its parent table has not been dropped yet
-                LOG.info("Skipping a child view drop task. The parent table has not been dropped yet : " +
+                LOGGER.info("Skipping a child view drop task. The parent table has not been dropped yet : " +
                         taskRecord.getSchemaName() + "." + taskRecord.getTableName() +
                         " with tenant id " + (tenantId == null ? " IS NULL" : tenantId) +
                         " and timestamp " + timestamp.toString());
                 return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.SKIPPED, "");
             }
             else {
-                LOG.warn(" A drop child view task has expired and will be marked as failed : " +
+                LOGGER.warn(" A drop child view task has expired and will be marked as failed : " +
                         taskRecord.getSchemaName() + "." + taskRecord.getTableName() +
                         " with tenant id " + (tenantId == null ? " IS NULL" : tenantId) +
                         " and timestamp " + timestamp.toString());
@@ -59,7 +59,7 @@ public class DropChildViewsTask extends BaseTask {
             }
         }
         catch (Throwable t) {
-            LOG.warn("Exception while dropping a child view task. " +
+            LOGGER.warn("Exception while dropping a child view task. " +
                     taskRecord.getSchemaName()  + "." + taskRecord.getTableName() +
                     " with tenant id " + (taskRecord.getTenantId() == null ? " IS NULL" : taskRecord.getTenantId()) +
                     " and timestamp " + timestamp.toString(), t);
@@ -69,7 +69,7 @@ public class DropChildViewsTask extends BaseTask {
                 try {
                     pconn.close();
                 } catch (SQLException ignored) {
-                    LOG.debug("DropChildViewsTask can't close connection", ignored);
+                    LOGGER.debug("DropChildViewsTask can't close connection", ignored);
                 }
             }
         }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/tasks/IndexRebuildTask.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/tasks/IndexRebuildTask.java
index 754ea8e..d03a35b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/tasks/IndexRebuildTask.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/tasks/IndexRebuildTask.java
@@ -3,8 +3,7 @@ package org.apache.phoenix.coprocessor.tasks;
 import com.google.common.base.Strings;
 import com.google.gson.JsonObject;
 import com.google.gson.JsonParser;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.mapreduce.Cluster;
@@ -16,6 +15,8 @@ import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.task.Task;
 import org.apache.phoenix.util.QueryUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.sql.Connection;
 import java.sql.SQLException;
@@ -31,7 +32,7 @@ public class IndexRebuildTask extends BaseTask  {
     public static final String DISABLE_BEFORE = "DisableBefore";
     public static final String REBUILD_ALL = "RebuildAll";
 
-    public static final Log LOG = LogFactory.getLog(IndexRebuildTask.class);
+    public static final Logger LOGGER = LoggerFactory.getLogger(IndexRebuildTask.class);
 
     @Override
     public TaskRegionObserver.TaskResult run(Task.TaskRecord taskRecord) {
@@ -54,7 +55,7 @@ public class IndexRebuildTask extends BaseTask  {
             if (Strings.isNullOrEmpty(indexName)) {
                 String str = "Index name is not found. Index rebuild cannot continue " +
                         "Data : " + data;
-                LOG.warn(str);
+                LOGGER.warn(str);
                 return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.FAIL, str);
             }
 
@@ -94,7 +95,7 @@ public class IndexRebuildTask extends BaseTask  {
             return null;
         }
         catch (Throwable t) {
-            LOG.warn("Exception while running index rebuild task. " +
+            LOGGER.warn("Exception while running index rebuild task. " +
                     "It will be retried in the next system task table scan : " +
                     taskRecord.getSchemaName() + "." + taskRecord.getTableName() +
                     " with tenant id " + (taskRecord.getTenantId() == null ? " IS NULL" : taskRecord.getTenantId()) +
@@ -105,7 +106,7 @@ public class IndexRebuildTask extends BaseTask  {
                 try {
                     conn.close();
                 } catch (SQLException e) {
-                    LOG.debug("IndexRebuildTask can't close connection");
+                    LOGGER.debug("IndexRebuildTask can't close connection");
                 }
             }
         }
@@ -148,7 +149,7 @@ public class IndexRebuildTask extends BaseTask  {
 
             if (job != null && job.isComplete()) {
                 if (job.isSuccessful()) {
-                    LOG.warn("IndexRebuildTask checkCurrentResult job is successful " + taskRecord.getTableName());
+                    LOGGER.warn("IndexRebuildTask checkCurrentResult job is successful " + taskRecord.getTableName());
                     return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.SUCCESS, "");
                 } else {
                     return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.FAIL,
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
index 55dbfae..e1ef1d5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
@@ -27,8 +27,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -81,6 +79,8 @@ import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.LogUtil;
 import org.apache.phoenix.util.SQLCloseables;
 import org.apache.phoenix.util.ScanUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Lists;
@@ -95,7 +95,7 @@ import com.google.common.collect.Lists;
  * @since 0.1
  */
 public abstract class BaseQueryPlan implements QueryPlan {
-	private static final Log LOG = LogFactory.getLog(BaseQueryPlan.class);
+	private static final Logger LOGGER = LoggerFactory.getLogger(BaseQueryPlan.class);
     protected static final long DEFAULT_ESTIMATED_SIZE = 10 * 1024; // 10 K
     
     protected final TableRef tableRef;
@@ -357,13 +357,13 @@ public abstract class BaseQueryPlan implements QueryPlan {
             }
         }
         
-        if (LOG.isDebugEnabled()) {
-        	LOG.debug(LogUtil.addCustomAnnotations("Scan ready for iteration: " + scan, connection));
+        if (LOGGER.isDebugEnabled()) {
+        	LOGGER.debug(LogUtil.addCustomAnnotations("Scan ready for iteration: " + scan, connection));
         }
         
         ResultIterator iterator =  newIterator(scanGrouper, scan, caches);
-        if (LOG.isDebugEnabled()) {
-        	LOG.debug(LogUtil.addCustomAnnotations("Iterator ready: " + iterator, connection));
+        if (LOGGER.isDebugEnabled()) {
+        	LOGGER.debug(LogUtil.addCustomAnnotations("Iterator ready: " + iterator, connection));
         }
 
         // wrap the iterator so we start/end tracing as we expect
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
index b5cd6b1..2117d22 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
@@ -34,8 +34,6 @@ import java.util.concurrent.Future;
 import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.commons.codec.binary.Hex;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -83,6 +81,8 @@ import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.types.PVarbinary;
 import org.apache.phoenix.util.CostUtil;
 import org.apache.phoenix.util.SQLCloseables;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
@@ -90,7 +90,7 @@ import com.google.common.collect.Sets;
 import org.apache.phoenix.util.ServerUtil;
 
 public class HashJoinPlan extends DelegateQueryPlan {
-    private static final Log LOG = LogFactory.getLog(HashJoinPlan.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(HashJoinPlan.class);
     private static final Random RANDOM = new Random();
 
     private final SelectStatement statement;
@@ -553,9 +553,9 @@ public class HashJoinPlan extends DelegateQueryPlan {
                     } else {
                         cacheId = Bytes.toBytes(RANDOM.nextLong());
                     }
-                    LOG.debug("Using cache ID " + Hex.encodeHexString(cacheId) + " for " + queryString);
+                    LOGGER.debug("Using cache ID " + Hex.encodeHexString(cacheId) + " for " + queryString);
                     if (cache == null) {
-                        LOG.debug("Making RPC to add cache " + Hex.encodeHexString(cacheId));
+                        LOGGER.debug("Making RPC to add cache " + Hex.encodeHexString(cacheId));
                         cache = parent.hashClient.addHashCache(ranges, cacheId, iterator,
                                 plan.getEstimatedSize(), hashExpressions, singleValueOnly, usePersistentCache,
                                 parent.delegate.getTableRef().getTable(), keyRangeRhsExpression,
@@ -564,7 +564,7 @@ public class HashJoinPlan extends DelegateQueryPlan {
                         boolean isSet = parent.firstJobEndTime.compareAndSet(0, endTime);
                         if (!isSet && (endTime
                                 - parent.firstJobEndTime.get()) > parent.maxServerCacheTimeToLive) {
-                            LOG.warn(addCustomAnnotations(
+                            LOGGER.warn(addCustomAnnotations(
                                 "Hash plan [" + index
                                         + "] execution seems too slow. Earlier hash cache(s) might have expired on servers.",
                                 parent.delegate.getContext().getConnection()));
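
The two cache-ID debug calls above are not wrapped in a level check, so Hex.encodeHexString(cacheId) and the string concatenation run even when DEBUG is disabled. A hedged sketch (not part of this commit) of the two usual ways to limit that cost:

    // Placeholders defer only the final string assembly; the hex-encoding argument
    // is still evaluated eagerly.
    LOGGER.debug("Using cache ID {} for {}", Hex.encodeHexString(cacheId), queryString);
    // An explicit guard also skips computing the argument when DEBUG is off.
    if (LOGGER.isDebugEnabled()) {
        LOGGER.debug("Making RPC to add cache {}", Hex.encodeHexString(cacheId));
    }
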
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/CollationKeyFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/CollationKeyFunction.java
index 9d48feb..6644a7e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/CollationKeyFunction.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/CollationKeyFunction.java
@@ -25,8 +25,6 @@ import java.util.List;
 import java.util.Locale;
 
 import org.apache.commons.lang.BooleanUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.phoenix.expression.Expression;
 import org.apache.phoenix.parse.FunctionParseNode;
@@ -37,6 +35,8 @@ import org.apache.phoenix.schema.types.PInteger;
 import org.apache.phoenix.schema.types.PVarbinary;
 import org.apache.phoenix.schema.types.PVarchar;
 import org.apache.phoenix.util.VarBinaryFormatter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.force.db.i18n.LinguisticSort;
 import com.force.i18n.LocaleUtils;
@@ -87,7 +87,7 @@ import com.force.i18n.LocaleUtils;
 		@FunctionParseNode.Argument(allowedTypes = { PInteger.class }, defaultValue = "null", isConstant = true) })
 public class CollationKeyFunction extends ScalarFunction {
 
-	private static final Log LOG = LogFactory.getLog(CollationKeyFunction.class);
+	private static final Logger LOGGER = LoggerFactory.getLogger(CollationKeyFunction.class);
 
 	public static final String NAME = "COLLATION_KEY";
 
@@ -114,8 +114,8 @@ public class CollationKeyFunction extends ScalarFunction {
 			return false;
 		}
 		String inputString = (String) PVarchar.INSTANCE.toObject(ptr, expression.getSortOrder());
-		if (LOG.isTraceEnabled()) {
-			LOG.trace("CollationKey inputString: " + inputString);
+		if (LOGGER.isTraceEnabled()) {
+			LOGGER.trace("CollationKey inputString: " + inputString);
 		}
 
 		if (inputString == null) {
@@ -124,8 +124,8 @@ public class CollationKeyFunction extends ScalarFunction {
 
 		byte[] collationKeyByteArray = collator.getCollationKey(inputString).toByteArray();
 
-		if (LOG.isTraceEnabled()) {
-			LOG.trace("CollationKey bytes: " + VarBinaryFormatter.INSTANCE.format(collationKeyByteArray));
+		if (LOGGER.isTraceEnabled()) {
+			LOGGER.trace("CollationKey bytes: " + VarBinaryFormatter.INSTANCE.format(collationKeyByteArray));
 		}
 
 		ptr.set(collationKeyByteArray);
@@ -138,19 +138,19 @@ public class CollationKeyFunction extends ScalarFunction {
 		Integer collatorStrength = getLiteralValue(3, Integer.class);
 		Integer collatorDecomposition = getLiteralValue(4, Integer.class);
 
-		if (LOG.isTraceEnabled()) {
+		if (LOGGER.isTraceEnabled()) {
 			StringBuilder logInputsMessage = new StringBuilder();
 			logInputsMessage.append("Input (literal) arguments:").append("localeISOCode: " + localeISOCode)
 					.append(", useSpecialUpperCaseCollator: " + useSpecialUpperCaseCollator)
 					.append(", collatorStrength: " + collatorStrength)
 					.append(", collatorDecomposition: " + collatorDecomposition);
-			LOG.trace(logInputsMessage);
+			LOGGER.trace(logInputsMessage.toString());
 		}
 
 		Locale locale = LocaleUtils.get().getLocaleByIsoCode(localeISOCode);
 
-		if (LOG.isTraceEnabled()) {
-			LOG.trace(String.format("Locale: " + locale.toLanguageTag()));
+		if (LOGGER.isTraceEnabled()) {
+			LOGGER.trace("Locale: " + locale.toLanguageTag());
 		}
 
 		LinguisticSort linguisticSort = LinguisticSort.get(locale);
@@ -166,8 +166,8 @@ public class CollationKeyFunction extends ScalarFunction {
 			collator.setDecomposition(collatorDecomposition);
 		}
 
-		if (LOG.isTraceEnabled()) {
-			LOG.trace(String.format("Collator: [strength: %d, decomposition: %d], Special-Upper-Case: %s",
+		if (LOGGER.isTraceEnabled()) {
+			LOGGER.trace(String.format("Collator: [strength: %d, decomposition: %d], Special-Upper-Case: %s",
 					collator.getStrength(), collator.getDecomposition(),
 					BooleanUtils.isTrue(useSpecialUpperCaseCollator)));
 		}
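
The added toString() on logInputsMessage above is needed because the slf4j Logger methods take a String message, whereas commons-logging's Log accepted any Object (including a StringBuilder). A small sketch (not part of this commit) of the equivalent parameterized call, which avoids building the StringBuilder by hand:

    LOGGER.trace("Input (literal) arguments: localeISOCode: {}, useSpecialUpperCaseCollator: {}, "
            + "collatorStrength: {}, collatorDecomposition: {}",
            localeISOCode, useSpecialUpperCaseCollator, collatorStrength, collatorDecomposition);
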
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
index 1c99588..1c036ac 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
@@ -29,8 +29,6 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
@@ -88,6 +86,8 @@ import org.apache.phoenix.util.EnvironmentEdgeManager;
 import org.apache.phoenix.util.ScanUtil;
 import org.apache.phoenix.util.ServerUtil;
 import org.apache.phoenix.util.ServerUtil.ConnectionType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Lists;
 import com.google.common.collect.Multimap;
@@ -115,7 +115,7 @@ import com.google.common.collect.Multimap;
  */
 public class Indexer extends BaseRegionObserver {
 
-  private static final Log LOG = LogFactory.getLog(Indexer.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(Indexer.class);
   private static final OperationStatus IGNORE = new OperationStatus(OperationStatusCode.SUCCESS);
   private static final OperationStatus NOWRITE = new OperationStatus(OperationStatusCode.SUCCESS);
   
@@ -233,7 +233,7 @@ public class Indexer extends BaseRegionObserver {
                 StoreFailuresInCachePolicy.class, IndexFailurePolicy.class);
           IndexFailurePolicy policy =
               policyClass.getConstructor(PerRegionIndexWriteCache.class).newInstance(failedIndexEdits);
-          LOG.debug("Setting up recovery writter with failure policy: " + policy.getClass());
+          LOGGER.debug("Setting up recovery writer with failure policy: " + policy.getClass());
           recoveryWriter =
               new RecoveryIndexWriter(policy, indexWriterEnv, serverName + "-recovery-writer");
         } catch (Exception ex) {
@@ -242,7 +242,7 @@ public class Indexer extends BaseRegionObserver {
       } catch (NoSuchMethodError ex) {
           disabled = true;
           super.start(e);
-          LOG.error("Must be too early a version of HBase. Disabled coprocessor ", ex);
+          LOGGER.error("Must be too early a version of HBase. Disabled coprocessor ", ex);
       }
   }
 
@@ -324,8 +324,8 @@ public class Indexer extends BaseRegionObserver {
       } finally {
           long duration = EnvironmentEdgeManager.currentTimeMillis() - start;
           if (duration >= slowIndexPrepareThreshold) {
-              if (LOG.isDebugEnabled()) {
-                  LOG.debug(getCallTooSlowMessage("preIncrementAfterRowLock", duration, slowPreIncrementThreshold));
+              if (LOGGER.isDebugEnabled()) {
+                  LOGGER.debug(getCallTooSlowMessage("preIncrementAfterRowLock", duration, slowPreIncrementThreshold));
               }
               metricSource.incrementSlowDuplicateKeyCheckCalls();
           }
@@ -349,8 +349,8 @@ public class Indexer extends BaseRegionObserver {
       } finally {
           long duration = EnvironmentEdgeManager.currentTimeMillis() - start;
           if (duration >= slowIndexPrepareThreshold) {
-              if (LOG.isDebugEnabled()) {
-                  LOG.debug(getCallTooSlowMessage("preBatchMutate", duration, slowIndexPrepareThreshold));
+              if (LOGGER.isDebugEnabled()) {
+                  LOGGER.debug(getCallTooSlowMessage("preBatchMutate", duration, slowIndexPrepareThreshold));
               }
               metricSource.incrementNumSlowIndexPrepareCalls();
           }
@@ -503,8 +503,8 @@ public class Indexer extends BaseRegionObserver {
 
           long duration = EnvironmentEdgeManager.currentTimeMillis() - start;
           if (duration >= slowIndexPrepareThreshold) {
-              if (LOG.isDebugEnabled()) {
-                  LOG.debug(getCallTooSlowMessage("indexPrepare", duration, slowIndexPrepareThreshold));
+              if (LOGGER.isDebugEnabled()) {
+                  LOGGER.debug(getCallTooSlowMessage("indexPrepare", duration, slowIndexPrepareThreshold));
               }
               metricSource.incrementNumSlowIndexPrepareCalls();
           }
@@ -575,8 +575,8 @@ public class Indexer extends BaseRegionObserver {
            removeBatchMutateContext(c);
            long duration = EnvironmentEdgeManager.currentTimeMillis() - start;
            if (duration >= slowIndexWriteThreshold) {
-               if (LOG.isDebugEnabled()) {
-                   LOG.debug(getCallTooSlowMessage("postBatchMutateIndispensably", duration, slowIndexWriteThreshold));
+               if (LOGGER.isDebugEnabled()) {
+                   LOGGER.debug(getCallTooSlowMessage("postBatchMutateIndispensably", duration, slowIndexWriteThreshold));
                }
                metricSource.incrementNumSlowIndexWriteCalls();
            }
@@ -615,8 +615,8 @@ public class Indexer extends BaseRegionObserver {
 
           long duration = EnvironmentEdgeManager.currentTimeMillis() - start;
           if (duration >= slowIndexWriteThreshold) {
-              if (LOG.isDebugEnabled()) {
-                  LOG.debug(getCallTooSlowMessage("indexWrite", duration, slowIndexWriteThreshold));
+              if (LOGGER.isDebugEnabled()) {
+                  LOGGER.debug(getCallTooSlowMessage("indexWrite", duration, slowIndexWriteThreshold));
               }
               metricSource.incrementNumSlowIndexWriteCalls();
           }
@@ -674,7 +674,7 @@ public class Indexer extends BaseRegionObserver {
           return;
         }
 
-        LOG.info("Found some outstanding index updates that didn't succeed during"
+        LOGGER.info("Found some outstanding index updates that didn't succeed during"
                 + " WAL replay - attempting to replay now.");
 
         // do the usual writer stuff, killing the server again, if we can't manage to make the index
@@ -682,14 +682,14 @@ public class Indexer extends BaseRegionObserver {
         try {
             writer.writeAndKillYourselfOnFailure(updates, true, ScanUtil.UNKNOWN_CLIENT_VERSION);
         } catch (IOException e) {
-                LOG.error("During WAL replay of outstanding index updates, "
+                LOGGER.error("During WAL replay of outstanding index updates, "
                         + "Exception is thrown instead of killing server during index writing", e);
         }
     } finally {
          long duration = EnvironmentEdgeManager.currentTimeMillis() - start;
          if (duration >= slowPostOpenThreshold) {
-             if (LOG.isDebugEnabled()) {
-                 LOG.debug(getCallTooSlowMessage("postOpen", duration, slowPostOpenThreshold));
+             if (LOGGER.isDebugEnabled()) {
+                 LOGGER.debug(getCallTooSlowMessage("postOpen", duration, slowPostOpenThreshold));
              }
              metricSource.incrementNumSlowPostOpenCalls();
          }
@@ -722,8 +722,8 @@ public class Indexer extends BaseRegionObserver {
       } finally {
           long duration = EnvironmentEdgeManager.currentTimeMillis() - start;
           if (duration >= slowPreWALRestoreThreshold) {
-              if (LOG.isDebugEnabled()) {
-                  LOG.debug(getCallTooSlowMessage("preWALRestore", duration, slowPreWALRestoreThreshold));
+              if (LOGGER.isDebugEnabled()) {
+                  LOGGER.debug(getCallTooSlowMessage("preWALRestore", duration, slowPreWALRestoreThreshold));
               }
               metricSource.incrementNumSlowPreWALRestoreCalls();
           }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/LockManager.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/LockManager.java
index 02e4c3c..c65fc9b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/LockManager.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/LockManager.java
@@ -25,12 +25,12 @@ import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
 import org.apache.htrace.Trace;
 import org.apache.htrace.TraceScope;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * 
@@ -41,7 +41,7 @@ import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
  *
  */
 public class LockManager {
-    private static final Log LOG = LogFactory.getLog(LockManager.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(LockManager.class);
 
     private final ConcurrentHashMap<ImmutableBytesPtr, RowLockContext> lockedRows =
             new ConcurrentHashMap<ImmutableBytesPtr, RowLockContext>();
@@ -99,7 +99,7 @@ public class LockManager {
             success = true;
             return result;
         } catch (InterruptedException ie) {
-            LOG.warn("Thread interrupted waiting for lock on row: " + rowKey);
+            LOGGER.warn("Thread interrupted waiting for lock on row: " + rowKey);
             InterruptedIOException iie = new InterruptedIOException();
             iie.initCause(ie);
             if (traceScope != null) {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/BaseIndexBuilder.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/BaseIndexBuilder.java
index 0ff83ca..69d135d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/BaseIndexBuilder.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/BaseIndexBuilder.java
@@ -14,8 +14,6 @@ import java.lang.reflect.Constructor;
 import java.util.Collection;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Increment;
@@ -27,6 +25,8 @@ import org.apache.phoenix.coprocessor.BaseScannerRegionObserver.ReplayWrite;
 import org.apache.phoenix.hbase.index.covered.IndexCodec;
 import org.apache.phoenix.hbase.index.covered.IndexMetaData;
 import org.apache.phoenix.hbase.index.covered.NonTxIndexBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Basic implementation of the {@link IndexBuilder} that doesn't do any actual work of indexing.
@@ -38,7 +38,7 @@ import org.apache.phoenix.hbase.index.covered.NonTxIndexBuilder;
  */
 public abstract class BaseIndexBuilder implements IndexBuilder {
     public static final String CODEC_CLASS_NAME_KEY = "org.apache.hadoop.hbase.index.codec.class";
-    private static final Log LOG = LogFactory.getLog(BaseIndexBuilder.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(BaseIndexBuilder.class);
 
     protected boolean stopped;
     protected RegionCoprocessorEnvironment env;
@@ -120,7 +120,7 @@ public abstract class BaseIndexBuilder implements IndexBuilder {
 
     @Override
     public void stop(String why) {
-        LOG.debug("Stopping because: " + why);
+        LOGGER.debug("Stopping because: " + why);
         this.stopped = true;
     }
 
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/IndexBuildManager.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/IndexBuildManager.java
index 07a05bc..cf5dad2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/IndexBuildManager.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/IndexBuildManager.java
@@ -22,8 +22,6 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.Stoppable;
@@ -37,13 +35,15 @@ import org.apache.phoenix.coprocessor.BaseScannerRegionObserver.ReplayWrite;
 import org.apache.phoenix.hbase.index.Indexer;
 import org.apache.phoenix.hbase.index.covered.IndexMetaData;
 import org.apache.phoenix.index.PhoenixIndexMetaData;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Manage the building of index updates from primary table updates.
  */
 public class IndexBuildManager implements Stoppable {
 
-  private static final Log LOG = LogFactory.getLog(IndexBuildManager.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(IndexBuildManager.class);
   private final IndexBuilder delegate;
   private boolean stopped;
 
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilder.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilder.java
index 820a475..19eda4d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilder.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilder.java
@@ -13,8 +13,6 @@ import java.io.IOException;
 import java.util.Collection;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
@@ -27,6 +25,8 @@ import org.apache.phoenix.hbase.index.covered.data.LocalHBaseState;
 import org.apache.phoenix.hbase.index.covered.data.LocalTable;
 import org.apache.phoenix.hbase.index.covered.update.ColumnTracker;
 import org.apache.phoenix.hbase.index.covered.update.IndexUpdateManager;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Build covered indexes for phoenix updates.
@@ -38,7 +38,7 @@ import org.apache.phoenix.hbase.index.covered.update.IndexUpdateManager;
  * bloated index that needs to be cleaned up by a background process.
  */
 public class NonTxIndexBuilder extends BaseIndexBuilder {
-    private static final Log LOG = LogFactory.getLog(NonTxIndexBuilder.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(NonTxIndexBuilder.class);
 
     protected LocalHBaseState localTable;
 
@@ -57,8 +57,8 @@ public class NonTxIndexBuilder extends BaseIndexBuilder {
 
         batchMutationAndAddUpdates(manager, state, mutation, indexMetaData);
 
-        if (LOG.isTraceEnabled()) {
-            LOG.trace("Found index updates for Mutation: " + mutation + "\n" + manager);
+        if (LOGGER.isTraceEnabled()) {
+            LOGGER.trace("Found index updates for Mutation: " + mutation + "\n" + manager);
         }
 
         return manager.toMap();
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java
index 0fc9e14..4a62e14 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java
@@ -21,8 +21,6 @@ import java.util.Comparator;
 import java.util.Iterator;
 import java.util.SortedSet;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.KVComparator;
@@ -34,6 +32,8 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.hbase.index.covered.KeyValueStore;
 import org.apache.phoenix.hbase.index.covered.LocalTableState;
 import org.apache.phoenix.hbase.index.scanner.ReseekableScanner;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Like the HBase {@link MemStore}, but without all that extra work around maintaining snapshots and
@@ -74,7 +74,7 @@ import org.apache.phoenix.hbase.index.scanner.ReseekableScanner;
  */
 public class IndexMemStore implements KeyValueStore {
 
-  private static final Log LOG = LogFactory.getLog(IndexMemStore.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(IndexMemStore.class);
   private IndexKeyValueSkipListSet kvset;
   private Comparator<KeyValue> comparator;
 
@@ -113,8 +113,8 @@ public class IndexMemStore implements KeyValueStore {
 
   @Override
   public void add(KeyValue kv, boolean overwrite) {
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("Inserting: " + toString(kv));
+    if (LOGGER.isTraceEnabled()) {
+      LOGGER.trace("Inserting: " + toString(kv));
     }
     // if overwriting, we will always update
     if (!overwrite) {
@@ -124,17 +124,17 @@ public class IndexMemStore implements KeyValueStore {
       kvset.add(kv);
     }
 
-    if (LOG.isTraceEnabled()) {
+    if (LOGGER.isTraceEnabled()) {
       dump();
     }
   }
 
   private void dump() {
-    LOG.trace("Current kv state:\n");
+    LOGGER.trace("Current kv state:\n");
     for (KeyValue kv : this.kvset) {
-      LOG.trace("KV: " + toString(kv));
+      LOGGER.trace("KV: " + toString(kv));
     }
-    LOG.trace("========== END MemStore Dump ==================\n");
+    LOGGER.trace("========== END MemStore Dump ==================\n");
   }
 
   private String toString(KeyValue kv) {
@@ -144,12 +144,12 @@ public class IndexMemStore implements KeyValueStore {
 
   @Override
   public void rollback(KeyValue kv) {
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("Rolling back: " + toString(kv));
+    if (LOGGER.isTraceEnabled()) {
+      LOGGER.trace("Rolling back: " + toString(kv));
     }
     // If the key is in the store, delete it
     this.kvset.remove(kv);
-    if (LOG.isTraceEnabled()) {
+    if (LOGGER.isTraceEnabled()) {
       dump();
     }
   }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/BaseTaskRunner.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/BaseTaskRunner.java
index 5cd3fcb..145c95b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/BaseTaskRunner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/BaseTaskRunner.java
@@ -23,9 +23,9 @@ import java.util.concurrent.CancellationException;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Abortable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.ListeningExecutorService;
@@ -41,7 +41,7 @@ import com.google.common.util.concurrent.MoreExecutors;
  */
 public abstract class BaseTaskRunner implements TaskRunner {
 
-  private static final Log LOG = LogFactory.getLog(BaseTaskRunner.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(BaseTaskRunner.class);
   protected ListeningExecutorService writerPool;
   private boolean stopped;
 
@@ -77,7 +77,7 @@ public abstract class BaseTaskRunner implements TaskRunner {
 
   private void logAndNotifyAbort(Exception e, Abortable abort) {
     String msg = "Found a failed task because: " + e.getMessage();
-    LOG.error(msg, e);
+    LOGGER.error(msg, e);
     abort.abort(msg, e.getCause());
   }
 
@@ -118,7 +118,7 @@ public abstract class BaseTaskRunner implements TaskRunner {
     if (this.stopped) {
       return;
     }
-    LOG.info("Shutting down task runner because " + why);
+    LOGGER.info("Shutting down task runner because " + why);
     this.writerPool.shutdownNow();
   }
 
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/QuickFailingTaskRunner.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/QuickFailingTaskRunner.java
index 5b9717e..720ad98 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/QuickFailingTaskRunner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/QuickFailingTaskRunner.java
@@ -20,8 +20,8 @@ package org.apache.phoenix.hbase.index.parallel;
 import java.util.List;
 import java.util.concurrent.ExecutorService;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
@@ -32,7 +32,7 @@ import com.google.common.util.concurrent.ListenableFuture;
  */
 public class QuickFailingTaskRunner extends BaseTaskRunner {
 
-  static final Log LOG = LogFactory.getLog(QuickFailingTaskRunner.class);
+  static final Logger LOGGER = LoggerFactory.getLogger(QuickFailingTaskRunner.class);
 
   /**
    * @param service thread pool to which {@link Task}s are submitted. This service is then 'owned'
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/TaskBatch.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/TaskBatch.java
index 62e4522..208464e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/TaskBatch.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/TaskBatch.java
@@ -22,9 +22,9 @@ import java.util.Collection;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicBoolean;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Abortable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A group of {@link Task}s. The tasks are all bound together using the same {@link Abortable} (
@@ -32,7 +32,7 @@ import org.apache.hadoop.hbase.Abortable;
  * @param <V> expected result type from all the tasks
  */
 public class TaskBatch<V> implements Abortable {
-  private static final Log LOG = LogFactory.getLog(TaskBatch.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(TaskBatch.class);
   private AtomicBoolean aborted = new AtomicBoolean();
   private List<Task<V>> tasks;
 
@@ -57,7 +57,7 @@ public class TaskBatch<V> implements Abortable {
     if (this.aborted.getAndSet(true)) {
       return;
     }
-    LOG.info("Aborting batch of tasks because " + why);
+    LOGGER.info("Aborting batch of tasks because " + why);
   }
 
   @Override
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/ThreadPoolBuilder.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/ThreadPoolBuilder.java
index 58a976a..bedd495 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/ThreadPoolBuilder.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/ThreadPoolBuilder.java
@@ -17,10 +17,10 @@
  */
 package org.apache.phoenix.hbase.index.parallel;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.util.Pair;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Helper utility to make a thread pool from a configuration based on reasonable defaults and passed
@@ -28,7 +28,7 @@ import org.apache.hadoop.hbase.util.Pair;
  */
 public class ThreadPoolBuilder {
 
-  private static final Log LOG = LogFactory.getLog(ThreadPoolBuilder.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(ThreadPoolBuilder.class);
   private static final long DEFAULT_TIMEOUT = 60;
   private static final int DEFAULT_MAX_THREADS = 1;// is there a better default?
   private Pair<String, Long> timeout;
@@ -72,7 +72,7 @@ public class ThreadPoolBuilder {
       maxThreads =
           key == null ? this.maxThreads.getSecond() : conf.getInt(key, this.maxThreads.getSecond());
     }
-    LOG.trace("Creating pool builder with max " + maxThreads + " threads ");
+    LOGGER.trace("Creating pool builder with max " + maxThreads + " threads ");
     return maxThreads;
   }
 
@@ -84,7 +84,7 @@ public class ThreadPoolBuilder {
           key == null ? this.timeout.getSecond() : conf.getLong(key, this.timeout.getSecond());
     }
 
-    LOG.trace("Creating pool builder with core thread timeout of " + timeout + " seconds ");
+    LOGGER.trace("Creating pool builder with core thread timeout of " + timeout + " seconds ");
     return timeout;
   }
 }
\ No newline at end of file
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/ThreadPoolManager.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/ThreadPoolManager.java
index db3b845..2de0528 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/ThreadPoolManager.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/ThreadPoolManager.java
@@ -27,18 +27,18 @@ import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.util.Threads;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Manage access to thread pools
  */
 public class ThreadPoolManager {
 
-  private static final Log LOG = LogFactory.getLog(ThreadPoolManager.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(ThreadPoolManager.class);
 
   /**
    * Get an executor for the given name, based on the passed {@link Configuration}. If a thread pool
@@ -62,7 +62,7 @@ public class ThreadPoolManager {
     ThreadPoolExecutor pool = (ThreadPoolExecutor) poolCache.get(builder.getName());
     if (pool == null || pool.isTerminating() || pool.isShutdown()) {
       pool = getDefaultExecutor(builder);
-      LOG.info("Creating new pool for " + builder.getName());
+      LOGGER.info("Creating new pool for " + builder.getName());
       poolCache.put(builder.getName(), pool);
     }
     ((ShutdownOnUnusedThreadPoolExecutor) pool).addReference();
@@ -120,14 +120,14 @@ public class ThreadPoolManager {
     @Override
     protected void finalize() {
       // override references counter if we go out of scope - ensures the pool gets cleaned up
-      LOG.info("Shutting down pool '" + poolName + "' because no more references");
+      LOGGER.info("Shutting down pool '" + poolName + "' because no more references");
       super.finalize();
     }
 
     @Override
     public void shutdown() {
       if (references.decrementAndGet() <= 0) {
-        LOG.debug("Shutting down pool " + this.poolName);
+        LOGGER.debug("Shutting down pool " + this.poolName);
         super.shutdown();
       }
     }
@@ -135,7 +135,7 @@ public class ThreadPoolManager {
     @Override
     public List<Runnable> shutdownNow() {
       if (references.decrementAndGet() <= 0) {
-        LOG.debug("Shutting down pool " + this.poolName + " NOW!");
+        LOGGER.debug("Shutting down pool " + this.poolName + " NOW!");
         return super.shutdownNow();
       }
       return Collections.emptyList();
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
index a4a34a1..389af36 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
@@ -26,8 +26,6 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
@@ -45,6 +43,8 @@ import org.apache.phoenix.hbase.index.covered.Batch;
 import org.apache.phoenix.hbase.index.covered.data.LazyValueGetter;
 import org.apache.phoenix.hbase.index.covered.update.ColumnReference;
 import org.apache.phoenix.hbase.index.scanner.ScannerBuilder.CoveredDeleteScanner;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Lists;
 import com.google.common.primitives.Longs;
@@ -65,7 +65,7 @@ public class IndexManagementUtil {
     public static final String WAL_EDIT_CODEC_CLASS_KEY = "hbase.regionserver.wal.codec";
 
     private static final String INDEX_HLOG_READER_CLASS_NAME = "org.apache.hadoop.hbase.regionserver.wal.IndexedHLogReader";
-    private static final Log LOG = LogFactory.getLog(IndexManagementUtil.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(IndexManagementUtil.class);
 
     public static boolean isWALEditCodecSet(Configuration conf) {
         // check to see if the WALEditCodec is installed
@@ -191,10 +191,10 @@ public class IndexManagementUtil {
         try {
             throw e;
         } catch (IOException e1) {
-            LOG.info("Rethrowing " + e);
+            LOGGER.info("Rethrowing " + e);
             throw e1;
         } catch (Throwable e1) {
-            LOG.info("Rethrowing " + e1 + " as a " + IndexBuildingFailureException.class.getSimpleName());
+            LOGGER.info("Rethrowing " + e1 + " as a " + IndexBuildingFailureException.class.getSimpleName());
             throw new IndexBuildingFailureException("Failed to build index for unexpected reason!", e1);
         }
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriter.java
index c28288c..86624fa 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriter.java
@@ -23,8 +23,6 @@ import java.util.HashMap;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicBoolean;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.client.Mutation;
@@ -34,6 +32,8 @@ import org.apache.phoenix.hbase.index.exception.IndexWriteException;
 import org.apache.phoenix.hbase.index.table.HTableInterfaceReference;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.index.PhoenixIndexFailurePolicy;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.ArrayListMultimap;
 import com.google.common.collect.Multimap;
@@ -47,7 +47,7 @@ import com.google.common.collect.Multimap;
  */
 public class IndexWriter implements Stoppable {
 
-  private static final Log LOG = LogFactory.getLog(IndexWriter.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(IndexWriter.class);
   public static final String INDEX_COMMITTER_CONF_KEY = "index.writer.commiter.class";
   public static final String INDEX_FAILURE_POLICY_CONF_KEY = "index.writer.failurepolicy.class";
   private AtomicBoolean stopped = new AtomicBoolean(false);
@@ -154,8 +154,8 @@ public class IndexWriter implements Stoppable {
             boolean allowLocalUpdates, int clientVersion) throws IOException {
     try {
       write(toWrite, allowLocalUpdates, clientVersion);
-      if (LOG.isTraceEnabled()) {
-        LOG.trace("Done writing all index updates!\n\t" + toWrite);
+      if (LOGGER.isTraceEnabled()) {
+        LOGGER.trace("Done writing all index updates!\n\t" + toWrite);
       }
     } catch (Exception e) {
       this.failurePolicy.handleFailure(toWrite, e);
@@ -227,7 +227,7 @@ public class IndexWriter implements Stoppable {
       // already stopped
       return;
     }
-    LOG.debug("Stopping because " + why);
+    LOGGER.debug("Stopping because " + why);
     this.writer.stop(why);
     this.failurePolicy.stop(why);
   }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriterUtils.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriterUtils.java
index ef53b9f..dd43cb2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriterUtils.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriterUtils.java
@@ -17,8 +17,6 @@
  */
 package org.apache.phoenix.hbase.index.write;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.HConstants;
@@ -27,10 +25,12 @@ import org.apache.phoenix.hbase.index.util.IndexManagementUtil;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.ServerUtil;
 import org.apache.phoenix.util.ServerUtil.ConnectionType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class IndexWriterUtils {
 
-  private static final Log LOG = LogFactory.getLog(IndexWriterUtils.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(IndexWriterUtils.class);
 
   /**
    * Maximum number of threads to allow per-table when writing. Each writer thread (from
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/KillServerOnFailurePolicy.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/KillServerOnFailurePolicy.java
index cba2459..4996ecc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/KillServerOnFailurePolicy.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/KillServerOnFailurePolicy.java
@@ -19,13 +19,13 @@ package org.apache.phoenix.hbase.index.write;
 
 import java.io.IOException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.phoenix.hbase.index.table.HTableInterfaceReference;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Multimap;
 
@@ -34,7 +34,7 @@ import com.google.common.collect.Multimap;
  */
 public class KillServerOnFailurePolicy implements IndexFailurePolicy {
 
-  private static final Log LOG = LogFactory.getLog(KillServerOnFailurePolicy.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(KillServerOnFailurePolicy.class);
   private Abortable abortable;
   private Stoppable stoppable;
 
@@ -66,11 +66,11 @@ public class KillServerOnFailurePolicy implements IndexFailurePolicy {
     // notify the regionserver of the failure
     String msg =
         "Could not update the index table, killing server region because couldn't write to an index table";
-    LOG.error(msg, cause);
+    LOGGER.error(msg, cause);
     try {
       this.abortable.abort(msg, cause);
     } catch (Exception e) {
-      LOG.fatal("Couldn't abort this server to preserve index writes, "
+      LOGGER.error("Couldn't abort this server to preserve index writes, "
           + "attempting to hard kill the server");
       System.exit(1);
     }
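
slf4j defines no FATAL level, which is why LOG.fatal above becomes LOGGER.error. If the fatal intent should remain visible to log routing and filtering, the usual slf4j idiom is a Marker; a minimal sketch (not part of this commit) that also passes the caught exception so the stack trace is kept:

    import org.slf4j.Marker;
    import org.slf4j.MarkerFactory;

    private static final Marker FATAL = MarkerFactory.getMarker("FATAL");

    LOGGER.error(FATAL, "Couldn't abort this server to preserve index writes, "
            + "attempting to hard kill the server", e);
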
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
index 290e1be..42880ee 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
@@ -17,8 +17,6 @@ import java.util.Set;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.Stoppable;
@@ -38,6 +36,8 @@ import org.apache.phoenix.hbase.index.table.HTableInterfaceReference;
 import org.apache.phoenix.hbase.index.util.KeyValueBuilder;
 import org.apache.phoenix.index.PhoenixIndexFailurePolicy;
 import org.apache.phoenix.util.IndexUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Multimap;
 
@@ -56,7 +56,7 @@ public class ParallelWriterIndexCommitter implements IndexCommitter {
     public static final String NUM_CONCURRENT_INDEX_WRITER_THREADS_CONF_KEY = "index.writer.threads.max";
     private static final int DEFAULT_CONCURRENT_INDEX_WRITER_THREADS = 10;
     public static final String INDEX_WRITER_KEEP_ALIVE_TIME_CONF_KEY = "index.writer.threads.keepalivetime";
-    private static final Log LOG = LogFactory.getLog(ParallelWriterIndexCommitter.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(ParallelWriterIndexCommitter.class);
 
     private HTableFactory retryingFactory;
     private HTableFactory noRetriesfactory;
@@ -144,8 +144,8 @@ public class ParallelWriterIndexCommitter implements IndexCommitter {
                     // early exit, if that's the case
                     throwFailureIfDone();
 
-                    if (LOG.isTraceEnabled()) {
-                        LOG.trace("Writing index update:" + mutations + " to table: " + tableReference);
+                    if (LOGGER.isTraceEnabled()) {
+                        LOGGER.trace("Writing index update:" + mutations + " to table: " + tableReference);
                     }
                     HTableInterface table = null;
                     try {
@@ -159,8 +159,8 @@ public class ParallelWriterIndexCommitter implements IndexCommitter {
                                 return null;
                             } catch (IOException ignord) {
                                 // when it's failed we fall back to the standard & slow way
-                                if (LOG.isDebugEnabled()) {
-                                    LOG.debug("indexRegion.batchMutate failed and fall back to HTable.batch(). Got error="
+                                if (LOGGER.isDebugEnabled()) {
+                                    LOGGER.debug("indexRegion.batchMutate failed and fall back to HTable.batch(). Got error="
+                                    LOGGER.debug("indexRegion.batchMutate failed; falling back to HTable.batch(). Got error="
                                 }
                             }
@@ -201,7 +201,7 @@ public class ParallelWriterIndexCommitter implements IndexCommitter {
         } catch (EarlyExitFailure e) {
             propagateFailure(e);
         } catch (ExecutionException e) {
-            LOG.error("Found a failed index update!");
+            LOGGER.error("Found a failed index update!");
             propagateFailure(e.getCause());
         }
 
@@ -229,7 +229,7 @@ public class ParallelWriterIndexCommitter implements IndexCommitter {
      */
     @Override
     public void stop(String why) {
-        LOG.info("Shutting down " + this.getClass().getSimpleName() + " because " + why);
+        LOGGER.info("Shutting down " + this.getClass().getSimpleName() + " because " + why);
         this.pool.stop(why);
         this.retryingFactory.shutdown();
         this.noRetriesfactory.shutdown();
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java
index fb96666..db7e6a0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java
@@ -24,8 +24,6 @@ import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
@@ -33,6 +31,8 @@ import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.hbase.index.exception.MultiIndexWriteFailureException;
 import org.apache.phoenix.hbase.index.table.HTableInterfaceReference;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.ArrayListMultimap;
 import com.google.common.collect.Multimap;
@@ -45,7 +45,7 @@ import com.google.common.collect.Multimap;
  */
 public class RecoveryIndexWriter extends IndexWriter {
 
-    private static final Log LOG = LogFactory.getLog(RecoveryIndexWriter.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(RecoveryIndexWriter.class);
     private Set<HTableInterfaceReference> nonExistingTablesList = new HashSet<HTableInterfaceReference>();
     private HBaseAdmin admin;
 
@@ -71,7 +71,7 @@ public class RecoveryIndexWriter extends IndexWriter {
         } catch (MultiIndexWriteFailureException e) {
             for (HTableInterfaceReference table : e.getFailedTables()) {
                 if (!admin.tableExists(table.getTableName())) {
-                    LOG.warn("Failure due to non existing table: " + table.getTableName());
+                    LOGGER.warn("Failure due to non-existent table: " + table.getTableName());
                     nonExistingTablesList.add(table);
                 } else {
                     throw e;
@@ -101,7 +101,7 @@ public class RecoveryIndexWriter extends IndexWriter {
             ImmutableBytesPtr ptr = new ImmutableBytesPtr(tableName);
             HTableInterfaceReference table = tables.get(ptr);
             if (nonExistingTablesList.contains(table)) {
-                LOG.debug("Edits found for non existing table: " + table.getTableName() + " so skipping it!!");
+                LOGGER.debug("Edits found for non-existent table: " + table.getTableName() + ", skipping it");
                 continue;
             }
             if (table == null) {
@@ -121,7 +121,7 @@ public class RecoveryIndexWriter extends IndexWriter {
             try {
                 admin.close();
             } catch (IOException e) {
-                LOG.error("Closing the admin failed: ", e);
+                LOGGER.error("Closing the admin failed: ", e);
             }
         }
     }
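
As in the calls above, slf4j prints the full stack trace whenever the last argument is a Throwable, and since slf4j 1.6 that also combines with {} placeholders. A short sketch (not part of this commit; the combined form is illustrative, not a call this patch makes):

    // Throwable as the last argument: message plus stack trace.
    LOGGER.error("Closing the admin failed", e);
    // Placeholders and a trailing Throwable can be mixed in a single call.
    LOGGER.warn("Failure due to non-existent table: {}", table.getTableName(), e);
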
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
index 4fa2d0b..a0032ad 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
@@ -19,8 +19,6 @@ import java.util.Set;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.Stoppable;
@@ -43,6 +41,8 @@ import org.apache.phoenix.hbase.index.table.HTableInterfaceReference;
 import org.apache.phoenix.hbase.index.util.KeyValueBuilder;
 import org.apache.phoenix.index.PhoenixIndexFailurePolicy;
 import org.apache.phoenix.util.IndexUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Multimap;
 
@@ -68,7 +68,7 @@ import com.google.common.collect.Multimap;
  * client.
  */
 public class TrackingParallelWriterIndexCommitter implements IndexCommitter {
-    private static final Log LOG = LogFactory.getLog(TrackingParallelWriterIndexCommitter.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(TrackingParallelWriterIndexCommitter.class);
 
     public static final String NUM_CONCURRENT_INDEX_WRITER_THREADS_CONF_KEY = "index.writer.threads.max";
     private static final int DEFAULT_CONCURRENT_INDEX_WRITER_THREADS = 10;
@@ -168,15 +168,15 @@ public class TrackingParallelWriterIndexCommitter implements IndexCommitter {
                                 return Boolean.TRUE;
                             } catch (IOException ignord) {
                                 // when it's failed we fall back to the standard & slow way
-                                if (LOG.isTraceEnabled()) {
-                                    LOG.trace("indexRegion.batchMutate failed and fall back to HTable.batch(). Got error="
+                                if (LOGGER.isTraceEnabled()) {
+                                    LOGGER.trace("indexRegion.batchMutate failed; falling back to HTable.batch(). Got error="
                                             + ignord);
                                 }
                             }
                         }
 
-                        if (LOG.isTraceEnabled()) {
-                            LOG.trace("Writing index update:" + mutations + " to table: " + tableReference);
+                        if (LOGGER.isTraceEnabled()) {
+                            LOGGER.trace("Writing index update:" + mutations + " to table: " + tableReference);
                         }
                         // if the client can retry index writes, then we don't need to retry here
                         HTableFactory factory = clientVersion < MetaDataProtocol.MIN_CLIENT_RETRY_INDEX_WRITES ? retryingFactory : noRetriesFactory;
@@ -207,7 +207,7 @@ public class TrackingParallelWriterIndexCommitter implements IndexCommitter {
 
         List<Boolean> results = null;
         try {
-            LOG.debug("Waiting on index update tasks to complete...");
+            LOGGER.debug("Waiting on index update tasks to complete...");
             results = this.pool.submitUninterruptible(tasks);
         } catch (ExecutionException e) {
             throw new RuntimeException("Should not fail on the results while using a WaitForCompletionTaskRunner", e);
@@ -240,7 +240,7 @@ public class TrackingParallelWriterIndexCommitter implements IndexCommitter {
 
     @Override
     public void stop(String why) {
-        LOG.info("Shutting down " + this.getClass().getSimpleName());
+        LOGGER.info("Shutting down " + this.getClass().getSimpleName());
         this.pool.stop(why);
         this.retryingFactory.shutdown();
         this.noRetriesFactory.shutdown();
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
index 852cd65..3d77c47 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
@@ -29,8 +29,6 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
@@ -71,6 +69,8 @@ import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.ServerUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Function;
 import com.google.common.collect.Iterables;
@@ -85,7 +85,7 @@ import com.google.common.collect.Multimap;
  *
  */
 public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
-    private static final Log LOG = LogFactory.getLog(PhoenixIndexFailurePolicy.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixIndexFailurePolicy.class);
     public static final String THROW_INDEX_WRITE_FAILURE = "THROW_INDEX_WRITE_FAILURE";
     public static final String DISABLE_INDEX_ON_WRITE_FAILURE = "DISABLE_INDEX_ON_WRITE_FAILURE";
     public static final String REBUILD_INDEX_ON_WRITE_FAILURE = "REBUILD_INDEX_ON_WRITE_FAILURE";
@@ -174,7 +174,7 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
             timestamp = handleFailureWithExceptions(attempted, cause);
             throwing = false;
         } catch (Throwable t) {
-            LOG.warn("handleFailure failed", t);
+            LOGGER.warn("handleFailure failed", t);
             super.handleFailure(attempted, cause);
             throwing = false;
         } finally {
@@ -188,7 +188,7 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
                 if (throwIndexWriteFailure) {
             		throw ioException;
             	} else {
-                    LOG.warn("Swallowing index write failure", ioException);
+                    LOGGER.warn("Swallowing index write failure", ioException);
             	}
             }
         }
@@ -282,24 +282,24 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
                         MetaDataMutationResult result = IndexUtil.updateIndexState(indexTableName, minTimeStamp,
                                 systemTable, newState);
                         if (result.getMutationCode() == MutationCode.TABLE_NOT_FOUND) {
-                            LOG.info("Index " + indexTableName + " has been dropped. Ignore uncommitted mutations");
+                            LOGGER.info("Index " + indexTableName + " has been dropped. Ignoring uncommitted mutations");
                             continue;
                         }
                         if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) {
                             if (leaveIndexActive) {
-                                LOG.warn("Attempt to update INDEX_DISABLE_TIMESTAMP " + " failed with code = "
+                                LOGGER.warn("Attempt to update INDEX_DISABLE_TIMESTAMP " + " failed with code = "
+                                LOGGER.warn("Attempt to update INDEX_DISABLE_TIMESTAMP failed with code = "
                                 // If we're not disabling the index, then we don't want to throw as throwing
                                 // will lead to the RS being shutdown.
                                 if (blockDataTableWritesOnFailure) { throw new DoNotRetryIOException(
                                         "Attempt to update INDEX_DISABLE_TIMESTAMP failed."); }
                             } else {
-                                LOG.warn("Attempt to disable index " + indexTableName + " failed with code = "
+                                LOGGER.warn("Attempt to disable index " + indexTableName + " failed with code = "
                                         + result.getMutationCode() + ". Will use default failure policy instead.");
                                 throw new DoNotRetryIOException("Attempt to disable " + indexTableName + " failed.");
                             }
                         }
-                        LOG.info("Successfully update INDEX_DISABLE_TIMESTAMP for " + indexTableName
+                        LOGGER.info("Successfully updated INDEX_DISABLE_TIMESTAMP for " + indexTableName
                                 + " due to an exception while writing updates. indexState=" + newState,
                             cause);
                     } catch (Throwable t) {
@@ -351,7 +351,7 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
                                         mutation.getRow().length - offset));
                 String indexTableName = localIndexNames.get(new ImmutableBytesWritable(viewId));
                 if (indexTableName == null) {
-                    LOG.error("Unable to find local index on " + ref.getTableName() + " with viewID of " + Bytes.toStringBinary(viewId));
+                    LOGGER.error("Unable to find local index on " + ref.getTableName() + " with viewID of " + Bytes.toStringBinary(viewId));
                 } else {
                     indexTableNames.add(indexTableName);
                 }
@@ -437,7 +437,7 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
                 }
             }
         } catch (Exception handleE) {
-            LOG.warn("Error while trying to handle index write exception", indexWriteException);
+            LOGGER.warn("Error while trying to handle index write exception", indexWriteException);
         }
     }
 
@@ -532,7 +532,7 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
                 }
             }
         } catch (Exception handleE) {
-            LOG.warn("Error while trying to handle index write exception", indexWriteException);
+            LOGGER.warn("Error while trying to handle index write exception", indexWriteException);
         }
     }
 
@@ -571,11 +571,11 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
         decrementCounterForIndex(conn,indexFullName);
         Long indexDisableTimestamp = null;
         if (PIndexState.DISABLE.equals(indexState)) {
-            LOG.info("Disabling index after hitting max number of index write retries: "
+            LOGGER.info("Disabling index after hitting max number of index write retries: "
                     + indexFullName);
             IndexUtil.updateIndexState(conn, indexFullName, indexState, indexDisableTimestamp);
         } else if (PIndexState.ACTIVE.equals(indexState)) {
-            LOG.debug("Resetting index to active after subsequent success " + indexFullName);
+            LOGGER.debug("Resetting index to active after subsequent success " + indexFullName);
             //At server disabled timestamp will be reset only if there is no other client is in PENDING_DISABLE state
             indexDisableTimestamp = 0L;
             try {
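
The hunks above, and the files that follow, all make the same mechanical change: the commons-logging imports and the static Log field are replaced by their slf4j equivalents, and call sites are renamed from LOG to LOGGER. A minimal sketch of the pattern, using an illustrative class name rather than a file from this commit:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class SomeServerComponent {
        // was: private static final Log LOG = LogFactory.getLog(SomeServerComponent.class);
        private static final Logger LOGGER =
                LoggerFactory.getLogger(SomeServerComponent.class);

        void handleFailure(Throwable cause) {
            // slf4j has the same message-plus-throwable overloads as commons-logging,
            // so existing call sites only need the field rename
            LOGGER.warn("handleFailure failed", cause);
        }
    }
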
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java
index 3543da8..642080b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java
@@ -29,8 +29,6 @@ import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.HConstants;
@@ -60,6 +58,8 @@ import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.ServerUtil;
 import org.apache.phoenix.util.ServerUtil.ConnectionType;
 import org.apache.phoenix.util.TransactionUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Do all the work of managing local index updates for a transactional table from a single coprocessor. Since the transaction
@@ -69,7 +69,7 @@ import org.apache.phoenix.util.TransactionUtil;
  */
 public class PhoenixTransactionalIndexer extends BaseRegionObserver {
 
-    private static final Log LOG = LogFactory.getLog(PhoenixTransactionalIndexer.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixTransactionalIndexer.class);
 
     // Hack to get around not being able to save any state between
     // coprocessor calls. TODO: remove after HBASE-18127 when available
@@ -199,7 +199,7 @@ public class PhoenixTransactionalIndexer extends BaseRegionObserver {
             TracingUtils.addAnnotation(current, "index update count", context.indexUpdates.size());
         } catch (Throwable t) {
             String msg = "Failed to update index with entries:" + indexUpdates;
-            LOG.error(msg, t);
+            LOGGER.error(msg, t);
             ServerUtil.throwIOException(msg, t);
         }
     }
@@ -226,7 +226,7 @@ public class PhoenixTransactionalIndexer extends BaseRegionObserver {
             }
         } catch (Throwable t) {
             String msg = "Failed to write index updates:" + context.indexUpdates;
-            LOG.error(msg, t);
+            LOGGER.error(msg, t);
             ServerUtil.throwIOException(msg, t);
          } finally {
              removeBatchMutateContext(c);
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/SnapshotScanner.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/SnapshotScanner.java
index 1b8735a..450d9d3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/SnapshotScanner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/SnapshotScanner.java
@@ -18,8 +18,6 @@
 
 package org.apache.phoenix.iterate;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -51,6 +49,8 @@ import org.apache.phoenix.schema.stats.StatisticsCollector;
 import org.apache.phoenix.schema.stats.StatisticsWriter;
 import org.apache.phoenix.util.ScanUtil;
 import org.apache.phoenix.util.SchemaUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -64,7 +64,7 @@ import java.util.concurrent.ExecutorService;
  */
 public class SnapshotScanner extends AbstractClientScanner {
 
-  private static final Log LOG = LogFactory.getLog(SnapshotScanner.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(SnapshotScanner.class);
   private final Scan scan;
   private RegionScanner scanner;
   private HRegion region;
@@ -74,7 +74,7 @@ public class SnapshotScanner extends AbstractClientScanner {
   public SnapshotScanner(Configuration conf, FileSystem fs, Path rootDir,
       HTableDescriptor htd, HRegionInfo hri,  Scan scan) throws Throwable{
 
-    LOG.info("Creating SnapshotScanner for region: " + hri);
+    LOGGER.info("Creating SnapshotScanner for region: " + hri);
 
     scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
     values = new ArrayList<>();
@@ -128,7 +128,7 @@ public class SnapshotScanner extends AbstractClientScanner {
         this.scanner.close();
         this.scanner = null;
       } catch (IOException e) {
-        LOG.warn("Exception while closing scanner", e);
+        LOGGER.warn("Exception while closing scanner", e);
       }
     }
     if (this.region != null) {
@@ -137,7 +137,7 @@ public class SnapshotScanner extends AbstractClientScanner {
         this.region.close(true);
         this.region = null;
       } catch (IOException e) {
-        LOG.warn("Exception while closing scanner", e);
+        LOGGER.warn("Exception while closing scanner", e);
       }
     }
   }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableSnapshotResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableSnapshotResultIterator.java
index c587734..0f3d1e8 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableSnapshotResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableSnapshotResultIterator.java
@@ -18,8 +18,6 @@
 
 package org.apache.phoenix.iterate;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -34,6 +32,8 @@ import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
 import org.apache.phoenix.monitoring.ScanMetricsHolder;
 import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.util.ServerUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.sql.SQLException;
@@ -52,7 +52,7 @@ import java.util.UUID;
  */
 public class TableSnapshotResultIterator implements ResultIterator {
 
-  private static final Log LOG = LogFactory.getLog(TableSnapshotResultIterator.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(TableSnapshotResultIterator.class);
 
   private final Scan scan;
   private ResultIterator scanIterator;
@@ -101,7 +101,7 @@ public class TableSnapshotResultIterator implements ResultIterator {
     }
 
     Collections.sort(this.regions);
-    LOG.info("Initialization complete with " + regions.size() + " valid regions");
+    LOGGER.info("Initialization complete with " + regions.size() + " valid regions");
   }
 
   /**
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java
index 2669360..7d54a2d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java
@@ -33,8 +33,6 @@ import java.util.logging.Logger;
 
 import javax.annotation.concurrent.Immutable;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.security.User;
@@ -55,8 +53,6 @@ import org.slf4j.LoggerFactory;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Maps;
 
-
-
 /**
  * 
  * Abstract base class for JDBC Driver implementation of Phoenix
@@ -68,8 +64,8 @@ import com.google.common.collect.Maps;
 public abstract class PhoenixEmbeddedDriver implements Driver, SQLCloseable {
     /**
      * The protocol for Phoenix Network Client 
-     */ 
-    private static final Log LOG = LogFactory.getLog(PhoenixEmbeddedDriver.class);
+     */
+
     private final static String DNC_JDBC_PROTOCOL_SUFFIX = "//";
     private final static String DRIVER_NAME = "PhoenixEmbeddedDriver";
     private static final String TERMINATOR = "" + PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR;
@@ -197,7 +193,7 @@ public abstract class PhoenixEmbeddedDriver implements Driver, SQLCloseable {
      * @since 0.1.1
      */
     public static class ConnectionInfo {
-        private static final org.slf4j.Logger logger = LoggerFactory.getLogger(ConnectionInfo.class);
+        private static final org.slf4j.Logger LOGGER = LoggerFactory.getLogger(ConnectionInfo.class);
         private static final Object KERBEROS_LOGIN_LOCK = new Object();
         private static final char WINDOWS_SEPARATOR_CHAR = '\\';
         private static final String REALM_EQUIVALENCY_WARNING_MSG = "Provided principal does not contan a realm and the default realm cannot be determined. Ignoring realm equivalency check.";
@@ -378,23 +374,23 @@ public abstract class PhoenixEmbeddedDriver implements Driver, SQLCloseable {
                                 currentUser = UserGroupInformation.getCurrentUser();
                                 if (!currentUser.hasKerberosCredentials() || !isSameName(currentUser.getUserName(), principal)) {
                                     final Configuration config = getConfiguration(props, info, principal, keytab);
-                                    logger.info("Trying to connect to a secure cluster as {} with keytab {}", config.get(QueryServices.HBASE_CLIENT_PRINCIPAL),
+                                    LOGGER.info("Trying to connect to a secure cluster as {} with keytab {}", config.get(QueryServices.HBASE_CLIENT_PRINCIPAL),
                                             config.get(QueryServices.HBASE_CLIENT_KEYTAB));
                                     UserGroupInformation.setConfiguration(config);
                                     User.login(config, QueryServices.HBASE_CLIENT_KEYTAB, QueryServices.HBASE_CLIENT_PRINCIPAL, null);
-                                    logger.info("Successful login to secure cluster");
+                                    LOGGER.info("Successful login to secure cluster");
                                 }
                             }
                         } else {
                             // The user already has Kerberos creds, so there isn't anything to change in the ConnectionInfo.
-                            logger.debug("Already logged in as {}", currentUser);
+                            LOGGER.debug("Already logged in as {}", currentUser);
                         }
                     } catch (IOException e) {
                         throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ESTABLISH_CONNECTION)
                             .setRootCause(e).build().buildException();
                     }
                 } else {
-                    logger.debug("Principal and keytab not provided, not attempting Kerberos login");
+                    LOGGER.debug("Principal and keytab not provided, not attempting Kerberos login");
                 }
             } // else, no connection, no need to login
             // Will use the current User from UGI
@@ -416,12 +412,12 @@ public abstract class PhoenixEmbeddedDriver implements Driver, SQLCloseable {
             try {
                 return KerberosUtil.getDefaultRealm();
             } catch (Exception e) {
-                if (LOG.isDebugEnabled()) {
+                if (LOGGER.isDebugEnabled()) {
                     // Include the stacktrace at DEBUG
-                    LOG.debug(REALM_EQUIVALENCY_WARNING_MSG, e);
+                    LOGGER.debug(REALM_EQUIVALENCY_WARNING_MSG, e);
                 } else {
                     // Limit the content at WARN
-                    LOG.warn(REALM_EQUIVALENCY_WARNING_MSG);
+                    LOGGER.warn(REALM_EQUIVALENCY_WARNING_MSG);
                 }
             }
             return null;
@@ -633,7 +629,7 @@ public abstract class PhoenixEmbeddedDriver implements Driver, SQLCloseable {
                 throw getMalFormedUrlException(url);
             }
             String znodeParent = config.get(HConstants.ZOOKEEPER_ZNODE_PARENT);
-            LOG.debug("Getting default jdbc connection url " + quorum + ":" + port + ":" + znodeParent);
+            LOGGER.debug("Getting default jdbc connection url " + quorum + ":" + port + ":" + znodeParent);
             return new ConnectionInfo(quorum, port, znodeParent);
         }
     }
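
One wrinkle in PhoenixEmbeddedDriver: the class already imports java.util.logging.Logger (java.sql.Driver#getParentLogger() returns that type), so the nested ConnectionInfo keeps its slf4j logger field fully qualified as org.slf4j.Logger instead of adding a conflicting import. A small illustrative sketch of the same situation, assuming a class that needs both types:

    import java.util.logging.Logger;   // the unqualified name "Logger" resolves to this

    public class LoggerNameClashExample {
        // fully qualified so it does not collide with the java.util.logging import above
        private static final org.slf4j.Logger LOGGER =
                org.slf4j.LoggerFactory.getLogger(LoggerNameClashExample.class);
    }
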
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixResultSet.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixResultSet.java
index b99ece6..2a9d227 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixResultSet.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixResultSet.java
@@ -51,8 +51,6 @@ import java.util.Map;
 import com.google.common.primitives.Bytes;
 import com.google.protobuf.InvalidProtocolBufferException;
 import org.apache.commons.lang.ArrayUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -94,6 +92,8 @@ import org.apache.phoenix.schema.types.PTinyint;
 import org.apache.phoenix.schema.types.PVarbinary;
 import org.apache.phoenix.schema.types.PVarchar;
 import org.apache.phoenix.util.SQLCloseable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Throwables;
@@ -127,7 +127,7 @@ import org.apache.phoenix.util.SchemaUtil;
  */
 public class PhoenixResultSet implements ResultSet, SQLCloseable {
 
-    private static final Log LOG = LogFactory.getLog(PhoenixResultSet.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixResultSet.class);
 
     private final static String STRING_FALSE = "0";
     private final static BigDecimal BIG_DECIMAL_FALSE = BigDecimal.valueOf(0);
@@ -924,7 +924,7 @@ public class PhoenixResultSet implements ResultSet, SQLCloseable {
 
     @Override
     public void setFetchSize(int rows) throws SQLException {
-        LOG.warn("Ignoring setFetchSize(" + rows + ")");
+        LOGGER.warn("Ignoring setFetchSize(" + rows + ")");
     }
 
     @Override
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/log/QueryLogger.java b/phoenix-core/src/main/java/org/apache/phoenix/log/QueryLogger.java
index ef5559c..7433f6a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/log/QueryLogger.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/log/QueryLogger.java
@@ -20,11 +20,11 @@ package org.apache.phoenix.log;
 import java.util.Map;
 import java.util.UUID;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.monitoring.MetricType;
 import org.apache.phoenix.util.EnvironmentEdgeManager;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.ImmutableMap.Builder;
@@ -41,7 +41,7 @@ public class QueryLogger {
     private LogLevel logLevel;
     private Builder<QueryLogInfo, Object> queryLogBuilder = ImmutableMap.builder();
     private boolean isSynced;
-    private static final Log LOG = LogFactory.getLog(QueryLoggerDisruptor.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(QueryLoggerDisruptor.class);
     
     private QueryLogger(PhoenixConnection connection) {
         this.queryId = UUID.randomUUID().toString();
@@ -105,15 +105,15 @@ public class QueryLogger {
         try {
             queryLogBuilder.put(queryLogInfo, info);
         } catch (Exception e) {
-            LOG.warn("Unable to add log info because of " + e.getMessage());
+            LOGGER.warn("Unable to add log info because of " + e.getMessage());
         }
     }
     
     private boolean publishLogs(RingBufferEventTranslator translator) {
         if (queryDisruptor == null) { return false; }
         boolean isLogged = queryDisruptor.tryPublish(translator);
-        if (!isLogged && LOG.isDebugEnabled()) {
-            LOG.debug("Unable to write query log in table as ring buffer queue is full!!");
+        if (!isLogged && LOGGER.isDebugEnabled()) {
+            LOGGER.debug("Unable to write query log in table as ring buffer queue is full!!");
         }
         return isLogged;
     }
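
The publishLogs() hunk keeps an explicit isDebugEnabled() check, as does getDefaultRealm() in PhoenixEmbeddedDriver earlier in this commit. With slf4j such guards are optional for correctness, since a call at a disabled level returns without formatting anything; they mainly pay off when building the arguments is itself expensive. An illustrative sketch, with a hypothetical expensiveStateDump() helper that is not part of Phoenix:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class GuardExample {
        private static final Logger LOGGER = LoggerFactory.getLogger(GuardExample.class);

        void onRingBufferFull() {
            // fine without a guard: constant message, cheap no-op when DEBUG is off
            LOGGER.debug("Unable to write query log in table as ring buffer queue is full");

            // worth guarding: the argument does real work regardless of the level
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug("Ring buffer state: {}", expensiveStateDump());
            }
        }

        private String expensiveStateDump() {
            return "...";    // hypothetical placeholder
        }
    }
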
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/log/QueryLoggerDisruptor.java b/phoenix-core/src/main/java/org/apache/phoenix/log/QueryLoggerDisruptor.java
index 1f2240e..c4f227a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/log/QueryLoggerDisruptor.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/log/QueryLoggerDisruptor.java
@@ -24,10 +24,10 @@ import java.util.concurrent.Executors;
 import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.phoenix.query.QueryServices;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import com.lmax.disruptor.BlockingWaitStrategy;
@@ -44,7 +44,7 @@ public class QueryLoggerDisruptor implements Closeable{
     private boolean isClosed = false;
     //number of elements to create within the ring buffer.
     private static final int RING_BUFFER_SIZE = 8 * 1024;
-    private static final Log LOG = LogFactory.getLog(QueryLoggerDisruptor.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(QueryLoggerDisruptor.class);
     private static final String DEFAULT_WAIT_STRATEGY = BlockingWaitStrategy.class.getName();
     
     public QueryLoggerDisruptor(Configuration configuration) throws SQLException{
@@ -76,7 +76,7 @@ public class QueryLoggerDisruptor implements Closeable{
 
         final QueryLogDetailsEventHandler[] handlers = { new QueryLogDetailsEventHandler(configuration) };
         disruptor.handleEventsWith(handlers);
-        LOG.info("Starting  QueryLoggerDisruptor for with ringbufferSize=" + disruptor.getRingBuffer().getBufferSize()
+        LOGGER.info("Starting QueryLoggerDisruptor with ringBufferSize=" + disruptor.getRingBuffer().getBufferSize()
                 + ", waitStrategy=" + waitStrategy.getClass().getSimpleName() + ", " + "exceptionHandler="
                 + errorHandler + "...");
         disruptor.start();
@@ -103,7 +103,7 @@ public class QueryLoggerDisruptor implements Closeable{
     @Override
     public void close() throws IOException {
         isClosed = true;
-        LOG.info("Shutting down QueryLoggerDisruptor..");
+        LOGGER.info("Shutting down QueryLoggerDisruptor..");
         try {
             //we can wait for 2 seconds, so that backlog can be committed
             disruptor.shutdown(2, TimeUnit.SECONDS);
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/log/TableLogWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/log/TableLogWriter.java
index 0209951..6a7c0b6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/log/TableLogWriter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/log/TableLogWriter.java
@@ -27,11 +27,11 @@ import java.sql.SQLException;
 import java.util.HashMap;
 import java.util.Map;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.phoenix.monitoring.MetricType;
 import org.apache.phoenix.util.QueryUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.ImmutableMap;
 
@@ -40,7 +40,7 @@ import com.google.common.collect.ImmutableMap;
  * 
  */
 public class TableLogWriter implements LogWriter {
-    private static final Log LOG = LogFactory.getLog(LogWriter.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(LogWriter.class);
     private Connection connection;
     private boolean isClosed;
     private PreparedStatement upsertStatement;
@@ -84,7 +84,7 @@ public class TableLogWriter implements LogWriter {
     @Override
     public void write(RingBufferEvent event) throws SQLException, IOException, ClassNotFoundException {
         if (isClosed()) {
-            LOG.warn("Unable to commit query log as Log committer is already closed");
+            LOGGER.warn("Unable to commit query log as Log committer is already closed");
             return;
         }
         if (connection == null) {
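
On logger naming: LoggerFactory.getLogger() is conventionally given the declaring class, so the logger category in the output and in log configuration matches the class that emitted the message. QueryLogger above keys its logger to QueryLoggerDisruptor.class and TableLogWriter to LogWriter.class, both carried over from the old LogFactory calls. The conventional form, shown with an illustrative class name:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class DeclaringClassExample {
        // keyed to the declaring class so the category matches the emitting class
        private static final Logger LOGGER =
                LoggerFactory.getLogger(DeclaringClassExample.class);
    }
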
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
index 8e18bf9..c5482e7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
@@ -73,7 +73,7 @@ import com.google.common.collect.Lists;
  */
 public abstract class AbstractBulkLoadTool extends Configured implements Tool {
 
-    protected static final Logger LOG = LoggerFactory.getLogger(AbstractBulkLoadTool.class);
+    protected static final Logger LOGGER = LoggerFactory.getLogger(AbstractBulkLoadTool.class);
 
     static final Option ZK_QUORUM_OPT = new Option("z", "zookeeper", true, "Supply zookeeper connection details (optional)");
     static final Option INPUT_PATH_OPT = new Option("i", "input", true, "Input path(s) (comma-separated, mandatory)");
@@ -192,10 +192,10 @@ public abstract class AbstractBulkLoadTool extends Configured implements Tool {
             // ZK_QUORUM_OPT is optional, but if it's there, use it for both the conn and the job.
             String zkQuorum = cmdLine.getOptionValue(ZK_QUORUM_OPT.getOpt());
             PhoenixDriver.ConnectionInfo info = PhoenixDriver.ConnectionInfo.create(zkQuorum);
-            LOG.info("Configuring HBase connection to {}", info);
+            LOGGER.info("Configuring HBase connection to {}", info);
             for (Map.Entry<String,String> entry : info.asProps()) {
-                if (LOG.isDebugEnabled()) {
-                    LOG.debug("Setting {} = {}", entry.getKey(), entry.getValue());
+                if (LOGGER.isDebugEnabled()) {
+                    LOGGER.debug("Setting {} = {}", entry.getKey(), entry.getValue());
                 }
                 conf.set(entry.getKey(), entry.getValue());
             }
@@ -206,8 +206,8 @@ public abstract class AbstractBulkLoadTool extends Configured implements Tool {
         }
 
         final Connection conn = QueryUtil.getConnection(conf);
-        if (LOG.isDebugEnabled()) {
-            LOG.debug("Reading columns from {} :: {}", ((PhoenixConnection) conn).getURL(),
+        if (LOGGER.isDebugEnabled()) {
+            LOGGER.debug("Reading columns from {} :: {}", ((PhoenixConnection) conn).getURL(),
                     qualifiedTableName);
         }
         List<ColumnInfo> importColumns = buildImportColumns(conn, cmdLine, qualifiedTableName);
@@ -308,7 +308,7 @@ public abstract class AbstractBulkLoadTool extends Configured implements Tool {
         // give subclasses their hook
         setupJob(job);
 
-        LOG.info("Running MapReduce import job from {} to {}", inputPaths, outputPath);
+        LOGGER.info("Running MapReduce import job from {} to {}", inputPaths, outputPath);
         boolean success = job.waitForCompletion(true);
 
         if (success) {
@@ -316,7 +316,7 @@ public abstract class AbstractBulkLoadTool extends Configured implements Tool {
                 try {
                     table = new HTable(job.getConfiguration(), qualifiedTableName);
                     if(!IndexUtil.matchingSplitKeys(splitKeysBeforeJob, table.getRegionLocator().getStartKeys())) {
-                        LOG.error("The table "
+                        LOGGER.error("The table "
                                 + qualifiedTableName
                                 + " has local indexes and there is split key mismatch before and"
                                 + " after running bulkload job. Please rerun the job otherwise"
@@ -327,11 +327,11 @@ public abstract class AbstractBulkLoadTool extends Configured implements Tool {
                     if (table != null) table.close();
                 }
             }
-            LOG.info("Loading HFiles from {}", outputPath);
+            LOGGER.info("Loading HFiles from {}", outputPath);
             completebulkload(conf,outputPath,tablesToBeLoaded);
-            LOG.info("Removing output directory {}", outputPath);
+            LOGGER.info("Removing output directory {}", outputPath);
             if(!outputPath.getFileSystem(conf).delete(outputPath, true)) {
-                LOG.error("Failed to delete the output directory {}", outputPath);
+                LOGGER.error("Failed to delete the output directory {}", outputPath);
             }
             return 0;
         } else {
@@ -350,9 +350,9 @@ public abstract class AbstractBulkLoadTool extends Configured implements Tool {
             String tableName = table.getPhysicalName();
             Path tableOutputPath = CsvBulkImportUtil.getOutputPath(outputPath, tableName);
             try(HTable htable = new HTable(conf,tableName)) {
-                LOG.info("Loading HFiles for {} from {}", tableName , tableOutputPath);
+                LOGGER.info("Loading HFiles for {} from {}", tableName , tableOutputPath);
                 loader.doBulkLoad(tableOutputPath, htable);
-                LOG.info("Incremental load complete for table=" + tableName);
+                LOGGER.info("Incremental load complete for table=" + tableName);
             }
         }
     }
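
AbstractBulkLoadTool already uses slf4j-style {} placeholders for some messages ("Configuring HBase connection to {}") while others still build the string by concatenation ("Incremental load complete for table=" + tableName). Both compile against the slf4j API; the parameterized form is the idiomatic one because the message is only assembled when the level is enabled. A short sketch under those assumptions, with illustrative parameter names:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class BulkLoadLoggingExample {
        private static final Logger LOGGER = LoggerFactory.getLogger(BulkLoadLoggingExample.class);

        void report(String tableName, String outputPath) {
            // concatenation: the string is built even when INFO is disabled
            LOGGER.info("Incremental load complete for table=" + tableName);

            // parameterized: formatting is deferred until slf4j knows INFO is enabled
            LOGGER.info("Loading HFiles for {} from {}", tableName, outputPath);
        }
    }
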
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapper.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapper.java
index 93ab188..cc23c43 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapper.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapper.java
@@ -77,7 +77,7 @@ import com.google.common.collect.Lists;
 public abstract class FormatToBytesWritableMapper<RECORD> extends Mapper<LongWritable, Text, TableRowkeyPair,
         ImmutableBytesWritable> {
 
-    protected static final Logger LOG = LoggerFactory.getLogger(FormatToBytesWritableMapper.class);
+    protected static final Logger LOGGER = LoggerFactory.getLogger(FormatToBytesWritableMapper.class);
 
     protected static final String COUNTER_GROUP_NAME = "Phoenix MapReduce Import";
 
@@ -396,7 +396,7 @@ public abstract class FormatToBytesWritableMapper<RECORD> extends Mapper<LongWri
 
         @Override
         public void errorOnRecord(T record, Throwable throwable) {
-            LOG.error("Error on record " + record, throwable);
+            LOGGER.error("Error on record " + record, throwable);
             context.getCounter(COUNTER_GROUP_NAME, "Errors on records").increment(1L);
             if (!ignoreRecordErrors) {
                 Throwables.propagate(throwable);
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueReducer.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueReducer.java
index 72af1a7..52d539b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueReducer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueReducer.java
@@ -60,7 +60,7 @@ import org.slf4j.LoggerFactory;
 public class FormatToKeyValueReducer
         extends Reducer<TableRowkeyPair, ImmutableBytesWritable, TableRowkeyPair, KeyValue> {
 
-    protected static final Logger LOG = LoggerFactory.getLogger(FormatToKeyValueReducer.class);
+    protected static final Logger LOGGER = LoggerFactory.getLogger(FormatToKeyValueReducer.class);
 
 
     protected List<String> tableNames;
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
index 30f21ce..a8de1d1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
@@ -84,7 +84,7 @@ import com.google.common.collect.Sets;
  */
 public class MultiHfileOutputFormat extends FileOutputFormat<TableRowkeyPair, Cell> {
 
-    private static final Logger LOG = LoggerFactory.getLogger(MultiHfileOutputFormat.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(MultiHfileOutputFormat.class);
 
     private static final String COMPRESSION_FAMILIES_CONF_KEY =
         "hbase.hfileoutputformat.families.compression";
@@ -196,7 +196,7 @@ public class MultiHfileOutputFormat extends FileOutputFormat<TableRowkeyPair, Ce
           private void rollWriters() throws IOException {
               for (WriterLength wl : this.writers.values()) {
                   if (wl.writer != null) {
-                      LOG.info("Writer=" + wl.writer.getPath() +
+                      LOGGER.info("Writer=" + wl.writer.getPath() +
                               ((wl.written == 0)? "": ", wrote=" + wl.written));
                       close(wl.writer);
                   }
@@ -470,7 +470,7 @@ public class MultiHfileOutputFormat extends FileOutputFormat<TableRowkeyPair, Ce
     private static void writePartitions(Configuration conf, Path partitionsPath,
             Set<TableRowkeyPair> tablesStartKeys) throws IOException {
         
-        LOG.info("Writing partition information to " + partitionsPath);
+        LOGGER.info("Writing partition information to " + partitionsPath);
         if (tablesStartKeys.isEmpty()) {
           throw new IllegalArgumentException("No regions passed");
         }
@@ -688,11 +688,11 @@ public class MultiHfileOutputFormat extends FileOutputFormat<TableRowkeyPair, Ce
                conf.set(tableName, tableDefns);
                
                TargetTableRef tbl = TargetTableRefFunctions.FROM_JSON.apply(tableDefns);
-               LOG.info(" the table logical name is "+ tbl.getLogicalName());
+               LOGGER.info(" the table logical name is "+ tbl.getLogicalName());
            }
        }
     
-       LOG.info("Configuring " + tablesStartKeys.size() + " reduce partitions to match current region count");
+       LOGGER.info("Configuring " + tablesStartKeys.size() + " reduce partitions to match current region count");
        job.setNumReduceTasks(tablesStartKeys.size());
 
        configurePartitioner(job, tablesStartKeys);
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/OrphanViewTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/OrphanViewTool.java
index fba01a3..db58deb 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/OrphanViewTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/OrphanViewTool.java
@@ -79,7 +79,7 @@ import org.slf4j.LoggerFactory;
  *
  */
 public class OrphanViewTool extends Configured implements Tool {
-    private static final Logger LOG = LoggerFactory.getLogger(OrphanViewTool.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(OrphanViewTool.class);
     // Query all the views that are not "MAPPED" views
     private static final String viewQuery = "SELECT " +
             TENANT_ID + ", " +
@@ -416,7 +416,7 @@ public class OrphanViewTool extends Configured implements Tool {
                         new DropTableStatement(pTableName, PTableType.VIEW, false, true, true));
             }
             catch (TableNotFoundException e) {
-                LOG.info("Ignoring view " + pTableName + " as it has already been dropped");
+                LOGGER.info("Ignoring view " + pTableName + " as it has already been dropped");
             }
         } finally {
             if (newConn) {
@@ -805,7 +805,7 @@ public class OrphanViewTool extends Configured implements Tool {
                 connection.close();
             }
         } catch (SQLException sqlE) {
-            LOG.error("Failed to close connection: ", sqlE);
+            LOGGER.error("Failed to close connection: ", sqlE);
             throw new RuntimeException("Failed to close connection with exception: ", sqlE);
         }
     }
@@ -883,7 +883,7 @@ public class OrphanViewTool extends Configured implements Tool {
             }
             return 0;
         } catch (Exception ex) {
-            LOG.error("Orphan View Tool : An exception occurred " + ExceptionUtils.getMessage(ex) + " at:\n" +
+            LOGGER.error("Orphan View Tool : An exception occurred " + ExceptionUtils.getMessage(ex) + " at:\n" +
                     ExceptionUtils.getStackTrace(ex));
             return -1;
         } finally {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
index 136548e..4153e1a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
@@ -25,8 +25,6 @@ import java.util.Collections;
 import java.util.List;
 import java.util.Properties;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.TableName;
@@ -53,6 +51,8 @@ import org.apache.phoenix.query.HBaseFactoryProvider;
 import org.apache.phoenix.query.KeyRange;
 import org.apache.phoenix.schema.stats.StatisticsUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
@@ -63,7 +63,7 @@ import com.google.common.collect.Lists;
  */
 public class PhoenixInputFormat<T extends DBWritable> extends InputFormat<NullWritable,T> {
 
-    private static final Log LOG = LogFactory.getLog(PhoenixInputFormat.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixInputFormat.class);
        
     /**
      * instantiated by framework
@@ -122,8 +122,8 @@ public class PhoenixInputFormat<T extends DBWritable> extends InputFormat<NullWr
 
             if(splitByStats) {
                 for(Scan aScan: scans) {
-                    if (LOG.isDebugEnabled()) {
-                        LOG.debug("Split for  scan : " + aScan + "with scanAttribute : " + aScan
+                    if (LOGGER.isDebugEnabled()) {
+                        LOGGER.debug("Split for  scan : " + aScan + "with scanAttribute : " + aScan
                                 .getAttributesMap() + " [scanCache, cacheBlock, scanBatch] : [" +
                                 aScan.getCaching() + ", " + aScan.getCacheBlocks() + ", " + aScan
                                 .getBatch() + "] and  regionLocation : " + regionLocation);
@@ -132,18 +132,18 @@ public class PhoenixInputFormat<T extends DBWritable> extends InputFormat<NullWr
                     psplits.add(new PhoenixInputSplit(Collections.singletonList(aScan), regionSize, regionLocation));
                 }
                 } else {
-                if (LOG.isDebugEnabled()) {
-                    LOG.debug("Scan count[" + scans.size() + "] : " + Bytes.toStringBinary(scans
+                if (LOGGER.isDebugEnabled()) {
+                    LOGGER.debug("Scan count[" + scans.size() + "] : " + Bytes.toStringBinary(scans
                             .get(0).getStartRow()) + " ~ " + Bytes.toStringBinary(scans.get(scans
                             .size() - 1).getStopRow()));
-                    LOG.debug("First scan : " + scans.get(0) + "with scanAttribute : " + scans
+                    LOGGER.debug("First scan : " + scans.get(0) + "with scanAttribute : " + scans
                             .get(0).getAttributesMap() + " [scanCache, cacheBlock, scanBatch] : " +
                             "[" + scans.get(0).getCaching() + ", " + scans.get(0).getCacheBlocks()
                             + ", " + scans.get(0).getBatch() + "] and  regionLocation : " +
                             regionLocation);
 
                     for (int i = 0, limit = scans.size(); i < limit; i++) {
-                        LOG.debug("EXPECTED_UPPER_REGION_KEY[" + i + "] : " + Bytes
+                        LOGGER.debug("EXPECTED_UPPER_REGION_KEY[" + i + "] : " + Bytes
                                 .toStringBinary(scans.get(i).getAttribute
                                         (BaseScannerRegionObserver.EXPECTED_UPPER_REGION_KEY)));
                     }
@@ -219,7 +219,7 @@ public class PhoenixInputFormat<T extends DBWritable> extends InputFormat<NullWr
               return queryPlan;
             }
         } catch (Exception exception) {
-            LOG.error(String.format("Failed to get the query plan with error [%s]",
+            LOGGER.error(String.format("Failed to get the query plan with error [%s]",
                 exception.getMessage()));
             throw new RuntimeException(exception);
         }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixOutputFormat.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixOutputFormat.java
index 4217e40..055ce1f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixOutputFormat.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixOutputFormat.java
@@ -22,8 +22,6 @@ import java.sql.SQLException;
 import java.util.Collections;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.OutputCommitter;
@@ -31,13 +29,15 @@ import org.apache.hadoop.mapreduce.OutputFormat;
 import org.apache.hadoop.mapreduce.RecordWriter;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.mapreduce.lib.db.DBWritable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * {@link OutputFormat} implementation for Phoenix.
  *
  */
 public class PhoenixOutputFormat <T extends DBWritable> extends OutputFormat<NullWritable,T> {
-    private static final Log LOG = LogFactory.getLog(PhoenixOutputFormat.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixOutputFormat.class);
     private final Set<String> propsToIgnore;
     
     public PhoenixOutputFormat() {
@@ -65,7 +65,7 @@ public class PhoenixOutputFormat <T extends DBWritable> extends OutputFormat<Nul
         try {
             return new PhoenixRecordWriter<T>(context.getConfiguration(), propsToIgnore);
         } catch (SQLException e) {
-            LOG.error("Error calling PhoenixRecordWriter "  + e.getMessage());
+            LOGGER.error("Error calling PhoenixRecordWriter "  + e.getMessage());
             throw new RuntimeException(e);
         }
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordReader.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordReader.java
index 3c4db8c..b3493cd 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordReader.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordReader.java
@@ -22,8 +22,6 @@ import java.sql.SQLException;
 import java.util.Arrays;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -50,6 +48,8 @@ import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
 import org.apache.phoenix.monitoring.ReadMetricQueue;
 import org.apache.phoenix.monitoring.ScanMetricsHolder;
 import org.apache.phoenix.query.ConnectionQueryServices;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Preconditions;
 import com.google.common.base.Throwables;
@@ -60,7 +60,7 @@ import com.google.common.collect.Lists;
  */
 public class PhoenixRecordReader<T extends DBWritable> extends RecordReader<NullWritable,T> {
     
-    private static final Log LOG = LogFactory.getLog(PhoenixRecordReader.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixRecordReader.class);
     protected final Configuration  configuration;
     protected final QueryPlan queryPlan;
     private NullWritable key =  NullWritable.get();
@@ -83,7 +83,7 @@ public class PhoenixRecordReader<T extends DBWritable> extends RecordReader<Null
            try {
                resultIterator.close();
         } catch (SQLException e) {
-           LOG.error(" Error closing resultset.");
+           LOGGER.error(" Error closing resultset.");
            throw new RuntimeException(e);
         }
        }
@@ -109,7 +109,7 @@ public class PhoenixRecordReader<T extends DBWritable> extends RecordReader<Null
         final PhoenixInputSplit pSplit = (PhoenixInputSplit)split;
         final List<Scan> scans = pSplit.getScans();
         try {
-            LOG.info("Generating iterators for " + scans.size() + " scans in keyrange: " + pSplit.getKeyRange());
+            LOGGER.info("Generating iterators for " + scans.size() + " scans in keyrange: " + pSplit.getKeyRange());
             List<PeekingResultIterator> iterators = Lists.newArrayListWithExpectedSize(scans.size());
             StatementContext ctx = queryPlan.getContext();
             ReadMetricQueue readMetrics = ctx.getReadMetricsQueue();
@@ -135,7 +135,7 @@ public class PhoenixRecordReader<T extends DBWritable> extends RecordReader<Null
                   final TableSnapshotResultIterator tableSnapshotResultIterator = new TableSnapshotResultIterator(configuration, scan,
                       scanMetricsHolder);
                     peekingResultIterator = LookAheadResultIterator.wrap(tableSnapshotResultIterator);
-                    LOG.info("Adding TableSnapshotResultIterator for scan: " + scan);
+                    LOGGER.info("Adding TableSnapshotResultIterator for scan: " + scan);
                 } else {
                   final TableResultIterator tableResultIterator =
                       new TableResultIterator(
@@ -143,7 +143,7 @@ public class PhoenixRecordReader<T extends DBWritable> extends RecordReader<Null
                           scanMetricsHolder, renewScannerLeaseThreshold, queryPlan,
                           MapReduceParallelScanGrouper.getInstance());
                   peekingResultIterator = LookAheadResultIterator.wrap(tableResultIterator);
-                  LOG.info("Adding TableResultIterator for scan: " + scan);
+                  LOGGER.info("Adding TableResultIterator for scan: " + scan);
                 }
                 iterators.add(peekingResultIterator);
             }
@@ -157,7 +157,7 @@ public class PhoenixRecordReader<T extends DBWritable> extends RecordReader<Null
 
             this.resultSet = new PhoenixResultSet(this.resultIterator, queryPlan.getProjector().cloneIfNecessary(), queryPlan.getContext());
         } catch (SQLException e) {
-            LOG.error(String.format(" Error [%s] initializing PhoenixRecordReader. ",e.getMessage()));
+            LOGGER.error(String.format(" Error [%s] initializing PhoenixRecordReader. ",e.getMessage()));
             Throwables.propagate(e);
         }
    }
@@ -178,7 +178,7 @@ public class PhoenixRecordReader<T extends DBWritable> extends RecordReader<Null
             value.readFields(resultSet);
             return true;
         } catch (SQLException e) {
-            LOG.error(String.format(" Error [%s] occurred while iterating over the resultset. ",e.getMessage()));
+            LOGGER.error(String.format(" Error [%s] occurred while iterating over the resultset. ",e.getMessage()));
             throw new RuntimeException(e);
         }
     }
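
PhoenixRecordReader logs several failures as LOGGER.error(String.format(" Error [%s] ...", e.getMessage())), which records only the message text. slf4j can keep a parameterized message and still attach the stack trace: when the final argument is a Throwable it is logged as the exception rather than filling a placeholder. A hedged sketch of that alternative (readNextRow() is a hypothetical stand-in for value.readFields(resultSet)), not a change this commit makes:

    import java.sql.SQLException;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class IteratorLoggingExample {
        private static final Logger LOGGER = LoggerFactory.getLogger(IteratorLoggingExample.class);

        void advance() {
            try {
                readNextRow();
            } catch (SQLException e) {
                // the trailing Throwable is logged with its stack trace;
                // the {} placeholder is still filled from e.getMessage()
                LOGGER.error("Error [{}] occurred while iterating over the resultset", e.getMessage(), e);
                throw new RuntimeException(e);
            }
        }

        private void readNextRow() throws SQLException {
            // omitted
        }
    }
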
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordWriter.java
index b67ba74..6f5b84e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordWriter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordWriter.java
@@ -24,8 +24,6 @@ import java.sql.SQLException;
 import java.util.Collections;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapreduce.RecordWriter;
@@ -33,6 +31,8 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.mapreduce.lib.db.DBWritable;
 import org.apache.phoenix.mapreduce.util.ConnectionUtil;
 import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Default {@link RecordWriter} implementation from Phoenix
@@ -40,7 +40,7 @@ import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
  */
 public class PhoenixRecordWriter<T extends DBWritable>  extends RecordWriter<NullWritable, T> {
     
-    private static final Log LOG = LogFactory.getLog(PhoenixRecordWriter.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixRecordWriter.class);
     
     private final Connection conn;
     private final PreparedStatement statement;
@@ -73,7 +73,7 @@ public class PhoenixRecordWriter<T extends DBWritable>  extends RecordWriter<Nul
         try {
             conn.commit();
          } catch (SQLException e) {
-             LOG.error("SQLException while performing the commit for the task.");
+             LOGGER.error("SQLException while performing the commit for the task.");
              throw new RuntimeException(e);
           } finally {
             try {
@@ -81,7 +81,7 @@ public class PhoenixRecordWriter<T extends DBWritable>  extends RecordWriter<Nul
               conn.close();
             }
             catch (SQLException ex) {
-              LOG.error("SQLException while closing the connection for the task.");
+              LOGGER.error("SQLException while closing the connection for the task.");
               throw new RuntimeException(ex);
             }
           }
@@ -94,7 +94,7 @@ public class PhoenixRecordWriter<T extends DBWritable>  extends RecordWriter<Nul
             numRecords++;
             statement.execute();
             if (numRecords % batchSize == 0) {
-                LOG.debug("commit called on a batch of size : " + batchSize);
+                LOGGER.debug("commit called on a batch of size : " + batchSize);
                 conn.commit();
             }
         } catch (SQLException e) {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixServerBuildIndexInputFormat.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixServerBuildIndexInputFormat.java
index f8ec393..76d5a83 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixServerBuildIndexInputFormat.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixServerBuildIndexInputFormat.java
@@ -22,8 +22,6 @@ import java.sql.Connection;
 import java.sql.SQLException;
 import java.util.Properties;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -38,6 +36,8 @@ import org.apache.phoenix.mapreduce.util.ConnectionUtil;
 import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
 import org.apache.phoenix.schema.*;
 import org.apache.phoenix.util.*;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Preconditions;
 import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.getIndexToolDataTableName;
@@ -50,7 +50,7 @@ import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.getInde
 public class PhoenixServerBuildIndexInputFormat<T extends DBWritable> extends PhoenixInputFormat {
     QueryPlan queryPlan = null;
 
-    private static final Log LOG = LogFactory.getLog(PhoenixServerBuildIndexInputFormat.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixServerBuildIndexInputFormat.class);
 
     /**
      * instantiated by framework
@@ -103,7 +103,7 @@ public class PhoenixServerBuildIndexInputFormat<T extends DBWritable> extends Ph
             queryPlan.iterator(MapReduceParallelScanGrouper.getInstance());
             return queryPlan;
         } catch (Exception exception) {
-            LOG.error(String.format("Failed to get the query plan with error [%s]",
+            LOGGER.error(String.format("Failed to get the query plan with error [%s]",
                     exception.getMessage()));
             throw new RuntimeException(exception);
         }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixTextInputFormat.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixTextInputFormat.java
index cc170f5..9f8080f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixTextInputFormat.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixTextInputFormat.java
@@ -50,7 +50,7 @@ public class PhoenixTextInputFormat extends TextInputFormat {
   }
 
   public static class PhoenixLineRecordReader extends RecordReader<LongWritable,Text> {
-    private static final Logger LOG = LoggerFactory.getLogger(PhoenixLineRecordReader.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixLineRecordReader.class);
     private final LineRecordReader rr;
     private PhoenixLineRecordReader(LineRecordReader rr) {
       this.rr = rr;
@@ -62,10 +62,10 @@ public class PhoenixTextInputFormat extends TextInputFormat {
       final Configuration conf = context.getConfiguration();
       final FileSplit split = (FileSplit) genericSplit;
       if (conf.getBoolean(SKIP_HEADER_KEY, false) && split.getStart() == 0) {
-        LOG.trace("Consuming first key-value from {}", genericSplit);
+        LOGGER.trace("Consuming first key-value from {}", genericSplit);
         nextKeyValue();
       } else {
-        LOG.trace("Not configured to skip header or not the first input split: {}", split);
+        LOGGER.trace("Not configured to skip header or not the first input split: {}", split);
       }
     }
 
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/RegexToKeyValueMapper.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/RegexToKeyValueMapper.java
index f63923d..798183c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/RegexToKeyValueMapper.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/RegexToKeyValueMapper.java
@@ -46,7 +46,7 @@ import com.google.common.base.Preconditions;
  */
 public class RegexToKeyValueMapper extends FormatToBytesWritableMapper<Map<?, ?>> {
 
-    protected static final Logger LOG = LoggerFactory.getLogger(RegexToKeyValueMapper.class);
+    protected static final Logger LOGGER = LoggerFactory.getLogger(RegexToKeyValueMapper.class);
 
     /** Configuration key for the regex */
     public static final String REGEX_CONFKEY = "phoenix.mapreduce.import.regex";
@@ -110,7 +110,7 @@ public class RegexToKeyValueMapper extends FormatToBytesWritableMapper<Map<?, ?>
 			Map<String, Object> data = new HashMap<>();
 			Matcher m = inputPattern.matcher(input);
 			if (m.groupCount() != columnInfoList.size()) {
-				LOG.debug(String.format("based on the regex and input, input fileds %s size doesn't match the table columns %s size", m.groupCount(), columnInfoList.size()));
+				LOGGER.debug(String.format("based on the regex and input, input fileds %s size doesn't match the table columns %s size", m.groupCount(), columnInfoList.size()));
 				return data;
 			}
 			
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/DirectHTableWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/DirectHTableWriter.java
index 323a98b..8ef1836 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/DirectHTableWriter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/DirectHTableWriter.java
@@ -39,7 +39,7 @@ import com.google.common.collect.Lists;
  * Writes mutations directly to HBase using HBase front-door APIs.
  */
 public class DirectHTableWriter {
-    private static final Logger LOG = LoggerFactory.getLogger(DirectHTableWriter.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(DirectHTableWriter.class);
 
     private Configuration conf = null;
 
@@ -60,9 +60,9 @@ public class DirectHTableWriter {
         try {
             this.table = new HTable(this.conf, tableName);
             this.table.setAutoFlush(false, true);
-            LOG.info("Created table instance for " + tableName);
+            LOGGER.info("Created table instance for " + tableName);
         } catch (IOException e) {
-            LOG.error("IOException : ", e);
+            LOGGER.error("IOException : ", e);
             tryClosingResourceSilently(this.table);
             throw new RuntimeException(e);
         }
@@ -103,7 +103,7 @@ public class DirectHTableWriter {
             try {
                 res.close();
             } catch (IOException e) {
-                LOG.error("Closing resource: " + res + " failed with error: ", e);
+                LOGGER.error("Closing resource: " + res + " failed with error: ", e);
             }
         }
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyMapper.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyMapper.java
index c651077..da6e6e1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyMapper.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyMapper.java
@@ -59,7 +59,7 @@ import com.google.common.base.Joiner;
  */
 public class IndexScrutinyMapper extends Mapper<NullWritable, PhoenixIndexDBWritable, Text, Text> {
 
-    private static final Logger LOG = LoggerFactory.getLogger(IndexScrutinyMapper.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(IndexScrutinyMapper.class);
     private Connection connection;
     private List<ColumnInfo> targetTblColumnMetadata;
     private long batchSize;
@@ -146,7 +146,7 @@ public class IndexScrutinyMapper extends Mapper<NullWritable, PhoenixIndexDBWrit
                     PhoenixRuntime.generateColumnInfo(connection, qTargetTable, targetColNames);
             sourceTblColumnMetadata =
                     PhoenixRuntime.generateColumnInfo(connection, qSourceTable, sourceColNames);
-            LOG.info("Target table base query: " + targetTableQuery);
+            LOGGER.info("Target table base query: " + targetTableQuery);
             md5 = MessageDigest.getInstance("MD5");
         } catch (SQLException | NoSuchAlgorithmException e) {
             tryClosingResourceSilently(this.outputUpsertStmt);
@@ -161,7 +161,7 @@ public class IndexScrutinyMapper extends Mapper<NullWritable, PhoenixIndexDBWrit
             try {
                 res.close();
             } catch (Exception e) {
-                LOG.error("Closing resource: " + res + " failed :", e);
+                LOGGER.error("Closing resource: " + res + " failed :", e);
             }
         }
     }
@@ -184,7 +184,7 @@ public class IndexScrutinyMapper extends Mapper<NullWritable, PhoenixIndexDBWrit
             }
             context.progress(); // Make sure progress is reported to Application Master.
         } catch (SQLException | IllegalArgumentException e) {
-            LOG.error(" Error while read/write of a record ", e);
+            LOGGER.error(" Error while read/write of a record ", e);
             context.getCounter(PhoenixJobCounters.FAILED_RECORDS).increment(1);
             throw new IOException(e);
         }
@@ -200,7 +200,7 @@ public class IndexScrutinyMapper extends Mapper<NullWritable, PhoenixIndexDBWrit
                 processBatch(context);
                 connection.close();
             } catch (SQLException e) {
-                LOG.error("Error while closing connection in the PhoenixIndexMapper class ", e);
+                LOGGER.error("Error while closing connection in the PhoenixIndexMapper class ", e);
                 throwException = new IOException(e);
             }
         }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTool.java
index a1e78d5..c201f02 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTool.java
@@ -71,7 +71,7 @@ import com.google.common.collect.Lists;
  */
 public class IndexScrutinyTool extends Configured implements Tool {
 
-    private static final Logger LOG = LoggerFactory.getLogger(IndexScrutinyTool.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(IndexScrutinyTool.class);
 
     private static final Option SCHEMA_NAME_OPTION =
             new Option("s", "schema", true, "Phoenix schema name (optional)");
@@ -271,7 +271,7 @@ public class IndexScrutinyTool extends Configured implements Tool {
             final String selectQuery =
                     QueryUtil.constructSelectStatement(qSourceTable, sourceColumnNames, null,
                         Hint.NO_INDEX, true);
-            LOG.info("Query used on source table to feed the mapper: " + selectQuery);
+            LOGGER.info("Query used on source table to feed the mapper: " + selectQuery);
 
             PhoenixConfigurationUtil.setScrutinyOutputFormat(configuration, outputFormat);
             // if outputting to table, setup the upsert to the output table
@@ -280,7 +280,7 @@ public class IndexScrutinyTool extends Configured implements Tool {
                         IndexScrutinyTableOutput.constructOutputTableUpsert(sourceDynamicCols,
                             targetDynamicCols, connection);
                 PhoenixConfigurationUtil.setUpsertStatement(configuration, upsertStmt);
-                LOG.info("Upsert statement used for output table: " + upsertStmt);
+                LOGGER.info("Upsert statement used for output table: " + upsertStmt);
             }
 
             final String jobName =
@@ -375,7 +375,7 @@ public class IndexScrutinyTool extends Configured implements Tool {
             if (useTenantId) {
                 tenantId = cmdLine.getOptionValue(TENANT_ID_OPTION.getOpt());
                 configuration.set(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId);
-                LOG.info(String.format("IndexScrutinyTool uses a tenantId %s", tenantId));
+                LOGGER.info(String.format("IndexScrutinyTool uses a tenantId %s", tenantId));
             }
             connection = ConnectionUtil.getInputConnection(configuration);
             final String schemaName = cmdLine.getOptionValue(SCHEMA_NAME_OPTION.getOpt());
@@ -431,7 +431,7 @@ public class IndexScrutinyTool extends Configured implements Tool {
                 }
             }
 
-            LOG.info(String.format(
+            LOGGER.info(String.format(
                 "Running scrutiny [schemaName=%s, dataTable=%s, indexTable=%s, useSnapshot=%s, timestamp=%s, batchSize=%s, outputBasePath=%s, outputFormat=%s, outputMaxRows=%s]",
                 schemaName, dataTable, indexTable, useSnapshot, ts, batchSize, basePath,
                 outputFormat, outputMaxRows));
@@ -451,13 +451,13 @@ public class IndexScrutinyTool extends Configured implements Tool {
             }
 
             if (!isForeground) {
-                LOG.info("Running Index Scrutiny in Background - Submit async and exit");
+                LOGGER.info("Running Index Scrutiny in Background - Submit async and exit");
                 for (Job job : jobs) {
                     job.submit();
                 }
                 return 0;
             }
-            LOG.info(
+            LOGGER.info(
                 "Running Index Scrutiny in Foreground. Waits for the build to complete. This may take a long time!.");
             boolean result = true;
             for (Job job : jobs) {
@@ -466,7 +466,7 @@ public class IndexScrutinyTool extends Configured implements Tool {
 
             // write the results to the output metadata table
             if (outputInvalidRows && OutputFormat.TABLE.equals(outputFormat)) {
-                LOG.info("Writing results of jobs to output table "
+                LOGGER.info("Writing results of jobs to output table "
                         + IndexScrutinyTableOutput.OUTPUT_METADATA_TABLE_NAME);
                 IndexScrutinyTableOutput.writeJobResults(connection, args, jobs);
             }
@@ -474,11 +474,11 @@ public class IndexScrutinyTool extends Configured implements Tool {
             if (result) {
                 return 0;
             } else {
-                LOG.error("IndexScrutinyTool job failed! Check logs for errors..");
+                LOGGER.error("IndexScrutinyTool job failed! Check logs for errors..");
                 return -1;
             }
         } catch (Exception ex) {
-            LOG.error("An exception occurred while performing the indexing job: "
+            LOGGER.error("An exception occurred while performing the indexing job: "
                     + ExceptionUtils.getMessage(ex) + " at:\n" + ExceptionUtils.getStackTrace(ex));
             return -1;
         } finally {
@@ -487,7 +487,7 @@ public class IndexScrutinyTool extends Configured implements Tool {
                     connection.close();
                 }
             } catch (SQLException sqle) {
-                LOG.error("Failed to close connection ", sqle.getMessage());
+                LOGGER.error("Failed to close connection ", sqle.getMessage());
                 throw new RuntimeException("Failed to close connection");
             }
         }
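
A side note on the error calls in this class and in the mapper classes later in this patch: when the last argument to LOGGER.error(...) is the Throwable itself, slf4j prints the full stack trace, whereas passing e.getMessage() as an extra argument (as some of the migrated call sites still do, e.g. LOGGER.error("Failed to close connection ", sqle.getMessage())) logs at most the message text, and where there is no {} placeholder the extra argument is simply ignored. A minimal sketch of the conventional pattern, using a hypothetical helper class purely for illustration:

    import java.sql.Connection;
    import java.sql.SQLException;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Hypothetical example class, not part of this commit.
    public class ConnectionCloser {
        private static final Logger LOGGER = LoggerFactory.getLogger(ConnectionCloser.class);

        static void closeQuietly(Connection connection) {
            if (connection == null) {
                return;
            }
            try {
                connection.close();
            } catch (SQLException e) {
                // The {} placeholder consumes the first argument; the trailing Throwable
                // is recognized by slf4j and logged together with its stack trace.
                LOGGER.error("Failed to close connection {}", connection, e);
            }
        }
    }
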
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
index 16fb538..2cd841f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
@@ -115,7 +115,7 @@ import com.google.common.collect.Lists;
  */
 public class IndexTool extends Configured implements Tool {
 
-    private static final Logger LOG = LoggerFactory.getLogger(IndexTool.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(IndexTool.class);
 
     private String schemaName;
     private String dataTable;
@@ -654,7 +654,7 @@ public class IndexTool extends Configured implements Tool {
                     int autosplitNumRegions = nOpt == null ? DEFAULT_AUTOSPLIT_NUM_REGIONS : Integer.parseInt(nOpt);
                     String rateOpt = cmdLine.getOptionValue(SPLIT_INDEX_OPTION.getOpt());
                     double samplingRate = rateOpt == null ? DEFAULT_SPLIT_SAMPLING_RATE : Double.parseDouble(rateOpt);
-                    LOG.info(String.format("Will split index %s , autosplit=%s , autoSplitNumRegions=%s , samplingRate=%s", indexTable, autosplit, autosplitNumRegions, samplingRate));
+                    LOGGER.info(String.format("Will split index %s , autosplit=%s , autoSplitNumRegions=%s , samplingRate=%s", indexTable, autosplit, autosplitNumRegions, samplingRate));
 
                     splitIndexTable(connection.unwrap(PhoenixConnection.class), autosplit, autosplitNumRegions, samplingRate, configuration);
                 }
@@ -672,11 +672,11 @@ public class IndexTool extends Configured implements Tool {
             job = jobFactory.getJob();
 
             if (!isForeground && useDirectApi) {
-                LOG.info("Running Index Build in Background - Submit async and exit");
+                LOGGER.info("Running Index Build in Background - Submit async and exit");
                 job.submit();
                 return 0;
             }
-            LOG.info("Running Index Build in Foreground. Waits for the build to complete. This may take a long time!.");
+            LOGGER.info("Running Index Build in Foreground. Waits for the build to complete. This may take a long time!");
             boolean result = job.waitForCompletion(true);
             
             if (result) {
@@ -684,7 +684,7 @@ public class IndexTool extends Configured implements Tool {
                     if (isLocalIndexBuild) {
                         validateSplitForLocalIndex(splitKeysBeforeJob, htable);
                     }
-                    LOG.info("Loading HFiles from {}", outputPath);
+                    LOGGER.info("Loading HFiles from {}", outputPath);
                     LoadIncrementalHFiles loader = new LoadIncrementalHFiles(configuration);
                     loader.doBulkLoad(outputPath, htable);
                     htable.close();
@@ -694,11 +694,11 @@ public class IndexTool extends Configured implements Tool {
                 }
                 return 0;
             } else {
-                LOG.error("IndexTool job failed! Check logs for errors..");
+                LOGGER.error("IndexTool job failed! Check logs for errors..");
                 return -1;
             }
         } catch (Exception ex) {
-            LOG.error("An exception occurred while performing the indexing job: "
+            LOGGER.error("An exception occurred while performing the indexing job: "
                     + ExceptionUtils.getMessage(ex) + " at:\n" + ExceptionUtils.getStackTrace(ex));
             return -1;
         } finally {
@@ -708,7 +708,7 @@ public class IndexTool extends Configured implements Tool {
                     try {
                         connection.close();
                     } catch (SQLException e) {
-                        LOG.error("Failed to close connection ", e);
+                        LOGGER.error("Failed to close connection ", e);
                         rethrowException = true;
                     }
                 }
@@ -716,7 +716,7 @@ public class IndexTool extends Configured implements Tool {
                     try {
                         htable.close();
                     } catch (IOException e) {
-                        LOG.error("Failed to close htable ", e);
+                        LOGGER.error("Failed to close htable ", e);
                         rethrowException = true;
                     }
                 }
@@ -724,7 +724,7 @@ public class IndexTool extends Configured implements Tool {
                     try {
                         jobFactory.closeConnection();
                     } catch (SQLException e) {
-                        LOG.error("Failed to close jobFactory ", e);
+                        LOGGER.error("Failed to close jobFactory ", e);
                         rethrowException = true;
                     }
                 }
@@ -764,7 +764,7 @@ public class IndexTool extends Configured implements Tool {
                         .getTable(pDataTable.getPhysicalName().getBytes())) {
             numRegions = hDataTable.getRegionLocator().getStartKeys().length;
             if (autosplit && !(numRegions > autosplitNumRegions)) {
-                LOG.info(String.format(
+                LOGGER.info(String.format(
                     "Will not split index %s because the data table only has %s regions, autoSplitNumRegions=%s",
                     pIndexTable.getPhysicalName(), numRegions, autosplitNumRegions));
                 return; // do nothing if # of regions is too low
@@ -850,7 +850,7 @@ public class IndexTool extends Configured implements Tool {
             String errMsg = "The index to build is local index and the split keys are not matching"
                     + " before and after running the job. Please rerun the job otherwise"
                     + " there may be inconsistencies between actual data and index data";
-            LOG.error(errMsg);
+            LOGGER.error(errMsg);
             throw new Exception(errMsg);
         }
         return true;
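
Many of the info calls in IndexTool and IndexScrutinyTool wrap String.format(...) inside LOGGER.info(...). That works, but it renders the message even when INFO logging is disabled. A small illustrative sketch of the slf4j placeholder form, which defers formatting until the message is actually emitted (the class name and values below are made up):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Illustrative only; not a class from this commit.
    public class SplitLoggingExample {
        private static final Logger LOGGER = LoggerFactory.getLogger(SplitLoggingExample.class);

        public static void main(String[] args) {
            String indexTable = "MY_INDEX";
            boolean autosplit = true;
            int autosplitNumRegions = 20;
            double samplingRate = 0.05d;

            // Eager: String.format builds the message even if INFO is disabled.
            LOGGER.info(String.format(
                "Will split index %s , autosplit=%s , autoSplitNumRegions=%s , samplingRate=%s",
                indexTable, autosplit, autosplitNumRegions, samplingRate));

            // Lazy: arguments are only rendered when INFO is enabled.
            LOGGER.info("Will split index {}, autosplit={}, autoSplitNumRegions={}, samplingRate={}",
                indexTable, autosplit, autosplitNumRegions, samplingRate);
        }
    }
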
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexToolUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexToolUtil.java
index 2dc7551..98ac5e9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexToolUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexToolUtil.java
@@ -38,7 +38,7 @@ public class IndexToolUtil {
 
 	private static final String ALTER_INDEX_QUERY_TEMPLATE = "ALTER INDEX IF EXISTS %s ON %s %s";  
     
-	private static final Logger LOG = LoggerFactory.getLogger(IndexToolUtil.class);
+	private static final Logger LOGGER = LoggerFactory.getLogger(IndexToolUtil.class);
 	
 	/**
 	 * Updates the index state.
@@ -74,7 +74,7 @@ public class IndexToolUtil {
         Preconditions.checkNotNull(connection);
         final String alterQuery = String.format(ALTER_INDEX_QUERY_TEMPLATE,indexTable,masterTable,state.name());
         connection.createStatement().execute(alterQuery);
-        LOG.info(" Updated the status of the index {} to {} " , indexTable , state.name());
+        LOGGER.info(" Updated the status of the index {} to {} " , indexTable , state.name());
     }
 	
 }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectMapper.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectMapper.java
index e148f67..eca3a9e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectMapper.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectMapper.java
@@ -52,7 +52,7 @@ import org.slf4j.LoggerFactory;
 public class PhoenixIndexImportDirectMapper extends
         Mapper<NullWritable, PhoenixIndexDBWritable, ImmutableBytesWritable, IntWritable> {
 
-    private static final Logger LOG = LoggerFactory.getLogger(PhoenixIndexImportDirectMapper.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixIndexImportDirectMapper.class);
 
     private final PhoenixIndexDBWritable indxWritable = new PhoenixIndexDBWritable();
 
@@ -100,7 +100,7 @@ public class PhoenixIndexImportDirectMapper extends
             //Get batch size in terms of bytes
             batchSizeBytes = ((PhoenixConnection) connection).getMutateBatchSizeBytes();
 
-            LOG.info("Mutation Batch Size = " + batchSize);
+            LOGGER.info("Mutation Batch Size = " + batchSize);
 
             final String upsertQuery = PhoenixConfigurationUtil.getUpsertStatement(configuration);
             this.pStatement = connection.prepareStatement(upsertQuery);
@@ -138,7 +138,7 @@ public class PhoenixIndexImportDirectMapper extends
             // Make sure progress is reported to Application Master.
             context.progress();
         } catch (SQLException e) {
-            LOG.error(" Error {}  while read/write of a record ", e.getMessage());
+            LOGGER.error(" Error {}  while read/write of a record ", e.getMessage());
             context.getCounter(PhoenixJobCounters.FAILED_RECORDS).increment(currentBatchCount);
             throw new RuntimeException(e);
         }
@@ -176,7 +176,7 @@ public class PhoenixIndexImportDirectMapper extends
                 new IntWritable(0));
             super.cleanup(context);
         } catch (SQLException e) {
-            LOG.error(" Error {}  while read/write of a record ", e.getMessage());
+            LOGGER.error(" Error {}  while read/write of a record ", e.getMessage());
             context.getCounter(PhoenixJobCounters.FAILED_RECORDS).increment(currentBatchCount);
             throw new RuntimeException(e);
         } finally {
@@ -189,7 +189,7 @@ public class PhoenixIndexImportDirectMapper extends
             try {
                 this.connection.close();
             } catch (SQLException e) {
-                LOG.error("Error while closing connection in the PhoenixIndexMapper class ", e);
+                LOGGER.error("Error while closing connection in the PhoenixIndexMapper class ", e);
             }
         }
         if (this.writer != null) {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectReducer.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectReducer.java
index 57688fd..0813620 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectReducer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectReducer.java
@@ -49,7 +49,7 @@ import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.MAPREDU
 public class PhoenixIndexImportDirectReducer extends
         Reducer<ImmutableBytesWritable, IntWritable, NullWritable, NullWritable> {
 
-    private static final Logger LOG = LoggerFactory.getLogger(PhoenixIndexImportDirectReducer.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixIndexImportDirectReducer.class);
 
     @Override
     protected void cleanup(Context context) throws IOException, InterruptedException{
@@ -58,7 +58,7 @@ public class PhoenixIndexImportDirectReducer extends
 
             updateTasksTable(context);
         } catch (SQLException e) {
-            LOG.error(" Failed to update the status to Active");
+            LOGGER.error(" Failed to update the status to Active");
             throw new RuntimeException(e.getMessage());
         }
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportMapper.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportMapper.java
index 88ddc2b..b1a14b4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportMapper.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportMapper.java
@@ -58,7 +58,7 @@ import com.google.common.collect.Lists;
  */
 public class PhoenixIndexImportMapper extends Mapper<NullWritable, PhoenixIndexDBWritable, ImmutableBytesWritable, KeyValue> {
 
-    private static final Logger LOG = LoggerFactory.getLogger(PhoenixIndexImportMapper.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixIndexImportMapper.class);
     
     private final PhoenixIndexDBWritable indxWritable = new PhoenixIndexDBWritable();
     
@@ -155,7 +155,7 @@ public class PhoenixIndexImportMapper extends Mapper<NullWritable, PhoenixIndexD
             }
             connection.rollback();
        } catch (SQLException e) {
-           LOG.error("Error {}  while read/write of a record ",e.getMessage());
+           LOGGER.error("Error {}  while read/write of a record ",e.getMessage());
            context.getCounter(PhoenixJobCounters.FAILED_RECORDS).increment(1);
            throw new RuntimeException(e);
         } 
@@ -172,7 +172,7 @@ public class PhoenixIndexImportMapper extends Mapper<NullWritable, PhoenixIndexD
             try {
                 connection.close();
             } catch (SQLException e) {
-                LOG.error("Error while closing connection in the PhoenixIndexMapper class ", e);
+                LOGGER.error("Error while closing connection in the PhoenixIndexMapper class ", e);
             }
         }
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexPartialBuildMapper.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexPartialBuildMapper.java
index 2077137..b168032 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexPartialBuildMapper.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexPartialBuildMapper.java
@@ -59,7 +59,7 @@ import com.google.common.collect.Lists;
  */
 public class PhoenixIndexPartialBuildMapper extends TableMapper<ImmutableBytesWritable, IntWritable> {
 
-    private static final Logger LOG = LoggerFactory.getLogger(PhoenixIndexPartialBuildMapper.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixIndexPartialBuildMapper.class);
 
     private PhoenixConnection connection;
 
@@ -92,7 +92,7 @@ public class PhoenixIndexPartialBuildMapper extends TableMapper<ImmutableBytesWr
                     services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB,
                         QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
             batchSize = Math.min(connection.getMutateBatchSize(), maxSize);
-            LOG.info("Mutation Batch Size = " + batchSize);
+            LOGGER.info("Mutation Batch Size = " + batchSize);
             this.mutations = Lists.newArrayListWithExpectedSize(batchSize);
             maintainers=new ImmutableBytesPtr(PhoenixConfigurationUtil.getIndexMaintainers(configuration));
         } catch (SQLException e) {
@@ -142,7 +142,7 @@ public class PhoenixIndexPartialBuildMapper extends TableMapper<ImmutableBytesWr
             // Make sure progress is reported to Application Master.
             context.progress();
         } catch (SQLException e) {
-            LOG.error(" Error {}  while read/write of a record ", e.getMessage());
+            LOGGER.error(" Error {}  while read/write of a record ", e.getMessage());
             context.getCounter(PhoenixJobCounters.FAILED_RECORDS).increment(1);
             throw new RuntimeException(e);
         }
@@ -167,7 +167,7 @@ public class PhoenixIndexPartialBuildMapper extends TableMapper<ImmutableBytesWr
                 new IntWritable(0));
             super.cleanup(context);
         } catch (SQLException e) {
-            LOG.error(" Error {}  while read/write of a record ", e.getMessage());
+            LOGGER.error(" Error {}  while read/write of a record ", e.getMessage());
             context.getCounter(PhoenixJobCounters.FAILED_RECORDS).increment(1);
             throw new RuntimeException(e);
         } finally {
@@ -180,7 +180,7 @@ public class PhoenixIndexPartialBuildMapper extends TableMapper<ImmutableBytesWr
             try {
                 this.connection.close();
             } catch (SQLException e) {
-                LOG.error("Error while closing connection in the PhoenixIndexMapper class ", e);
+                LOGGER.error("Error while closing connection in the PhoenixIndexMapper class ", e);
             }
         }
         if (this.writer != null) {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixServerBuildIndexMapper.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixServerBuildIndexMapper.java
index 34bcc9b..0544d02 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixServerBuildIndexMapper.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixServerBuildIndexMapper.java
@@ -52,7 +52,7 @@ import org.slf4j.LoggerFactory;
 public class PhoenixServerBuildIndexMapper extends
         Mapper<NullWritable, PhoenixIndexDBWritable, ImmutableBytesWritable, IntWritable> {
 
-    private static final Logger LOG = LoggerFactory.getLogger(PhoenixServerBuildIndexMapper.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixServerBuildIndexMapper.class);
 
     @Override
     protected void setup(final Context context) throws IOException, InterruptedException {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/automation/PhoenixMRJobSubmitter.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/automation/PhoenixMRJobSubmitter.java
index 31e657a..954ee23 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/automation/PhoenixMRJobSubmitter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/automation/PhoenixMRJobSubmitter.java
@@ -39,8 +39,6 @@ import java.util.concurrent.TimeoutException;
 
 import javax.security.auth.login.AppConfigurationEntry;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
@@ -60,6 +58,8 @@ import org.apache.phoenix.util.UpgradeUtil;
 import org.apache.phoenix.util.ZKBasedMasterElectionUtil;
 import org.codehaus.jettison.json.JSONArray;
 import org.codehaus.jettison.json.JSONObject;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.gson.Gson;
 import com.google.gson.GsonBuilder;
@@ -116,7 +116,7 @@ public class PhoenixMRJobSubmitter {
     private static final int JOB_SUBMIT_POOL_TIMEOUT = 5;
     private Configuration conf;
     private String zkQuorum;
-    private static final Log LOG = LogFactory.getLog(PhoenixMRJobSubmitter.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixMRJobSubmitter.class);
 
     public PhoenixMRJobSubmitter() throws IOException {
         this(null);
@@ -158,11 +158,11 @@ public class PhoenixMRJobSubmitter {
 
         switch (type) {
         case CAPACITY:
-            LOG.info("Applying the Capacity Scheduler Queue Configurations");
+            LOGGER.info("Applying the Capacity Scheduler Queue Configurations");
             PhoenixMRJobUtil.updateCapacityQueueInfo(conf);
             break;
         case FAIR:
-            LOG.warn("Fair Scheduler type is not yet supported");
+            LOGGER.warn("Fair Scheduler type is not yet supported");
             throw new IOException("Fair Scheduler is not yet supported");
         case NONE:
         default:
@@ -184,7 +184,7 @@ public class PhoenixMRJobSubmitter {
         AppConfigurationEntry entries[] =
                 javax.security.auth.login.Configuration.getConfiguration()
                         .getAppConfigurationEntry("Client");
-        LOG.info("Security - Fetched App Login Configuration Entries");
+        LOGGER.info("Security - Fetched App Login Configuration Entries");
         if (entries != null) {
             for (AppConfigurationEntry entry : entries) {
                 if (entry.getOptions().get(PRINCIPAL) != null) {
@@ -194,12 +194,12 @@ public class PhoenixMRJobSubmitter {
                     keyTabPath = (String) entry.getOptions().get(KEYTAB);
                 }
             }
-            LOG.info("Security - Got Principal = " + principal + "");
+            LOGGER.info("Security - Got Principal = " + principal + "");
             if (principal != null && keyTabPath != null) {
-                LOG.info("Security - Retreiving the TGT with principal:" + principal
+                LOGGER.info("Security - Retrieving the TGT with principal:" + principal
                         + " and keytab:" + keyTabPath);
                 UserGroupInformation.loginUserFromKeytab(principal, keyTabPath);
-                LOG.info("Security - Retrieved TGT with principal:" + principal + " and keytab:"
+                LOGGER.info("Security - Retrieved TGT with principal:" + principal + " and keytab:"
                         + keyTabPath);
             }
         }
@@ -237,7 +237,7 @@ public class PhoenixMRJobSubmitter {
 
         if (!ZKBasedMasterElectionUtil.acquireLock(zookeeperWatcher, PHOENIX_LOCKS_PARENT,
             AUTO_INDEX_BUILD_LOCK_NAME)) {
-            LOG.info("Some other node is already running Automated Index Build. Skipping execution!");
+            LOGGER.info("Some other node is already running Automated Index Build. Skipping execution!");
             return -1;
         }
         // 1) Query Phoenix SYSTEM.CATALOG table to get a list of all candidate indexes to be built
@@ -247,22 +247,22 @@ public class PhoenixMRJobSubmitter {
 
         // Get Candidate indexes to be built
         Map<String, PhoenixAsyncIndex> candidateJobs = getCandidateJobs();
-        LOG.info("Candidate Indexes to be built as seen from SYSTEM.CATALOG - " + candidateJobs);
+        LOGGER.info("Candidate Indexes to be built as seen from SYSTEM.CATALOG - " + candidateJobs);
 
         // Get already scheduled Jobs list from Yarn Resource Manager
         Set<String> submittedJobs = getSubmittedYarnApps();
-        LOG.info("Already Submitted/Running MR index build jobs - " + submittedJobs);
+        LOGGER.info("Already Submitted/Running MR index build jobs - " + submittedJobs);
 
         // Get final jobs to submit
         Set<PhoenixAsyncIndex> jobsToSchedule = getJobsToSubmit(candidateJobs, submittedJobs);
 
-        LOG.info("Final indexes to be built - " + jobsToSchedule);
+        LOGGER.info("Final indexes to be built - " + jobsToSchedule);
         List<Future<Boolean>> results = new ArrayList<Future<Boolean>>(jobsToSchedule.size());
 
         int failedJobSubmissionCount = 0;
         int timedoutJobSubmissionCount = 0;
         ExecutorService jobSubmitPool = Executors.newFixedThreadPool(10);
-        LOG.info("Attempt to submit MR index build jobs for - " + jobsToSchedule);
+        LOGGER.info("Attempt to submit MR index build jobs for - " + jobsToSchedule);
 
         try {
             for (PhoenixAsyncIndex indexToBuild : jobsToSchedule) {
@@ -285,7 +285,7 @@ public class PhoenixMRJobSubmitter {
             PhoenixMRJobUtil.shutdown(jobSubmitPool);
         }
 
-        LOG.info("Result of Attempt to Submit MR index build Jobs - Jobs attempted = "
+        LOGGER.info("Result of Attempt to Submit MR index build Jobs - Jobs attempted = "
                 + jobsToSchedule.size() + " ; Failed to Submit = " + failedJobSubmissionCount
                 + " ; Timed out = " + timedoutJobSubmissionCount);
         return failedJobSubmissionCount;
@@ -312,7 +312,7 @@ public class PhoenixMRJobSubmitter {
                 + "," + YarnApplication.state.RUNNING);
         int rmPort = PhoenixMRJobUtil.getRMPort(conf);
         String response = PhoenixMRJobUtil.getJobsInformationFromRM(rmHost, rmPort, urlParams);
-        LOG.debug("Already Submitted/Running Apps = " + response);
+        LOGGER.debug("Already Submitted/Running Apps = " + response);
         JSONObject jobsJson = new JSONObject(response);
         JSONObject appsJson = jobsJson.optJSONObject(YarnApplication.APPS_ELEMENT);
         Set<String> yarnApplicationSet = new HashSet<String>();
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java
index 83e2274..2709297 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java
@@ -28,8 +28,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Properties;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
@@ -56,6 +54,8 @@ import org.apache.phoenix.schema.TableNotFoundException;
 import org.apache.phoenix.util.ColumnInfo;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.QueryUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
@@ -68,7 +68,7 @@ import com.google.common.collect.Lists;
  */
 public final class PhoenixConfigurationUtil {
 
-    private static final Log LOG = LogFactory.getLog(PhoenixInputFormat.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixConfigurationUtil.class);
 
     public static final String SESSION_ID = "phoenix.sessionid";
     
@@ -332,7 +332,7 @@ public final class PhoenixConfigurationUtil {
             List<String> upsertColumnList =
                     PhoenixConfigurationUtil.getUpsertColumnNames(configuration);
             if(!upsertColumnList.isEmpty()) {
-                LOG.info(String.format("UseUpsertColumns=%s, upsertColumnList.size()=%s,"
+                LOGGER.info(String.format("UseUpsertColumns=%s, upsertColumnList.size()=%s,"
                                 + " upsertColumnList=%s ",!upsertColumnList.isEmpty(),
                         upsertColumnList.size(), Joiner.on(",").join(upsertColumnList)));
             }
@@ -357,11 +357,11 @@ public final class PhoenixConfigurationUtil {
         if (!upsertColumnNames.isEmpty()) {
             // Generating UPSERT statement without column name information.
             upsertStmt = QueryUtil.constructUpsertStatement(tableName, columnMetadataList);
-            LOG.info("Phoenix Custom Upsert Statement: "+ upsertStmt);
+            LOGGER.info("Phoenix Custom Upsert Statement: "+ upsertStmt);
         } else {
             // Generating UPSERT statement without column name information.
             upsertStmt = QueryUtil.constructGenericUpsertStatement(tableName, columnMetadataList.size());
-            LOG.info("Phoenix Generic Upsert Statement: " + upsertStmt);
+            LOGGER.info("Phoenix Generic Upsert Statement: " + upsertStmt);
         }
         configuration.set(UPSERT_STATEMENT, upsertStmt);
         return upsertStmt;
@@ -402,7 +402,7 @@ public final class PhoenixConfigurationUtil {
             final Configuration configuration) {
     	List<String> selectColumnList = PhoenixConfigurationUtil.getSelectColumnNames(configuration);
         if(!selectColumnList.isEmpty()) {
-            LOG.info(String.format("UseSelectColumns=%s, selectColumnList.size()=%s, selectColumnList=%s "
+            LOGGER.info(String.format("UseSelectColumns=%s, selectColumnList.size()=%s, selectColumnList=%s "
                     ,!selectColumnList.isEmpty(), selectColumnList.size(), Joiner.on(",").join(selectColumnList)
                     ));
         }
@@ -420,7 +420,7 @@ public final class PhoenixConfigurationUtil {
         final List<ColumnInfo> columnMetadataList = getSelectColumnMetadataList(configuration);
         final String conditions = configuration.get(INPUT_TABLE_CONDITIONS);
         selectStmt = QueryUtil.constructSelectStatement(tableName, columnMetadataList, conditions);
-        LOG.info("Select Statement: "+ selectStmt);
+        LOGGER.info("Select Statement: "+ selectStmt);
         configuration.set(SELECT_STATEMENT, selectStmt);
         return selectStmt;
     }
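
One detail in the hunk above: the old commons-logging declaration in PhoenixConfigurationUtil was created with PhoenixInputFormat.class, so its messages were attributed to the wrong logger category; the replacement uses the owning class, which is the usual slf4j convention. A brief sketch (hypothetical classes) of why the category matters for level configuration and filtering:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Hypothetical classes, shown only to illustrate logger categories.
    class SomeOtherClass { }

    public class LoggerCategoryExample {
        // Misattributed: messages appear under SomeOtherClass and follow its log level.
        private static final Logger MISATTRIBUTED = LoggerFactory.getLogger(SomeOtherClass.class);

        // Conventional: the category matches the emitting class, so per-class log
        // levels and grep-by-category behave as expected.
        private static final Logger LOGGER = LoggerFactory.getLogger(LoggerCategoryExample.class);

        public static void main(String[] args) {
            MISATTRIBUTED.info("Logged under category {}", MISATTRIBUTED.getName());
            LOGGER.info("Logged under category {}", LOGGER.getName());
        }
    }
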
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/metrics/Metrics.java b/phoenix-core/src/main/java/org/apache/phoenix/metrics/Metrics.java
index 24950c4..86b54df 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/metrics/Metrics.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/metrics/Metrics.java
@@ -17,14 +17,14 @@
  */
 package org.apache.phoenix.metrics;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.metrics2.MetricsSystem;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class Metrics {
 
-    private static final Log LOG = LogFactory.getLog(Metrics.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(Metrics.class);
 
   private static volatile MetricsSystem manager = DefaultMetricsSystem.instance();
 
@@ -35,13 +35,13 @@ public class Metrics {
     public static MetricsSystem initialize() {
         // if the jars aren't on the classpath, then we don't start the metrics system
         if (manager == null) {
-            LOG.warn("Phoenix metrics could not be initialized - no MetricsManager found!");
+            LOGGER.warn("Phoenix metrics could not be initialized - no MetricsManager found!");
             return null;
         }
         // only initialize the metrics system once
         synchronized (Metrics.class) {
             if (!initialized) {
-                LOG.info("Initializing metrics system: " + Metrics.METRICS_SYSTEM_NAME);
+                LOGGER.info("Initializing metrics system: " + Metrics.METRICS_SYSTEM_NAME);
                 manager.init(Metrics.METRICS_SYSTEM_NAME);
                 initialized = true;
             }
@@ -60,7 +60,7 @@ public class Metrics {
 
     public static void ensureConfigured() {
         if (!sinkInitialized) {
-            LOG.warn("Phoenix metrics2/tracing sink was not started. Should be it be?");
+            LOGGER.warn("Phoenix metrics2/tracing sink was not started. Should it be?");
         }
     }
 }
\ No newline at end of file
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalClientMetrics.java b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalClientMetrics.java
index e7c7bae..810278d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalClientMetrics.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalClientMetrics.java
@@ -114,7 +114,7 @@ public enum GlobalClientMetrics {
     GLOBAL_HBASE_COUNT_ROWS_SCANNED(COUNT_ROWS_SCANNED),
     GLOBAL_HBASE_COUNT_ROWS_FILTERED(COUNT_ROWS_FILTERED);
 
-    private static final Logger LOG = LoggerFactory.getLogger(GlobalClientMetrics.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(GlobalClientMetrics.class);
     private static final boolean isGlobalMetricsEnabled = QueryServicesOptions.withDefaults().isGlobalMetricsEnabled();
     private MetricType metricType;
     private GlobalMetric metric;
@@ -143,7 +143,7 @@ public enum GlobalClientMetrics {
     }
 
     private static MetricRegistry createMetricRegistry() {
-        LOG.info("Creating Metric Registry for Phoenix Global Metrics");
+        LOGGER.info("Creating Metric Registry for Phoenix Global Metrics");
         MetricRegistryInfo registryInfo = new MetricRegistryInfo("PHOENIX", "Phoenix Client Metrics",
                 "phoenix", "Phoenix,sub=CLIENT", true);
         return MetricRegistries.global().create(registryInfo);
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalMetricRegistriesAdapter.java b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalMetricRegistriesAdapter.java
index 381a757..3b9ec99 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalMetricRegistriesAdapter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalMetricRegistriesAdapter.java
@@ -21,8 +21,6 @@ import java.util.Iterator;
 import java.util.Map;
 import java.util.Map.Entry;
 import org.apache.commons.lang.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.metrics.Counter;
 import org.apache.hadoop.hbase.metrics.Gauge;
 import org.apache.hadoop.hbase.metrics.Histogram;
@@ -40,6 +38,8 @@ import org.apache.hadoop.metrics2.lib.Interns;
 import org.apache.hadoop.metrics2.lib.MutableHistogram;
 import org.apache.hadoop.metrics2.source.JvmMetrics;
 import org.apache.phoenix.query.QueryServicesOptions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Contents mostly copied from GlobalMetricRegistriesAdapter class from hbase-hadoop2-compat
@@ -48,7 +48,7 @@ import org.apache.phoenix.query.QueryServicesOptions;
  */
 public class GlobalMetricRegistriesAdapter {
 
-    private static final Log LOG = LogFactory.getLog(GlobalMetricRegistriesAdapter.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(GlobalMetricRegistriesAdapter.class);
     private static GlobalMetricRegistriesAdapter INSTANCE = new GlobalMetricRegistriesAdapter();
 
     private GlobalMetricRegistriesAdapter() {
@@ -62,7 +62,7 @@ public class GlobalMetricRegistriesAdapter {
 
     public void registerMetricRegistry(MetricRegistry registry) {
         if (registry == null) {
-            LOG.warn("Registry cannot be registered with Hadoop Metrics 2 since it is null.");
+            LOGGER.warn("Registry cannot be registered with Hadoop Metrics 2 since it is null.");
             return;
         }
 
@@ -74,7 +74,7 @@ public class GlobalMetricRegistriesAdapter {
      * Class to convert HBase Metric Objects to Hadoop Metrics2 Metric Objects
      */
     private static class HBaseMetrics2HadoopMetricsAdapter implements MetricsSource {
-        private static final Log LOG = LogFactory.getLog(HBaseMetrics2HadoopMetricsAdapter.class);
+        private static final Logger LOGGER = LoggerFactory.getLogger(HBaseMetrics2HadoopMetricsAdapter.class);
         private final MetricRegistry registry;
         private final String metricTag;
 
@@ -85,7 +85,7 @@ public class GlobalMetricRegistriesAdapter {
 
         private void registerToDefaultMetricsSystem() {
             MetricRegistryInfo info = registry.getMetricRegistryInfo();
-            LOG.info("Registering " + info.getMetricsJmxContext() + " " + info.getMetricsDescription() + " into DefaultMetricsSystem");
+            LOGGER.info("Registering " + info.getMetricsJmxContext() + " " + info.getMetricsDescription() + " into DefaultMetricsSystem");
             DefaultMetricsSystem.instance().register(info.getMetricsJmxContext(), info.getMetricsDescription(), this);
         }
 
@@ -117,7 +117,7 @@ public class GlobalMetricRegistriesAdapter {
                 } else if (metric instanceof Timer) {
                     this.addTimer(name, (Timer)metric, builder);
                 } else {
-                    LOG.info("Ignoring unknown Metric class " + metric.getClass().getName());
+                    LOGGER.info("Ignoring unknown Metric class " + metric.getClass().getName());
                 }
             }
         }
@@ -134,7 +134,7 @@ public class GlobalMetricRegistriesAdapter {
             } else if (o instanceof Double) {
                 builder.addGauge(info, (Double)o);
             } else {
-                LOG.warn("Ignoring Gauge (" + name + ") with unhandled type: " + o.getClass());
+                LOGGER.warn("Ignoring Gauge (" + name + ") with unhandled type: " + o.getClass());
             }
 
         }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
index 3b80cbd..5d5ab6c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
@@ -25,8 +25,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
@@ -58,6 +56,8 @@ import org.apache.phoenix.util.MetaDataUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.SchemaUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Maps;
 
@@ -66,7 +66,7 @@ import com.google.common.collect.Maps;
  */
 public class DefaultStatisticsCollector implements StatisticsCollector {
 
-    private static final Log LOG = LogFactory.getLog(DefaultStatisticsCollector.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(DefaultStatisticsCollector.class);
     
     final Map<ImmutableBytesPtr, Pair<Long, GuidePostsInfoBuilder>> guidePostsInfoWriterMap = Maps.newHashMap();
     private final Table htable;
@@ -122,7 +122,7 @@ public class DefaultStatisticsCollector implements StatisticsCollector {
         } catch (SQLException e) {
             throw new IOException(e);
         }
-        LOG.info("Initialization complete for " +
+        LOGGER.info("Initialization complete for " +
                 this.getClass() + " statistics collector for table " + tableName);
     }
 
@@ -141,12 +141,12 @@ public class DefaultStatisticsCollector implements StatisticsCollector {
     private void initGuidepostDepth() throws IOException, SQLException {
         if (guidePostPerRegionBytes != null || guidePostWidthBytes != null) {
             getGuidePostDepthFromStatement();
-            LOG.info("Guide post depth determined from SQL statement: " + guidePostDepth);
+            LOGGER.info("Guide post depth determined from SQL statement: " + guidePostDepth);
         } else {
             long guidepostWidth = getGuidePostDepthFromSystemCatalog();
             if (guidepostWidth >= 0) {
                 this.guidePostDepth = guidepostWidth;
-                LOG.info("Guide post depth determined from SYSTEM.CATALOG: " + guidePostDepth);
+                LOGGER.info("Guide post depth determined from SYSTEM.CATALOG: " + guidePostDepth);
             } else {
                 this.guidePostDepth = StatisticsUtil.getGuidePostDepth(
                         configuration.getInt(
@@ -156,7 +156,7 @@ public class DefaultStatisticsCollector implements StatisticsCollector {
                                 QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB,
                                 QueryServicesOptions.DEFAULT_STATS_GUIDEPOST_WIDTH_BYTES),
                         region.getTableDesc());
-                LOG.info("Guide post depth determined from global configuration: " + guidePostDepth);
+                LOGGER.info("Guide post depth determined from global configuration: " + guidePostDepth);
             }
         }
 
@@ -211,7 +211,7 @@ public class DefaultStatisticsCollector implements StatisticsCollector {
                 try {
                     htable.close();
                 } catch (IOException e) {
-                    LOG.warn("Failed to close " + htable.getName(), e);
+                    LOGGER.warn("Failed to close " + htable.getName(), e);
                 }
             }
         }
@@ -250,7 +250,7 @@ public class DefaultStatisticsCollector implements StatisticsCollector {
                     EnvironmentEdgeManager.currentTimeMillis(), scan);
             commitStats(mutations);
         } catch (IOException e) {
-            LOG.error("Unable to update SYSTEM.STATS table.", e);
+            LOGGER.error("Unable to update SYSTEM.STATS table.", e);
         }
     }
 
@@ -284,21 +284,21 @@ public class DefaultStatisticsCollector implements StatisticsCollector {
         for (ImmutableBytesPtr fam : fams) {
             if (delete) {
                 statsWriter.deleteStatsForRegion(region, this, fam, mutations);
-                LOG.info("Generated " + mutations.size() + " mutations to delete existing stats");
+                LOGGER.info("Generated " + mutations.size() + " mutations to delete existing stats");
             }
 
             // If we've disabled stats, don't write any, just delete them
             if (this.guidePostDepth > 0) {
                 int oldSize = mutations.size();
                 statsWriter.addStats(this, fam, mutations, guidePostDepth);
-                LOG.info("Generated " + (mutations.size() - oldSize) + " mutations for new stats");
+                LOGGER.info("Generated " + (mutations.size() - oldSize) + " mutations for new stats");
             }
         }
     }
 
     private void commitStats(List<Mutation> mutations) throws IOException {
         statsWriter.commitStats(mutations, this);
-        LOG.info("Committed " + mutations.size() + " mutations for stats");
+        LOGGER.info("Committed " + mutations.size() + " mutations for stats");
     }
 
     /**
@@ -388,7 +388,7 @@ public class DefaultStatisticsCollector implements StatisticsCollector {
 
         ImmutableBytesPtr cfKey =
                 new ImmutableBytesPtr(store.getFamily().getName());
-        LOG.info("StatisticsScanner created for table: "
+        LOGGER.info("StatisticsScanner created for table: "
                 + tableName + " CF: " + store.getColumnFamilyName());
         return new StatisticsScanner(this, statsWriter, env, delegate, cfKey);
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java
index 08c2100..1e4df2c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java
@@ -25,8 +25,6 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.Callable;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -38,12 +36,14 @@ import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.hadoop.hbase.regionserver.ScannerContext;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * The scanner that does the scanning to collect the stats during major compaction.{@link DefaultStatisticsCollector}
  */
 public class StatisticsScanner implements InternalScanner {
-    private static final Log LOG = LogFactory.getLog(StatisticsScanner.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(StatisticsScanner.class);
     private InternalScanner delegate;
     private StatisticsWriter statsWriter;
     private Region region;
@@ -95,7 +95,7 @@ public class StatisticsScanner implements InternalScanner {
         StatisticsCollectionRunTracker collectionTracker = getStatsCollectionRunTracker(config);
         StatisticsScannerCallable callable = createCallable();
         if (getRegionServerServices().isStopping() || getRegionServerServices().isStopped()) {
-            LOG.debug("Not updating table statistics because the server is stopping/stopped");
+            LOGGER.debug("Not updating table statistics because the server is stopping/stopped");
             return;
         }
         if (!async) {
@@ -149,27 +149,27 @@ public class StatisticsScanner implements InternalScanner {
                 // Just verify if this if fine
                 ArrayList<Mutation> mutations = new ArrayList<Mutation>();
 
-                if (LOG.isDebugEnabled()) {
-                    LOG.debug("Deleting the stats for the region " + regionInfo.getRegionNameAsString()
+                if (LOGGER.isDebugEnabled()) {
+                    LOGGER.debug("Deleting the stats for the region " + regionInfo.getRegionNameAsString()
                             + " as part of major compaction");
                 }
                 getStatisticsWriter().deleteStatsForRegion(region, tracker, family, mutations);
-                if (LOG.isDebugEnabled()) {
-                    LOG.debug("Adding new stats for the region " + regionInfo.getRegionNameAsString()
+                if (LOGGER.isDebugEnabled()) {
+                    LOGGER.debug("Adding new stats for the region " + regionInfo.getRegionNameAsString()
                             + " as part of major compaction");
                 }
                 getStatisticsWriter().addStats(tracker, family,
                         mutations, tracker.getGuidePostDepth());
-                if (LOG.isDebugEnabled()) {
-                    LOG.debug("Committing new stats for the region " + regionInfo.getRegionNameAsString()
+                if (LOGGER.isDebugEnabled()) {
+                    LOGGER.debug("Committing new stats for the region " + regionInfo.getRegionNameAsString()
                             + " as part of major compaction");
                 }
                 getStatisticsWriter().commitStats(mutations, tracker);
             } catch (IOException e) {
                 if (getRegionServerServices().isStopping() || getRegionServerServices().isStopped()) {
-                    LOG.debug("Ignoring error updating statistics because region is closing/closed");
+                    LOGGER.debug("Ignoring error updating statistics because region is closing/closed");
                 } else {
-                    LOG.error("Failed to update statistics table!", e);
+                    LOGGER.error("Failed to update statistics table!", e);
                     toThrow = e;
                 }
             } finally {
@@ -179,14 +179,14 @@ public class StatisticsScanner implements InternalScanner {
                     getTracker().close();// close the tracker
                 } catch (IOException e) {
                     if (toThrow == null) toThrow = e;
-                    LOG.error("Error while closing the stats table", e);
+                    LOGGER.error("Error while closing the stats table", e);
                 } finally {
                     // close the delegate scanner
                     try {
                         getDelegate().close();
                     } catch (IOException e) {
                         if (toThrow == null) toThrow = e;
-                        LOG.error("Error while closing the scanner", e);
+                        LOGGER.error("Error while closing the scanner", e);
                     } finally {
                         if (toThrow != null) { throw toThrow; }
                     }
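
The isDebugEnabled() guards kept in StatisticsScanner above are still worthwhile because those debug messages are built with string concatenation; with slf4j placeholders the guard is usually only needed when computing an argument is itself expensive. A brief sketch of the two styles, using a hypothetical class:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Hypothetical example, not part of StatisticsScanner.
    public class DebugGuardExample {
        private static final Logger LOGGER = LoggerFactory.getLogger(DebugGuardExample.class);

        static void logRegion(String regionName) {
            // Concatenation builds the String eagerly, so the guard avoids wasted
            // work when DEBUG is off.
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug("Deleting the stats for the region " + regionName
                        + " as part of major compaction");
            }

            // Placeholder form: no message String is built unless DEBUG is enabled,
            // so the explicit guard can usually be dropped.
            LOGGER.debug("Deleting the stats for the region {} as part of major compaction",
                    regionName);
        }
    }
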
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/UpdateStatisticsTool.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/UpdateStatisticsTool.java
index c55c07e..ccb3376 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/UpdateStatisticsTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/UpdateStatisticsTool.java
@@ -70,7 +70,7 @@ import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_IS_NAMESPACE
  */
 public class UpdateStatisticsTool extends Configured implements Tool {
 
-    private static final Logger LOG = LoggerFactory.getLogger(UpdateStatisticsTool.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(UpdateStatisticsTool.class);
 
     private static final Option TABLE_NAME_OPTION = new Option("t", "table", true,
             "Phoenix Table Name");
@@ -125,7 +125,7 @@ public class UpdateStatisticsTool extends Configured implements Tool {
             String physicalTableName =  SchemaUtil.getPhysicalTableName(tableName.getBytes(),
                     namespaceMapping).getNameAsString();
             admin.snapshot(snapshotName, physicalTableName);
-            LOG.info("Successfully created snapshot " + snapshotName + " for " + physicalTableName);
+            LOGGER.info("Successfully created snapshot " + snapshotName + " for " + physicalTableName);
         }
     }
 
@@ -141,7 +141,7 @@ public class UpdateStatisticsTool extends Configured implements Tool {
         try (final Connection conn = ConnectionUtil.getInputConnection(getConf())) {
             HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
             admin.deleteSnapshot(snapshotName);
-            LOG.info("Successfully deleted snapshot " + snapshotName);
+            LOGGER.info("Successfully deleted snapshot " + snapshotName);
         }
     }
 
@@ -219,23 +219,23 @@ public class UpdateStatisticsTool extends Configured implements Tool {
                 CharStream.class, TransactionSystemClient.class, TransactionNotInProgressException.class,
                 ZKClient.class, DiscoveryServiceClient.class, ZKDiscoveryService.class,
                 Cancellable.class, TTransportException.class, SpanReceiver.class, TransactionProcessor.class, Gauge.class, MetricRegistriesImpl.class);
-        LOG.info("UpdateStatisticsTool running for: " + tableName
+        LOGGER.info("UpdateStatisticsTool running for: " + tableName
                 + " on snapshot: " + snapshotName + " with restore dir: " + restoreDir);
     }
 
     private int runJob() {
         try {
             if (isForeground) {
-                LOG.info("Running UpdateStatisticsTool in Foreground. " +
+                LOGGER.info("Running UpdateStatisticsTool in Foreground. " +
                         "Runs full table scans. This may take a long time!");
                 return (job.waitForCompletion(true)) ? 0 : 1;
             } else {
-                LOG.info("Running UpdateStatisticsTool in Background - Submit async and exit");
+                LOGGER.info("Running UpdateStatisticsTool in Background - Submit async and exit");
                 job.submit();
                 return 0;
             }
         } catch (Exception e) {
-            LOG.error("Caught exception " + e + " trying to update statistics.");
+            LOGGER.error("Caught exception " + e + " trying to update statistics.");
             return 1;
         }
     }
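
For reference, the catch block above folds the exception into the message by concatenation, which drops the stack trace. slf4j loggers accept a Throwable as the trailing argument; a minimal sketch of that alternative (illustrative only, not part of this commit):

        } catch (Exception e) {
            // Passing the exception as the last argument lets the logging backend
            // print the full stack trace instead of only e.toString().
            LOGGER.error("Caught exception trying to update statistics.", e);
            return 1;
        }
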
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/tool/PhoenixCanaryTool.java b/phoenix-core/src/main/java/org/apache/phoenix/tool/PhoenixCanaryTool.java
index 865d210..ed35ec1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/tool/PhoenixCanaryTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/tool/PhoenixCanaryTool.java
@@ -234,7 +234,7 @@ public class PhoenixCanaryTool extends Configured implements Tool {
         }
     }
 
-    private static final Logger LOG = LoggerFactory.getLogger(PhoenixCanaryTool.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixCanaryTool.class);
 
     private static String getCurrentTimestamp() {
         return new SimpleDateFormat("yyyy.MM.dd.HH.mm.ss.ms").format(new Date());
@@ -289,7 +289,7 @@ public class PhoenixCanaryTool extends Configured implements Tool {
         try {
             Namespace cArgs = parseArgs(args);
             if (cArgs == null) {
-                LOG.error("Argument parsing failed.");
+                LOGGER.error("Argument parsing failed.");
                 throw new RuntimeException("Argument parsing failed");
             }
 
@@ -326,7 +326,7 @@ public class PhoenixCanaryTool extends Configured implements Tool {
             connection = getConnectionWithRetry(connectionURL);
 
             if (connection == null) {
-                LOG.error("Failed to get connection after multiple retries; the connection is null");
+                LOGGER.error("Failed to get connection after multiple retries; the connection is null");
             }
 
             SimpleTimeLimiter limiter = new SimpleTimeLimiter();
@@ -338,10 +338,10 @@ public class PhoenixCanaryTool extends Configured implements Tool {
                     sink.clearResults();
 
                     // Execute tests
-                    LOG.info("Starting UpsertTableTest");
+                    LOGGER.info("Starting UpsertTableTest");
                     sink.updateResults(new UpsertTableTest().runTest(connection));
 
-                    LOG.info("Starting ReadTableTest");
+                    LOGGER.info("Starting ReadTableTest");
                     sink.updateResults(new ReadTableTest().runTest(connection));
                     return null;
 
@@ -354,7 +354,7 @@ public class PhoenixCanaryTool extends Configured implements Tool {
             appInfo.setSuccessful(true);
 
         } catch (Exception e) {
-            LOG.error(Throwables.getStackTraceAsString(e));
+            LOGGER.error(Throwables.getStackTraceAsString(e));
             appInfo.setMessage(Throwables.getStackTraceAsString(e));
             appInfo.setSuccessful(false);
 
@@ -372,11 +372,11 @@ public class PhoenixCanaryTool extends Configured implements Tool {
         try{
             connection = getConnectionWithRetry(connectionURL, true);
         } catch (Exception e) {
-            LOG.info("Failed to get connection with namespace enabled", e);
+            LOGGER.info("Failed to get connection with namespace enabled", e);
             try {
                 connection = getConnectionWithRetry(connectionURL, false);
             } catch (Exception ex) {
-                LOG.info("Failed to get connection without namespace enabled", ex);
+                LOGGER.info("Failed to get connection without namespace enabled", ex);
             }
         }
         return connection;
@@ -392,7 +392,7 @@ public class PhoenixCanaryTool extends Configured implements Tool {
 
         RetryCounter retrier = new RetryCounter(MAX_CONNECTION_ATTEMPTS,
                 FIRST_TIME_RETRY_TIMEOUT, TimeUnit.MILLISECONDS);
-        LOG.info("Trying to get the connection with "
+        LOGGER.info("Trying to get the connection with "
                 + retrier.getMaxAttempts() + " attempts with "
                 + "connectionURL :" + connectionURL
                 + "connProps :" + connProps);
@@ -400,11 +400,11 @@ public class PhoenixCanaryTool extends Configured implements Tool {
             try {
                 connection = DriverManager.getConnection(connectionURL, connProps);
             } catch (SQLException e) {
-                LOG.info("Trying to establish connection with "
+                LOGGER.info("Trying to establish connection with "
                         + retrier.getAttemptTimes() + " attempts", e);
             }
             if (connection != null) {
-                LOG.info("Successfully established connection within "
+                LOGGER.info("Successfully established connection within "
                         + retrier.getAttemptTimes() + " attempts");
                 break;
             }
@@ -415,11 +415,11 @@ public class PhoenixCanaryTool extends Configured implements Tool {
 
     public static void main(final String[] args) {
         try {
-            LOG.info("Starting Phoenix Canary Test tool...");
+            LOGGER.info("Starting Phoenix Canary Test tool...");
             ToolRunner.run(new PhoenixCanaryTool(), args);
         } catch (Exception e) {
-            LOG.error("Error in running Phoenix Canary Test tool. " + e);
+            LOGGER.error("Error in running Phoenix Canary Test tool. " + e);
         }
-        LOG.info("Exiting Phoenix Canary Test tool...");
+        LOGGER.info("Exiting Phoenix Canary Test tool...");
     }
 }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/trace/PhoenixMetricsSink.java b/phoenix-core/src/main/java/org/apache/phoenix/trace/PhoenixMetricsSink.java
index fea6d61..a2b0e4c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/trace/PhoenixMetricsSink.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/trace/PhoenixMetricsSink.java
@@ -35,8 +35,6 @@ import java.util.List;
 import java.util.Properties;
 
 import org.apache.commons.configuration.SubsetConfiguration;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.metrics2.AbstractMetric;
@@ -56,6 +54,8 @@ import org.apache.phoenix.schema.TableNotFoundException;
 import org.apache.phoenix.trace.util.Tracing;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.QueryUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;
@@ -78,7 +78,7 @@ import com.google.common.base.Joiner;
  */
 public class PhoenixMetricsSink implements MetricsSink {
 
-    private static final Log LOG = LogFactory.getLog(PhoenixMetricsSink.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixMetricsSink.class);
 
     private static final String VARIABLE_VALUE = "?";
 
@@ -102,14 +102,14 @@ public class PhoenixMetricsSink implements MetricsSink {
     private String table;
     
     public PhoenixMetricsSink() {
-        LOG.info("Writing tracing metrics to phoenix table");
+        LOGGER.info("Writing tracing metrics to phoenix table");
 
     }
 
     @Override
     public void init(SubsetConfiguration config) {
         Metrics.markSinkInitialized();
-        LOG.info("Phoenix tracing writer started");
+        LOGGER.info("Phoenix tracing writer started");
     }
 
     /**
@@ -210,7 +210,7 @@ public class PhoenixMetricsSink implements MetricsSink {
         try {
             this.conn.commit();
         } catch (SQLException e) {
-            LOG.error("Failed to commit changes to table", e);
+            LOGGER.error("Failed to commit changes to table", e);
         }
     }
 
@@ -270,7 +270,7 @@ public class PhoenixMetricsSink implements MetricsSink {
             } else if (tag.name().equals("Context")) {
                 // ignored
             } else {
-                LOG.error("Got an unexpected tag: " + tag);
+                LOGGER.error("Got an unexpected tag: " + tag);
             }
         }
 
@@ -286,9 +286,9 @@ public class PhoenixMetricsSink implements MetricsSink {
         stmt += COMMAS.join(keys);
         stmt += ") VALUES (" + COMMAS.join(values) + ")";
 
-        if (LOG.isTraceEnabled()) {
-            LOG.trace("Logging metrics to phoenix table via: " + stmt);
-            LOG.trace("With tags: " + variableValues);
+        if (LOGGER.isTraceEnabled()) {
+            LOGGER.trace("Logging metrics to phoenix table via: " + stmt);
+            LOGGER.trace("With tags: " + variableValues);
         }
         try {
             PreparedStatement ps = conn.prepareStatement(stmt);
@@ -304,7 +304,7 @@ public class PhoenixMetricsSink implements MetricsSink {
             MutationState newState = plan.execute();
             state.join(newState);
         } catch (SQLException e) {
-            LOG.error("Could not write metric: \n" + record + " to prepared statement:\n" + stmt,
+            LOGGER.error("Could not write metric: \n" + record + " to prepared statement:\n" + stmt,
                     e);
         }
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceReader.java b/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceReader.java
index 68b945c..88cc642 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceReader.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceReader.java
@@ -28,8 +28,6 @@ import java.util.List;
 import java.util.Set;
 import java.util.TreeSet;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.htrace.Span;
 import org.apache.htrace.Trace;
 import org.apache.phoenix.jdbc.PhoenixConnection;
@@ -37,6 +35,8 @@ import org.apache.phoenix.metrics.MetricInfo;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.util.LogUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Joiner;
 import com.google.common.primitives.Longs;
@@ -46,7 +46,7 @@ import com.google.common.primitives.Longs;
  */
 public class TraceReader {
 
-    private static final Log LOG = LogFactory.getLog(TraceReader.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(TraceReader.class);
     private final Joiner comma = Joiner.on(',');
     private String knownColumns;
     {
@@ -146,7 +146,7 @@ public class TraceReader {
                     orphan.parent = spanInfo;
                     spanInfo.children.add(orphan);
                    // it's no longer an orphan
-                    LOG.trace(addCustomAnnotations("Found parent for span: " + span));
+                    LOGGER.trace(addCustomAnnotations("Found parent for span: " + span));
                     orphans.remove(i--);
                 }
             }
@@ -156,7 +156,7 @@ public class TraceReader {
                 parentSpan.children.add(spanInfo);
             } else if (parent != Span.ROOT_SPAN_ID) {
                 // add the span to the orphan pile to check for the remaining spans we see
-                LOG.info(addCustomAnnotations("No parent span found for span: " + span + " (root span id: "
+                LOGGER.info(addCustomAnnotations("No parent span found for span: " + span + " (root span id: "
                         + Span.ROOT_SPAN_ID + ")"));
                 orphans.add(spanInfo);
             }
@@ -213,7 +213,7 @@ public class TraceReader {
                         + MetricInfo.TRACE.columnName + "=" + traceid + " AND "
                         + MetricInfo.PARENT.columnName + "=" + parent + " AND "
                         + MetricInfo.SPAN.columnName + "=" + span;
-        LOG.trace(addCustomAnnotations("Requesting columns with: " + request));
+        LOGGER.trace(addCustomAnnotations("Requesting columns with: " + request));
         ResultSet results = conn.createStatement().executeQuery(request);
         List<String> cols = new ArrayList<String>();
         while (results.next()) {
@@ -222,7 +222,7 @@ public class TraceReader {
             }
         }
         if (cols.size() < count) {
-            LOG.error(addCustomAnnotations("Missing tags! Expected " + count + ", but only got " + cols.size()
+            LOGGER.error(addCustomAnnotations("Missing tags! Expected " + count + ", but only got " + cols.size()
                     + " tags from rquest " + request));
         }
         return cols;
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceSpanReceiver.java b/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceSpanReceiver.java
index 122ae28..a2b84b6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceSpanReceiver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceSpanReceiver.java
@@ -22,13 +22,13 @@ import java.util.concurrent.ArrayBlockingQueue;
 import java.util.concurrent.BlockingQueue;
 
 import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.htrace.Span;
 import org.apache.htrace.SpanReceiver;
 import org.apache.htrace.impl.MilliSpan;
 import org.apache.phoenix.metrics.MetricInfo;
 import org.apache.phoenix.query.QueryServicesOptions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Sink for request traces ({@link SpanReceiver}) that pushes writes to {@link TraceWriter} in a
@@ -64,7 +64,7 @@ import org.apache.phoenix.query.QueryServicesOptions;
  */
 public class TraceSpanReceiver implements SpanReceiver {
 
-    private static final Log LOG = LogFactory.getLog(TraceSpanReceiver.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(TraceSpanReceiver.class);
 
     private static final int CAPACITY = QueryServicesOptions.withDefaults().getTracingTraceBufferSize();
 
@@ -77,11 +77,11 @@ public class TraceSpanReceiver implements SpanReceiver {
     @Override
     public void receiveSpan(Span span) {
         if (span.getTraceId() != 0 && spanQueue.offer(span)) {
-            if (LOG.isTraceEnabled()) {
-                LOG.trace("Span buffered to queue " + span.toJson());
+            if (LOGGER.isTraceEnabled()) {
+                LOGGER.trace("Span buffered to queue " + span.toJson());
             }
-        } else if (span.getTraceId() != 0 && LOG.isDebugEnabled()) {
-                LOG.debug("Span NOT buffered due to overflow in queue " + span.toJson());
+        } else if (span.getTraceId() != 0 && LOGGER.isDebugEnabled()) {
+                LOGGER.debug("Span NOT buffered due to overflow in queue " + span.toJson());
         }
     }
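
For reference, slf4j also supports parameterized messages with {} placeholders, which defer message formatting until the level is confirmed enabled; the explicit isTraceEnabled() guard above still pays off because span.toJson() itself is expensive to compute. A minimal sketch of both idioms (illustrative only, not part of this commit):

        // Cheap argument: the {} placeholder avoids building the string when DEBUG is off.
        LOGGER.debug("Span NOT buffered due to overflow in queue, traceId={}", span.getTraceId());

        // Expensive argument: keep the guard so span.toJson() is not evaluated needlessly.
        if (LOGGER.isTraceEnabled()) {
            LOGGER.trace("Span buffered to queue {}", span.toJson());
        }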
 
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceWriter.java
index e823359..f8dc19e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceWriter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceWriter.java
@@ -38,8 +38,6 @@ import java.util.concurrent.Executors;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.util.Pair;
@@ -56,6 +54,8 @@ import org.apache.phoenix.schema.TableNotFoundException;
 import org.apache.phoenix.trace.util.Tracing;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.QueryUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;
@@ -68,7 +68,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
  * batch commit size.
  */
 public class TraceWriter {
-    private static final Log LOG = LogFactory.getLog(TraceWriter.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(TraceWriter.class);
 
     private static final String VARIABLE_VALUE = "?";
 
@@ -105,9 +105,9 @@ public class TraceWriter {
 
         traceSpanReceiver = getTraceSpanReceiver();
         if (traceSpanReceiver == null) {
-            LOG.warn(
+            LOGGER.warn(
                 "No receiver has been initialized for TraceWriter. Traces will not be written.");
-            LOG.warn("Restart Phoenix to try again.");
+            LOGGER.warn("Restart Phoenix to try again.");
             return;
         }
 
@@ -119,7 +119,7 @@ public class TraceWriter {
             executor.scheduleAtFixedRate(new FlushMetrics(), 0, 10, TimeUnit.SECONDS);
         }
 
-        LOG.info("Writing tracing metrics to phoenix table");
+        LOGGER.info("Writing tracing metrics to phoenix table");
     }
 
     @VisibleForTesting
@@ -142,8 +142,8 @@ public class TraceWriter {
             while (!traceSpanReceiver.isSpanAvailable()) {
                 Span span = traceSpanReceiver.getSpan();
                 if (null == span) break;
-                if (LOG.isTraceEnabled()) {
-                    LOG.trace("Span received: " + span.toJson());
+                if (LOGGER.isTraceEnabled()) {
+                    LOGGER.trace("Span received: " + span.toJson());
                 }
                 addToBatch(span);
                 counter++;
@@ -217,9 +217,9 @@ public class TraceWriter {
             stmt += COMMAS.join(keys);
             stmt += ") VALUES (" + COMMAS.join(values) + ")";
 
-            if (LOG.isTraceEnabled()) {
-                LOG.trace("Logging metrics to phoenix table via: " + stmt);
-                LOG.trace("With tags: " + variableValues);
+            if (LOGGER.isTraceEnabled()) {
+                LOGGER.trace("Logging metrics to phoenix table via: " + stmt);
+                LOGGER.trace("With tags: " + variableValues);
             }
             try {
                 PreparedStatement ps = conn.prepareStatement(stmt);
@@ -237,7 +237,7 @@ public class TraceWriter {
                 MutationState newState = plan.execute();
                 state.join(newState);
             } catch (SQLException e) {
-                LOG.error("Could not write metric: \n" + span + " to prepared statement:\n" + stmt,
+                LOGGER.error("Could not write metric: \n" + span + " to prepared statement:\n" + stmt,
                     e);
             }
         }
@@ -272,14 +272,14 @@ public class TraceWriter {
                 createTable(conn, tableName);
             }
 
-            LOG.info(
+            LOGGER.info(
                 "Created new connection for tracing " + conn.toString() + " Table: " + tableName);
             return conn;
         } catch (Exception e) {
-            LOG.error("Tracing will NOT be pursued. New connection failed for tracing Table: "
+            LOGGER.error("Tracing will NOT be pursued. New connection failed for tracing Table: "
                     + tableName,
                 e);
-            LOG.error("Restart Phoenix to retry.");
+            LOGGER.error("Restart Phoenix to retry.");
             return null;
         }
     }
@@ -324,7 +324,7 @@ public class TraceWriter {
         try {
             conn.commit();
         } catch (SQLException e) {
-            LOG.error(
+            LOGGER.error(
                 "Unable to commit traces on conn: " + conn.toString() + " to table: " + tableName,
                 e);
         }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/trace/util/Tracing.java b/phoenix-core/src/main/java/org/apache/phoenix/trace/util/Tracing.java
index 616ad30..f68d8cd 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/trace/util/Tracing.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/trace/util/Tracing.java
@@ -26,8 +26,6 @@ import java.util.concurrent.Callable;
 
 import javax.annotation.Nullable;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.htrace.HTraceConfiguration;
 import org.apache.phoenix.call.CallRunner;
@@ -46,6 +44,8 @@ import org.apache.htrace.impl.ProbabilitySampler;
 import org.apache.htrace.wrappers.TraceCallable;
 import org.apache.htrace.wrappers.TraceRunnable;
 import org.apache.phoenix.trace.TraceWriter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Function;
 import com.google.common.base.Preconditions;
@@ -56,7 +56,7 @@ import com.sun.istack.NotNull;
  */
 public class Tracing {
 
-    private static final Log LOG = LogFactory.getLog(Tracing.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(Tracing.class);
 
     private static final String SEPARATOR = ".";
     // Constants for tracing across the wire
@@ -275,14 +275,14 @@ public class Tracing {
                 traceWriter.start();
             }
         } catch (RuntimeException e) {
-            LOG.warn("Tracing will outputs will not be written to any metrics sink! No "
+            LOGGER.warn("Tracing will outputs will not be written to any metrics sink! No "
                     + "TraceMetricsSink found on the classpath", e);
         } catch (IllegalAccessError e) {
             // This is an issue when we have a class incompatibility error, such as when running
             // within SquirrelSQL which uses an older incompatible version of commons-collections.
             // Seeing as this only results in disabling tracing, we swallow this exception and just
             // continue on without tracing.
-            LOG.warn("Class incompatibility while initializing metrics, metrics will be disabled", e);
+            LOGGER.warn("Class incompatibility while initializing metrics, metrics will be disabled", e);
         }
         initialized = true;
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/CSVCommonsLoader.java b/phoenix-core/src/main/java/org/apache/phoenix/util/CSVCommonsLoader.java
index 23f123e..a5f0177 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/CSVCommonsLoader.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/CSVCommonsLoader.java
@@ -42,7 +42,7 @@ import com.google.common.collect.ImmutableMap;
  */
 public class CSVCommonsLoader {
 
-    private static final Logger LOG = LoggerFactory.getLogger(CSVCommonsLoader.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(CSVCommonsLoader.class);
 
     public static final String DEFAULT_ARRAY_ELEMENT_SEPARATOR = ":";
 
@@ -277,10 +277,10 @@ public class CSVCommonsLoader {
             totalUpserts = upsertCount;
             if (upsertCount % upsertBatchSize == 0) {
                 if (upsertCount % 1000 == 0) {
-                    LOG.info("Processed upsert #{}", upsertCount);
+                    LOGGER.info("Processed upsert #{}", upsertCount);
                 }
                 try {
-                    LOG.info("Committing after {} records", upsertCount);
+                    LOGGER.info("Committing after {} records", upsertCount);
                     conn.commit();
                 } catch (SQLException e) {
                     throw new RuntimeException(e);
@@ -290,7 +290,7 @@ public class CSVCommonsLoader {
 
         @Override
         public void errorOnRecord(CSVRecord csvRecord, Throwable throwable) {
-            LOG.error("Error upserting record " + csvRecord, throwable.getMessage());
+            LOGGER.error("Error upserting record " + csvRecord, throwable.getMessage());
             if (strict) {
                 Throwables.propagate(throwable);
             }
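
In the errorOnRecord method above, throwable.getMessage() is passed as the second argument without a matching {} placeholder, so slf4j omits it from the logged output. Passing the Throwable itself keeps both the message and the stack trace; a minimal sketch of that alternative (illustrative only, not part of this commit):

        @Override
        public void errorOnRecord(CSVRecord csvRecord, Throwable throwable) {
            // A Throwable in the last position is logged together with its stack trace.
            LOGGER.error("Error upserting record " + csvRecord, throwable);
            if (strict) {
                Throwables.propagate(throwable);
            }
        }
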
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/EquiDepthStreamHistogram.java b/phoenix-core/src/main/java/org/apache/phoenix/util/EquiDepthStreamHistogram.java
index 7649933..d042fac 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/EquiDepthStreamHistogram.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/EquiDepthStreamHistogram.java
@@ -22,10 +22,10 @@ import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
@@ -47,7 +47,7 @@ import com.google.common.base.Preconditions;
  *  comes out to basically O(log(T))
  */
 public class EquiDepthStreamHistogram {
-    private static final Log LOG = LogFactory.getLog(EquiDepthStreamHistogram.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(EquiDepthStreamHistogram.class);
 
     // used in maxSize calculation for each bar
     private static final double MAX_COEF = 1.7;
@@ -175,8 +175,8 @@ public class EquiDepthStreamHistogram {
         } else {
             smallerBar.incrementCount(countToDistribute);
         }
-        if (LOG.isTraceEnabled()) {
-            LOG.trace(String.format("Split orig=%s , newLeft=%s , newRight=%s", origBar, newLeft, newRight));
+        if (LOGGER.isTraceEnabled()) {
+            LOGGER.trace(String.format("Split orig=%s , newLeft=%s , newRight=%s", origBar, newLeft, newRight));
         }
         bars.remove(origBar);
         bars.add(newLeft);
@@ -230,8 +230,8 @@ public class EquiDepthStreamHistogram {
         bars.subList(currMinIdx, currMinIdx + 2).clear(); // remove minBars
         bars.add(newBar);
         Collections.sort(bars);
-        if (LOG.isTraceEnabled()) {
-            LOG.trace(String.format("Merged left=%s , right=%s , newBar=%s", leftBar, rightBar, newBar));
+        if (LOGGER.isTraceEnabled()) {
+            LOGGER.trace(String.format("Merged left=%s , right=%s , newBar=%s", leftBar, rightBar, newBar));
         }
         return true;
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixMRJobUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixMRJobUtil.java
index 13eae98..4422103 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixMRJobUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixMRJobUtil.java
@@ -31,8 +31,6 @@ import java.util.Map;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@@ -43,6 +41,8 @@ import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.ZooKeeper;
 import org.apache.zookeeper.data.Stat;
 import org.codehaus.jettison.json.JSONException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.protobuf.InvalidProtocolBufferException;
 
@@ -71,7 +71,7 @@ public class PhoenixMRJobUtil {
     public static final int RM_CONNECT_TIMEOUT_MILLIS = 10 * 1000;
     public static final int RM_READ_TIMEOUT_MILLIS = 10 * 60 * 1000;
 
-    private static final Log LOG = LogFactory.getLog(PhoenixMRJobUtil.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixMRJobUtil.class);
 
     public static final String PHOENIX_MR_SCHEDULER_TYPE_NAME = "phoenix.index.mr.scheduler.type";
 
@@ -101,11 +101,11 @@ public class PhoenixMRJobUtil {
                         byte[] data = zk.getData(path, zkw, new Stat());
                         ActiveRMInfoProto proto = ActiveRMInfoProto.parseFrom(data);
                         proto.getRmId();
-                        LOG.info("Active RmId : " + proto.getRmId());
+                        LOGGER.info("Active RmId : " + proto.getRmId());
 
                         activeRMHost =
                                 config.get(YarnConfiguration.RM_HOSTNAME + "." + proto.getRmId());
-                        LOG.info("activeResourceManagerHostname = " + activeRMHost);
+                        LOGGER.info("activeResourceManagerHostname = " + activeRMHost);
 
                     }
                 }
@@ -140,7 +140,7 @@ public class PhoenixMRJobUtil {
             }
 
             url = urlBuilder.toString();
-            LOG.info("Attempt to get running/submitted jobs information from RM URL = " + url);
+            LOGGER.info("Attempt to get running/submitted jobs information from RM URL = " + url);
 
             URL obj = new URL(url);
             con = (HttpURLConnection) obj.openConnection();
@@ -155,7 +155,7 @@ public class PhoenixMRJobUtil {
             if (con != null) con.disconnect();
         }
 
-        LOG.info("Result of attempt to get running/submitted jobs from RM - URL=" + url
+        LOGGER.info("Result of attempt to get running/submitted jobs from RM - URL=" + url
                 + ",ResponseCode=" + con.getResponseCode() + ",Response=" + response);
 
         return response;
@@ -182,16 +182,16 @@ public class PhoenixMRJobUtil {
 
     public static void shutdown(ExecutorService pool) throws InterruptedException {
         pool.shutdown();
-        LOG.debug("Shutdown called");
+        LOGGER.debug("Shutdown called");
         pool.awaitTermination(200, TimeUnit.MILLISECONDS);
-        LOG.debug("Await termination called to wait for 200 msec");
+        LOGGER.debug("Await termination called to wait for 200 msec");
         if (!pool.isShutdown()) {
             pool.shutdownNow();
-            LOG.debug("Await termination called to wait for 200 msec");
+            LOGGER.debug("Await termination called to wait for 200 msec");
             pool.awaitTermination(100, TimeUnit.MILLISECONDS);
         }
         if (!pool.isShutdown()) {
-            LOG.warn("Pool did not shutdown");
+            LOGGER.warn("Pool did not shutdown");
         }
     }
 
@@ -222,7 +222,7 @@ public class PhoenixMRJobUtil {
         conf.setInt(MRJobConfig.MAP_MEMORY_MB, mapMemoryMB);
         conf.set(MRJobConfig.MAP_JAVA_OPTS, XMX_OPT + ((int) (mapMemoryMB * 0.9)) + "m");
 
-        LOG.info("Queue Name=" + conf.get(MRJobConfig.QUEUE_NAME) + ";" + "Map Meory MB="
+        LOGGER.info("Queue Name=" + conf.get(MRJobConfig.QUEUE_NAME) + ";" + "Map Meory MB="
                 + conf.get(MRJobConfig.MAP_MEMORY_MB) + ";" + "Map Java Opts="
                 + conf.get(MRJobConfig.MAP_JAVA_OPTS));
     }
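
The concatenated message above could also use slf4j's varargs overload, which substitutes {} placeholders only when INFO is enabled. A minimal sketch (illustrative only, not part of this commit):

        LOGGER.info("Queue Name={}; Map Memory MB={}; Map Java Opts={}",
                conf.get(MRJobConfig.QUEUE_NAME),
                conf.get(MRJobConfig.MAP_MEMORY_MB),
                conf.get(MRJobConfig.MAP_JAVA_OPTS));
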
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java
index 4501158..8fe513e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java
@@ -31,8 +31,6 @@ import java.util.Properties;
 import javax.annotation.Nullable;
 
 import org.apache.commons.lang.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HConstants;
@@ -50,6 +48,8 @@ import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.schema.types.PInteger;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Function;
 import com.google.common.base.Joiner;
@@ -59,7 +59,7 @@ import com.google.common.collect.Lists;
 
 public final class QueryUtil {
 
-    private static final Log LOG = LogFactory.getLog(QueryUtil.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(QueryUtil.class);
 
     /**
      *  Column family name index within ResultSet resulting from {@link DatabaseMetaData#getColumns(String, String, String, String)}
@@ -353,7 +353,7 @@ public final class QueryUtil {
             throws SQLException, ClassNotFoundException {
         setServerConnection(props);
         String url = getConnectionUrl(props, null, principal);
-        LOG.info("Creating connection with the jdbc url: " + url);
+        LOGGER.info("Creating connection with the jdbc url: " + url);
         return DriverManager.getConnection(url, props);
     }
 
@@ -365,7 +365,7 @@ public final class QueryUtil {
     private static Connection getConnection(Properties props, Configuration conf)
             throws ClassNotFoundException, SQLException {
         String url = getConnectionUrl(props, conf);
-        LOG.info("Creating connection with the jdbc url: " + url);
+        LOGGER.info("Creating connection with the jdbc url: " + url);
         props = PropertiesUtil.combineProperties(props, conf);
         return DriverManager.getConnection(url, props);
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
index 8817c1c..39986fb 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
@@ -31,8 +31,6 @@ import java.util.concurrent.ExecutorService;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
@@ -65,10 +63,12 @@ import org.apache.phoenix.hbase.index.write.IndexWriterUtils;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.schema.StaleRegionBoundaryCacheException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @SuppressWarnings("deprecation")
 public class ServerUtil {
-    private static final Log LOG = LogFactory.getLog(ServerUtil.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(ServerUtil.class);
     private static final int COPROCESSOR_SCAN_WORKS = VersionUtil.encodeVersion("0.98.6");
     
     private static final String FORMAT = "ERROR %d (%s): %s";
@@ -350,7 +350,7 @@ public class ServerUtil {
                     try {
                         connection.close();
                     } catch (IOException e) {
-                        LOG.warn("Unable to close coprocessor connection", e);
+                        LOGGER.warn("Unable to close coprocessor connection", e);
                     }
                 }
                 connections.clear();
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/UpsertExecutor.java b/phoenix-core/src/main/java/org/apache/phoenix/util/UpsertExecutor.java
index d9ce5f2..9ef7356 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/UpsertExecutor.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/UpsertExecutor.java
@@ -61,7 +61,7 @@ public abstract class UpsertExecutor<RECORD, FIELD> implements Closeable {
         void errorOnRecord(RECORD record, Throwable throwable);
     }
 
-    private static final Logger LOG = LoggerFactory.getLogger(UpsertExecutor.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(UpsertExecutor.class);
 
     protected final Connection conn;
     protected final List<ColumnInfo> columnInfos;
@@ -77,7 +77,7 @@ public abstract class UpsertExecutor<RECORD, FIELD> implements Closeable {
         PreparedStatement preparedStatement;
         try {
             String upsertSql = QueryUtil.constructUpsertStatement(tableName, columnInfoList);
-            LOG.info("Upserting SQL data with {}", upsertSql);
+            LOGGER.info("Upserting SQL data with {}", upsertSql);
             preparedStatement = conn.prepareStatement(upsertSql);
         } catch (SQLException e) {
             throw new RuntimeException(e);
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ZKBasedMasterElectionUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ZKBasedMasterElectionUtil.java
index 69ef0b5..9e6f649 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ZKBasedMasterElectionUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ZKBasedMasterElectionUtil.java
@@ -21,39 +21,39 @@ import java.net.InetAddress;
 import java.net.UnknownHostException;
 import java.util.UUID;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.zookeeper.CreateMode;
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.ZooDefs.Ids;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class ZKBasedMasterElectionUtil {
 
-    private static final Log LOG = LogFactory.getLog(ZKBasedMasterElectionUtil.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(ZKBasedMasterElectionUtil.class);
 
     public static boolean acquireLock(ZooKeeperWatcher zooKeeperWatcher, String parentNode,
             String lockName) throws KeeperException, InterruptedException {
         // Create the parent node as Persistent
-        LOG.info("Creating the parent lock node:" + parentNode);
+        LOGGER.info("Creating the parent lock node:" + parentNode);
         ZKUtil.createWithParents(zooKeeperWatcher, parentNode);
 
         // Create the ephemeral node
         String lockNode = parentNode + "/" + lockName;
         String nodeValue = getHostName() + "_" + UUID.randomUUID().toString();
-        LOG.info("Trying to acquire the lock by creating node:" + lockNode + " value:" + nodeValue);
+        LOGGER.info("Trying to acquire the lock by creating node:" + lockNode + " value:" + nodeValue);
         // Create the ephemeral node
         try {
             zooKeeperWatcher.getRecoverableZooKeeper().create(lockNode, Bytes.toBytes(nodeValue),
                 Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
         } catch (KeeperException.NodeExistsException e) {
-            LOG.info("Could not acquire lock. Another process had already acquired the lock on Node "
+            LOGGER.info("Could not acquire lock. Another process had already acquired the lock on Node "
                     + lockName);
             return false;
         }
-        LOG.info("Obtained the lock :" + lockNode);
+        LOGGER.info("Obtained the lock :" + lockNode);
         return true;
     }
 
@@ -62,7 +62,7 @@ public class ZKBasedMasterElectionUtil {
         try {
             host = InetAddress.getLocalHost().getCanonicalHostName();
         } catch (UnknownHostException e) {
-            LOG.error("UnknownHostException while trying to get the Local Host address : ", e);
+            LOGGER.error("UnknownHostException while trying to get the Local Host address : ", e);
         }
         return host;
     }
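
acquireLock above implements a one-shot leader election: the parent node is created as a persistent znode, while the lock itself is an ephemeral znode, so it is released automatically when the owning ZooKeeper session ends. A hypothetical caller might look like the sketch below; the watcher parameter, parent path, and lock name are illustrative assumptions, not values used by Phoenix:

    public static boolean tryBecomeLeader(ZooKeeperWatcher watcher)
            throws KeeperException, InterruptedException {
        // Returns true only for the single process that manages to create the ephemeral node.
        return ZKBasedMasterElectionUtil.acquireLock(
                watcher, "/phoenix/example-election", "ELECTION_LOCK");
    }
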
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java b/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java
index d2529f7..22da227 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java
@@ -54,7 +54,7 @@ import com.google.common.base.Function;
 /** {@link UpsertExecutor} over {@link CSVRecord}s. */
 public class CsvUpsertExecutor extends UpsertExecutor<CSVRecord, String> {
 
-    private static final Logger LOG = LoggerFactory.getLogger(CsvUpsertExecutor.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(CsvUpsertExecutor.class);
 
     protected final String arrayElementSeparator;
 
@@ -95,10 +95,10 @@ public class CsvUpsertExecutor extends UpsertExecutor<CSVRecord, String> {
             preparedStatement.execute();
             upsertListener.upsertDone(++upsertCount);
         } catch (Exception e) {
-            if (LOG.isDebugEnabled()) {
+            if (LOGGER.isDebugEnabled()) {
                 // Even though this is an error we only log it with debug logging because we're notifying the
                 // listener, and it can do its own logging if needed
-                LOG.debug("Error on CSVRecord " + csvRecord, e);
+                LOGGER.debug("Error on CSVRecord " + csvRecord, e);
             }
             upsertListener.errorOnRecord(csvRecord, e);
         }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/json/JsonUpsertExecutor.java b/phoenix-core/src/main/java/org/apache/phoenix/util/json/JsonUpsertExecutor.java
index fa14079..4929993 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/json/JsonUpsertExecutor.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/json/JsonUpsertExecutor.java
@@ -52,7 +52,7 @@ import com.google.common.base.Function;
 /** {@link UpsertExecutor} over {@link Map} objects, as parsed from JSON. */
 public class JsonUpsertExecutor extends UpsertExecutor<Map<?, ?>, Object> {
 
-    protected static final Logger LOG = LoggerFactory.getLogger(JsonUpsertExecutor.class);
+    protected static final Logger LOGGER = LoggerFactory.getLogger(JsonUpsertExecutor.class);
 
     /** Testing constructor. Do not use in prod. */
     @VisibleForTesting
@@ -106,10 +106,10 @@ public class JsonUpsertExecutor extends UpsertExecutor<Map<?, ?>, Object> {
             preparedStatement.execute();
             upsertListener.upsertDone(++upsertCount);
         } catch (Exception e) {
-            if (LOG.isDebugEnabled()) {
+            if (LOGGER.isDebugEnabled()) {
                 // Even though this is an error we only log it with debug logging because we're notifying the
                 // listener, and it can do its own logging if needed
-                LOG.debug("Error on record " + record + ", fieldIndex " + fieldIndex + ", colName " + colName, e);
+                LOGGER.debug("Error on record " + record + ", fieldIndex " + fieldIndex + ", colName " + colName, e);
             }
             upsertListener.errorOnRecord(record, new Exception("fieldIndex: " + fieldIndex + ", colName " + colName, e));
         }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/regex/RegexUpsertExecutor.java b/phoenix-core/src/main/java/org/apache/phoenix/util/regex/RegexUpsertExecutor.java
index 0388d9c..05d009c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/regex/RegexUpsertExecutor.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/regex/RegexUpsertExecutor.java
@@ -33,7 +33,7 @@ import com.google.common.annotations.VisibleForTesting;
 /** {@link UpsertExecutor} over {@link Map} objects, convert input record into {@link Map} objects by using regex. */
 public class RegexUpsertExecutor extends JsonUpsertExecutor {
 
-    protected static final Logger LOG = LoggerFactory.getLogger(RegexUpsertExecutor.class);
+    protected static final Logger LOGGER = LoggerFactory.getLogger(RegexUpsertExecutor.class);
 
     /** Testing constructor. Do not use in prod. */
     @VisibleForTesting
@@ -69,10 +69,10 @@ public class RegexUpsertExecutor extends JsonUpsertExecutor {
             preparedStatement.execute();
             upsertListener.upsertDone(++upsertCount);
         } catch (Exception e) {
-            if (LOG.isDebugEnabled()) {
+            if (LOGGER.isDebugEnabled()) {
                 // Even though this is an error we only log it with debug logging because we're notifying the
                 // listener, and it can do its own logging if needed
-                LOG.debug("Error on record " + record + ", fieldIndex " + fieldIndex + ", colName " + colName, e);
+                LOGGER.debug("Error on record " + record + ", fieldIndex " + fieldIndex + ", colName " + colName, e);
             }
             upsertListener.errorOnRecord(record, new Exception("fieldIndex: " + fieldIndex + ", colName " + colName, e));
         }
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/IndexTestingUtils.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/IndexTestingUtils.java
index 7fa9c8e..26e3561 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/IndexTestingUtils.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/IndexTestingUtils.java
@@ -23,8 +23,6 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
@@ -35,15 +33,15 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec;
 import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec;
 import org.apache.hadoop.hbase.util.Bytes;
-
-
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Utility class for testing indexing
  */
 public class IndexTestingUtils {
 
-  private static final Log LOG = LogFactory.getLog(IndexTestingUtils.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(IndexTestingUtils.class);
   private static final String MASTER_INFO_PORT_KEY = "hbase.master.info.port";
   private static final String RS_INFO_PORT_KEY = "hbase.regionserver.info.port";
   
@@ -65,7 +63,7 @@ public class IndexTestingUtils {
   @SuppressWarnings("javadoc")
   public static void verifyIndexTableAtTimestamp(HTable index1, List<KeyValue> expected,
       long start, long end, byte[] startKey, byte[] endKey) throws IOException {
-    LOG.debug("Scanning " + Bytes.toString(index1.getTableName()) + " between times (" + start
+    LOGGER.debug("Scanning " + Bytes.toString(index1.getTableName()) + " between times (" + start
         + ", " + end + "] and keys: [" + Bytes.toString(startKey) + ", " + Bytes.toString(endKey)
         + "].");
     Scan s = new Scan(startKey, endKey);
@@ -76,7 +74,7 @@ public class IndexTestingUtils {
     ResultScanner scanner = index1.getScanner(s);
     for (Result r : scanner) {
       received.addAll(r.list());
-      LOG.debug("Received: " + r.list());
+      LOGGER.debug("Received: " + r.list());
     }
     scanner.close();
     assertEquals("Didn't get the expected kvs from the index table!", expected, received);
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/StubAbortable.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/StubAbortable.java
index e9e025c..90d2920 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/StubAbortable.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/StubAbortable.java
@@ -17,20 +17,20 @@
  */
 package org.apache.phoenix.hbase.index;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Abortable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
 * Test helper to stub out an {@link Abortable} when needed.
  */
 public class StubAbortable implements Abortable {
-  private static final Log LOG = LogFactory.getLog(StubAbortable.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(StubAbortable.class);
   private boolean abort;
 
   @Override
   public void abort(String reason, Throwable e) {
-    LOG.info("Aborting: " + reason, e);
+    LOGGER.info("Aborting: " + reason, e);
     abort = true;
   }
 
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java
index 58050c1..a50a267 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java
@@ -31,8 +31,6 @@ import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HTableDescriptor;
@@ -55,9 +53,11 @@ import org.junit.Test;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class TestIndexWriter {
-  private static final Log LOG = LogFactory.getLog(TestIndexWriter.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(TestIndexWriter.class);
   @Rule
   public TableName testName = new TableName();
   private final byte[] row = Bytes.toBytes("row");
@@ -90,8 +90,8 @@ public class TestIndexWriter {
   @SuppressWarnings({ "unchecked", "deprecation" })
   @Test
   public void testSynchronouslyCompletesAllWrites() throws Exception {
-    LOG.info("Starting " + testName.getTableNameString());
-    LOG.info("Current thread is interrupted: " + Thread.interrupted());
+    LOGGER.info("Starting " + testName.getTableNameString());
+    LOGGER.info("Current thread is interrupted: " + Thread.interrupted());
     Abortable abort = new StubAbortable();
     Stoppable stop = Mockito.mock(Stoppable.class);
     RegionCoprocessorEnvironment e =Mockito.mock(RegionCoprocessorEnvironment.class);
@@ -173,13 +173,13 @@ public class TestIndexWriter {
     Mockito.when(table.batch(Mockito.anyList())).thenAnswer(new Answer<Void>() {
       @Override
       public Void answer(InvocationOnMock invocation) throws Throwable {
-        LOG.info("Write started");
+        LOGGER.info("Write started");
         writeStartedLatch.countDown();
         // when we interrupt the thread for shutdown, we should see this throw an interrupt too
         try {
         waitOnAbortedLatch.await();
         } catch (InterruptedException e) {
-          LOG.info("Correctly interrupted while writing!");
+          LOGGER.info("Correctly interrupted while writing!");
           throw e;
         }
         return null;
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java
index 55c3fb3..9f63556 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java
@@ -26,8 +26,6 @@ import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HTableDescriptor;
@@ -50,13 +48,15 @@ import org.junit.Test;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.ArrayListMultimap;
 import com.google.common.collect.Multimap;
 
 public class TestParalleIndexWriter {
 
-  private static final Log LOG = LogFactory.getLog(TestParalleIndexWriter.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(TestParalleIndexWriter.class);
   @Rule
   public TableName test = new TableName();
   private final byte[] row = Bytes.toBytes("row");
@@ -85,8 +85,8 @@ public class TestParalleIndexWriter {
   @SuppressWarnings({ "unchecked", "deprecation" })
   @Test
   public void testSynchronouslyCompletesAllWrites() throws Exception {
-    LOG.info("Starting " + test.getTableNameString());
-    LOG.info("Current thread is interrupted: " + Thread.interrupted());
+    LOGGER.info("Starting " + test.getTableNameString());
+    LOGGER.info("Current thread is interrupted: " + Thread.interrupted());
     Abortable abort = new StubAbortable();
     Stoppable stop = Mockito.mock(Stoppable.class);
     ExecutorService exec = Executors.newFixedThreadPool(1);
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java
index 9767eae..59a8390 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java
@@ -26,8 +26,6 @@ import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HTableDescriptor;
@@ -49,13 +47,15 @@ import org.junit.Test;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.ArrayListMultimap;
 import com.google.common.collect.Multimap;
 
 public class TestParalleWriterIndexCommitter {
 
-  private static final Log LOG = LogFactory.getLog(TestParalleWriterIndexCommitter.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(TestParalleWriterIndexCommitter.class);
   @Rule
   public TableName test = new TableName();
   private final byte[] row = Bytes.toBytes("row");
@@ -84,8 +84,8 @@ public class TestParalleWriterIndexCommitter {
   @SuppressWarnings({ "unchecked", "deprecation" })
   @Test
   public void testSynchronouslyCompletesAllWrites() throws Exception {
-    LOG.info("Starting " + test.getTableNameString());
-    LOG.info("Current thread is interrupted: " + Thread.interrupted());
+    LOGGER.info("Starting " + test.getTableNameString());
+    LOGGER.info("Current thread is interrupted: " + Thread.interrupted());
     Abortable abort = new StubAbortable();
     RegionCoprocessorEnvironment e =Mockito.mock(RegionCoprocessorEnvironment.class);
     Configuration conf =new Configuration();
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
index b1e87e5..49d6513 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
@@ -30,8 +30,6 @@ import java.util.Set;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -71,6 +69,8 @@ import org.junit.Assert;
 import org.junit.Ignore;
 import org.junit.Rule;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Lists;
 import com.google.common.collect.Multimap;
@@ -83,7 +83,7 @@ import com.google.common.collect.Multimap;
 
 public class TestWALRecoveryCaching {
 
-  private static final Log LOG = LogFactory.getLog(TestWALRecoveryCaching.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(TestWALRecoveryCaching.class);
   private static final long ONE_SEC = 1000;
   private static final long ONE_MIN = 60 * ONE_SEC;
   private static final long TIMEOUT = ONE_MIN;
@@ -108,10 +108,10 @@ public class TestWALRecoveryCaching {
     public void preWALRestore(ObserverContext<RegionCoprocessorEnvironment> env, HRegionInfo info,
         HLogKey logKey, WALEdit logEdit) throws IOException {
       try {
-        LOG.debug("Restoring logs for index table");
+        LOGGER.debug("Restoring logs for index table");
         if (allowIndexTableToRecover != null) {
           allowIndexTableToRecover.await();
-          LOG.debug("Completed index table recovery wait latch");
+          LOGGER.debug("Completed index table recovery wait latch");
         }
       } catch (InterruptedException e) {
         Assert.fail("Should not be interrupted while waiting to allow the index to restore WALs.");
@@ -131,9 +131,9 @@ public class TestWALRecoveryCaching {
     @Override
     public void handleFailure(Multimap<HTableInterfaceReference, Mutation> attempted,
         Exception cause) throws IOException {
-      LOG.debug("Found index update failure!");
+      LOGGER.debug("Found index update failure!");
       if (allowIndexTableToRecover != null) {
-        LOG.info("failed index write on WAL recovery - allowing index table to be restored.");
+        LOGGER.info("failed index write on WAL recovery - allowing index table to be restored.");
         allowIndexTableToRecover.countDown();
       }
       super.handleFailure(attempted, cause);
@@ -209,24 +209,24 @@ public class TestWALRecoveryCaching {
       Bytes.toBytes(indexedTableName)));
 
     // log all the current state of the server
-    LOG.info("Current Server/Region paring: ");
+    LOGGER.info("Current Server/Region pairing: ");
     for (RegionServerThread t : util.getMiniHBaseCluster().getRegionServerThreads()) {
       // check all the conditions for the server to be done
       HRegionServer server = t.getRegionServer();
       if (server.isStopping() || server.isStopped() || server.isAborted()) {
-        LOG.info("\t== Offline: " + server.getServerName());
+        LOGGER.info("\t== Offline: " + server.getServerName());
         continue;
       }
       List<HRegionInfo> regions = ProtobufUtil.getOnlineRegions(server.getRSRpcServices());
-      LOG.info("\t" + server.getServerName() + " regions: " + regions);
+      LOGGER.info("\t" + server.getServerName() + " regions: " + regions);
     }
 
-    LOG.debug("Killing server " + shared);
+    LOGGER.debug("Killing server " + shared);
     util.getMiniHBaseCluster().killRegionServer(shared);
-    LOG.debug("Waiting on server " + shared + "to die");
+    LOGGER.debug("Waiting on server " + shared + " to die");
     util.getMiniHBaseCluster().waitForRegionServerToStop(shared, TIMEOUT);
     // force reassign the regions from the table
-    // LOG.debug("Forcing region reassignment from the killed server: " + shared);
+    // LOGGER.debug("Forcing region reassignment from the killed server: " + shared);
     // for (HRegion region : online) {
     // util.getMiniHBaseCluster().getMaster().assign(region.getRegionName());
     // }
@@ -250,7 +250,7 @@ public class TestWALRecoveryCaching {
     ResultScanner scanner = index.getScanner(s);
     int count = 0;
     for (Result r : scanner) {
-      LOG.info("Got index table result:" + r);
+      LOGGER.info("Got index table result:" + r);
       count++;
     }
     assertEquals("Got an unexpected found of index rows", 1, count);
@@ -308,7 +308,7 @@ public class TestWALRecoveryCaching {
         // find the regionserver that matches the passed server
         List<Region> online = getRegionsFromServerForTable(cluster, server, table);
 
-        LOG.info("Shutting down and reassigning regions from " + server);
+        LOGGER.info("Shutting down and reassigning regions from " + server);
         cluster.stopRegionServer(server);
         cluster.waitForRegionServerToStop(server, TIMEOUT);
 
@@ -317,13 +317,13 @@ public class TestWALRecoveryCaching {
           cluster.getMaster().getAssignmentManager().assign(Lists.newArrayList(region.getRegionInfo()));
         }
 
-        LOG.info("Starting region server:" + server.getHostname());
+        LOGGER.info("Starting region server:" + server.getHostname());
         cluster.startRegionServer(server.getHostname(), server.getPort());
 
         cluster.waitForRegionServerToStart(server.getHostname(), server.getPort(), TIMEOUT);
 
         // start a server to get back to the base number of servers
-        LOG.info("STarting server to replace " + server);
+        LOGGER.info("Starting server to replace " + server);
         cluster.startRegionServer();
         break;
       }
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/metrics/LoggingSink.java b/phoenix-core/src/test/java/org/apache/phoenix/metrics/LoggingSink.java
index 2cea684..bd6ef33 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/metrics/LoggingSink.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/metrics/LoggingSink.java
@@ -18,12 +18,12 @@
 package org.apache.phoenix.metrics;
 
 import org.apache.commons.configuration.SubsetConfiguration;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.metrics2.AbstractMetric;
 import org.apache.hadoop.metrics2.MetricsRecord;
 import org.apache.hadoop.metrics2.MetricsSink;
 import org.apache.phoenix.trace.TracingUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Simple sink that just logs the output of all the metrics that start with
@@ -31,7 +31,7 @@ import org.apache.phoenix.trace.TracingUtils;
  */
 public class LoggingSink implements MetricsSink {
 
-    private static final Log LOG = LogFactory.getLog(LoggingSink.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(LoggingSink.class);
 
     @Override
     public void init(SubsetConfiguration config) {
@@ -42,14 +42,14 @@ public class LoggingSink implements MetricsSink {
         // we could wait until flush, but this is a really lightweight process, so we just write
         // them
         // as soon as we get them
-        if (!LOG.isDebugEnabled()) {
+        if (!LOGGER.isDebugEnabled()) {
             return;
         }
-        LOG.debug("Found record:" + record.name());
+        LOGGER.debug("Found record:" + record.name());
         for (AbstractMetric metric : record.metrics()) {
             // just print the metric we care about
             if (metric.name().startsWith(TracingUtils.METRIC_SOURCE_KEY)) {
-                LOG.debug("\t metric:" + metric);
+                LOGGER.debug("\t metric:" + metric);
             }
         }
     }
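
The LoggingSink hunk keeps the explicit isDebugEnabled() guard. With slf4j that guard is mainly worthwhile when computing the log arguments is itself expensive; for cheap arguments the parameterized form already defers the work. A minimal sketch of both styles (class and method names are illustrative):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class DebugGuardSketch {

        private static final Logger LOGGER = LoggerFactory.getLogger(DebugGuardSketch.class);

        void logRecord(String recordName) {
            // Cheap argument: the placeholder form defers message construction,
            // so no explicit guard is required.
            LOGGER.debug("Found record: {}", recordName);

            // Expensive argument: keep the guard so the value is only computed
            // when DEBUG is actually enabled.
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug("Record details: {}", buildExpensiveSummary(recordName));
            }
        }

        private String buildExpensiveSummary(String recordName) {
            return "summary of " + recordName; // stand-in for a costly computation
        }
    }
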
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/tool/ParameterizedPhoenixCanaryToolIT.java b/phoenix-core/src/test/java/org/apache/phoenix/tool/ParameterizedPhoenixCanaryToolIT.java
index 4e678f3..91f6c72 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/tool/ParameterizedPhoenixCanaryToolIT.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/tool/ParameterizedPhoenixCanaryToolIT.java
@@ -19,8 +19,7 @@ package org.apache.phoenix.tool;
 
 import com.google.common.collect.Maps;
 import com.google.gson.Gson;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+
 import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
 import org.apache.phoenix.query.BaseTest;
 import org.apache.phoenix.query.QueryServices;
@@ -31,6 +30,8 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.ByteArrayOutputStream;
 import java.io.File;
@@ -57,7 +58,7 @@ import static org.junit.Assert.assertTrue;
 @Category(NeedsOwnMiniClusterTest.class)
 public class ParameterizedPhoenixCanaryToolIT extends BaseTest {
 
-	private static final Log logger = LogFactory.getLog(ParameterizedPhoenixCanaryToolIT.class);
+	private static final Logger LOGGER = LoggerFactory.getLogger(ParameterizedPhoenixCanaryToolIT.class);
 	private static final String stdOutSink
 			= "org.apache.phoenix.tool.PhoenixCanaryTool$StdOutSink";
 	private static final String fileOutSink
@@ -107,7 +108,7 @@ public class ParameterizedPhoenixCanaryToolIT extends BaseTest {
 			tearDownMiniClusterAsync(1);
 			setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()),
 					new ReadOnlyProps(clientProps.entrySet().iterator()));
-			logger.info("New cluster is spinned up with test parameters " +
+			LOGGER.info("New cluster is spun up with test parameters " +
 					"isPositiveTestType" + this.isPositiveTestType +
 					"isNamespaceEnabled" + this.isNamespaceEnabled +
 					"resultSinkOption" + this.resultSinkOption);
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/CoprocessorHConnectionTableFactoryTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/CoprocessorHConnectionTableFactoryTest.java
index a757780..c1bcd64 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/util/CoprocessorHConnectionTableFactoryTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/util/CoprocessorHConnectionTableFactoryTest.java
@@ -21,8 +21,6 @@ import java.sql.Statement;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicBoolean;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
@@ -33,6 +31,8 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.end2end.BaseUniqueNamesOwnClusterIT;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /*
  * This test is wrt to https://issues.apache.org/jira/browse/PHOENIX-4993.Test checks 1. region
@@ -41,7 +41,7 @@ import org.junit.Test;
  */
 public class CoprocessorHConnectionTableFactoryTest extends BaseUniqueNamesOwnClusterIT {
   private static String ORG_PREFIX = "ORG";
-  private static final Log LOG = LogFactory.getLog(CoprocessorHConnectionTableFactoryTest.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(CoprocessorHConnectionTableFactoryTest.class);
 
   @BeforeClass
   public static final void doSetup() throws Exception {
@@ -70,7 +70,7 @@ public class CoprocessorHConnectionTableFactoryTest extends BaseUniqueNamesOwnCl
       }
       conn.commit();
     } catch (Exception e) {
-      LOG.error("Client side exception:" + e);
+      LOGGER.error("Client side exception:" + e);
     }
 
   }
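
One slf4j detail worth noting for the catch block above: concatenating the exception into the message ("Client side exception:" + e) records only e.toString(), whereas passing the throwable as the final argument preserves the full stack trace. A minimal sketch of that pattern (class and method names are illustrative):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class ExceptionLoggingSketch {

        private static final Logger LOGGER = LoggerFactory.getLogger(ExceptionLoggingSketch.class);

        void commitQuietly(Runnable commit) {
            try {
                commit.run();
            } catch (Exception e) {
                // The trailing Throwable argument is logged with its stack trace,
                // unlike string concatenation, which captures only e.toString().
                LOGGER.error("Client side exception", e);
            }
        }
    }
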
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java b/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
index 0e077e9..4a628d8 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
@@ -53,8 +53,6 @@ import java.util.List;
 import java.util.Properties;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.HConstants;
@@ -129,6 +127,8 @@ import org.apache.phoenix.schema.stats.GuidePostsKey;
 import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.transaction.TransactionFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Objects;
 import com.google.common.collect.Lists;
@@ -136,7 +136,7 @@ import com.google.common.collect.Lists;
 
 
 public class TestUtil {
-    private static final Log LOG = LogFactory.getLog(TestUtil.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(TestUtil.class);
     
     private static final Long ZERO = new Long(0);
     public static final String DEFAULT_SCHEMA_NAME = "S";
@@ -831,11 +831,11 @@ public class TestUtil {
                 try (HTableInterface htableForRawScan = services.getTable(Bytes.toBytes(tableName))) {
                     ResultScanner scanner = htableForRawScan.getScanner(scan);
                     List<Result> results = Lists.newArrayList(scanner);
-                    LOG.info("Results: " + results);
+                    LOGGER.info("Results: " + results);
                     compactionDone = results.isEmpty();
                     scanner.close();
                 }
-                LOG.info("Compaction done: " + compactionDone);
+                LOGGER.info("Compaction done: " + compactionDone);
                 
                 // need to run compaction after the next txn snapshot has been written so that compaction can remove deleted rows
                 if (!compactionDone && table.isTransactional()) {
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultithreadedDiffer.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultithreadedDiffer.java
index 6e828bd..ef2e167 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultithreadedDiffer.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultithreadedDiffer.java
@@ -31,7 +31,8 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 class MultithreadedDiffer implements Callable<Void> {
-    private static final Logger logger = LoggerFactory.getLogger(MultiThreadedRunner.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(MultithreadedDiffer.class);
+
     private Thread t;
     private Query query;
     private ThreadTime threadTime;
@@ -82,7 +83,7 @@ class MultithreadedDiffer implements Callable<Void> {
      * Executes verification runs for a minimum of number of execution or execution duration
      */
     public Void call() throws Exception {
-        logger.info("\n\nThread Starting " + t.getName() + " ; " + query.getStatement() + " for "
+        LOGGER.info("\n\nThread Starting " + t.getName() + " ; " + query.getStatement() + " for "
                 + numberOfExecutions + "times\n\n");
         Long start = System.currentTimeMillis();
         for (long i = numberOfExecutions; (i > 0 && ((System.currentTimeMillis() - start)
@@ -93,7 +94,7 @@ class MultithreadedDiffer implements Callable<Void> {
                 e.printStackTrace();
             }
         }
-        logger.info("\n\nThread exiting." + t.getName() + "\n\n");
+        LOGGER.info("\n\nThread exiting. " + t.getName() + "\n\n");
         return null;
     }
 }
\ No newline at end of file
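
The MultithreadedDiffer hunk also corrects a copy-paste mismatch: the logger was previously created with MultiThreadedRunner.class, so its category did not match the file. Passing the enclosing class to LoggerFactory.getLogger keeps the category accurate; a minimal sketch (class name is illustrative):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class LoggerNamingSketch {

        // Use the enclosing class so the logger category always matches the file.
        private static final Logger LOGGER = LoggerFactory.getLogger(LoggerNamingSketch.class);

        public static void main(String[] args) {
            // getName() returns the fully qualified class name passed to getLogger.
            LOGGER.info("Logger category: {}", LOGGER.getName());
        }
    }
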
diff --git a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/XMLConfigParserTest.java b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/XMLConfigParserTest.java
index c5746f9..a4285f4 100644
--- a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/XMLConfigParserTest.java
+++ b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/XMLConfigParserTest.java
@@ -33,7 +33,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 public class XMLConfigParserTest {
-    private static final Logger LOG = LoggerFactory.getLogger(XMLConfigParserTest.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(XMLConfigParserTest.class);
   
     @Test
     public void testDTDInScenario() throws Exception {
@@ -45,7 +45,7 @@ public class XMLConfigParserTest {
             fail("The scenario should have failed to parse because it contains a DTD");
         } catch (UnmarshalException e) {
             // If we don't parse the DTD, the variable 'name' won't be defined in the XML
-            LOG.warn("Caught expected exception", e);
+            LOGGER.warn("Caught expected exception", e);
             Throwable cause = e.getLinkedException();
             assertTrue("Cause was a " + cause.getClass(), cause instanceof XMLStreamException);
         }
diff --git a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/result/impl/XMLResultHandlerTest.java b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/result/impl/XMLResultHandlerTest.java
index 98c492f..83a28e0 100644
--- a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/result/impl/XMLResultHandlerTest.java
+++ b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/result/impl/XMLResultHandlerTest.java
@@ -32,7 +32,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 public class XMLResultHandlerTest {
-    private static final Logger LOG = LoggerFactory.getLogger(XMLResultHandlerTest.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(XMLResultHandlerTest.class);
 
     @Test
     public void testDTDInResults() throws Exception {
@@ -45,7 +45,7 @@ public class XMLResultHandlerTest {
           fail("Expected to see an exception parsing the results with a DTD");
         } catch (UnmarshalException e) {
           // If we don't parse the DTD, the variable 'name' won't be defined in the XML
-          LOG.debug("Caught expected exception", e);
+          LOGGER.debug("Caught expected exception", e);
           Throwable cause = e.getLinkedException();
           assertTrue("Cause was a " + cause.getClass(), cause instanceof XMLStreamException);
         }
diff --git a/phoenix-tracing-webapp/src/main/java/org/apache/phoenix/tracingwebapp/http/Main.java b/phoenix-tracing-webapp/src/main/java/org/apache/phoenix/tracingwebapp/http/Main.java
index 249f8e6..91c857d 100755
--- a/phoenix-tracing-webapp/src/main/java/org/apache/phoenix/tracingwebapp/http/Main.java
+++ b/phoenix-tracing-webapp/src/main/java/org/apache/phoenix/tracingwebapp/http/Main.java
@@ -19,8 +19,6 @@ package org.apache.phoenix.tracingwebapp.http;
 import java.net.URL;
 import java.security.ProtectionDomain;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.util.Tool;
@@ -28,13 +26,15 @@ import org.apache.hadoop.util.ToolRunner;
 import org.apache.log4j.BasicConfigurator;
 import org.eclipse.jetty.server.Server;
 import org.eclipse.jetty.webapp.WebAppContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * tracing web app runner
  */
 public final class Main extends Configured implements Tool {
 
-    protected static final Log LOG = LogFactory.getLog(Main.class);
+    protected static final Logger LOGGER = LoggerFactory.getLogger(Main.class);
     public static final String PHONIX_DBSERVER_PORT_KEY =
         "phoenix.dbserver.port";
     public static final int DEFAULT_DBSERVER_PORT = 2181;