Posted to commits@phoenix.apache.org by ch...@apache.org on 2019/06/28 23:44:28 UTC

[phoenix] branch 4.14-HBase-1.4 updated: PHOENIX-5228 use slf4j for logging in phoenix project

This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.14-HBase-1.4 by this push:
     new 2c0961d  PHOENIX-5228 use slf4j for logging in phoenix project
2c0961d is described below

commit 2c0961dd2cba2ed61ae0262a4c075536032e35b0
Author: Xinyi <xy...@salesforce.com>
AuthorDate: Tue Jun 4 18:27:30 2019 -0700

    PHOENIX-5228 use slf4j for logging in phoenix project
    
    Signed-off-by: Chinmay Kulkarni <ch...@apache.org>
---
 .../wal/WALRecoveryRegionPostOpenIT.java           | 12 +++----
 ...WALReplayWithIndexWritesAndCompressedWALIT.java | 12 +++----
 .../src/it/java/org/apache/phoenix/Sandbox.java    |  4 +--
 .../apache/phoenix/end2end/BasePermissionsIT.java  | 14 ++++----
 .../apache/phoenix/end2end/End2EndTestDriver.java  |  4 +--
 .../apache/phoenix/end2end/OrphanViewToolIT.java   |  4 +--
 .../index/IndexRebuildIncrementDisableCountIT.java | 12 +++----
 .../index/InvalidIndexStateClientSideIT.java       | 11 +++---
 .../phoenix/end2end/index/MutableIndexIT.java      |  4 +--
 .../end2end/index/MutableIndexReplicationIT.java   | 24 ++++++-------
 .../end2end/index/PartialIndexRebuilderIT.java     |  4 +--
 .../index/FailForUnsupportedHBaseVersionsIT.java   |  8 ++---
 .../phoenix/monitoring/PhoenixMetricsIT.java       |  6 ++--
 .../apache/phoenix/query/ConnectionCachingIT.java  |  6 ++--
 .../apache/phoenix/trace/BaseTracingTestIT.java    | 10 +++---
 .../phoenix/trace/PhoenixTracingEndToEndIT.java    | 36 +++++++++----------
 .../hbase/ipc/PhoenixRpcSchedulerFactory.java      | 12 +++----
 .../IndexHalfStoreFileReaderGenerator.java         | 10 +++---
 .../wal/BinaryCompatibleBaseDecoder.java           | 13 ++++---
 .../apache/phoenix/cache/ServerCacheClient.java    | 27 ++++++++------
 .../java/org/apache/phoenix/call/CallRunner.java   |  9 +++--
 .../coprocessor/MetaDataRegionObserver.java        |  5 +--
 .../coprocessor/PhoenixAccessController.java       | 22 ++++++------
 .../org/apache/phoenix/execute/BaseQueryPlan.java  | 14 ++++----
 .../org/apache/phoenix/execute/HashJoinPlan.java   |  8 ++---
 .../expression/function/CollationKeyFunction.java  | 26 +++++++-------
 .../org/apache/phoenix/hbase/index/Indexer.java    | 42 +++++++++++-----------
 .../apache/phoenix/hbase/index/LockManager.java    |  8 ++---
 .../hbase/index/builder/BaseIndexBuilder.java      |  8 ++---
 .../hbase/index/builder/IndexBuildManager.java     |  6 ++--
 .../hbase/index/covered/NonTxIndexBuilder.java     | 10 +++---
 .../hbase/index/covered/data/IndexMemStore.java    | 24 ++++++-------
 .../hbase/index/parallel/BaseTaskRunner.java       | 10 +++---
 .../index/parallel/QuickFailingTaskRunner.java     |  7 ++--
 .../phoenix/hbase/index/parallel/TaskBatch.java    |  8 ++---
 .../hbase/index/parallel/ThreadPoolBuilder.java    | 10 +++---
 .../hbase/index/parallel/ThreadPoolManager.java    | 14 ++++----
 .../hbase/index/util/IndexManagementUtil.java      | 10 +++---
 .../phoenix/hbase/index/write/IndexWriter.java     | 16 ++++-----
 .../hbase/index/write/IndexWriterUtils.java        |  6 ++--
 .../index/write/KillServerOnFailurePolicy.java     | 10 +++---
 .../hbase/index/write/RecoveryIndexWriter.java     | 12 +++----
 .../TrackingParallelWriterIndexCommitter.java      | 18 +++++-----
 .../phoenix/index/PhoenixIndexFailurePolicy.java   | 29 +++++++--------
 .../phoenix/index/PhoenixTransactionalIndexer.java | 10 +++---
 .../apache/phoenix/iterate/SnapshotScanner.java    | 11 +++---
 .../apache/phoenix/jdbc/PhoenixEmbeddedDriver.java | 12 +++----
 .../org/apache/phoenix/jdbc/PhoenixResultSet.java  |  8 ++---
 .../java/org/apache/phoenix/log/QueryLogger.java   | 12 +++----
 .../apache/phoenix/log/QueryLoggerDisruptor.java   | 10 +++---
 .../org/apache/phoenix/log/TableLogWriter.java     |  8 ++---
 .../phoenix/mapreduce/AbstractBulkLoadTool.java    | 26 +++++++-------
 .../mapreduce/FormatToBytesWritableMapper.java     |  4 +--
 .../phoenix/mapreduce/FormatToKeyValueReducer.java |  2 +-
 .../phoenix/mapreduce/MultiHfileOutputFormat.java  | 10 +++---
 .../apache/phoenix/mapreduce/OrphanViewTool.java   |  8 ++---
 .../phoenix/mapreduce/PhoenixInputFormat.java      | 20 +++++------
 .../phoenix/mapreduce/PhoenixOutputFormat.java     |  8 ++---
 .../phoenix/mapreduce/PhoenixRecordReader.java     | 12 +++----
 .../phoenix/mapreduce/PhoenixRecordWriter.java     | 12 +++----
 .../PhoenixServerBuildIndexInputFormat.java        |  8 ++---
 .../phoenix/mapreduce/RegexToKeyValueMapper.java   |  5 +--
 .../mapreduce/index/DirectHTableWriter.java        |  8 ++---
 .../mapreduce/index/IndexScrutinyMapper.java       | 10 +++---
 .../phoenix/mapreduce/index/IndexScrutinyTool.java | 20 +++++------
 .../apache/phoenix/mapreduce/index/IndexTool.java  | 26 +++++++-------
 .../phoenix/mapreduce/index/IndexToolUtil.java     |  4 +--
 .../index/PhoenixIndexImportDirectMapper.java      | 10 +++---
 .../index/PhoenixIndexImportDirectReducer.java     |  4 +--
 .../mapreduce/index/PhoenixIndexImportMapper.java  |  6 ++--
 .../index/PhoenixIndexPartialBuildMapper.java      | 10 +++---
 .../index/PhoenixServerBuildIndexMapper.java       |  2 +-
 .../index/automation/PhoenixMRJobSubmitter.java    | 32 ++++++++---------
 .../mapreduce/util/PhoenixConfigurationUtil.java   | 16 ++++-----
 .../java/org/apache/phoenix/metrics/Metrics.java   | 12 +++----
 .../schema/stats/DefaultStatisticsCollector.java   |  6 ++--
 .../phoenix/schema/stats/StatisticsScanner.java    | 28 +++++++--------
 .../org/apache/phoenix/tool/PhoenixCanaryTool.java | 28 +++++++--------
 .../apache/phoenix/trace/PhoenixMetricsSink.java   | 22 ++++++------
 .../java/org/apache/phoenix/trace/TraceReader.java | 14 ++++----
 .../apache/phoenix/trace/TraceSpanReceiver.java    | 14 ++++----
 .../java/org/apache/phoenix/trace/TraceWriter.java | 32 ++++++++---------
 .../org/apache/phoenix/trace/util/Tracing.java     | 10 +++---
 .../org/apache/phoenix/util/CSVCommonsLoader.java  |  8 ++---
 .../phoenix/util/EquiDepthStreamHistogram.java     | 14 ++++----
 .../org/apache/phoenix/util/PhoenixMRJobUtil.java  | 24 ++++++-------
 .../java/org/apache/phoenix/util/QueryUtil.java    | 10 +++---
 .../java/org/apache/phoenix/util/ServerUtil.java   |  8 ++---
 .../org/apache/phoenix/util/UpsertExecutor.java    |  4 +--
 .../phoenix/util/ZKBasedMasterElectionUtil.java    | 16 ++++-----
 .../apache/phoenix/util/csv/CsvUpsertExecutor.java |  6 ++--
 .../phoenix/util/json/JsonUpsertExecutor.java      |  6 ++--
 .../phoenix/util/regex/RegexUpsertExecutor.java    |  6 ++--
 .../phoenix/hbase/index/IndexTestingUtils.java     | 12 +++----
 .../apache/phoenix/hbase/index/StubAbortable.java  |  9 +++--
 .../phoenix/hbase/index/write/TestIndexWriter.java | 15 ++++----
 .../hbase/index/write/TestParalleIndexWriter.java  | 10 +++---
 .../write/TestParalleWriterIndexCommitter.java     | 10 +++---
 .../hbase/index/write/TestWALRecoveryCaching.java  | 34 +++++++++---------
 .../phoenix/jdbc/SecureUserConnectionsTest.java    |  8 ++---
 .../org/apache/phoenix/metrics/LoggingSink.java    | 12 +++----
 .../tool/ParameterizedPhoenixCanaryToolIT.java     |  8 ++---
 .../CoprocessorHConnectionTableFactoryTest.java    |  9 ++---
 .../java/org/apache/phoenix/util/TestUtil.java     | 10 +++---
 .../pherf/workload/MultithreadedDiffer.java        |  7 ++--
 .../apache/phoenix/pherf/XMLConfigParserTest.java  |  4 +--
 .../pherf/result/impl/XMLResultHandlerTest.java    |  4 +--
 .../apache/phoenix/tracingwebapp/http/Main.java    |  7 ++--
 108 files changed, 664 insertions(+), 662 deletions(-)

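Every hunk below applies the same mechanical substitution: the commons-logging Log/LogFactory pair is replaced with the slf4j Logger/LoggerFactory pair, and the field is renamed LOG -> LOGGER. As an illustrative sketch only (the class name ExampleClass and the doWork method are hypothetical, not from this commit), the before and after shapes are:

    // Before (commons-logging), as removed throughout this commit:
    //   import org.apache.commons.logging.Log;
    //   import org.apache.commons.logging.LogFactory;
    //   private static final Log LOG = LogFactory.getLog(ExampleClass.class);
    //   LOG.info("Client version: " + version);

    // After (slf4j), as added throughout this commit:
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class ExampleClass {
        private static final Logger LOGGER = LoggerFactory.getLogger(ExampleClass.class);

        public void doWork(int version) {
            // Most call sites keep string concatenation, as in the hunks below;
            // slf4j also accepts parameterized messages, which skips the
            // concatenation entirely when the log level is disabled.
            LOGGER.info("Client version: {}", version);
        }
    }
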
diff --git a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALRecoveryRegionPostOpenIT.java b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALRecoveryRegionPostOpenIT.java
index db64c20..5d7d438 100644
--- a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALRecoveryRegionPostOpenIT.java
+++ b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALRecoveryRegionPostOpenIT.java
@@ -37,8 +37,6 @@ import java.util.Properties;
 import java.util.Set;
 import java.util.concurrent.CountDownLatch;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
@@ -70,6 +68,8 @@ import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.ReadOnlyProps;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -80,7 +80,7 @@ import com.google.common.collect.Multimap;
 @Category(NeedsOwnMiniClusterTest.class)
 public class WALRecoveryRegionPostOpenIT extends BaseTest {
 
-    private static final Log LOG = LogFactory.getLog(WALRecoveryRegionPostOpenIT.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(WALRecoveryRegionPostOpenIT.class);
 
     private static final String DATA_TABLE_NAME="DATA_POST_OPEN";
 
@@ -145,10 +145,10 @@ public class WALRecoveryRegionPostOpenIT extends BaseTest {
         @Override
         public void handleFailure(Multimap<HTableInterfaceReference, Mutation> attempted, Exception cause) throws IOException
         {
-            LOG.info("Found index update failure!");
+            LOGGER.info("Found index update failure!");
             handleFailureCalledCount++;
             tableReferenceToMutation=attempted;
-            LOG.info("failed index update on WAL recovery - allowing index table can be write.");
+            LOGGER.info("failed index update on WAL recovery - allowing index table can be write.");
             failIndexTableWrite=false;
             super.handleFailure(attempted, cause);
 
@@ -263,7 +263,7 @@ public class WALRecoveryRegionPostOpenIT extends BaseTest {
             resultScanner = primaryTable.getScanner(scan);
             count = 0;
             for (Result result : resultScanner) {
-                LOG.info("Got data table result:" + result);
+                LOGGER.info("Got data table result:" + result);
                 count++;
             }
             assertEquals("Got an unexpected found of data rows", 1, count);
diff --git a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
index 1b8639d..10e5b80 100644
--- a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
+++ b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
@@ -26,8 +26,6 @@ import static org.mockito.Mockito.when;
 import java.io.IOException;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -69,6 +67,8 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.mockito.Mockito;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * For pre-0.94.9 instances, this class tests correctly deserializing WALEdits w/o compression. Post
@@ -86,7 +86,7 @@ import org.mockito.Mockito;
 @Category(NeedsOwnMiniClusterTest.class)
 public class WALReplayWithIndexWritesAndCompressedWALIT {
 
-  public static final Log LOG = LogFactory.getLog(WALReplayWithIndexWritesAndCompressedWALIT.class);
+  public static final Logger LOGGER = LoggerFactory.getLogger(WALReplayWithIndexWritesAndCompressedWALIT.class);
   @Rule
   public TableName table = new TableName();
   private String INDEX_TABLE_NAME = table.getTableNameString() + "_INDEX";
@@ -143,7 +143,7 @@ public class WALReplayWithIndexWritesAndCompressedWALIT {
     UTIL.startMiniZKCluster();
 
     Path hbaseRootDir = UTIL.getDFSCluster().getFileSystem().makeQualified(new Path("/hbase"));
-    LOG.info("hbase.rootdir=" + hbaseRootDir);
+    LOGGER.info("hbase.rootdir=" + hbaseRootDir);
     UTIL.getConfiguration().set(HConstants.HBASE_DIR, hbaseRootDir.toString());
     UTIL.startMiniHBaseCluster(1, 1);
   }
@@ -291,7 +291,7 @@ public class WALReplayWithIndexWritesAndCompressedWALIT {
     assertEquals("splits=" + splits, 1, splits.size());
     // Make sure the file exists
     assertTrue(fs.exists(splits.get(0)));
-    LOG.info("Split file=" + splits.get(0));
+    LOGGER.info("Split file=" + splits.get(0));
     return splits.get(0);
   }
 
@@ -304,7 +304,7 @@ private int getKeyValueCount(HTable table) throws IOException {
     int count = 0;
     for (Result res : results) {
       count += res.list().size();
-      LOG.debug(count + ") " + res);
+      LOGGER.debug(count + ") " + res);
     }
     results.close();
 
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/Sandbox.java b/phoenix-core/src/it/java/org/apache/phoenix/Sandbox.java
index b7bc107..ec4e920 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/Sandbox.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/Sandbox.java
@@ -32,7 +32,7 @@ import org.slf4j.LoggerFactory;
  */
 public class Sandbox {
 
-    private static final Logger LOG = LoggerFactory.getLogger(Sandbox.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(Sandbox.class);
 
     public static void main(String[] args) throws Exception {
         System.out.println("Starting Phoenix sandbox");
@@ -50,7 +50,7 @@ public class Sandbox {
                         testUtil.shutdownMiniCluster();
                     }
                 } catch (Exception e) {
-                    LOG.error("Exception caught when shutting down mini cluster", e);
+                    LOGGER.error("Exception caught when shutting down mini cluster", e);
                 }
             }
         });
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java
index 6d2dfb9..54de19a 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java
@@ -18,8 +18,6 @@ package org.apache.phoenix.end2end;
 
 import com.google.common.base.Joiner;
 import com.google.common.base.Throwables;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.AuthUtil;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -41,6 +39,8 @@ import org.junit.After;
 import org.junit.BeforeClass;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.lang.reflect.UndeclaredThrowableException;
@@ -68,7 +68,7 @@ import static org.junit.Assert.fail;
 @RunWith(Parameterized.class)
 public class BasePermissionsIT extends BaseTest {
 
-    private static final Log LOG = LogFactory.getLog(BasePermissionsIT.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(BasePermissionsIT.class);
 
     static String SUPERUSER;
 
@@ -271,7 +271,7 @@ public class BasePermissionsIT extends BaseTest {
                     for(String tableOrSchema : tableOrSchemaList) {
                         String grantStmtSQL = "GRANT '" + actions + "' ON " + (isSchema ? " SCHEMA " : " TABLE ") + tableOrSchema + " TO "
                                 + ((ug instanceof String) ? (" GROUP " + "'" + ug + "'") : ("'" + ((User)ug).getShortName() + "'"));
-                        LOG.info("Grant Permissions SQL: " + grantStmtSQL);
+                        LOGGER.info("Grant Permissions SQL: " + grantStmtSQL);
                         assertFalse(stmt.execute(grantStmtSQL));
                     }
                 }
@@ -286,7 +286,7 @@ public class BasePermissionsIT extends BaseTest {
             public Object run() throws Exception {
                 try (Connection conn = getConnection(); Statement stmt = conn.createStatement();) {
                     String grantStmtSQL = "GRANT '" + actions + "' TO " + " '" + user.getShortName() + "'";
-                    LOG.info("Grant Permissions SQL: " + grantStmtSQL);
+                    LOGGER.info("Grant Permissions SQL: " + grantStmtSQL);
                     assertFalse(stmt.execute(grantStmtSQL));
                 }
                 return null;
@@ -308,7 +308,7 @@ public class BasePermissionsIT extends BaseTest {
                     for(String tableOrSchema : tableOrSchemaList) {
                         String revokeStmtSQL = "REVOKE ON " + (isSchema ? " SCHEMA " : " TABLE ") + tableOrSchema + " FROM "
                                 + ((ug instanceof String) ? (" GROUP " + "'" + ug + "'") : ("'" + ((User)ug).getShortName() + "'"));
-                        LOG.info("Revoke Permissions SQL: " + revokeStmtSQL);
+                        LOGGER.info("Revoke Permissions SQL: " + revokeStmtSQL);
                         assertFalse(stmt.execute(revokeStmtSQL));
                     }
                 }
@@ -324,7 +324,7 @@ public class BasePermissionsIT extends BaseTest {
                 try (Connection conn = getConnection(); Statement stmt = conn.createStatement();) {
                     String revokeStmtSQL = "REVOKE FROM " +
                             ((ug instanceof String) ? (" GROUP " + "'" + ug + "'") : ("'" + ((User)ug).getShortName() + "'"));
-                    LOG.info("Revoke Permissions SQL: " + revokeStmtSQL);
+                    LOGGER.info("Revoke Permissions SQL: " + revokeStmtSQL);
                     assertFalse(stmt.execute(revokeStmtSQL));
                 }
                 return null;
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/End2EndTestDriver.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/End2EndTestDriver.java
index feb506f..07c12be 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/End2EndTestDriver.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/End2EndTestDriver.java
@@ -48,7 +48,7 @@ import org.slf4j.LoggerFactory;
  */
 public class End2EndTestDriver extends AbstractHBaseTool {
     
-    private static final Logger LOG = LoggerFactory.getLogger(End2EndTestDriver.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(End2EndTestDriver.class);
     private static final String SHORT_REGEX_ARG = "r";
     private static final String SKIP_TESTS = "n";
     
@@ -80,7 +80,7 @@ public class End2EndTestDriver extends AbstractHBaseTool {
         try {
           testFilterRe = Pattern.compile(pattern);
         } catch (PatternSyntaxException e) {
-          LOG.error("Failed to find tests using pattern '" + pattern
+          LOGGER.error("Failed to find tests using pattern '" + pattern
               + "'. Is it a valid Java regular expression?", e);
           throw e;
         }
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrphanViewToolIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrphanViewToolIT.java
index 95b9f84..2f29654 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrphanViewToolIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrphanViewToolIT.java
@@ -54,7 +54,7 @@ import org.slf4j.LoggerFactory;
 @RunWith(Parameterized.class)
 public class OrphanViewToolIT extends ParallelStatsDisabledIT {
     private static final String SYSTEM_CHILD_LINK_NAME = SYSTEM_CATALOG_NAME;
-    private static final Logger LOG = LoggerFactory.getLogger(OrphanViewToolIT.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(OrphanViewToolIT.class);
 
     private final boolean isMultiTenant;
     private final boolean columnEncoded;
@@ -214,7 +214,7 @@ public class OrphanViewToolIT extends ParallelStatsDisabledIT {
         }
         int count = reader.getLineNumber();
         if (count != lineCount)
-            LOG.debug(count + " != " + lineCount);
+            LOGGER.debug(count + " != " + lineCount);
         assertTrue(count == lineCount);
     }
 
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexRebuildIncrementDisableCountIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexRebuildIncrementDisableCountIT.java
index 694f359..cf48f5f 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexRebuildIncrementDisableCountIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexRebuildIncrementDisableCountIT.java
@@ -29,8 +29,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Random;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HTableInterface;
@@ -52,11 +50,13 @@ import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.TestUtil;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Maps;
 
 public class IndexRebuildIncrementDisableCountIT extends BaseUniqueNamesOwnClusterIT {
-    private static final Log LOG = LogFactory.getLog(IndexRebuildIncrementDisableCountIT.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(IndexRebuildIncrementDisableCountIT.class);
     private static long pendingDisableCount = 0;
     private static String ORG_PREFIX = "ORG";
     private static Result pendingDisableCountResult = null;
@@ -124,7 +124,7 @@ public class IndexRebuildIncrementDisableCountIT extends BaseUniqueNamesOwnClust
             return Bytes.toLong(pendingDisableCountResult.getValue(TABLE_FAMILY_BYTES,
                 PhoenixDatabaseMetaData.PENDING_DISABLE_COUNT_BYTES));
         } catch (Exception e) {
-            LOG.error("Exception in getPendingDisableCount: " + e);
+            LOGGER.error("Exception in getPendingDisableCount: " + e);
             return 0;
         }
     }
@@ -148,7 +148,7 @@ public class IndexRebuildIncrementDisableCountIT extends BaseUniqueNamesOwnClust
                         Thread.sleep(100);
                     }
                 } catch (Exception e) {
-                    LOG.error("Error in checkPendingDisableCount : " + e);
+                    LOGGER.error("Error in checkPendingDisableCount : " + e);
                 }
             }
         };
@@ -175,7 +175,7 @@ public class IndexRebuildIncrementDisableCountIT extends BaseUniqueNamesOwnClust
             }
             conn.commit();
         } catch (Exception e) {
-            LOG.error("Client side exception:" + e);
+            LOGGER.error("Client side exception:" + e);
         }
     }
 
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/InvalidIndexStateClientSideIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/InvalidIndexStateClientSideIT.java
index aee9d5b..6d06505 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/InvalidIndexStateClientSideIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/InvalidIndexStateClientSideIT.java
@@ -50,12 +50,11 @@ import org.apache.phoenix.util.EnvironmentEdgeManager;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.SchemaUtil;
 import org.junit.Test;
-
-import com.sun.org.apache.commons.logging.Log;
-import com.sun.org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class InvalidIndexStateClientSideIT extends ParallelStatsDisabledIT {
-    private static final Log LOG = LogFactory.getLog(InvalidIndexStateClientSideIT.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(InvalidIndexStateClientSideIT.class);
 
     @Test
     public void testCachedConnections() throws Throwable {
@@ -119,7 +118,7 @@ public class InvalidIndexStateClientSideIT extends ParallelStatsDisabledIT {
                     }
                 };
         int version = VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, 13, PHOENIX_PATCH_NUMBER);
-        LOG.info("Client version: " + version);
+        LOGGER.info("Client version: " + version);
         HTableInterface ht =
                 queryServices.getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
         try {
@@ -132,7 +131,7 @@ public class InvalidIndexStateClientSideIT extends ParallelStatsDisabledIT {
             assert (PIndexState.valueOf(result.getTable().getIndexes(0).getIndexState())
                     .equals(PIndexState.DISABLE));
         } catch (Exception e) {
-            LOG.error("Exception Occurred: " + e);
+            LOGGER.error("Exception Occurred: " + e);
 
         } finally {
             Closeables.closeQuietly(ht);
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
index 3a3b9af..f199f54 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
@@ -696,13 +696,13 @@ public class MutableIndexIT extends ParallelStatsDisabledIT {
                 Threads.sleep(10000);
             }
           } catch (Exception ex) {
-            Log.info(ex);
+              Log.info(ex);
           }
           long waitStartTime = System.currentTimeMillis();
           // wait until merge happened
           while (System.currentTimeMillis() - waitStartTime < 10000) {
             List<HRegionInfo> regions = admin.getTableRegions(indexTable);
-            Log.info("Waiting:" + regions.size());
+              Log.info("Waiting:" + regions.size());
             if (regions.size() < numRegions) {
               break;
             }
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexReplicationIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexReplicationIT.java
index 48265ed..36d35a8 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexReplicationIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexReplicationIT.java
@@ -33,8 +33,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Properties;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -62,6 +60,8 @@ import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Maps;
 
@@ -76,7 +76,7 @@ import com.google.common.collect.Maps;
 @Category(NeedsOwnMiniClusterTest.class)
 public class MutableIndexReplicationIT extends BaseTest {
 
-    private static final Log LOG = LogFactory.getLog(MutableIndexReplicationIT.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(MutableIndexReplicationIT.class);
 
     public static final String SCHEMA_NAME = "";
     public static final String DATA_TABLE_NAME = "T";
@@ -136,7 +136,7 @@ public class MutableIndexReplicationIT extends BaseTest {
         conf1 = utility1.getConfiguration();
         zkw1 = new ZooKeeperWatcher(conf1, "cluster1", null, true);
         admin = new ReplicationAdmin(conf1);
-        LOG.info("Setup first Zk");
+        LOGGER.info("Setup first Zk");
 
         // Base conf2 on conf1 so it gets the right zk cluster, and general cluster configs
         conf2 = HBaseConfiguration.create(conf1);
@@ -153,20 +153,20 @@ public class MutableIndexReplicationIT extends BaseTest {
         //replicate from cluster 1 -> cluster 2, but not back again
         admin.addPeer("1", utility2.getClusterKey());
 
-        LOG.info("Setup second Zk");
+        LOGGER.info("Setup second Zk");
         utility1.startMiniCluster(2);
         utility2.startMiniCluster(2);
     }
 
     private static void setupDriver() throws Exception {
-        LOG.info("Setting up phoenix driver");
+        LOGGER.info("Setting up phoenix driver");
         Map<String, String> props = Maps.newHashMapWithExpectedSize(3);
         // Forces server cache to be used
         props.put(QueryServices.INDEX_MUTATE_BATCH_SIZE_THRESHOLD_ATTRIB, Integer.toString(2));
         props.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.toString(true));
         // Must update config before starting server
         URL = getLocalClusterUrl(utility1);
-        LOG.info("Connecting driver to "+URL);
+        LOGGER.info("Connecting driver to "+URL);
         driver = initAndRegisterTestDriver(URL, new ReadOnlyProps(props.entrySet().iterator()));
     }
 
@@ -205,7 +205,7 @@ public class MutableIndexReplicationIT extends BaseTest {
             //create it as-is on the remote cluster
             admin2.createTable(desc);
 
-            LOG.info("Enabling replication on source table: "+tableName);
+            LOGGER.info("Enabling replication on source table: "+tableName);
             HColumnDescriptor[] cols = desc.getColumnFamilies();
             assertEquals(1, cols.length);
             // add the replication scope to the column
@@ -216,7 +216,7 @@ public class MutableIndexReplicationIT extends BaseTest {
             admin.disableTable(desc.getTableName());
             admin.modifyTable(tableName, desc);
             admin.enableTable(desc.getTableName());
-            LOG.info("Replication enabled on source table: "+tableName);
+            LOGGER.info("Replication enabled on source table: "+tableName);
         }
 
 
@@ -243,7 +243,7 @@ public class MutableIndexReplicationIT extends BaseTest {
 
         // other table can't be reached through Phoenix right now - would need to change how we
         // lookup tables. For right now, we just go through an HTable
-        LOG.info("Looking up tables in replication target");
+        LOGGER.info("Looking up tables in replication target");
         TableName[] tables = admin2.listTableNames();
         HTable remoteTable = new HTable(utility2.getConfiguration(), tables[0]);
         for (int i = 0; i < REPLICATION_RETRIES; i++) {
@@ -254,7 +254,7 @@ public class MutableIndexReplicationIT extends BaseTest {
             if (ensureAnyRows(remoteTable)) {
                 break;
             }
-            LOG.info("Sleeping for " + REPLICATION_WAIT_TIME_MILLIS
+            LOGGER.info("Sleeping for " + REPLICATION_WAIT_TIME_MILLIS
                     + " for edits to get replicated");
             Thread.sleep(REPLICATION_WAIT_TIME_MILLIS);
         }
@@ -267,7 +267,7 @@ public class MutableIndexReplicationIT extends BaseTest {
         ResultScanner scanner = remoteTable.getScanner(scan);
         boolean found = false;
         for (Result r : scanner) {
-            LOG.info("got row: " + r);
+            LOGGER.info("got row: " + r);
             found = true;
         }
         scanner.close();
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java
index f5ef58d..6ec3e2f 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java
@@ -79,7 +79,7 @@ import com.google.common.collect.Maps;
 @SuppressWarnings("deprecation")
 @RunWith(RunUntilFailure.class)
 public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
-    private static final Logger LOG = LoggerFactory.getLogger(PartialIndexRebuilderIT.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PartialIndexRebuilderIT.class);
     private static final Random RAND = new Random(5);
     private static final int WAIT_AFTER_DISABLED = 5000;
     private static final long REBUILD_PERIOD = 50000;
@@ -138,7 +138,7 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
                         Thread.interrupted();
                         throw new RuntimeException(e);
                     } catch (SQLException e) {
-                        LOG.error(e.getMessage(),e);
+                        LOGGER.error(e.getMessage(),e);
                     } finally {
                         runRebuildOnce = false;
                     }
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/FailForUnsupportedHBaseVersionsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/FailForUnsupportedHBaseVersionsIT.java
index 5916c43..b920bf4 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/FailForUnsupportedHBaseVersionsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/FailForUnsupportedHBaseVersionsIT.java
@@ -22,8 +22,6 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -40,13 +38,15 @@ import org.apache.phoenix.hbase.index.covered.CoveredColumn;
 import org.apache.phoenix.hbase.index.covered.CoveredColumnIndexSpecifierBuilder;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Test that we correctly fail for versions of HBase that don't support current properties
  */
 @Category(NeedsOwnMiniClusterTest.class)
 public class FailForUnsupportedHBaseVersionsIT {
-    private static final Log LOG = LogFactory.getLog(FailForUnsupportedHBaseVersionsIT.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(FailForUnsupportedHBaseVersionsIT.class);
 
     /**
      * We don't support WAL Compression for HBase &lt; 0.94.9, so we shouldn't even allow the server
@@ -151,7 +151,7 @@ public class FailForUnsupportedHBaseVersionsIT {
                 // wait for the regionserver to abort - if this doesn't occur in the timeout, assume its
                 // broken.
                 while (!server.isAborted()) {
-                    LOG.debug("Waiting on regionserver to abort..");
+                    LOGGER.debug("Waiting on regionserver to abort..");
                 }
             }
 
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
index 923673b..d8d3687 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
@@ -59,8 +59,6 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.phoenix.compile.StatementContext;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.jdbc.PhoenixConnection;
@@ -71,6 +69,8 @@ import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.junit.Test;
 import org.mockito.internal.util.reflection.Whitebox;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Joiner;
 import com.google.common.collect.Lists;
@@ -85,7 +85,7 @@ import com.google.common.collect.Sets;
  */
 public class PhoenixMetricsIT extends BasePhoenixMetricsIT {
 
-    private static final Log LOG = LogFactory.getLog(PhoenixMetricsIT.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixMetricsIT.class);
 
     @Test
     public void testResetGlobalPhoenixMetrics() {
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/query/ConnectionCachingIT.java b/phoenix-core/src/it/java/org/apache/phoenix/query/ConnectionCachingIT.java
index d1dda04..ec62a42 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/query/ConnectionCachingIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/query/ConnectionCachingIT.java
@@ -44,7 +44,7 @@ import org.slf4j.LoggerFactory;
 
 @RunWith(Parameterized.class)
 public class ConnectionCachingIT extends ParallelStatsEnabledIT {
-  private static final Logger LOG = LoggerFactory.getLogger(ConnectionCachingIT.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(ConnectionCachingIT.class);
 
   @Parameters(name= "phoenix.scanner.lease.renew.enabled={0}")
   public static Iterable<String> data() {
@@ -65,7 +65,7 @@ public class ConnectionCachingIT extends ParallelStatsEnabledIT {
     // The test driver works correctly, the real one doesn't.
     String url = getUrl();
     url = url.replace(";" + PhoenixRuntime.PHOENIX_TEST_DRIVER_URL_PARAM, "");
-    LOG.info("URL to use is: {}", url);
+    LOGGER.info("URL to use is: {}", url);
 
     Connection conn = DriverManager.getConnection(url, props);
     long before = getNumCachedConnections(conn);
@@ -76,7 +76,7 @@ public class ConnectionCachingIT extends ParallelStatsEnabledIT {
     Thread.sleep(QueryServicesOptions.DEFAULT_RUN_RENEW_LEASE_FREQUENCY_INTERVAL_MILLISECONDS / 2);
     long after = getNumCachedConnections(conn);
     for (int i = 0; i < 6; i++) {
-      LOG.info("Found {} connections cached", after);
+      LOGGER.info("Found {} connections cached", after);
       if (after <= before) {
         break;
       }
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/trace/BaseTracingTestIT.java b/phoenix-core/src/it/java/org/apache/phoenix/trace/BaseTracingTestIT.java
index 708ecad..8a9f4e0 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/trace/BaseTracingTestIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/trace/BaseTracingTestIT.java
@@ -29,8 +29,6 @@ import java.util.Properties;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.htrace.Span;
 import org.apache.htrace.Trace;
 import org.apache.htrace.impl.MilliSpan;
@@ -42,6 +40,8 @@ import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.junit.After;
 import org.junit.Before;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Base test for tracing tests - helps manage getting tracing/non-tracing connections, as well as
@@ -50,7 +50,7 @@ import org.junit.Before;
 
 public class BaseTracingTestIT extends ParallelStatsDisabledIT {
 
-    private static final Log LOG = LogFactory.getLog(BaseTracingTestIT.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(BaseTracingTestIT.class);
 
     protected CountDownLatch latch;
     protected int defaultTracingThreadPoolForTest = 1;
@@ -154,7 +154,7 @@ public class BaseTracingTestIT extends ParallelStatsDisabledIT {
                 }
                 return connection;
             } catch (SQLException e) {
-                LOG.error("New connection failed for tracing Table: " + tableName, e);
+                LOGGER.error("New connection failed for tracing Table: " + tableName, e);
                 return null;
             }
         }
@@ -170,7 +170,7 @@ public class BaseTracingTestIT extends ParallelStatsDisabledIT {
                 executor.shutdownNow();
                 executor.awaitTermination(5, TimeUnit.SECONDS);
             } catch (InterruptedException e) {
-                LOG.error("Failed to stop the thread. ", e);
+                LOGGER.error("Failed to stop the thread. ", e);
             }
         }
 
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTracingEndToEndIT.java b/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTracingEndToEndIT.java
index 03510dc..0fc4095 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTracingEndToEndIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTracingEndToEndIT.java
@@ -31,8 +31,6 @@ import java.util.*;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.htrace.*;
 import org.apache.htrace.impl.ProbabilitySampler;
@@ -42,6 +40,8 @@ import org.apache.phoenix.trace.TraceReader.SpanInfo;
 import org.apache.phoenix.trace.TraceReader.TraceHolder;
 import org.junit.Before;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.ImmutableMap;
 
@@ -51,7 +51,7 @@ import com.google.common.collect.ImmutableMap;
 
 public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
 
-    private static final Log LOG = LogFactory.getLog(PhoenixTracingEndToEndIT.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixTracingEndToEndIT.class);
     private static final int MAX_RETRIES = 10;
     private String enabledForLoggingTable;
     private String enableForLoggingIndex;
@@ -69,7 +69,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
     @Test
     public void testWriteSpans() throws Exception {
 
-        LOG.info("testWriteSpans TableName: " + tracingTableName);
+        LOGGER.info("testWriteSpans TableName: " + tracingTableName);
         // watch our sink so we know when commits happen
         latch = new CountDownLatch(1);
 
@@ -133,7 +133,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
     @Test
     public void testClientServerIndexingTracing() throws Exception {
 
-        LOG.info("testClientServerIndexingTracing TableName: " + tracingTableName);
+        LOGGER.info("testClientServerIndexingTracing TableName: " + tracingTableName);
         // one call for client side, one call for server side
         latch = new CountDownLatch(2);
         testTraceWriter.start();
@@ -144,7 +144,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
 
         // trace the requests we send
         Connection traceable = getTracingConnection();
-        LOG.debug("Doing dummy the writes to the tracked table");
+        LOGGER.debug("Doing dummy the writes to the tracked table");
         String insert = "UPSERT INTO " + enabledForLoggingTable + " VALUES (?, ?)";
         PreparedStatement stmt = traceable.prepareStatement(insert);
         stmt.setString(1, "key1");
@@ -159,7 +159,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
         traceable.commit();
 
         // wait for the latch to countdown, as the metrics system is time-based
-        LOG.debug("Waiting for latch to complete!");
+        LOGGER.debug("Waiting for latch to complete!");
         latch.await(200, TimeUnit.SECONDS);// should be way more than GC pauses
 
         // read the traces back out
@@ -212,7 +212,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
     @Test
     public void testScanTracing() throws Exception {
 
-        LOG.info("testScanTracing TableName: " + tracingTableName);
+        LOGGER.info("testScanTracing TableName: " + tracingTableName);
 
         // separate connections to minimize amount of traces that are generated
         Connection traceable = getTracingConnection();
@@ -226,7 +226,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
         createTestTable(conn, false);
 
         // update the table, but don't trace these, to simplify the traces we read
-        LOG.debug("Doing dummy the writes to the tracked table");
+        LOGGER.debug("Doing dummy the writes to the tracked table");
         String insert = "UPSERT INTO " + enabledForLoggingTable + " VALUES (?, ?)";
         PreparedStatement stmt = conn.prepareStatement(insert);
         stmt.setString(1, "key1");
@@ -265,7 +265,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
     @Test
     public void testScanTracingOnServer() throws Exception {
 
-        LOG.info("testScanTracingOnServer TableName: " + tracingTableName);
+        LOGGER.info("testScanTracingOnServer TableName: " + tracingTableName);
 
         // separate connections to minimize amount of traces that are generated
         Connection traceable = getTracingConnection();
@@ -279,7 +279,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
         createTestTable(conn, false);
 
         // update the table, but don't trace these, to simplify the traces we read
-        LOG.debug("Doing dummy the writes to the tracked table");
+        LOGGER.debug("Doing dummy the writes to the tracked table");
         String insert = "UPSERT INTO " + enabledForLoggingTable + " VALUES (?, ?)";
         PreparedStatement stmt = conn.prepareStatement(insert);
         stmt.setString(1, "key1");
@@ -317,7 +317,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
     @Test
     public void testCustomAnnotationTracing() throws Exception {
 
-        LOG.info("testCustomAnnotationTracing TableName: " + tracingTableName);
+        LOGGER.info("testCustomAnnotationTracing TableName: " + tracingTableName);
 
     	final String customAnnotationKey = "myannot";
     	final String customAnnotationValue = "a1";
@@ -334,7 +334,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
         createTestTable(conn, false);
 
         // update the table, but don't trace these, to simplify the traces we read
-        LOG.debug("Doing dummy the writes to the tracked table");
+        LOGGER.debug("Doing dummy the writes to the tracked table");
         String insert = "UPSERT INTO " + enabledForLoggingTable + " VALUES (?, ?)";
         PreparedStatement stmt = conn.prepareStatement(insert);
         stmt.setString(1, "key1");
@@ -420,7 +420,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
     @Test
     public void testSingleSpan() throws Exception {
 
-        LOG.info("testSingleSpan TableName: " + tracingTableName);
+        LOGGER.info("testSingleSpan TableName: " + tracingTableName);
 
         Properties props = new Properties(TEST_PROPERTIES);
         Connection conn = DriverManager.getConnection(getUrl(), props);
@@ -446,7 +446,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
     @Test
     public void testMultipleSpans() throws Exception {
 
-        LOG.info("testMultipleSpans TableName: " + tracingTableName);
+        LOGGER.info("testMultipleSpans TableName: " + tracingTableName);
 
         Connection conn = getConnectionWithoutTracing();
         latch = new CountDownLatch(4);
@@ -510,7 +510,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
         Iterator<SpanInfo> spanIter = trace.spans.iterator();
         for (Span span : spans) {
             SpanInfo spanInfo = spanIter.next();
-            LOG.info("Checking span:\n" + spanInfo);
+            LOGGER.info("Checking span:\n" + spanInfo);
 
             long parentId = span.getParentId();
             if(parentId == Span.ROOT_SPAN_ID) {
@@ -551,7 +551,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
         outer: while (retries < MAX_RETRIES) {
             Collection<TraceHolder> traces = reader.readAll(100);
             for (TraceHolder trace : traces) {
-                LOG.info("Got trace: " + trace);
+                LOGGER.info("Got trace: " + trace);
                 found = checker.foundTrace(trace);
                 if (found) {
                     break outer;
@@ -563,7 +563,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
                     }
                 }
             }
-            LOG.info("======  Waiting for tracing updates to be propagated ========");
+            LOGGER.info("======  Waiting for tracing updates to be propagated ========");
             Thread.sleep(1000);
             retries++;
         }
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcSchedulerFactory.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcSchedulerFactory.java
index a697382..6d82c7a 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcSchedulerFactory.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcSchedulerFactory.java
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hbase.ipc;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HConstants;
@@ -29,7 +27,8 @@ import org.apache.hadoop.hbase.regionserver.RpcSchedulerFactory;
 import org.apache.hadoop.hbase.regionserver.SimpleRpcSchedulerFactory;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
-
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import com.google.common.base.Preconditions;
 
 /**
@@ -38,7 +37,7 @@ import com.google.common.base.Preconditions;
  */
 public class PhoenixRpcSchedulerFactory implements RpcSchedulerFactory {
 
-    private static final Log LOG = LogFactory.getLog(PhoenixRpcSchedulerFactory.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixRpcSchedulerFactory.class);
 
     private static final String VERSION_TOO_OLD_FOR_INDEX_RPC =
             "Running an older version of HBase (less than 0.98.4), Phoenix index RPC handling cannot be enabled.";
@@ -51,7 +50,7 @@ public class PhoenixRpcSchedulerFactory implements RpcSchedulerFactory {
             // happens in <=0.98.4 where the scheduler factory is not visible
             delegate = new SimpleRpcSchedulerFactory().create(conf, priorityFunction, abortable);
         } catch (IllegalAccessError e) {
-            LOG.fatal(VERSION_TOO_OLD_FOR_INDEX_RPC);
+            LOGGER.error(VERSION_TOO_OLD_FOR_INDEX_RPC);
             throw e;
         }
 
@@ -64,7 +63,8 @@ public class PhoenixRpcSchedulerFactory implements RpcSchedulerFactory {
 
         // validate index and metadata priorities are not the same
         Preconditions.checkArgument(indexPriority != metadataPriority, "Index and Metadata priority must not be same "+ indexPriority);
-        LOG.info("Using custom Phoenix Index RPC Handling with index rpc priority " + indexPriority + " and metadata rpc priority " + metadataPriority);
+        LOGGER.info("Using custom Phoenix Index RPC Handling with index rpc priority "
+                + indexPriority + " and metadata rpc priority " + metadataPriority);
 
         PhoenixRpcScheduler scheduler =
                 new PhoenixRpcScheduler(conf, delegate, indexPriority, metadataPriority);
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
index d77a30f..ab670d3 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
@@ -23,8 +23,6 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CellUtil;
@@ -62,13 +60,15 @@ import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.MetaDataUtil;
 import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.RepairUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Lists;
 
 public class IndexHalfStoreFileReaderGenerator extends BaseRegionObserver {
     
     private static final String LOCAL_INDEX_AUTOMATIC_REPAIR = "local.index.automatic.repair";
-    public static final Log LOG = LogFactory.getLog(IndexHalfStoreFileReaderGenerator.class);
+    public static final Logger LOGGER = LoggerFactory.getLogger(IndexHalfStoreFileReaderGenerator.class);
 
     @Override
     public Reader preStoreFileReaderOpen(ObserverContext<RegionCoprocessorEnvironment> ctx,
@@ -196,10 +196,10 @@ public class IndexHalfStoreFileReaderGenerator extends BaseRegionObserver {
         if (!store.hasReferences()) {
             InternalScanner repairScanner = null;
             if (request.isMajor() && (!RepairUtil.isLocalIndexStoreFilesConsistent(c.getEnvironment(), store))) {
-                LOG.info("we have found inconsistent data for local index for region:"
+                LOGGER.info("we have found inconsistent data for local index for region:"
                         + c.getEnvironment().getRegion().getRegionInfo());
                 if (c.getEnvironment().getConfiguration().getBoolean(LOCAL_INDEX_AUTOMATIC_REPAIR, true)) {
-                    LOG.info("Starting automatic repair of local Index for region:"
+                    LOGGER.info("Starting automatic repair of local Index for region:"
                             + c.getEnvironment().getRegion().getRegionInfo());
                     repairScanner = getRepairScanner(c.getEnvironment(), store);
                 }
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/BinaryCompatibleBaseDecoder.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/BinaryCompatibleBaseDecoder.java
index 80f2dd2..799357d 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/BinaryCompatibleBaseDecoder.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/BinaryCompatibleBaseDecoder.java
@@ -24,11 +24,10 @@ import java.io.PushbackInputStream;
 
 import javax.annotation.Nonnull;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.codec.Codec;
-
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 /**
  * This class is a copy paste version of org.apache.hadoop.hbase.codec.BaseDecoder class. 
  * This class is meant to be used in {@link IndexedWALEditCodec} when runtime version of
@@ -36,7 +35,7 @@ import org.apache.hadoop.hbase.codec.Codec;
  * HBASE-14501. See PHOENIX-2629 and PHOENIX-2636 for details.
  */
 public abstract class BinaryCompatibleBaseDecoder implements Codec.Decoder {
-  protected static final Log LOG = LogFactory.getLog(BinaryCompatibleBaseDecoder.class);
+  protected static final Logger LOGGER = LoggerFactory.getLogger(BinaryCompatibleBaseDecoder.class);
 
   protected final InputStream in;
   private Cell current = null;
@@ -79,11 +78,11 @@ public abstract class BinaryCompatibleBaseDecoder implements Codec.Decoder {
     try {
       isEof = this.in.available() == 0;
     } catch (Throwable t) {
-      LOG.trace("Error getting available for error message - ignoring", t);
+      LOGGER.trace("Error getting available for error message - ignoring", t);
     }
     if (!isEof) throw ioEx;
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("Partial cell read caused by EOF", ioEx);
+    if (LOGGER.isTraceEnabled()) {
+      LOGGER.trace("Partial cell read caused by EOF", ioEx);
     }
     EOFException eofEx = new EOFException("Partial cell read");
     eofEx.initCause(ioEx);
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java b/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
index 5e284bd..bc67674 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
@@ -39,8 +39,6 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.client.HTableInterface;
@@ -74,6 +72,8 @@ import org.apache.phoenix.util.Closeables;
 import org.apache.phoenix.util.SQLCloseable;
 import org.apache.phoenix.util.SQLCloseables;
 import org.apache.phoenix.util.ScanUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * 
@@ -85,7 +85,7 @@ import org.apache.phoenix.util.ScanUtil;
 public class ServerCacheClient {
     public static final int UUID_LENGTH = Bytes.SIZEOF_LONG;
     public static final byte[] KEY_IN_FIRST_REGION = new byte[]{0};
-    private static final Log LOG = LogFactory.getLog(ServerCacheClient.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(ServerCacheClient.class);
     private static final Random RANDOM = new Random();
 	public static final String HASH_JOIN_SERVER_CACHE_RESEND_PER_SERVER = "hash.join.server.cache.resend.per.server";
     private final PhoenixConnection connection;
@@ -254,7 +254,9 @@ public class ServerCacheClient {
                                 cacheUsingTable.getIndexType() == IndexType.LOCAL)) {  
                     // Call RPC once per server
                     servers.add(entry);
-                    if (LOG.isDebugEnabled()) {LOG.debug(addCustomAnnotations("Adding cache entry to be sent for " + entry, connection));}
+                    if (LOGGER.isDebugEnabled()) {
+                        LOGGER.debug(addCustomAnnotations("Adding cache entry to be sent for " + entry, connection));
+                    }
                     final byte[] key = getKeyInRegion(entry.getRegionInfo().getStartKey());
                     final HTableInterface htable = services.getTable(cacheUsingTable.getPhysicalName().getBytes());
                     closeables.add(htable);
@@ -281,7 +283,10 @@ public class ServerCacheClient {
                         }
                     }));
                 } else {
-                    if (LOG.isDebugEnabled()) {LOG.debug(addCustomAnnotations("NOT adding cache entry to be sent for " + entry + " since one already exists for that entry", connection));}
+                    if (LOGGER.isDebugEnabled()) {
+                        LOGGER.debug(addCustomAnnotations("NOT adding cache entry to be sent for " + entry +
+                                " since one already exists for that entry", connection));
+                    }
                 }
             }
             
@@ -320,7 +325,9 @@ public class ServerCacheClient {
                 }
             }
         }
-        if (LOG.isDebugEnabled()) {LOG.debug(addCustomAnnotations("Cache " + cacheId + " successfully added to servers.", connection));}
+        if (LOGGER.isDebugEnabled()) {
+            LOGGER.debug(addCustomAnnotations("Cache " + cacheId + " successfully added to servers.", connection));
+        }
         return hashCacheSpec;
     }
     
@@ -346,8 +353,8 @@ public class ServerCacheClient {
              * through the current metadata boundaries and remove the cache once for each server that we originally sent
              * to.
              */
-            if (LOG.isDebugEnabled()) {
-                LOG.debug(addCustomAnnotations("Removing Cache " + cacheId + " from servers.", connection));
+            if (LOGGER.isDebugEnabled()) {
+                LOGGER.debug(addCustomAnnotations("Removing Cache " + cacheId + " from servers.", connection));
             }
             for (HRegionLocation entry : locations) {
              // Call once per server
@@ -390,13 +397,13 @@ public class ServerCacheClient {
                         remainingOnServers.remove(entry);
                     } catch (Throwable t) {
                         lastThrowable = t;
-                        LOG.error(addCustomAnnotations("Error trying to remove hash cache for " + entry, connection),
+                        LOGGER.error(addCustomAnnotations("Error trying to remove hash cache for " + entry, connection),
                                 t);
                     }
                 }
             }
             if (!remainingOnServers.isEmpty()) {
-                LOG.warn(addCustomAnnotations("Unable to remove hash cache for " + remainingOnServers, connection),
+                LOGGER.warn(addCustomAnnotations("Unable to remove hash cache for " + remainingOnServers, connection),
                         lastThrowable);
             }
         } finally {
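
As an aside for this and similar hunks: slf4j also offers parameterized messages, which defer string rendering until the level is enabled. The commit itself keeps the explicit isDebugEnabled() guards, which remain worthwhile when computing an argument (for example addCustomAnnotations) is itself costly. A hedged sketch with hypothetical names:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Hypothetical example, not part of the Phoenix code base.
    public class CacheLoggingExample {
        private static final Logger LOGGER = LoggerFactory.getLogger(CacheLoggingExample.class);

        void reportAdded(Object cacheId) {
            // The {} placeholder is only rendered if DEBUG is enabled, so no guard
            // is needed when the arguments are cheap to obtain.
            LOGGER.debug("Cache {} successfully added to servers.", cacheId);
        }

        void reportRemoved(Object cacheId, ExpensiveFormatter formatter) {
            // Keep the guard when building the message argument does real work.
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug(formatter.annotate("Removing Cache " + cacheId + " from servers."));
            }
        }

        // Stand-in for a costly formatting helper such as addCustomAnnotations.
        interface ExpensiveFormatter {
            String annotate(String message);
        }
    }
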
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/call/CallRunner.java b/phoenix-core/src/main/java/org/apache/phoenix/call/CallRunner.java
index 7dc90f8..f3b568b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/call/CallRunner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/call/CallRunner.java
@@ -19,9 +19,8 @@ package org.apache.phoenix.call;
 
 import java.util.concurrent.Callable;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 /**
  * Helper class to run a Call with a set of {@link CallWrapper}
  */
@@ -38,7 +37,7 @@ public class CallRunner {
         public V call() throws E;
     }
 
-    private static final Log LOG = LogFactory.getLog(CallRunner.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(CallRunner.class);
 
     private CallRunner() {
         // no ctor for util class
@@ -57,7 +56,7 @@ public class CallRunner {
                 try {
                     wrappers[i].after();
                 } catch (Exception e) {
-                    LOG.error("Failed to complete wrapper " + wrappers[i], e);
+                    LOGGER.error("Failed to complete wrapper " + wrappers[i], e);
                 }
             }
         }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
index 8c58213..468b7bf 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
@@ -266,7 +266,8 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
                     IndexUtil.incrementCounterForIndex(conn, indexName, -PENDING_DISABLE_INACTIVE_STATE_COUNT);
                     indexesIncremented.add(index);
                 }catch(Exception e) {
-                    LOG.warn("Decrement  of -" + PENDING_DISABLE_INACTIVE_STATE_COUNT +" for index :" + index.getName().getString() + "of table: " + dataPTable.getName().getString(), e);
+                    LOG.warn("Decrement  of -" + PENDING_DISABLE_INACTIVE_STATE_COUNT +" for index :" +
+                            index.getName().getString() + "of table: " + dataPTable.getName().getString(), e);
                 }
             }
             return indexesIncremented;
@@ -474,7 +475,7 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
 								long disabledTimeStampVal = index.getIndexDisableTimestamp();
 								if (disabledTimeStampVal != 0) {
                                     if (signOfDisableTimeStamp != 0 && signOfDisableTimeStamp != Long.signum(disabledTimeStampVal)) {
-                                        LOG.warn("Found unexpected mix of signs with INDEX_DISABLE_TIMESTAMP for " + dataPTable.getName().getString() + " with " + indexesToPartiallyRebuild); 
+                                        LOG.warn("Found unexpected mix of signs with INDEX_DISABLE_TIMESTAMP for " + dataPTable.getName().getString() + " with " + indexesToPartiallyRebuild);
                                     }
 								    signOfDisableTimeStamp = Long.signum(disabledTimeStampVal);
 	                                disabledTimeStampVal = Math.abs(disabledTimeStampVal);
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
index 1d2dc55..606f8f3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
@@ -26,8 +26,6 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.AuthUtil;
 import org.apache.hadoop.hbase.CompoundConfiguration;
@@ -70,6 +68,8 @@ import org.apache.phoenix.schema.PIndexState;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.util.MetaDataUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.protobuf.ByteString;
 import com.google.protobuf.RpcCallback;
@@ -82,9 +82,9 @@ public class PhoenixAccessController extends BaseMetaDataEndpointObserver {
     private boolean hbaseAccessControllerEnabled;
     private UserProvider userProvider;
     private AccessChecker accessChecker;
-    public static final Log LOG = LogFactory.getLog(PhoenixAccessController.class);
-    private static final Log AUDITLOG =
-            LogFactory.getLog("SecurityLogger."+PhoenixAccessController.class.getName());
+    public static final Logger LOGGER = LoggerFactory.getLogger(PhoenixAccessController.class);
+    private static final Logger AUDITLOG =
+            LoggerFactory.getLogger("SecurityLogger."+PhoenixAccessController.class.getName());
 
     private List<BaseMasterAndRegionObserver> getAccessControllers() throws IOException {
         if (accessControllers == null) {
@@ -123,7 +123,7 @@ public class PhoenixAccessController extends BaseMetaDataEndpointObserver {
         this.accessCheckEnabled = conf.getBoolean(QueryServices.PHOENIX_ACLS_ENABLED,
                 QueryServicesOptions.DEFAULT_PHOENIX_ACLS_ENABLED);
         if (!this.accessCheckEnabled) {
-            LOG.warn("PhoenixAccessController has been loaded with authorization checks disabled.");
+            LOGGER.warn("PhoenixAccessController has been loaded with authorization checks disabled.");
         }
         if (env instanceof PhoenixMetaDataControllerEnvironment) {
             this.env = (PhoenixMetaDataControllerEnvironment)env;
@@ -601,8 +601,8 @@ public class PhoenixAccessController extends BaseMetaDataEndpointObserver {
                 }
               }
             }
-        } else if (LOG.isDebugEnabled()) {
-            LOG.debug("No permissions found for table=" + table + " or namespace=" + table.getNamespaceAsString());
+        } else if (LOGGER.isDebugEnabled()) {
+            LOGGER.debug("No permissions found for table=" + table + " or namespace=" + table.getNamespaceAsString());
         }
         return false;
     }
@@ -627,7 +627,7 @@ public class PhoenixAccessController extends BaseMetaDataEndpointObserver {
     }
 
     private static final class Superusers {
-        private static final Log LOG = LogFactory.getLog(Superusers.class);
+        private static final Logger LOGGER = LoggerFactory.getLogger(Superusers.class);
 
         /** Configuration key for superusers */
         public static final String SUPERUSER_CONF_KEY = org.apache.hadoop.hbase.security.Superusers.SUPERUSER_CONF_KEY; // Not getting a name
@@ -655,8 +655,8 @@ public class PhoenixAccessController extends BaseMetaDataEndpointObserver {
                     + "authorization checks for internal operations will not work correctly!");
             }
 
-            if (LOG.isTraceEnabled()) {
-                LOG.trace("Current user name is " + systemUser.getShortName());
+            if (LOGGER.isTraceEnabled()) {
+                LOGGER.trace("Current user name is " + systemUser.getShortName());
             }
             String currentUser = systemUser.getShortName();
             String[] superUserList = conf.getStrings(SUPERUSER_CONF_KEY, new String[0]);
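
The AUDITLOG change above uses LoggerFactory.getLogger(String), which looks a logger up by name rather than by class, keeping the existing "SecurityLogger." prefix so audit output can still be routed separately by logger name in the logging configuration. A small hedged illustration with a hypothetical class name:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Hypothetical example, not part of the Phoenix code base.
    public class AuditLoggingExample {
        // Class-based logger for normal diagnostics.
        private static final Logger LOGGER = LoggerFactory.getLogger(AuditLoggingExample.class);
        // Name-based logger so a "SecurityLogger.*" category can be given its own
        // appender and level, independent of the class logger.
        private static final Logger AUDITLOG =
                LoggerFactory.getLogger("SecurityLogger." + AuditLoggingExample.class.getName());

        void recordAccessCheck(String user, String table) {
            AUDITLOG.info("Access check for user {} on table {}", user, table);
        }
    }
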
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
index bee9bde..2180c12 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
@@ -27,8 +27,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -81,6 +79,8 @@ import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.LogUtil;
 import org.apache.phoenix.util.SQLCloseables;
 import org.apache.phoenix.util.ScanUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Lists;
@@ -95,7 +95,7 @@ import com.google.common.collect.Lists;
  * @since 0.1
  */
 public abstract class BaseQueryPlan implements QueryPlan {
-	private static final Log LOG = LogFactory.getLog(BaseQueryPlan.class);
+	private static final Logger LOGGER = LoggerFactory.getLogger(BaseQueryPlan.class);
     protected static final long DEFAULT_ESTIMATED_SIZE = 10 * 1024; // 10 K
     
     protected final TableRef tableRef;
@@ -357,13 +357,13 @@ public abstract class BaseQueryPlan implements QueryPlan {
             }
         }
         
-        if (LOG.isDebugEnabled()) {
-        	LOG.debug(LogUtil.addCustomAnnotations("Scan ready for iteration: " + scan, connection));
+        if (LOGGER.isDebugEnabled()) {
+        	LOGGER.debug(LogUtil.addCustomAnnotations("Scan ready for iteration: " + scan, connection));
         }
         
         ResultIterator iterator =  newIterator(scanGrouper, scan, caches);
-        if (LOG.isDebugEnabled()) {
-        	LOG.debug(LogUtil.addCustomAnnotations("Iterator ready: " + iterator, connection));
+        if (LOGGER.isDebugEnabled()) {
+        	LOGGER.debug(LogUtil.addCustomAnnotations("Iterator ready: " + iterator, connection));
         }
 
         // wrap the iterator so we start/end tracing as we expect
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
index bfe089d..92fb5b7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
@@ -33,8 +33,6 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Future;
 import java.util.concurrent.atomic.AtomicLong;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.phoenix.cache.ServerCacheClient.ServerCache;
@@ -82,13 +80,15 @@ import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.types.PVarbinary;
 import org.apache.phoenix.util.CostUtil;
 import org.apache.phoenix.util.SQLCloseables;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
 
 public class HashJoinPlan extends DelegateQueryPlan {
-    private static final Log LOG = LogFactory.getLog(HashJoinPlan.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(HashJoinPlan.class);
 
     private final SelectStatement statement;
     private final HashJoinInfo joinInfo;
@@ -503,7 +503,7 @@ public class HashJoinPlan extends DelegateQueryPlan {
                     boolean isSet = parent.firstJobEndTime.compareAndSet(0, endTime);
                     if (!isSet && (endTime
                             - parent.firstJobEndTime.get()) > parent.maxServerCacheTimeToLive) {
-                        LOG.warn(addCustomAnnotations(
+                        LOGGER.warn(addCustomAnnotations(
                             "Hash plan [" + index
                                     + "] execution seems too slow. Earlier hash cache(s) might have expired on servers.",
                             parent.delegate.getContext().getConnection()));
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/CollationKeyFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/CollationKeyFunction.java
index 9d48feb..6644a7e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/CollationKeyFunction.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/CollationKeyFunction.java
@@ -25,8 +25,6 @@ import java.util.List;
 import java.util.Locale;
 
 import org.apache.commons.lang.BooleanUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.phoenix.expression.Expression;
 import org.apache.phoenix.parse.FunctionParseNode;
@@ -37,6 +35,8 @@ import org.apache.phoenix.schema.types.PInteger;
 import org.apache.phoenix.schema.types.PVarbinary;
 import org.apache.phoenix.schema.types.PVarchar;
 import org.apache.phoenix.util.VarBinaryFormatter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.force.db.i18n.LinguisticSort;
 import com.force.i18n.LocaleUtils;
@@ -87,7 +87,7 @@ import com.force.i18n.LocaleUtils;
 		@FunctionParseNode.Argument(allowedTypes = { PInteger.class }, defaultValue = "null", isConstant = true) })
 public class CollationKeyFunction extends ScalarFunction {
 
-	private static final Log LOG = LogFactory.getLog(CollationKeyFunction.class);
+	private static final Logger LOGGER = LoggerFactory.getLogger(CollationKeyFunction.class);
 
 	public static final String NAME = "COLLATION_KEY";
 
@@ -114,8 +114,8 @@ public class CollationKeyFunction extends ScalarFunction {
 			return false;
 		}
 		String inputString = (String) PVarchar.INSTANCE.toObject(ptr, expression.getSortOrder());
-		if (LOG.isTraceEnabled()) {
-			LOG.trace("CollationKey inputString: " + inputString);
+		if (LOGGER.isTraceEnabled()) {
+			LOGGER.trace("CollationKey inputString: " + inputString);
 		}
 
 		if (inputString == null) {
@@ -124,8 +124,8 @@ public class CollationKeyFunction extends ScalarFunction {
 
 		byte[] collationKeyByteArray = collator.getCollationKey(inputString).toByteArray();
 
-		if (LOG.isTraceEnabled()) {
-			LOG.trace("CollationKey bytes: " + VarBinaryFormatter.INSTANCE.format(collationKeyByteArray));
+		if (LOGGER.isTraceEnabled()) {
+			LOGGER.trace("CollationKey bytes: " + VarBinaryFormatter.INSTANCE.format(collationKeyByteArray));
 		}
 
 		ptr.set(collationKeyByteArray);
@@ -138,19 +138,19 @@ public class CollationKeyFunction extends ScalarFunction {
 		Integer collatorStrength = getLiteralValue(3, Integer.class);
 		Integer collatorDecomposition = getLiteralValue(4, Integer.class);
 
-		if (LOG.isTraceEnabled()) {
+		if (LOGGER.isTraceEnabled()) {
 			StringBuilder logInputsMessage = new StringBuilder();
 			logInputsMessage.append("Input (literal) arguments:").append("localeISOCode: " + localeISOCode)
 					.append(", useSpecialUpperCaseCollator: " + useSpecialUpperCaseCollator)
 					.append(", collatorStrength: " + collatorStrength)
 					.append(", collatorDecomposition: " + collatorDecomposition);
-			LOG.trace(logInputsMessage);
+			LOGGER.trace(logInputsMessage.toString());
 		}
 
 		Locale locale = LocaleUtils.get().getLocaleByIsoCode(localeISOCode);
 
-		if (LOG.isTraceEnabled()) {
-			LOG.trace(String.format("Locale: " + locale.toLanguageTag()));
+		if (LOGGER.isTraceEnabled()) {
+			LOGGER.trace(String.format("Locale: " + locale.toLanguageTag()));
 		}
 
 		LinguisticSort linguisticSort = LinguisticSort.get(locale);
@@ -166,8 +166,8 @@ public class CollationKeyFunction extends ScalarFunction {
 			collator.setDecomposition(collatorDecomposition);
 		}
 
-		if (LOG.isTraceEnabled()) {
-			LOG.trace(String.format("Collator: [strength: %d, decomposition: %d], Special-Upper-Case: %s",
+		if (LOGGER.isTraceEnabled()) {
+			LOGGER.trace(String.format("Collator: [strength: %d, decomposition: %d], Special-Upper-Case: %s",
 					collator.getStrength(), collator.getDecomposition(),
 					BooleanUtils.isTrue(useSpecialUpperCaseCollator)));
 		}
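
One behavioral detail in this hunk: commons-logging accepts any Object as a message, while the slf4j methods take a String, hence the added logInputsMessage.toString(). A minimal hedged sketch of the same point, with made-up names:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Hypothetical example, not part of the Phoenix code base.
    public class TraceMessageExample {
        private static final Logger LOGGER = LoggerFactory.getLogger(TraceMessageExample.class);

        void traceInputs(String localeISOCode, int collatorStrength) {
            if (LOGGER.isTraceEnabled()) {
                StringBuilder message = new StringBuilder("Input (literal) arguments:")
                        .append(" localeISOCode: ").append(localeISOCode)
                        .append(", collatorStrength: ").append(collatorStrength);
                // slf4j's trace(String) does not accept a StringBuilder directly,
                // so the builder must be converted explicitly.
                LOGGER.trace(message.toString());
            }
        }
    }
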
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
index 1c99588..1c036ac 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
@@ -29,8 +29,6 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
@@ -88,6 +86,8 @@ import org.apache.phoenix.util.EnvironmentEdgeManager;
 import org.apache.phoenix.util.ScanUtil;
 import org.apache.phoenix.util.ServerUtil;
 import org.apache.phoenix.util.ServerUtil.ConnectionType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Lists;
 import com.google.common.collect.Multimap;
@@ -115,7 +115,7 @@ import com.google.common.collect.Multimap;
  */
 public class Indexer extends BaseRegionObserver {
 
-  private static final Log LOG = LogFactory.getLog(Indexer.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(Indexer.class);
   private static final OperationStatus IGNORE = new OperationStatus(OperationStatusCode.SUCCESS);
   private static final OperationStatus NOWRITE = new OperationStatus(OperationStatusCode.SUCCESS);
   
@@ -233,7 +233,7 @@ public class Indexer extends BaseRegionObserver {
                 StoreFailuresInCachePolicy.class, IndexFailurePolicy.class);
           IndexFailurePolicy policy =
               policyClass.getConstructor(PerRegionIndexWriteCache.class).newInstance(failedIndexEdits);
-          LOG.debug("Setting up recovery writter with failure policy: " + policy.getClass());
+          LOGGER.debug("Setting up recovery writter with failure policy: " + policy.getClass());
           recoveryWriter =
               new RecoveryIndexWriter(policy, indexWriterEnv, serverName + "-recovery-writer");
         } catch (Exception ex) {
@@ -242,7 +242,7 @@ public class Indexer extends BaseRegionObserver {
       } catch (NoSuchMethodError ex) {
           disabled = true;
           super.start(e);
-          LOG.error("Must be too early a version of HBase. Disabled coprocessor ", ex);
+          LOGGER.error("Must be too early a version of HBase. Disabled coprocessor ", ex);
       }
   }
 
@@ -324,8 +324,8 @@ public class Indexer extends BaseRegionObserver {
       } finally {
           long duration = EnvironmentEdgeManager.currentTimeMillis() - start;
           if (duration >= slowIndexPrepareThreshold) {
-              if (LOG.isDebugEnabled()) {
-                  LOG.debug(getCallTooSlowMessage("preIncrementAfterRowLock", duration, slowPreIncrementThreshold));
+              if (LOGGER.isDebugEnabled()) {
+                  LOGGER.debug(getCallTooSlowMessage("preIncrementAfterRowLock", duration, slowPreIncrementThreshold));
               }
               metricSource.incrementSlowDuplicateKeyCheckCalls();
           }
@@ -349,8 +349,8 @@ public class Indexer extends BaseRegionObserver {
       } finally {
           long duration = EnvironmentEdgeManager.currentTimeMillis() - start;
           if (duration >= slowIndexPrepareThreshold) {
-              if (LOG.isDebugEnabled()) {
-                  LOG.debug(getCallTooSlowMessage("preBatchMutate", duration, slowIndexPrepareThreshold));
+              if (LOGGER.isDebugEnabled()) {
+                  LOGGER.debug(getCallTooSlowMessage("preBatchMutate", duration, slowIndexPrepareThreshold));
               }
               metricSource.incrementNumSlowIndexPrepareCalls();
           }
@@ -503,8 +503,8 @@ public class Indexer extends BaseRegionObserver {
 
           long duration = EnvironmentEdgeManager.currentTimeMillis() - start;
           if (duration >= slowIndexPrepareThreshold) {
-              if (LOG.isDebugEnabled()) {
-                  LOG.debug(getCallTooSlowMessage("indexPrepare", duration, slowIndexPrepareThreshold));
+              if (LOGGER.isDebugEnabled()) {
+                  LOGGER.debug(getCallTooSlowMessage("indexPrepare", duration, slowIndexPrepareThreshold));
               }
               metricSource.incrementNumSlowIndexPrepareCalls();
           }
@@ -575,8 +575,8 @@ public class Indexer extends BaseRegionObserver {
            removeBatchMutateContext(c);
            long duration = EnvironmentEdgeManager.currentTimeMillis() - start;
            if (duration >= slowIndexWriteThreshold) {
-               if (LOG.isDebugEnabled()) {
-                   LOG.debug(getCallTooSlowMessage("postBatchMutateIndispensably", duration, slowIndexWriteThreshold));
+               if (LOGGER.isDebugEnabled()) {
+                   LOGGER.debug(getCallTooSlowMessage("postBatchMutateIndispensably", duration, slowIndexWriteThreshold));
                }
                metricSource.incrementNumSlowIndexWriteCalls();
            }
@@ -615,8 +615,8 @@ public class Indexer extends BaseRegionObserver {
 
           long duration = EnvironmentEdgeManager.currentTimeMillis() - start;
           if (duration >= slowIndexWriteThreshold) {
-              if (LOG.isDebugEnabled()) {
-                  LOG.debug(getCallTooSlowMessage("indexWrite", duration, slowIndexWriteThreshold));
+              if (LOGGER.isDebugEnabled()) {
+                  LOGGER.debug(getCallTooSlowMessage("indexWrite", duration, slowIndexWriteThreshold));
               }
               metricSource.incrementNumSlowIndexWriteCalls();
           }
@@ -674,7 +674,7 @@ public class Indexer extends BaseRegionObserver {
           return;
         }
 
-        LOG.info("Found some outstanding index updates that didn't succeed during"
+        LOGGER.info("Found some outstanding index updates that didn't succeed during"
                 + " WAL replay - attempting to replay now.");
 
         // do the usual writer stuff, killing the server again, if we can't manage to make the index
@@ -682,14 +682,14 @@ public class Indexer extends BaseRegionObserver {
         try {
             writer.writeAndKillYourselfOnFailure(updates, true, ScanUtil.UNKNOWN_CLIENT_VERSION);
         } catch (IOException e) {
-                LOG.error("During WAL replay of outstanding index updates, "
+                LOGGER.error("During WAL replay of outstanding index updates, "
                         + "Exception is thrown instead of killing server during index writing", e);
         }
     } finally {
          long duration = EnvironmentEdgeManager.currentTimeMillis() - start;
          if (duration >= slowPostOpenThreshold) {
-             if (LOG.isDebugEnabled()) {
-                 LOG.debug(getCallTooSlowMessage("postOpen", duration, slowPostOpenThreshold));
+             if (LOGGER.isDebugEnabled()) {
+                 LOGGER.debug(getCallTooSlowMessage("postOpen", duration, slowPostOpenThreshold));
              }
              metricSource.incrementNumSlowPostOpenCalls();
          }
@@ -722,8 +722,8 @@ public class Indexer extends BaseRegionObserver {
       } finally {
           long duration = EnvironmentEdgeManager.currentTimeMillis() - start;
           if (duration >= slowPreWALRestoreThreshold) {
-              if (LOG.isDebugEnabled()) {
-                  LOG.debug(getCallTooSlowMessage("preWALRestore", duration, slowPreWALRestoreThreshold));
+              if (LOGGER.isDebugEnabled()) {
+                  LOGGER.debug(getCallTooSlowMessage("preWALRestore", duration, slowPreWALRestoreThreshold));
               }
               metricSource.incrementNumSlowPreWALRestoreCalls();
           }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/LockManager.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/LockManager.java
index 4cc7b23..cb5fd22 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/LockManager.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/LockManager.java
@@ -25,12 +25,12 @@ import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
 import org.apache.htrace.Trace;
 import org.apache.htrace.TraceScope;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * 
@@ -41,7 +41,7 @@ import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
  *
  */
 public class LockManager {
-    private static final Log LOG = LogFactory.getLog(LockManager.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(LockManager.class);
 
     private final ConcurrentHashMap<ImmutableBytesPtr, RowLockContext> lockedRows =
             new ConcurrentHashMap<ImmutableBytesPtr, RowLockContext>();
@@ -96,7 +96,7 @@ public class LockManager {
             success = true;
             return result;
         } catch (InterruptedException ie) {
-            LOG.warn("Thread interrupted waiting for lock on row: " + rowKey);
+            LOGGER.warn("Thread interrupted waiting for lock on row: " + rowKey);
             InterruptedIOException iie = new InterruptedIOException();
             iie.initCause(ie);
             if (traceScope != null) {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/BaseIndexBuilder.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/BaseIndexBuilder.java
index 59b7619..62e5f4d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/BaseIndexBuilder.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/BaseIndexBuilder.java
@@ -14,8 +14,6 @@ import java.lang.reflect.Constructor;
 import java.util.Collection;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Increment;
@@ -27,6 +25,8 @@ import org.apache.phoenix.coprocessor.BaseScannerRegionObserver.ReplayWrite;
 import org.apache.phoenix.hbase.index.covered.IndexCodec;
 import org.apache.phoenix.hbase.index.covered.IndexMetaData;
 import org.apache.phoenix.hbase.index.covered.NonTxIndexBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Basic implementation of the {@link IndexBuilder} that doesn't do any actual work of indexing.
@@ -38,7 +38,7 @@ import org.apache.phoenix.hbase.index.covered.NonTxIndexBuilder;
  */
 public abstract class BaseIndexBuilder implements IndexBuilder {
     public static final String CODEC_CLASS_NAME_KEY = "org.apache.hadoop.hbase.index.codec.class";
-    private static final Log LOG = LogFactory.getLog(BaseIndexBuilder.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(BaseIndexBuilder.class);
 
     protected boolean stopped;
     protected RegionCoprocessorEnvironment env;
@@ -120,7 +120,7 @@ public abstract class BaseIndexBuilder implements IndexBuilder {
 
     @Override
     public void stop(String why) {
-        LOG.debug("Stopping because: " + why);
+        LOGGER.debug("Stopping because: " + why);
         this.stopped = true;
     }
 
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/IndexBuildManager.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/IndexBuildManager.java
index 6130093..6b7e416 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/IndexBuildManager.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/IndexBuildManager.java
@@ -22,8 +22,6 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.Stoppable;
@@ -37,13 +35,15 @@ import org.apache.phoenix.coprocessor.BaseScannerRegionObserver.ReplayWrite;
 import org.apache.phoenix.hbase.index.Indexer;
 import org.apache.phoenix.hbase.index.covered.IndexMetaData;
 import org.apache.phoenix.index.PhoenixIndexMetaData;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Manage the building of index updates from primary table updates.
  */
 public class IndexBuildManager implements Stoppable {
 
-  private static final Log LOG = LogFactory.getLog(IndexBuildManager.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(IndexBuildManager.class);
   private final IndexBuilder delegate;
   private boolean stopped;
 
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilder.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilder.java
index 97ac30d..6945ea7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilder.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilder.java
@@ -13,8 +13,6 @@ import java.io.IOException;
 import java.util.Collection;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
@@ -27,6 +25,8 @@ import org.apache.phoenix.hbase.index.covered.data.LocalHBaseState;
 import org.apache.phoenix.hbase.index.covered.data.LocalTable;
 import org.apache.phoenix.hbase.index.covered.update.ColumnTracker;
 import org.apache.phoenix.hbase.index.covered.update.IndexUpdateManager;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Build covered indexes for phoenix updates.
@@ -38,7 +38,7 @@ import org.apache.phoenix.hbase.index.covered.update.IndexUpdateManager;
  * bloated index that needs to be cleaned up by a background process.
  */
 public class NonTxIndexBuilder extends BaseIndexBuilder {
-    private static final Log LOG = LogFactory.getLog(NonTxIndexBuilder.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(NonTxIndexBuilder.class);
 
     protected LocalHBaseState localTable;
 
@@ -57,8 +57,8 @@ public class NonTxIndexBuilder extends BaseIndexBuilder {
 
         batchMutationAndAddUpdates(manager, state, mutation, indexMetaData);
 
-        if (LOG.isTraceEnabled()) {
-            LOG.trace("Found index updates for Mutation: " + mutation + "\n" + manager);
+        if (LOGGER.isTraceEnabled()) {
+            LOGGER.trace("Found index updates for Mutation: " + mutation + "\n" + manager);
         }
 
         return manager.toMap();
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java
index 0fc9e14..4a62e14 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java
@@ -21,8 +21,6 @@ import java.util.Comparator;
 import java.util.Iterator;
 import java.util.SortedSet;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.KVComparator;
@@ -34,6 +32,8 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.hbase.index.covered.KeyValueStore;
 import org.apache.phoenix.hbase.index.covered.LocalTableState;
 import org.apache.phoenix.hbase.index.scanner.ReseekableScanner;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Like the HBase {@link MemStore}, but without all that extra work around maintaining snapshots and
@@ -74,7 +74,7 @@ import org.apache.phoenix.hbase.index.scanner.ReseekableScanner;
  */
 public class IndexMemStore implements KeyValueStore {
 
-  private static final Log LOG = LogFactory.getLog(IndexMemStore.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(IndexMemStore.class);
   private IndexKeyValueSkipListSet kvset;
   private Comparator<KeyValue> comparator;
 
@@ -113,8 +113,8 @@ public class IndexMemStore implements KeyValueStore {
 
   @Override
   public void add(KeyValue kv, boolean overwrite) {
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("Inserting: " + toString(kv));
+    if (LOGGER.isTraceEnabled()) {
+      LOGGER.trace("Inserting: " + toString(kv));
     }
     // if overwriting, we will always update
     if (!overwrite) {
@@ -124,17 +124,17 @@ public class IndexMemStore implements KeyValueStore {
       kvset.add(kv);
     }
 
-    if (LOG.isTraceEnabled()) {
+    if (LOGGER.isTraceEnabled()) {
       dump();
     }
   }
 
   private void dump() {
-    LOG.trace("Current kv state:\n");
+    LOGGER.trace("Current kv state:\n");
     for (KeyValue kv : this.kvset) {
-      LOG.trace("KV: " + toString(kv));
+      LOGGER.trace("KV: " + toString(kv));
     }
-    LOG.trace("========== END MemStore Dump ==================\n");
+    LOGGER.trace("========== END MemStore Dump ==================\n");
   }
 
   private String toString(KeyValue kv) {
@@ -144,12 +144,12 @@ public class IndexMemStore implements KeyValueStore {
 
   @Override
   public void rollback(KeyValue kv) {
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("Rolling back: " + toString(kv));
+    if (LOGGER.isTraceEnabled()) {
+      LOGGER.trace("Rolling back: " + toString(kv));
     }
     // If the key is in the store, delete it
     this.kvset.remove(kv);
-    if (LOG.isTraceEnabled()) {
+    if (LOGGER.isTraceEnabled()) {
       dump();
     }
   }
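
In IndexMemStore the isTraceEnabled() guard also protects the dump() call, which iterates the whole key-value set; parameterized logging cannot help there, so the explicit guard is the right tool. A hedged sketch with hypothetical names:

    import java.util.List;

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Hypothetical example, not part of the Phoenix code base.
    public class MemStoreDumpExample {
        private static final Logger LOGGER = LoggerFactory.getLogger(MemStoreDumpExample.class);

        void add(String kv, List<String> store) {
            store.add(kv);
            // The guard saves the whole traversal, not just string concatenation,
            // when TRACE is disabled.
            if (LOGGER.isTraceEnabled()) {
                dump(store);
            }
        }

        private void dump(List<String> store) {
            LOGGER.trace("Current kv state:");
            for (String kv : store) {
                LOGGER.trace("KV: {}", kv);
            }
            LOGGER.trace("========== END MemStore Dump ==================");
        }
    }
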
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/BaseTaskRunner.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/BaseTaskRunner.java
index 5cd3fcb..145c95b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/BaseTaskRunner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/BaseTaskRunner.java
@@ -23,9 +23,9 @@ import java.util.concurrent.CancellationException;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Abortable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.ListeningExecutorService;
@@ -41,7 +41,7 @@ import com.google.common.util.concurrent.MoreExecutors;
  */
 public abstract class BaseTaskRunner implements TaskRunner {
 
-  private static final Log LOG = LogFactory.getLog(BaseTaskRunner.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(BaseTaskRunner.class);
   protected ListeningExecutorService writerPool;
   private boolean stopped;
 
@@ -77,7 +77,7 @@ public abstract class BaseTaskRunner implements TaskRunner {
 
   private void logAndNotifyAbort(Exception e, Abortable abort) {
     String msg = "Found a failed task because: " + e.getMessage();
-    LOG.error(msg, e);
+    LOGGER.error(msg, e);
     abort.abort(msg, e.getCause());
   }
 
@@ -118,7 +118,7 @@ public abstract class BaseTaskRunner implements TaskRunner {
     if (this.stopped) {
       return;
     }
-    LOG.info("Shutting down task runner because " + why);
+    LOGGER.info("Shutting down task runner because " + why);
     this.writerPool.shutdownNow();
   }
 
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/QuickFailingTaskRunner.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/QuickFailingTaskRunner.java
index 5b9717e..2df183e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/QuickFailingTaskRunner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/QuickFailingTaskRunner.java
@@ -20,9 +20,8 @@ package org.apache.phoenix.hbase.index.parallel;
 import java.util.List;
 import java.util.concurrent.ExecutorService;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
 
@@ -32,7 +31,7 @@ import com.google.common.util.concurrent.ListenableFuture;
  */
 public class QuickFailingTaskRunner extends BaseTaskRunner {
 
-  static final Log LOG = LogFactory.getLog(QuickFailingTaskRunner.class);
+  static final Logger LOGGER = LoggerFactory.getLogger(QuickFailingTaskRunner.class);
 
   /**
    * @param service thread pool to which {@link Task}s are submitted. This service is then 'owned'
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/TaskBatch.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/TaskBatch.java
index 62e4522..208464e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/TaskBatch.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/TaskBatch.java
@@ -22,9 +22,9 @@ import java.util.Collection;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicBoolean;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Abortable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A group of {@link Task}s. The tasks are all bound together using the same {@link Abortable} (
@@ -32,7 +32,7 @@ import org.apache.hadoop.hbase.Abortable;
  * @param <V> expected result type from all the tasks
  */
 public class TaskBatch<V> implements Abortable {
-  private static final Log LOG = LogFactory.getLog(TaskBatch.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(TaskBatch.class);
   private AtomicBoolean aborted = new AtomicBoolean();
   private List<Task<V>> tasks;
 
@@ -57,7 +57,7 @@ public class TaskBatch<V> implements Abortable {
     if (this.aborted.getAndSet(true)) {
       return;
     }
-    LOG.info("Aborting batch of tasks because " + why);
+    LOGGER.info("Aborting batch of tasks because " + why);
   }
 
   @Override
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/ThreadPoolBuilder.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/ThreadPoolBuilder.java
index 58a976a..bedd495 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/ThreadPoolBuilder.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/ThreadPoolBuilder.java
@@ -17,10 +17,10 @@
  */
 package org.apache.phoenix.hbase.index.parallel;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.util.Pair;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Helper utility to make a thread pool from a configuration based on reasonable defaults and passed
@@ -28,7 +28,7 @@ import org.apache.hadoop.hbase.util.Pair;
  */
 public class ThreadPoolBuilder {
 
-  private static final Log LOG = LogFactory.getLog(ThreadPoolBuilder.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(ThreadPoolBuilder.class);
   private static final long DEFAULT_TIMEOUT = 60;
   private static final int DEFAULT_MAX_THREADS = 1;// is there a better default?
   private Pair<String, Long> timeout;
@@ -72,7 +72,7 @@ public class ThreadPoolBuilder {
       maxThreads =
           key == null ? this.maxThreads.getSecond() : conf.getInt(key, this.maxThreads.getSecond());
     }
-    LOG.trace("Creating pool builder with max " + maxThreads + " threads ");
+    LOGGER.trace("Creating pool builder with max " + maxThreads + " threads ");
     return maxThreads;
   }
 
@@ -84,7 +84,7 @@ public class ThreadPoolBuilder {
           key == null ? this.timeout.getSecond() : conf.getLong(key, this.timeout.getSecond());
     }
 
-    LOG.trace("Creating pool builder with core thread timeout of " + timeout + " seconds ");
+    LOGGER.trace("Creating pool builder with core thread timeout of " + timeout + " seconds ");
     return timeout;
   }
 }
\ No newline at end of file
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/ThreadPoolManager.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/ThreadPoolManager.java
index db3b845..2de0528 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/ThreadPoolManager.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/ThreadPoolManager.java
@@ -27,18 +27,18 @@ import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.util.Threads;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Manage access to thread pools
  */
 public class ThreadPoolManager {
 
-  private static final Log LOG = LogFactory.getLog(ThreadPoolManager.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(ThreadPoolManager.class);
 
   /**
    * Get an executor for the given name, based on the passed {@link Configuration}. If a thread pool
@@ -62,7 +62,7 @@ public class ThreadPoolManager {
     ThreadPoolExecutor pool = (ThreadPoolExecutor) poolCache.get(builder.getName());
     if (pool == null || pool.isTerminating() || pool.isShutdown()) {
       pool = getDefaultExecutor(builder);
-      LOG.info("Creating new pool for " + builder.getName());
+      LOGGER.info("Creating new pool for " + builder.getName());
       poolCache.put(builder.getName(), pool);
     }
     ((ShutdownOnUnusedThreadPoolExecutor) pool).addReference();
@@ -120,14 +120,14 @@ public class ThreadPoolManager {
     @Override
     protected void finalize() {
       // override references counter if we go out of scope - ensures the pool gets cleaned up
-      LOG.info("Shutting down pool '" + poolName + "' because no more references");
+      LOGGER.info("Shutting down pool '" + poolName + "' because no more references");
       super.finalize();
     }
 
     @Override
     public void shutdown() {
       if (references.decrementAndGet() <= 0) {
-        LOG.debug("Shutting down pool " + this.poolName);
+        LOGGER.debug("Shutting down pool " + this.poolName);
         super.shutdown();
       }
     }
@@ -135,7 +135,7 @@ public class ThreadPoolManager {
     @Override
     public List<Runnable> shutdownNow() {
       if (references.decrementAndGet() <= 0) {
-        LOG.debug("Shutting down pool " + this.poolName + " NOW!");
+        LOGGER.debug("Shutting down pool " + this.poolName + " NOW!");
         return super.shutdownNow();
       }
       return Collections.emptyList();
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
index a4a34a1..389af36 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
@@ -26,8 +26,6 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
@@ -45,6 +43,8 @@ import org.apache.phoenix.hbase.index.covered.Batch;
 import org.apache.phoenix.hbase.index.covered.data.LazyValueGetter;
 import org.apache.phoenix.hbase.index.covered.update.ColumnReference;
 import org.apache.phoenix.hbase.index.scanner.ScannerBuilder.CoveredDeleteScanner;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Lists;
 import com.google.common.primitives.Longs;
@@ -65,7 +65,7 @@ public class IndexManagementUtil {
     public static final String WAL_EDIT_CODEC_CLASS_KEY = "hbase.regionserver.wal.codec";
 
     private static final String INDEX_HLOG_READER_CLASS_NAME = "org.apache.hadoop.hbase.regionserver.wal.IndexedHLogReader";
-    private static final Log LOG = LogFactory.getLog(IndexManagementUtil.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(IndexManagementUtil.class);
 
     public static boolean isWALEditCodecSet(Configuration conf) {
         // check to see if the WALEditCodec is installed
@@ -191,10 +191,10 @@ public class IndexManagementUtil {
         try {
             throw e;
         } catch (IOException e1) {
-            LOG.info("Rethrowing " + e);
+            LOGGER.info("Rethrowing " + e);
             throw e1;
         } catch (Throwable e1) {
-            LOG.info("Rethrowing " + e1 + " as a " + IndexBuildingFailureException.class.getSimpleName());
+            LOGGER.info("Rethrowing " + e1 + " as a " + IndexBuildingFailureException.class.getSimpleName());
             throw new IndexBuildingFailureException("Failed to build index for unexpected reason!", e1);
         }
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriter.java
index 6c5c57c..41ba6bf 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriter.java
@@ -23,8 +23,6 @@ import java.util.HashMap;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicBoolean;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.client.Mutation;
@@ -34,6 +32,8 @@ import org.apache.phoenix.hbase.index.exception.IndexWriteException;
 import org.apache.phoenix.hbase.index.table.HTableInterfaceReference;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.index.PhoenixIndexFailurePolicy;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.ArrayListMultimap;
 import com.google.common.collect.Multimap;
@@ -46,7 +46,7 @@ import com.google.common.collect.Multimap;
  */
 public class IndexWriter implements Stoppable {
 
-  private static final Log LOG = LogFactory.getLog(IndexWriter.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(IndexWriter.class);
   public static final String INDEX_COMMITTER_CONF_KEY = "index.writer.commiter.class";
   public static final String INDEX_FAILURE_POLICY_CONF_KEY = "index.writer.failurepolicy.class";
   private AtomicBoolean stopped = new AtomicBoolean(false);
@@ -160,8 +160,8 @@ public class IndexWriter implements Stoppable {
                                       boolean allowLocalUpdates, int clientVersion) throws IOException {
     try {
       write(toWrite, allowLocalUpdates, clientVersion);
-      if (LOG.isTraceEnabled()) {
-        LOG.trace("Done writing all index updates!\n\t" + toWrite);
+      if (LOGGER.isTraceEnabled()) {
+        LOGGER.trace("Done writing all index updates!\n\t" + toWrite);
       }
     } catch (Exception e) {
       this.failurePolicy.handleFailure(toWrite, e);
@@ -199,8 +199,8 @@ public class IndexWriter implements Stoppable {
                                             boolean allowLocalUpdates, int clientVersion) throws IOException {
     try {
       write(toWrite, allowLocalUpdates, clientVersion);
-      if (LOG.isTraceEnabled()) {
-        LOG.trace("Done writing all index updates!\n\t" + toWrite);
+      if (LOGGER.isTraceEnabled()) {
+        LOGGER.trace("Done writing all index updates!\n\t" + toWrite);
       }
     } catch (Exception e) {
       this.failurePolicy.handleFailure(toWrite, e);
@@ -272,7 +272,7 @@ public class IndexWriter implements Stoppable {
       // already stopped
       return;
     }
-    LOG.debug("Stopping because " + why);
+    LOGGER.debug("Stopping because " + why);
     this.writer.stop(why);
     this.failurePolicy.stop(why);
   }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriterUtils.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriterUtils.java
index ef53b9f..dd43cb2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriterUtils.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriterUtils.java
@@ -17,8 +17,6 @@
  */
 package org.apache.phoenix.hbase.index.write;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.HConstants;
@@ -27,10 +25,12 @@ import org.apache.phoenix.hbase.index.util.IndexManagementUtil;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.ServerUtil;
 import org.apache.phoenix.util.ServerUtil.ConnectionType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class IndexWriterUtils {
 
-  private static final Log LOG = LogFactory.getLog(IndexWriterUtils.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(IndexWriterUtils.class);
 
   /**
    * Maximum number of threads to allow per-table when writing. Each writer thread (from
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/KillServerOnFailurePolicy.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/KillServerOnFailurePolicy.java
index cba2459..4996ecc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/KillServerOnFailurePolicy.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/KillServerOnFailurePolicy.java
@@ -19,13 +19,13 @@ package org.apache.phoenix.hbase.index.write;
 
 import java.io.IOException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.phoenix.hbase.index.table.HTableInterfaceReference;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Multimap;
 
@@ -34,7 +34,7 @@ import com.google.common.collect.Multimap;
  */
 public class KillServerOnFailurePolicy implements IndexFailurePolicy {
 
-  private static final Log LOG = LogFactory.getLog(KillServerOnFailurePolicy.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(KillServerOnFailurePolicy.class);
   private Abortable abortable;
   private Stoppable stoppable;
 
@@ -66,11 +66,11 @@ public class KillServerOnFailurePolicy implements IndexFailurePolicy {
     // notify the regionserver of the failure
     String msg =
         "Could not update the index table, killing server region because couldn't write to an index table";
-    LOG.error(msg, cause);
+    LOGGER.error(msg, cause);
     try {
       this.abortable.abort(msg, cause);
     } catch (Exception e) {
-      LOG.fatal("Couldn't abort this server to preserve index writes, "
+      LOGGER.error("Couldn't abort this server to preserve index writes, "
           + "attempting to hard kill the server");
       System.exit(1);
     }
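
slf4j's Logger has no fatal level, which is why the hunk above maps LOG.fatal to LOGGER.error. If the FATAL severity needs to stay distinguishable for downstream filtering, an slf4j Marker is one conventional substitute; the sketch below assumes that approach and uses an illustrative class name, it is not part of the commit:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;
    import org.slf4j.Marker;
    import org.slf4j.MarkerFactory;

    public class FatalLoggingSketch {
        private static final Logger LOGGER =
                LoggerFactory.getLogger(FatalLoggingSketch.class);
        // Marker that appenders can filter on, since slf4j has no fatal level.
        private static final Marker FATAL = MarkerFactory.getMarker("FATAL");

        public static void main(String[] args) {
            Exception cause = new IllegalStateException("index write failed");
            // What the commit does: log at error level.
            LOGGER.error("Couldn't abort this server to preserve index writes", cause);
            // Marker-based alternative if FATAL must remain visible to appenders.
            LOGGER.error(FATAL, "Couldn't abort this server to preserve index writes", cause);
        }
    }
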
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java
index fb96666..db7e6a0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java
@@ -24,8 +24,6 @@ import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
@@ -33,6 +31,8 @@ import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.hbase.index.exception.MultiIndexWriteFailureException;
 import org.apache.phoenix.hbase.index.table.HTableInterfaceReference;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.ArrayListMultimap;
 import com.google.common.collect.Multimap;
@@ -45,7 +45,7 @@ import com.google.common.collect.Multimap;
  */
 public class RecoveryIndexWriter extends IndexWriter {
 
-    private static final Log LOG = LogFactory.getLog(RecoveryIndexWriter.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(RecoveryIndexWriter.class);
     private Set<HTableInterfaceReference> nonExistingTablesList = new HashSet<HTableInterfaceReference>();
     private HBaseAdmin admin;
 
@@ -71,7 +71,7 @@ public class RecoveryIndexWriter extends IndexWriter {
         } catch (MultiIndexWriteFailureException e) {
             for (HTableInterfaceReference table : e.getFailedTables()) {
                 if (!admin.tableExists(table.getTableName())) {
-                    LOG.warn("Failure due to non existing table: " + table.getTableName());
+                    LOGGER.warn("Failure due to non existing table: " + table.getTableName());
                     nonExistingTablesList.add(table);
                 } else {
                     throw e;
@@ -101,7 +101,7 @@ public class RecoveryIndexWriter extends IndexWriter {
             ImmutableBytesPtr ptr = new ImmutableBytesPtr(tableName);
             HTableInterfaceReference table = tables.get(ptr);
             if (nonExistingTablesList.contains(table)) {
-                LOG.debug("Edits found for non existing table: " + table.getTableName() + " so skipping it!!");
+                LOGGER.debug("Edits found for non existing table: " + table.getTableName() + " so skipping it!!");
                 continue;
             }
             if (table == null) {
@@ -121,7 +121,7 @@ public class RecoveryIndexWriter extends IndexWriter {
             try {
                 admin.close();
             } catch (IOException e) {
-                LOG.error("Closing the admin failed: ", e);
+                LOGGER.error("Closing the admin failed: ", e);
             }
         }
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
index fe13025..934e116 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
@@ -19,8 +19,6 @@ import java.util.Set;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.Stoppable;
@@ -43,6 +41,8 @@ import org.apache.phoenix.hbase.index.table.HTableInterfaceReference;
 import org.apache.phoenix.hbase.index.util.KeyValueBuilder;
 import org.apache.phoenix.index.PhoenixIndexFailurePolicy;
 import org.apache.phoenix.util.IndexUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Multimap;
 
@@ -68,7 +68,7 @@ import com.google.common.collect.Multimap;
  * client.
  */
 public class TrackingParallelWriterIndexCommitter implements IndexCommitter {
-    private static final Log LOG = LogFactory.getLog(TrackingParallelWriterIndexCommitter.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(TrackingParallelWriterIndexCommitter.class);
 
     public static final String NUM_CONCURRENT_INDEX_WRITER_THREADS_CONF_KEY = "index.writer.threads.max";
     private static final int DEFAULT_CONCURRENT_INDEX_WRITER_THREADS = 10;
@@ -170,15 +170,15 @@ public class TrackingParallelWriterIndexCommitter implements IndexCommitter {
                                 return Boolean.TRUE;
                             } catch (IOException ignord) {
                                 // when it's failed we fall back to the standard & slow way
-                                if (LOG.isTraceEnabled()) {
-                                    LOG.trace("indexRegion.batchMutate failed and fall back to HTable.batch(). Got error="
+                                if (LOGGER.isTraceEnabled()) {
+                                    LOGGER.trace("indexRegion.batchMutate failed and fall back to HTable.batch(). Got error="
                                             + ignord);
                                 }
                             }
                         }
 
-                        if (LOG.isTraceEnabled()) {
-                            LOG.trace("Writing index update:" + mutations + " to table: " + tableReference);
+                        if (LOGGER.isTraceEnabled()) {
+                            LOGGER.trace("Writing index update:" + mutations + " to table: " + tableReference);
                         }
                         // if the client can retry index writes, then we don't need to retry here
                         HTableFactory factory;
@@ -215,7 +215,7 @@ public class TrackingParallelWriterIndexCommitter implements IndexCommitter {
 
         List<Boolean> results = null;
         try {
-            LOG.debug("Waiting on index update tasks to complete...");
+            LOGGER.debug("Waiting on index update tasks to complete...");
             results = this.pool.submitUninterruptible(tasks);
         } catch (ExecutionException e) {
             throw new RuntimeException("Should not fail on the results while using a WaitForCompletionTaskRunner", e);
@@ -248,7 +248,7 @@ public class TrackingParallelWriterIndexCommitter implements IndexCommitter {
 
     @Override
     public void stop(String why) {
-        LOG.info("Shutting down " + this.getClass().getSimpleName());
+        LOGGER.info("Shutting down " + this.getClass().getSimpleName());
         this.pool.stop(why);
         this.retryingFactory.shutdown();
         this.noRetriesFactory.shutdown();
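
The trace statements above keep the isTraceEnabled() guard plus string concatenation from the original code. With slf4j, {} placeholders usually give the same effect by deferring message construction until the level is enabled; a guard still helps when computing an argument is itself expensive. A small sketch under those assumptions, with illustrative names only:

    import java.util.Arrays;
    import java.util.List;

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class ParameterizedTraceSketch {
        private static final Logger LOGGER =
                LoggerFactory.getLogger(ParameterizedTraceSketch.class);

        public static void main(String[] args) {
            List<String> mutations = Arrays.asList("put-1", "put-2");
            String tableReference = "MY_INDEX_TABLE";

            // Placeholders are only rendered when TRACE is enabled.
            LOGGER.trace("Writing index update: {} to table: {}", mutations, tableReference);

            // Keep a guard when the argument itself is costly to build.
            if (LOGGER.isTraceEnabled()) {
                LOGGER.trace("Expensive state dump: {}", String.join(",", mutations));
            }
        }
    }
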
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
index d3ae59a..a5fe10a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
@@ -29,8 +29,6 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
@@ -71,6 +69,8 @@ import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.ServerUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Function;
 import com.google.common.collect.Iterables;
@@ -85,7 +85,7 @@ import com.google.common.collect.Multimap;
  *
  */
 public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
-    private static final Log LOG = LogFactory.getLog(PhoenixIndexFailurePolicy.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixIndexFailurePolicy.class);
     public static final String THROW_INDEX_WRITE_FAILURE = "THROW_INDEX_WRITE_FAILURE";
     public static final String DISABLE_INDEX_ON_WRITE_FAILURE = "DISABLE_INDEX_ON_WRITE_FAILURE";
     public static final String REBUILD_INDEX_ON_WRITE_FAILURE = "REBUILD_INDEX_ON_WRITE_FAILURE";
@@ -174,7 +174,7 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
             timestamp = handleFailureWithExceptions(attempted, cause);
             throwing = false;
         } catch (Throwable t) {
-            LOG.warn("handleFailure failed", t);
+            LOGGER.warn("handleFailure failed", t);
             super.handleFailure(attempted, cause);
             throwing = false;
         } finally {
@@ -188,7 +188,7 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
                 if (throwIndexWriteFailure) {
             		throw ioException;
             	} else {
-                    LOG.warn("Swallowing index write failure", ioException);
+                    LOGGER.warn("Swallowing index write failure", ioException);
             	}
             }
         }
@@ -282,24 +282,24 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
                         MetaDataMutationResult result = IndexUtil.updateIndexState(indexTableName, minTimeStamp,
                                 systemTable, newState);
                         if (result.getMutationCode() == MutationCode.TABLE_NOT_FOUND) {
-                            LOG.info("Index " + indexTableName + " has been dropped. Ignore uncommitted mutations");
+                            LOGGER.info("Index " + indexTableName + " has been dropped. Ignore uncommitted mutations");
                             continue;
                         }
                         if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) {
                             if (leaveIndexActive) {
-                                LOG.warn("Attempt to update INDEX_DISABLE_TIMESTAMP " + " failed with code = "
+                                LOGGER.warn("Attempt to update INDEX_DISABLE_TIMESTAMP " + " failed with code = "
                                         + result.getMutationCode());
                                 // If we're not disabling the index, then we don't want to throw as throwing
                                 // will lead to the RS being shutdown.
                                 if (blockDataTableWritesOnFailure) { throw new DoNotRetryIOException(
                                         "Attempt to update INDEX_DISABLE_TIMESTAMP failed."); }
                             } else {
-                                LOG.warn("Attempt to disable index " + indexTableName + " failed with code = "
+                                LOGGER.warn("Attempt to disable index " + indexTableName + " failed with code = "
                                         + result.getMutationCode() + ". Will use default failure policy instead.");
                                 throw new DoNotRetryIOException("Attempt to disable " + indexTableName + " failed.");
                             }
                         }
-                        LOG.info("Successfully update INDEX_DISABLE_TIMESTAMP for " + indexTableName
+                        LOGGER.info("Successfully update INDEX_DISABLE_TIMESTAMP for " + indexTableName
                                 + " due to an exception while writing updates. indexState=" + newState,
                             cause);
                     } catch (Throwable t) {
@@ -351,7 +351,8 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
                                         mutation.getRow().length - offset));
                 String indexTableName = localIndexNames.get(new ImmutableBytesWritable(viewId));
                 if (indexTableName == null) {
-                    LOG.error("Unable to find local index on " + ref.getTableName() + " with viewID of " + Bytes.toStringBinary(viewId));
+                    LOGGER.error("Unable to find local index on " + ref.getTableName() + " with viewID of "
+                            + Bytes.toStringBinary(viewId));
                 } else {
                     indexTableNames.add(indexTableName);
                 }
@@ -437,7 +438,7 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
                 }
             }
         } catch (Exception handleE) {
-            LOG.warn("Error while trying to handle index write exception", indexWriteException);
+            LOGGER.warn("Error while trying to handle index write exception", indexWriteException);
         }
     }
 
@@ -532,7 +533,7 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
                 }
             }
         } catch (Exception handleE) {
-            LOG.warn("Error while trying to handle index write exception", indexWriteException);
+            LOGGER.warn("Error while trying to handle index write exception", indexWriteException);
         }
     }
 
@@ -571,11 +572,11 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
         decrementCounterForIndex(conn,indexFullName);
         Long indexDisableTimestamp = null;
         if (PIndexState.DISABLE.equals(indexState)) {
-            LOG.info("Disabling index after hitting max number of index write retries: "
+            LOGGER.info("Disabling index after hitting max number of index write retries: "
                     + indexFullName);
             IndexUtil.updateIndexState(conn, indexFullName, indexState, indexDisableTimestamp);
         } else if (PIndexState.ACTIVE.equals(indexState)) {
-            LOG.debug("Resetting index to active after subsequent success " + indexFullName);
+            LOGGER.debug("Resetting index to active after subsequent success " + indexFullName);
             //At server disabled timestamp will be reset only if there is no other client is in PENDING_DISABLE state
             indexDisableTimestamp = 0L;
             try {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java
index 56db39b..1748827 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java
@@ -29,8 +29,6 @@ import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.HConstants;
@@ -60,6 +58,8 @@ import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.ServerUtil;
 import org.apache.phoenix.util.ServerUtil.ConnectionType;
 import org.apache.phoenix.util.TransactionUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Do all the work of managing local index updates for a transactional table from a single coprocessor. Since the transaction
@@ -69,7 +69,7 @@ import org.apache.phoenix.util.TransactionUtil;
  */
 public class PhoenixTransactionalIndexer extends BaseRegionObserver {
 
-    private static final Log LOG = LogFactory.getLog(PhoenixTransactionalIndexer.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixTransactionalIndexer.class);
 
     // Hack to get around not being able to save any state between
     // coprocessor calls. TODO: remove after HBASE-18127 when available
@@ -199,7 +199,7 @@ public class PhoenixTransactionalIndexer extends BaseRegionObserver {
             TracingUtils.addAnnotation(current, "index update count", context.indexUpdates.size());
         } catch (Throwable t) {
             String msg = "Failed to update index with entries:" + indexUpdates;
-            LOG.error(msg, t);
+            LOGGER.error(msg, t);
             ServerUtil.throwIOException(msg, t);
         }
     }
@@ -226,7 +226,7 @@ public class PhoenixTransactionalIndexer extends BaseRegionObserver {
             }
         } catch (Throwable t) {
             String msg = "Failed to write index updates:" + context.indexUpdates;
-            LOG.error(msg, t);
+            LOGGER.error(msg, t);
             ServerUtil.throwIOException(msg, t);
          } finally {
              removeBatchMutateContext(c);
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/SnapshotScanner.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/SnapshotScanner.java
index 305d558..0fdaab8 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/SnapshotScanner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/SnapshotScanner.java
@@ -18,9 +18,6 @@
 
 package org.apache.phoenix.iterate;
 
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -33,6 +30,8 @@ import org.apache.hadoop.hbase.regionserver.*;
 import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.util.*;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -42,7 +41,7 @@ import java.util.concurrent.ExecutorService;
 
 public class SnapshotScanner extends AbstractClientScanner {
 
-  private static final Log LOG = LogFactory.getLog(SnapshotScanner.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(SnapshotScanner.class);
 
   private RegionScanner scanner = null;
   private HRegion region;
@@ -89,7 +88,7 @@ public class SnapshotScanner extends AbstractClientScanner {
         this.scanner.close();
         this.scanner = null;
       } catch (IOException e) {
-        LOG.warn("Exception while closing scanner", e);
+        LOGGER.warn("Exception while closing scanner", e);
       }
     }
     if (this.region != null) {
@@ -98,7 +97,7 @@ public class SnapshotScanner extends AbstractClientScanner {
         this.region.close(true);
         this.region = null;
       } catch (IOException e) {
-        LOG.warn("Exception while closing scanner", e);
+        LOGGER.warn("Exception while closing scanner", e);
       }
     }
   }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java
index 2669360..01d1072 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java
@@ -33,8 +33,6 @@ import java.util.logging.Logger;
 
 import javax.annotation.concurrent.Immutable;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.security.User;
@@ -69,7 +67,7 @@ public abstract class PhoenixEmbeddedDriver implements Driver, SQLCloseable {
     /**
      * The protocol for Phoenix Network Client 
      */ 
-    private static final Log LOG = LogFactory.getLog(PhoenixEmbeddedDriver.class);
+    private static final org.slf4j.Logger LOGGER = LoggerFactory.getLogger(PhoenixEmbeddedDriver.class);
     private final static String DNC_JDBC_PROTOCOL_SUFFIX = "//";
     private final static String DRIVER_NAME = "PhoenixEmbeddedDriver";
     private static final String TERMINATOR = "" + PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR;
@@ -416,12 +414,12 @@ public abstract class PhoenixEmbeddedDriver implements Driver, SQLCloseable {
             try {
                 return KerberosUtil.getDefaultRealm();
             } catch (Exception e) {
-                if (LOG.isDebugEnabled()) {
+                if (LOGGER.isDebugEnabled()) {
                     // Include the stacktrace at DEBUG
-                    LOG.debug(REALM_EQUIVALENCY_WARNING_MSG, e);
+                    LOGGER.debug(REALM_EQUIVALENCY_WARNING_MSG, e);
                 } else {
                     // Limit the content at WARN
-                    LOG.warn(REALM_EQUIVALENCY_WARNING_MSG);
+                    LOGGER.warn(REALM_EQUIVALENCY_WARNING_MSG);
                 }
             }
             return null;
@@ -633,7 +631,7 @@ public abstract class PhoenixEmbeddedDriver implements Driver, SQLCloseable {
                 throw getMalFormedUrlException(url);
             }
             String znodeParent = config.get(HConstants.ZOOKEEPER_ZNODE_PARENT);
-            LOG.debug("Getting default jdbc connection url " + quorum + ":" + port + ":" + znodeParent);
+            LOGGER.debug("Getting default jdbc connection url " + quorum + ":" + port + ":" + znodeParent);
             return new ConnectionInfo(quorum, port, znodeParent);
         }
     }
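
PhoenixEmbeddedDriver already imports java.util.logging.Logger (visible in the context at the top of this hunk), so the commit declares the slf4j field with its fully qualified type rather than adding a clashing import. A minimal sketch of that pattern, with an illustrative class name:

    import java.util.logging.Logger;   // pre-existing JUL import kept by the class

    import org.slf4j.LoggerFactory;

    public class QualifiedLoggerSketch {
        // Fully qualified field type avoids a clash with java.util.logging.Logger.
        private static final org.slf4j.Logger LOGGER =
                LoggerFactory.getLogger(QualifiedLoggerSketch.class);

        public static void main(String[] args) {
            Logger jul = Logger.getLogger(QualifiedLoggerSketch.class.getName());
            jul.info("JUL logger still usable");
            LOGGER.debug("slf4j logger declared with its fully qualified type");
        }
    }
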
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixResultSet.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixResultSet.java
index 2712205..723ef67 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixResultSet.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixResultSet.java
@@ -41,8 +41,6 @@ import java.text.Format;
 import java.util.Calendar;
 import java.util.Map;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.phoenix.compile.ColumnProjector;
 import org.apache.phoenix.compile.RowProjector;
@@ -73,6 +71,8 @@ import org.apache.phoenix.schema.types.PTinyint;
 import org.apache.phoenix.schema.types.PVarbinary;
 import org.apache.phoenix.schema.types.PVarchar;
 import org.apache.phoenix.util.SQLCloseable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Throwables;
@@ -107,7 +107,7 @@ import com.google.common.base.Throwables;
  */
 public class PhoenixResultSet implements ResultSet, SQLCloseable {
 
-    private static final Log LOG = LogFactory.getLog(PhoenixResultSet.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixResultSet.class);
 
     private final static String STRING_FALSE = "0";
     private final static BigDecimal BIG_DECIMAL_FALSE = BigDecimal.valueOf(0);
@@ -878,7 +878,7 @@ public class PhoenixResultSet implements ResultSet, SQLCloseable {
 
     @Override
     public void setFetchSize(int rows) throws SQLException {
-        LOG.warn("Ignoring setFetchSize(" + rows + ")");
+        LOGGER.warn("Ignoring setFetchSize(" + rows + ")");
     }
 
     @Override
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/log/QueryLogger.java b/phoenix-core/src/main/java/org/apache/phoenix/log/QueryLogger.java
index ef5559c..7433f6a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/log/QueryLogger.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/log/QueryLogger.java
@@ -20,11 +20,11 @@ package org.apache.phoenix.log;
 import java.util.Map;
 import java.util.UUID;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.monitoring.MetricType;
 import org.apache.phoenix.util.EnvironmentEdgeManager;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.ImmutableMap.Builder;
@@ -41,7 +41,7 @@ public class QueryLogger {
     private LogLevel logLevel;
     private Builder<QueryLogInfo, Object> queryLogBuilder = ImmutableMap.builder();
     private boolean isSynced;
-    private static final Log LOG = LogFactory.getLog(QueryLoggerDisruptor.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(QueryLoggerDisruptor.class);
     
     private QueryLogger(PhoenixConnection connection) {
         this.queryId = UUID.randomUUID().toString();
@@ -105,15 +105,15 @@ public class QueryLogger {
         try {
             queryLogBuilder.put(queryLogInfo, info);
         } catch (Exception e) {
-            LOG.warn("Unable to add log info because of " + e.getMessage());
+            LOGGER.warn("Unable to add log info because of " + e.getMessage());
         }
     }
     
     private boolean publishLogs(RingBufferEventTranslator translator) {
         if (queryDisruptor == null) { return false; }
         boolean isLogged = queryDisruptor.tryPublish(translator);
-        if (!isLogged && LOG.isDebugEnabled()) {
-            LOG.debug("Unable to write query log in table as ring buffer queue is full!!");
+        if (!isLogged && LOGGER.isDebugEnabled()) {
+            LOGGER.debug("Unable to write query log in table as ring buffer queue is full!!");
         }
         return isLogged;
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/log/QueryLoggerDisruptor.java b/phoenix-core/src/main/java/org/apache/phoenix/log/QueryLoggerDisruptor.java
index 1f2240e..c4f227a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/log/QueryLoggerDisruptor.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/log/QueryLoggerDisruptor.java
@@ -24,10 +24,10 @@ import java.util.concurrent.Executors;
 import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.phoenix.query.QueryServices;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import com.lmax.disruptor.BlockingWaitStrategy;
@@ -44,7 +44,7 @@ public class QueryLoggerDisruptor implements Closeable{
     private boolean isClosed = false;
     //number of elements to create within the ring buffer.
     private static final int RING_BUFFER_SIZE = 8 * 1024;
-    private static final Log LOG = LogFactory.getLog(QueryLoggerDisruptor.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(QueryLoggerDisruptor.class);
     private static final String DEFAULT_WAIT_STRATEGY = BlockingWaitStrategy.class.getName();
     
     public QueryLoggerDisruptor(Configuration configuration) throws SQLException{
@@ -76,7 +76,7 @@ public class QueryLoggerDisruptor implements Closeable{
 
         final QueryLogDetailsEventHandler[] handlers = { new QueryLogDetailsEventHandler(configuration) };
         disruptor.handleEventsWith(handlers);
-        LOG.info("Starting  QueryLoggerDisruptor for with ringbufferSize=" + disruptor.getRingBuffer().getBufferSize()
+        LOGGER.info("Starting  QueryLoggerDisruptor for with ringbufferSize=" + disruptor.getRingBuffer().getBufferSize()
                 + ", waitStrategy=" + waitStrategy.getClass().getSimpleName() + ", " + "exceptionHandler="
                 + errorHandler + "...");
         disruptor.start();
@@ -103,7 +103,7 @@ public class QueryLoggerDisruptor implements Closeable{
     @Override
     public void close() throws IOException {
         isClosed = true;
-        LOG.info("Shutting down QueryLoggerDisruptor..");
+        LOGGER.info("Shutting down QueryLoggerDisruptor..");
         try {
             //we can wait for 2 seconds, so that backlog can be committed
             disruptor.shutdown(2, TimeUnit.SECONDS);
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/log/TableLogWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/log/TableLogWriter.java
index 0209951..6a7c0b6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/log/TableLogWriter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/log/TableLogWriter.java
@@ -27,11 +27,11 @@ import java.sql.SQLException;
 import java.util.HashMap;
 import java.util.Map;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.phoenix.monitoring.MetricType;
 import org.apache.phoenix.util.QueryUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.ImmutableMap;
 
@@ -40,7 +40,7 @@ import com.google.common.collect.ImmutableMap;
  * 
  */
 public class TableLogWriter implements LogWriter {
-    private static final Log LOG = LogFactory.getLog(LogWriter.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(LogWriter.class);
     private Connection connection;
     private boolean isClosed;
     private PreparedStatement upsertStatement;
@@ -84,7 +84,7 @@ public class TableLogWriter implements LogWriter {
     @Override
     public void write(RingBufferEvent event) throws SQLException, IOException, ClassNotFoundException {
         if (isClosed()) {
-            LOG.warn("Unable to commit query log as Log committer is already closed");
+            LOGGER.warn("Unable to commit query log as Log committer is already closed");
             return;
         }
         if (connection == null) {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
index 4561152..f2064b5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
@@ -74,7 +74,7 @@ import com.google.common.collect.Lists;
  */
 public abstract class AbstractBulkLoadTool extends Configured implements Tool {
 
-    protected static final Logger LOG = LoggerFactory.getLogger(AbstractBulkLoadTool.class);
+    protected static final Logger LOGGER = LoggerFactory.getLogger(AbstractBulkLoadTool.class);
 
     static final Option ZK_QUORUM_OPT = new Option("z", "zookeeper", true, "Supply zookeeper connection details (optional)");
     static final Option INPUT_PATH_OPT = new Option("i", "input", true, "Input path(s) (comma-separated, mandatory)");
@@ -191,18 +191,18 @@ public abstract class AbstractBulkLoadTool extends Configured implements Tool {
             // ZK_QUORUM_OPT is optional, but if it's there, use it for both the conn and the job.
             String zkQuorum = cmdLine.getOptionValue(ZK_QUORUM_OPT.getOpt());
             PhoenixDriver.ConnectionInfo info = PhoenixDriver.ConnectionInfo.create(zkQuorum);
-            LOG.info("Configuring HBase connection to {}", info);
+            LOGGER.info("Configuring HBase connection to {}", info);
             for (Map.Entry<String,String> entry : info.asProps()) {
-                if (LOG.isDebugEnabled()) {
-                    LOG.debug("Setting {} = {}", entry.getKey(), entry.getValue());
+                if (LOGGER.isDebugEnabled()) {
+                    LOGGER.debug("Setting {} = {}", entry.getKey(), entry.getValue());
                 }
                 conf.set(entry.getKey(), entry.getValue());
             }
         }
 
         final Connection conn = QueryUtil.getConnection(conf);
-        if (LOG.isDebugEnabled()) {
-            LOG.debug("Reading columns from {} :: {}", ((PhoenixConnection) conn).getURL(),
+        if (LOGGER.isDebugEnabled()) {
+            LOGGER.debug("Reading columns from {} :: {}", ((PhoenixConnection) conn).getURL(),
                     qualifiedTableName);
         }
         List<ColumnInfo> importColumns = buildImportColumns(conn, cmdLine, qualifiedTableName);
@@ -303,7 +303,7 @@ public abstract class AbstractBulkLoadTool extends Configured implements Tool {
         // give subclasses their hook
         setupJob(job);
 
-        LOG.info("Running MapReduce import job from {} to {}", inputPaths, outputPath);
+        LOGGER.info("Running MapReduce import job from {} to {}", inputPaths, outputPath);
         boolean success = job.waitForCompletion(true);
 
         if (success) {
@@ -311,7 +311,7 @@ public abstract class AbstractBulkLoadTool extends Configured implements Tool {
                 try {
                     table = new HTable(job.getConfiguration(), qualifiedTableName);
                     if(!IndexUtil.matchingSplitKeys(splitKeysBeforeJob, table.getRegionLocator().getStartKeys())) {
-                        LOG.error("The table "
+                        LOGGER.error("The table "
                                 + qualifiedTableName
                                 + " has local indexes and there is split key mismatch before and"
                                 + " after running bulkload job. Please rerun the job otherwise"
@@ -322,11 +322,11 @@ public abstract class AbstractBulkLoadTool extends Configured implements Tool {
                     if (table != null) table.close();
                 }
             }
-            LOG.info("Loading HFiles from {}", outputPath);
+            LOGGER.info("Loading HFiles from {}", outputPath);
             completebulkload(conf,outputPath,tablesToBeLoaded);
-            LOG.info("Removing output directory {}", outputPath);
+            LOGGER.info("Removing output directory {}", outputPath);
             if(!outputPath.getFileSystem(conf).delete(outputPath, true)) {
-                LOG.error("Failed to delete the output directory {}", outputPath);
+                LOGGER.error("Failed to delete the output directory {}", outputPath);
             }
             return 0;
         } else {
@@ -345,9 +345,9 @@ public abstract class AbstractBulkLoadTool extends Configured implements Tool {
             String tableName = table.getPhysicalName();
             Path tableOutputPath = CsvBulkImportUtil.getOutputPath(outputPath, tableName);
             try(HTable htable = new HTable(conf,tableName)) {
-                LOG.info("Loading HFiles for {} from {}", tableName , tableOutputPath);
+                LOGGER.info("Loading HFiles for {} from {}", tableName , tableOutputPath);
                 loader.doBulkLoad(tableOutputPath, htable);
-                LOG.info("Incremental load complete for table=" + tableName);
+                LOGGER.info("Incremental load complete for table=" + tableName);
             }
         }
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapper.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapper.java
index 93ab188..cc23c43 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapper.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapper.java
@@ -77,7 +77,7 @@ import com.google.common.collect.Lists;
 public abstract class FormatToBytesWritableMapper<RECORD> extends Mapper<LongWritable, Text, TableRowkeyPair,
         ImmutableBytesWritable> {
 
-    protected static final Logger LOG = LoggerFactory.getLogger(FormatToBytesWritableMapper.class);
+    protected static final Logger LOGGER = LoggerFactory.getLogger(FormatToBytesWritableMapper.class);
 
     protected static final String COUNTER_GROUP_NAME = "Phoenix MapReduce Import";
 
@@ -396,7 +396,7 @@ public abstract class FormatToBytesWritableMapper<RECORD> extends Mapper<LongWri
 
         @Override
         public void errorOnRecord(T record, Throwable throwable) {
-            LOG.error("Error on record " + record, throwable);
+            LOGGER.error("Error on record " + record, throwable);
             context.getCounter(COUNTER_GROUP_NAME, "Errors on records").increment(1L);
             if (!ignoreRecordErrors) {
                 Throwables.propagate(throwable);
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueReducer.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueReducer.java
index 72af1a7..52d539b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueReducer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueReducer.java
@@ -60,7 +60,7 @@ import org.slf4j.LoggerFactory;
 public class FormatToKeyValueReducer
         extends Reducer<TableRowkeyPair, ImmutableBytesWritable, TableRowkeyPair, KeyValue> {
 
-    protected static final Logger LOG = LoggerFactory.getLogger(FormatToKeyValueReducer.class);
+    protected static final Logger LOGGER = LoggerFactory.getLogger(FormatToKeyValueReducer.class);
 
 
     protected List<String> tableNames;
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
index 30f21ce..a8de1d1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
@@ -84,7 +84,7 @@ import com.google.common.collect.Sets;
  */
 public class MultiHfileOutputFormat extends FileOutputFormat<TableRowkeyPair, Cell> {
 
-    private static final Logger LOG = LoggerFactory.getLogger(MultiHfileOutputFormat.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(MultiHfileOutputFormat.class);
 
     private static final String COMPRESSION_FAMILIES_CONF_KEY =
         "hbase.hfileoutputformat.families.compression";
@@ -196,7 +196,7 @@ public class MultiHfileOutputFormat extends FileOutputFormat<TableRowkeyPair, Ce
           private void rollWriters() throws IOException {
               for (WriterLength wl : this.writers.values()) {
                   if (wl.writer != null) {
-                      LOG.info("Writer=" + wl.writer.getPath() +
+                      LOGGER.info("Writer=" + wl.writer.getPath() +
                               ((wl.written == 0)? "": ", wrote=" + wl.written));
                       close(wl.writer);
                   }
@@ -470,7 +470,7 @@ public class MultiHfileOutputFormat extends FileOutputFormat<TableRowkeyPair, Ce
     private static void writePartitions(Configuration conf, Path partitionsPath,
             Set<TableRowkeyPair> tablesStartKeys) throws IOException {
         
-        LOG.info("Writing partition information to " + partitionsPath);
+        LOGGER.info("Writing partition information to " + partitionsPath);
         if (tablesStartKeys.isEmpty()) {
           throw new IllegalArgumentException("No regions passed");
         }
@@ -688,11 +688,11 @@ public class MultiHfileOutputFormat extends FileOutputFormat<TableRowkeyPair, Ce
                conf.set(tableName, tableDefns);
                
                TargetTableRef tbl = TargetTableRefFunctions.FROM_JSON.apply(tableDefns);
-               LOG.info(" the table logical name is "+ tbl.getLogicalName());
+               LOGGER.info(" the table logical name is "+ tbl.getLogicalName());
            }
        }
     
-       LOG.info("Configuring " + tablesStartKeys.size() + " reduce partitions to match current region count");
+       LOGGER.info("Configuring " + tablesStartKeys.size() + " reduce partitions to match current region count");
        job.setNumReduceTasks(tablesStartKeys.size());
 
        configurePartitioner(job, tablesStartKeys);
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/OrphanViewTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/OrphanViewTool.java
index cf020e7..0eb1fce 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/OrphanViewTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/OrphanViewTool.java
@@ -80,7 +80,7 @@ import org.slf4j.LoggerFactory;
  */
 public class OrphanViewTool extends Configured implements Tool {
     private static final String SYSTEM_CHILD_LINK_NAME = SYSTEM_CATALOG_NAME;
-    private static final Logger LOG = LoggerFactory.getLogger(OrphanViewTool.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(OrphanViewTool.class);
     // Query all the views that are not "MAPPED" views
     private static final String viewQuery = "SELECT " +
             TENANT_ID + ", " +
@@ -418,7 +418,7 @@ public class OrphanViewTool extends Configured implements Tool {
                         new DropTableStatement(pTableName, PTableType.VIEW, false, true));
             }
             catch (TableNotFoundException e) {
-                LOG.info("Ignoring view " + pTableName + " as it has already been dropped");
+                LOGGER.info("Ignoring view " + pTableName + " as it has already been dropped");
             }
         } finally {
             if (newConn) {
@@ -807,7 +807,7 @@ public class OrphanViewTool extends Configured implements Tool {
                 connection.close();
             }
         } catch (SQLException sqlE) {
-            LOG.error("Failed to close connection: ", sqlE);
+            LOGGER.error("Failed to close connection: ", sqlE);
             throw new RuntimeException("Failed to close connection with exception: ", sqlE);
         }
     }
@@ -881,7 +881,7 @@ public class OrphanViewTool extends Configured implements Tool {
             }
             return 0;
         } catch (Exception ex) {
-            LOG.error("Orphan View Tool : An exception occurred " + ExceptionUtils.getMessage(ex) + " at:\n" +
+            LOGGER.error("Orphan View Tool : An exception occurred " + ExceptionUtils.getMessage(ex) + " at:\n" +
                     ExceptionUtils.getStackTrace(ex));
             return -1;
         } finally {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
index aa94684..88c32f1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
@@ -25,8 +25,6 @@ import java.util.Collections;
 import java.util.List;
 import java.util.Properties;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.TableName;
@@ -51,6 +49,8 @@ import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
 import org.apache.phoenix.query.HBaseFactoryProvider;
 import org.apache.phoenix.query.KeyRange;
 import org.apache.phoenix.util.PhoenixRuntime;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
@@ -61,7 +61,7 @@ import com.google.common.collect.Lists;
  */
 public class PhoenixInputFormat<T extends DBWritable> extends InputFormat<NullWritable,T> {
 
-    private static final Log LOG = LogFactory.getLog(PhoenixInputFormat.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixInputFormat.class);
        
     /**
      * instantiated by framework
@@ -122,8 +122,8 @@ public class PhoenixInputFormat<T extends DBWritable> extends InputFormat<NullWr
 
             if(splitByStats) {
                 for(Scan aScan: scans) {
-                    if (LOG.isDebugEnabled()) {
-                        LOG.debug("Split for  scan : " + aScan + "with scanAttribute : " + aScan
+                    if (LOGGER.isDebugEnabled()) {
+                        LOGGER.debug("Split for  scan : " + aScan + "with scanAttribute : " + aScan
                                 .getAttributesMap() + " [scanCache, cacheBlock, scanBatch] : [" +
                                 aScan.getCaching() + ", " + aScan.getCacheBlocks() + ", " + aScan
                                 .getBatch() + "] and  regionLocation : " + regionLocation);
@@ -132,18 +132,18 @@ public class PhoenixInputFormat<T extends DBWritable> extends InputFormat<NullWr
                     psplits.add(new PhoenixInputSplit(Collections.singletonList(aScan), regionSize, regionLocation));
                 }
                 } else {
-                if (LOG.isDebugEnabled()) {
-                    LOG.debug("Scan count[" + scans.size() + "] : " + Bytes.toStringBinary(scans
+                if (LOGGER.isDebugEnabled()) {
+                    LOGGER.debug("Scan count[" + scans.size() + "] : " + Bytes.toStringBinary(scans
                             .get(0).getStartRow()) + " ~ " + Bytes.toStringBinary(scans.get(scans
                             .size() - 1).getStopRow()));
-                    LOG.debug("First scan : " + scans.get(0) + "with scanAttribute : " + scans
+                    LOGGER.debug("First scan : " + scans.get(0) + "with scanAttribute : " + scans
                             .get(0).getAttributesMap() + " [scanCache, cacheBlock, scanBatch] : " +
                             "[" + scans.get(0).getCaching() + ", " + scans.get(0).getCacheBlocks()
                             + ", " + scans.get(0).getBatch() + "] and  regionLocation : " +
                             regionLocation);
 
                     for (int i = 0, limit = scans.size(); i < limit; i++) {
-                        LOG.debug("EXPECTED_UPPER_REGION_KEY[" + i + "] : " + Bytes
+                        LOGGER.debug("EXPECTED_UPPER_REGION_KEY[" + i + "] : " + Bytes
                                 .toStringBinary(scans.get(i).getAttribute
                                         (BaseScannerRegionObserver.EXPECTED_UPPER_REGION_KEY)));
                     }
@@ -201,7 +201,7 @@ public class PhoenixInputFormat<T extends DBWritable> extends InputFormat<NullWr
               return queryPlan;
             }
         } catch (Exception exception) {
-            LOG.error(String.format("Failed to get the query plan with error [%s]",
+            LOGGER.error(String.format("Failed to get the query plan with error [%s]",
                 exception.getMessage()));
             throw new RuntimeException(exception);
         }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixOutputFormat.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixOutputFormat.java
index 4217e40..055ce1f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixOutputFormat.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixOutputFormat.java
@@ -22,8 +22,6 @@ import java.sql.SQLException;
 import java.util.Collections;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.OutputCommitter;
@@ -31,13 +29,15 @@ import org.apache.hadoop.mapreduce.OutputFormat;
 import org.apache.hadoop.mapreduce.RecordWriter;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.mapreduce.lib.db.DBWritable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * {@link OutputFormat} implementation for Phoenix.
  *
  */
 public class PhoenixOutputFormat <T extends DBWritable> extends OutputFormat<NullWritable,T> {
-    private static final Log LOG = LogFactory.getLog(PhoenixOutputFormat.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixOutputFormat.class);
     private final Set<String> propsToIgnore;
     
     public PhoenixOutputFormat() {
@@ -65,7 +65,7 @@ public class PhoenixOutputFormat <T extends DBWritable> extends OutputFormat<Nul
         try {
             return new PhoenixRecordWriter<T>(context.getConfiguration(), propsToIgnore);
         } catch (SQLException e) {
-            LOG.error("Error calling PhoenixRecordWriter "  + e.getMessage());
+            LOGGER.error("Error calling PhoenixRecordWriter "  + e.getMessage());
             throw new RuntimeException(e);
         }
     }
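
The error above logs only e.getMessage() before rethrowing, as the original code did. slf4j also accepts the exception as a trailing argument, which preserves the full stack trace in the log; the sketch below shows both forms under an illustrative class name that is not part of the commit:

    import java.sql.SQLException;

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class ThrowableLoggingSketch {
        private static final Logger LOGGER =
                LoggerFactory.getLogger(ThrowableLoggingSketch.class);

        public static void main(String[] args) {
            try {
                throw new SQLException("simulated record writer failure");
            } catch (SQLException e) {
                // Message only, mirroring the hunk above:
                LOGGER.error("Error calling PhoenixRecordWriter " + e.getMessage());
                // Message plus full stack trace:
                LOGGER.error("Error calling PhoenixRecordWriter", e);
            }
        }
    }
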
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordReader.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordReader.java
index 5a4bdb4..6db721d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordReader.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordReader.java
@@ -21,8 +21,6 @@ import java.io.IOException;
 import java.sql.SQLException;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -49,6 +47,8 @@ import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
 import org.apache.phoenix.monitoring.ReadMetricQueue;
 import org.apache.phoenix.monitoring.ScanMetricsHolder;
 import org.apache.phoenix.query.ConnectionQueryServices;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Preconditions;
 import com.google.common.base.Throwables;
@@ -59,7 +59,7 @@ import com.google.common.collect.Lists;
  */
 public class PhoenixRecordReader<T extends DBWritable> extends RecordReader<NullWritable,T> {
     
-    private static final Log LOG = LogFactory.getLog(PhoenixRecordReader.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixRecordReader.class);
     protected final Configuration  configuration;
     protected final QueryPlan queryPlan;
     private final ParallelScanGrouper scanGrouper;
@@ -86,7 +86,7 @@ public class PhoenixRecordReader<T extends DBWritable> extends RecordReader<Null
            try {
                resultIterator.close();
         } catch (SQLException e) {
-           LOG.error(" Error closing resultset.");
+           LOGGER.error(" Error closing resultset.");
            throw new RuntimeException(e);
         }
        }
@@ -158,7 +158,7 @@ public class PhoenixRecordReader<T extends DBWritable> extends RecordReader<Null
 
             this.resultSet = new PhoenixResultSet(this.resultIterator, queryPlan.getProjector().cloneIfNecessary(), queryPlan.getContext());
         } catch (SQLException e) {
-            LOG.error(String.format(" Error [%s] initializing PhoenixRecordReader. ",e.getMessage()));
+            LOGGER.error(String.format(" Error [%s] initializing PhoenixRecordReader. ",e.getMessage()));
             Throwables.propagate(e);
         }
    }
@@ -179,7 +179,7 @@ public class PhoenixRecordReader<T extends DBWritable> extends RecordReader<Null
             value.readFields(resultSet);
             return true;
         } catch (SQLException e) {
-            LOG.error(String.format(" Error [%s] occurred while iterating over the resultset. ",e.getMessage()));
+            LOGGER.error(String.format(" Error [%s] occurred while iterating over the resultset. ",e.getMessage()));
             throw new RuntimeException(e);
         }
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordWriter.java
index b67ba74..6f5b84e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordWriter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordWriter.java
@@ -24,8 +24,6 @@ import java.sql.SQLException;
 import java.util.Collections;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapreduce.RecordWriter;
@@ -33,6 +31,8 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.mapreduce.lib.db.DBWritable;
 import org.apache.phoenix.mapreduce.util.ConnectionUtil;
 import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Default {@link RecordWriter} implementation from Phoenix
@@ -40,7 +40,7 @@ import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
  */
 public class PhoenixRecordWriter<T extends DBWritable>  extends RecordWriter<NullWritable, T> {
     
-    private static final Log LOG = LogFactory.getLog(PhoenixRecordWriter.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixRecordWriter.class);
     
     private final Connection conn;
     private final PreparedStatement statement;
@@ -73,7 +73,7 @@ public class PhoenixRecordWriter<T extends DBWritable>  extends RecordWriter<Nul
         try {
             conn.commit();
          } catch (SQLException e) {
-             LOG.error("SQLException while performing the commit for the task.");
+             LOGGER.error("SQLException while performing the commit for the task.");
              throw new RuntimeException(e);
           } finally {
             try {
@@ -81,7 +81,7 @@ public class PhoenixRecordWriter<T extends DBWritable>  extends RecordWriter<Nul
               conn.close();
             }
             catch (SQLException ex) {
-              LOG.error("SQLException while closing the connection for the task.");
+              LOGGER.error("SQLException while closing the connection for the task.");
               throw new RuntimeException(ex);
             }
           }
@@ -94,7 +94,7 @@ public class PhoenixRecordWriter<T extends DBWritable>  extends RecordWriter<Nul
             numRecords++;
             statement.execute();
             if (numRecords % batchSize == 0) {
-                LOG.debug("commit called on a batch of size : " + batchSize);
+                LOGGER.debug("commit called on a batch of size : " + batchSize);
                 conn.commit();
             }
         } catch (SQLException e) {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixServerBuildIndexInputFormat.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixServerBuildIndexInputFormat.java
index f8ec393..76d5a83 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixServerBuildIndexInputFormat.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixServerBuildIndexInputFormat.java
@@ -22,8 +22,6 @@ import java.sql.Connection;
 import java.sql.SQLException;
 import java.util.Properties;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -38,6 +36,8 @@ import org.apache.phoenix.mapreduce.util.ConnectionUtil;
 import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
 import org.apache.phoenix.schema.*;
 import org.apache.phoenix.util.*;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Preconditions;
 import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.getIndexToolDataTableName;
@@ -50,7 +50,7 @@ import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.getInde
 public class PhoenixServerBuildIndexInputFormat<T extends DBWritable> extends PhoenixInputFormat {
     QueryPlan queryPlan = null;
 
-    private static final Log LOG = LogFactory.getLog(PhoenixServerBuildIndexInputFormat.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixServerBuildIndexInputFormat.class);
 
     /**
      * instantiated by framework
@@ -103,7 +103,7 @@ public class PhoenixServerBuildIndexInputFormat<T extends DBWritable> extends Ph
             queryPlan.iterator(MapReduceParallelScanGrouper.getInstance());
             return queryPlan;
         } catch (Exception exception) {
-            LOG.error(String.format("Failed to get the query plan with error [%s]",
+            LOGGER.error(String.format("Failed to get the query plan with error [%s]",
                     exception.getMessage()));
             throw new RuntimeException(exception);
         }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/RegexToKeyValueMapper.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/RegexToKeyValueMapper.java
index f63923d..ce8d550 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/RegexToKeyValueMapper.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/RegexToKeyValueMapper.java
@@ -46,7 +46,7 @@ import com.google.common.base.Preconditions;
  */
 public class RegexToKeyValueMapper extends FormatToBytesWritableMapper<Map<?, ?>> {
 
-    protected static final Logger LOG = LoggerFactory.getLogger(RegexToKeyValueMapper.class);
+    protected static final Logger LOGGER = LoggerFactory.getLogger(RegexToKeyValueMapper.class);
 
     /** Configuration key for the regex */
     public static final String REGEX_CONFKEY = "phoenix.mapreduce.import.regex";
@@ -110,7 +110,8 @@ public class RegexToKeyValueMapper extends FormatToBytesWritableMapper<Map<?, ?>
 			Map<String, Object> data = new HashMap<>();
 			Matcher m = inputPattern.matcher(input);
 			if (m.groupCount() != columnInfoList.size()) {
-				LOG.debug(String.format("based on the regex and input, input fileds %s size doesn't match the table columns %s size", m.groupCount(), columnInfoList.size()));
+				LOGGER.debug(String.format("based on the regex and input, input fields %s " +
+                        "size doesn't match the table columns %s size", m.groupCount(), columnInfoList.size()));
 				return data;
 			}
 			
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/DirectHTableWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/DirectHTableWriter.java
index 32d2f3b..77825cc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/DirectHTableWriter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/DirectHTableWriter.java
@@ -33,7 +33,7 @@ import org.slf4j.LoggerFactory;
  * Writes mutations directly to HBase using HBase front-door APIs.
  */
 public class DirectHTableWriter {
-    private static final Logger LOG = LoggerFactory.getLogger(DirectHTableWriter.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(DirectHTableWriter.class);
 
     private Configuration conf = null;
 
@@ -54,9 +54,9 @@ public class DirectHTableWriter {
         try {
             this.table = new HTable(this.conf, tableName);
             this.table.setAutoFlush(false, true);
-            LOG.info("Created table instance for " + tableName);
+            LOGGER.info("Created table instance for " + tableName);
         } catch (IOException e) {
-            LOG.error("IOException : ", e);
+            LOGGER.error("IOException : ", e);
             tryClosingResourceSilently(this.table);
             throw new RuntimeException(e);
         }
@@ -80,7 +80,7 @@ public class DirectHTableWriter {
             try {
                 res.close();
             } catch (IOException e) {
-                LOG.error("Closing resource: " + res + " failed with error: ", e);
+                LOGGER.error("Closing resource: " + res + " failed with error: ", e);
             }
         }
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyMapper.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyMapper.java
index c651077..da6e6e1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyMapper.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyMapper.java
@@ -59,7 +59,7 @@ import com.google.common.base.Joiner;
  */
 public class IndexScrutinyMapper extends Mapper<NullWritable, PhoenixIndexDBWritable, Text, Text> {
 
-    private static final Logger LOG = LoggerFactory.getLogger(IndexScrutinyMapper.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(IndexScrutinyMapper.class);
     private Connection connection;
     private List<ColumnInfo> targetTblColumnMetadata;
     private long batchSize;
@@ -146,7 +146,7 @@ public class IndexScrutinyMapper extends Mapper<NullWritable, PhoenixIndexDBWrit
                     PhoenixRuntime.generateColumnInfo(connection, qTargetTable, targetColNames);
             sourceTblColumnMetadata =
                     PhoenixRuntime.generateColumnInfo(connection, qSourceTable, sourceColNames);
-            LOG.info("Target table base query: " + targetTableQuery);
+            LOGGER.info("Target table base query: " + targetTableQuery);
             md5 = MessageDigest.getInstance("MD5");
         } catch (SQLException | NoSuchAlgorithmException e) {
             tryClosingResourceSilently(this.outputUpsertStmt);
@@ -161,7 +161,7 @@ public class IndexScrutinyMapper extends Mapper<NullWritable, PhoenixIndexDBWrit
             try {
                 res.close();
             } catch (Exception e) {
-                LOG.error("Closing resource: " + res + " failed :", e);
+                LOGGER.error("Closing resource: " + res + " failed :", e);
             }
         }
     }
@@ -184,7 +184,7 @@ public class IndexScrutinyMapper extends Mapper<NullWritable, PhoenixIndexDBWrit
             }
             context.progress(); // Make sure progress is reported to Application Master.
         } catch (SQLException | IllegalArgumentException e) {
-            LOG.error(" Error while read/write of a record ", e);
+            LOGGER.error(" Error while read/write of a record ", e);
             context.getCounter(PhoenixJobCounters.FAILED_RECORDS).increment(1);
             throw new IOException(e);
         }
@@ -200,7 +200,7 @@ public class IndexScrutinyMapper extends Mapper<NullWritable, PhoenixIndexDBWrit
                 processBatch(context);
                 connection.close();
             } catch (SQLException e) {
-                LOG.error("Error while closing connection in the PhoenixIndexMapper class ", e);
+                LOGGER.error("Error while closing connection in the PhoenixIndexMapper class ", e);
                 throwException = new IOException(e);
             }
         }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTool.java
index d9a14bf..1636a4e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTool.java
@@ -72,7 +72,7 @@ import com.google.common.collect.Lists;
  */
 public class IndexScrutinyTool extends Configured implements Tool {
 
-    private static final Logger LOG = LoggerFactory.getLogger(IndexScrutinyTool.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(IndexScrutinyTool.class);
 
     private static final Option SCHEMA_NAME_OPTION =
             new Option("s", "schema", true, "Phoenix schema name (optional)");
@@ -264,7 +264,7 @@ public class IndexScrutinyTool extends Configured implements Tool {
             final String selectQuery =
                     QueryUtil.constructSelectStatement(qSourceTable, sourceColumnNames, null,
                         Hint.NO_INDEX, true);
-            LOG.info("Query used on source table to feed the mapper: " + selectQuery);
+            LOGGER.info("Query used on source table to feed the mapper: " + selectQuery);
 
             PhoenixConfigurationUtil.setScrutinyOutputFormat(configuration, outputFormat);
             // if outputting to table, setup the upsert to the output table
@@ -273,7 +273,7 @@ public class IndexScrutinyTool extends Configured implements Tool {
                         IndexScrutinyTableOutput.constructOutputTableUpsert(sourceDynamicCols,
                             targetDynamicCols, connection);
                 PhoenixConfigurationUtil.setUpsertStatement(configuration, upsertStmt);
-                LOG.info("Upsert statement used for output table: " + upsertStmt);
+                LOGGER.info("Upsert statement used for output table: " + upsertStmt);
             }
 
             final String jobName =
@@ -415,7 +415,7 @@ public class IndexScrutinyTool extends Configured implements Tool {
                 }
             }
 
-            LOG.info(String.format(
+            LOGGER.info(String.format(
                 "Running scrutiny [schemaName=%s, dataTable=%s, indexTable=%s, useSnapshot=%s, timestamp=%s, batchSize=%s, outputBasePath=%s, outputFormat=%s, outputMaxRows=%s]",
                 schemaName, dataTable, indexTable, useSnapshot, ts, batchSize, basePath,
                 outputFormat, outputMaxRows));
@@ -435,13 +435,13 @@ public class IndexScrutinyTool extends Configured implements Tool {
             }
 
             if (!isForeground) {
-                LOG.info("Running Index Scrutiny in Background - Submit async and exit");
+                LOGGER.info("Running Index Scrutiny in Background - Submit async and exit");
                 for (Job job : jobs) {
                     job.submit();
                 }
                 return 0;
             }
-            LOG.info(
+            LOGGER.info(
                 "Running Index Scrutiny in Foreground. Waits for the build to complete. This may take a long time!.");
             boolean result = true;
             for (Job job : jobs) {
@@ -450,7 +450,7 @@ public class IndexScrutinyTool extends Configured implements Tool {
 
             // write the results to the output metadata table
             if (outputInvalidRows && OutputFormat.TABLE.equals(outputFormat)) {
-                LOG.info("Writing results of jobs to output table "
+                LOGGER.info("Writing results of jobs to output table "
                         + IndexScrutinyTableOutput.OUTPUT_METADATA_TABLE_NAME);
                 IndexScrutinyTableOutput.writeJobResults(connection, args, jobs);
             }
@@ -458,11 +458,11 @@ public class IndexScrutinyTool extends Configured implements Tool {
             if (result) {
                 return 0;
             } else {
-                LOG.error("IndexScrutinyTool job failed! Check logs for errors..");
+                LOGGER.error("IndexScrutinyTool job failed! Check logs for errors..");
                 return -1;
             }
         } catch (Exception ex) {
-            LOG.error("An exception occurred while performing the indexing job: "
+            LOGGER.error("An exception occurred while performing the indexing job: "
                     + ExceptionUtils.getMessage(ex) + " at:\n" + ExceptionUtils.getStackTrace(ex));
             return -1;
         } finally {
@@ -471,7 +471,7 @@ public class IndexScrutinyTool extends Configured implements Tool {
                     connection.close();
                 }
             } catch (SQLException sqle) {
-                LOG.error("Failed to close connection ", sqle.getMessage());
+                LOGGER.error("Failed to close connection ", sqle.getMessage());
                 throw new RuntimeException("Failed to close connection");
             }
         }
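
One detail worth noting in the hunk above: the converted call LOGGER.error("Failed to close connection ", sqle.getMessage()) passes an argument for which the message has no {} placeholder, so slf4j ignores it and neither the SQLException message nor its stack trace reaches the log. A small sketch of the two idioms that do keep that information; the class name is illustrative:

import java.sql.Connection;
import java.sql.SQLException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class CloseLoggingSketch {
    private static final Logger LOGGER = LoggerFactory.getLogger(CloseLoggingSketch.class);

    void closeQuietly(Connection connection) {
        try {
            connection.close();
        } catch (SQLException sqle) {
            // Placeholder form: the argument is substituted into {}.
            LOGGER.error("Failed to close connection: {}", sqle.getMessage());
            // Throwable form: passing the exception last also logs the stack trace.
            LOGGER.error("Failed to close connection", sqle);
        }
    }
}
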
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
index 9938a98..c8096c7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
@@ -108,7 +108,7 @@ import com.google.common.collect.Lists;
  */
 public class IndexTool extends Configured implements Tool {
 
-    private static final Logger LOG = LoggerFactory.getLogger(IndexTool.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(IndexTool.class);
 
     private String schemaName;
     private String dataTable;
@@ -624,7 +624,9 @@ public class IndexTool extends Configured implements Tool {
                     int autosplitNumRegions = nOpt == null ? DEFAULT_AUTOSPLIT_NUM_REGIONS : Integer.parseInt(nOpt);
                     String rateOpt = cmdLine.getOptionValue(SPLIT_INDEX_OPTION.getOpt());
                     double samplingRate = rateOpt == null ? DEFAULT_SPLIT_SAMPLING_RATE : Double.parseDouble(rateOpt);
-                    LOG.info(String.format("Will split index %s , autosplit=%s , autoSplitNumRegions=%s , samplingRate=%s", indexTable, autosplit, autosplitNumRegions, samplingRate));
+                    LOGGER.info(String.format(
+                            "Will split index %s , autosplit=%s , autoSplitNumRegions=%s , samplingRate=%s",
+                            indexTable, autosplit, autosplitNumRegions, samplingRate));
 
                     splitIndexTable(connection.unwrap(PhoenixConnection.class), autosplit, autosplitNumRegions, samplingRate, configuration);
                 }
@@ -642,11 +644,11 @@ public class IndexTool extends Configured implements Tool {
             job = jobFactory.getJob();
 
             if (!isForeground && useDirectApi) {
-                LOG.info("Running Index Build in Background - Submit async and exit");
+                LOGGER.info("Running Index Build in Background - Submit async and exit");
                 job.submit();
                 return 0;
             }
-            LOG.info("Running Index Build in Foreground. Waits for the build to complete. This may take a long time!.");
+            LOGGER.info("Running Index Build in Foreground. Waits for the build to complete. This may take a long time!.");
             boolean result = job.waitForCompletion(true);
             
             if (result) {
@@ -654,7 +656,7 @@ public class IndexTool extends Configured implements Tool {
                     if (isLocalIndexBuild) {
                         validateSplitForLocalIndex(splitKeysBeforeJob, htable);
                     }
-                    LOG.info("Loading HFiles from {}", outputPath);
+                    LOGGER.info("Loading HFiles from {}", outputPath);
                     LoadIncrementalHFiles loader = new LoadIncrementalHFiles(configuration);
                     loader.doBulkLoad(outputPath, htable);
                     htable.close();
@@ -664,11 +666,11 @@ public class IndexTool extends Configured implements Tool {
                 }
                 return 0;
             } else {
-                LOG.error("IndexTool job failed! Check logs for errors..");
+                LOGGER.error("IndexTool job failed! Check logs for errors..");
                 return -1;
             }
         } catch (Exception ex) {
-            LOG.error("An exception occurred while performing the indexing job: "
+            LOGGER.error("An exception occurred while performing the indexing job: "
                     + ExceptionUtils.getMessage(ex) + " at:\n" + ExceptionUtils.getStackTrace(ex));
             return -1;
         } finally {
@@ -678,7 +680,7 @@ public class IndexTool extends Configured implements Tool {
                     try {
                         connection.close();
                     } catch (SQLException e) {
-                        LOG.error("Failed to close connection ", e);
+                        LOGGER.error("Failed to close connection ", e);
                         rethrowException = true;
                     }
                 }
@@ -686,7 +688,7 @@ public class IndexTool extends Configured implements Tool {
                     try {
                         htable.close();
                     } catch (IOException e) {
-                        LOG.error("Failed to close htable ", e);
+                        LOGGER.error("Failed to close htable ", e);
                         rethrowException = true;
                     }
                 }
@@ -694,7 +696,7 @@ public class IndexTool extends Configured implements Tool {
                     try {
                         jobFactory.closeConnection();
                     } catch (SQLException e) {
-                        LOG.error("Failed to close jobFactory ", e);
+                        LOGGER.error("Failed to close jobFactory ", e);
                         rethrowException = true;
                     }
                 }
@@ -715,7 +717,7 @@ public class IndexTool extends Configured implements Tool {
                         .getTable(pDataTable.getPhysicalName().getBytes())) {
             numRegions = hDataTable.getRegionLocator().getStartKeys().length;
             if (autosplit && !(numRegions > autosplitNumRegions)) {
-                LOG.info(String.format(
+                LOGGER.info(String.format(
                     "Will not split index %s because the data table only has %s regions, autoSplitNumRegions=%s",
                     pIndexTable.getPhysicalName(), numRegions, autosplitNumRegions));
                 return; // do nothing if # of regions is too low
@@ -801,7 +803,7 @@ public class IndexTool extends Configured implements Tool {
             String errMsg = "The index to build is local index and the split keys are not matching"
                     + " before and after running the job. Please rerun the job otherwise"
                     + " there may be inconsistencies between actual data and index data";
-            LOG.error(errMsg);
+            LOGGER.error(errMsg);
             throw new Exception(errMsg);
         }
         return true;
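
IndexTool above mixes two message styles: strings built eagerly with String.format or concatenation, and slf4j's parameterized form, as in LOGGER.info("Loading HFiles from {}", outputPath). The {} form defers string construction until the level is known to be enabled, which matters most on hot paths and for debug output. A short sketch with illustrative values:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ParameterizedLoggingSketch {
    private static final Logger LOGGER = LoggerFactory.getLogger(ParameterizedLoggingSketch.class);

    public static void main(String[] args) {
        String indexTable = "MY_INDEX";   // illustrative values, not Phoenix defaults
        boolean autosplit = true;
        double samplingRate = 0.05;

        // Eager: String.format runs even when INFO is disabled for this logger.
        LOGGER.info(String.format("Will split index %s , autosplit=%s , samplingRate=%s",
                indexTable, autosplit, samplingRate));

        // Lazy: the message is only assembled if INFO is enabled.
        LOGGER.info("Will split index {} , autosplit={} , samplingRate={}",
                indexTable, autosplit, samplingRate);
    }
}
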
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexToolUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexToolUtil.java
index 2dc7551..98ac5e9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexToolUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexToolUtil.java
@@ -38,7 +38,7 @@ public class IndexToolUtil {
 
 	private static final String ALTER_INDEX_QUERY_TEMPLATE = "ALTER INDEX IF EXISTS %s ON %s %s";  
     
-	private static final Logger LOG = LoggerFactory.getLogger(IndexToolUtil.class);
+	private static final Logger LOGGER = LoggerFactory.getLogger(IndexToolUtil.class);
 	
 	/**
 	 * Updates the index state.
@@ -74,7 +74,7 @@ public class IndexToolUtil {
         Preconditions.checkNotNull(connection);
         final String alterQuery = String.format(ALTER_INDEX_QUERY_TEMPLATE,indexTable,masterTable,state.name());
         connection.createStatement().execute(alterQuery);
-        LOG.info(" Updated the status of the index {} to {} " , indexTable , state.name());
+        LOGGER.info(" Updated the status of the index {} to {} " , indexTable , state.name());
     }
 	
 }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectMapper.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectMapper.java
index 7328014..5ada131 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectMapper.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectMapper.java
@@ -52,7 +52,7 @@ import org.slf4j.LoggerFactory;
 public class PhoenixIndexImportDirectMapper extends
         Mapper<NullWritable, PhoenixIndexDBWritable, ImmutableBytesWritable, IntWritable> {
 
-    private static final Logger LOG = LoggerFactory.getLogger(PhoenixIndexImportDirectMapper.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixIndexImportDirectMapper.class);
 
     private final PhoenixIndexDBWritable indxWritable = new PhoenixIndexDBWritable();
 
@@ -98,7 +98,7 @@ public class PhoenixIndexImportDirectMapper extends
             //Get batch size in terms of bytes
             batchSizeBytes = ((PhoenixConnection) connection).getMutateBatchSizeBytes();
 
-            LOG.info("Mutation Batch Size = " + batchSize);
+            LOGGER.info("Mutation Batch Size = " + batchSize);
 
             final String upsertQuery = PhoenixConfigurationUtil.getUpsertStatement(configuration);
             this.pStatement = connection.prepareStatement(upsertQuery);
@@ -136,7 +136,7 @@ public class PhoenixIndexImportDirectMapper extends
             // Make sure progress is reported to Application Master.
             context.progress();
         } catch (SQLException e) {
-            LOG.error(" Error {}  while read/write of a record ", e.getMessage());
+            LOGGER.error(" Error {}  while read/write of a record ", e.getMessage());
             context.getCounter(PhoenixJobCounters.FAILED_RECORDS).increment(1);
             throw new RuntimeException(e);
         }
@@ -173,7 +173,7 @@ public class PhoenixIndexImportDirectMapper extends
                 new IntWritable(0));
             super.cleanup(context);
         } catch (SQLException e) {
-            LOG.error(" Error {}  while read/write of a record ", e.getMessage());
+            LOGGER.error(" Error {}  while read/write of a record ", e.getMessage());
             context.getCounter(PhoenixJobCounters.FAILED_RECORDS).increment(1);
             throw new RuntimeException(e);
         } finally {
@@ -186,7 +186,7 @@ public class PhoenixIndexImportDirectMapper extends
             try {
                 this.connection.close();
             } catch (SQLException e) {
-                LOG.error("Error while closing connection in the PhoenixIndexMapper class ", e);
+                LOGGER.error("Error while closing connection in the PhoenixIndexMapper class ", e);
             }
         }
         if (this.writer != null) {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectReducer.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectReducer.java
index 51b88c1..b304dde 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectReducer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectReducer.java
@@ -35,8 +35,8 @@ import org.slf4j.LoggerFactory;
 public class PhoenixIndexImportDirectReducer extends
         Reducer<ImmutableBytesWritable, IntWritable, NullWritable, NullWritable> {
 
-    private static final Logger LOG = LoggerFactory.getLogger(PhoenixIndexImportDirectReducer.class);
     private Configuration configuration;
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixIndexImportDirectReducer.class);
 
     /**
      * Called once at the start of the task.
@@ -53,7 +53,7 @@ public class PhoenixIndexImportDirectReducer extends
         try {
             IndexToolUtil.updateIndexState(configuration, PIndexState.ACTIVE);
         } catch (SQLException e) {
-            LOG.error(" Failed to update the status to Active");
+            LOGGER.error(" Failed to update the status to Active");
             throw new RuntimeException(e.getMessage());
         }
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportMapper.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportMapper.java
index e060bc3..aa693e6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportMapper.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportMapper.java
@@ -47,7 +47,7 @@ import org.slf4j.LoggerFactory;
  */
 public class PhoenixIndexImportMapper extends Mapper<NullWritable, PhoenixIndexDBWritable, ImmutableBytesWritable, KeyValue> {
 
-    private static final Logger LOG = LoggerFactory.getLogger(PhoenixIndexImportMapper.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixIndexImportMapper.class);
     
     private final PhoenixIndexDBWritable indxWritable = new PhoenixIndexDBWritable();
     
@@ -118,7 +118,7 @@ public class PhoenixIndexImportMapper extends Mapper<NullWritable, PhoenixIndexD
             }
             connection.rollback();
        } catch (SQLException e) {
-           LOG.error("Error {}  while read/write of a record ",e.getMessage());
+           LOGGER.error("Error {}  while read/write of a record ",e.getMessage());
            context.getCounter(PhoenixJobCounters.FAILED_RECORDS).increment(1);
            throw new RuntimeException(e);
         } 
@@ -135,7 +135,7 @@ public class PhoenixIndexImportMapper extends Mapper<NullWritable, PhoenixIndexD
             try {
                 connection.close();
             } catch (SQLException e) {
-                LOG.error("Error while closing connection in the PhoenixIndexMapper class ", e);
+                LOGGER.error("Error while closing connection in the PhoenixIndexMapper class ", e);
             }
         }
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexPartialBuildMapper.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexPartialBuildMapper.java
index 2077137..b168032 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexPartialBuildMapper.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexPartialBuildMapper.java
@@ -59,7 +59,7 @@ import com.google.common.collect.Lists;
  */
 public class PhoenixIndexPartialBuildMapper extends TableMapper<ImmutableBytesWritable, IntWritable> {
 
-    private static final Logger LOG = LoggerFactory.getLogger(PhoenixIndexPartialBuildMapper.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixIndexPartialBuildMapper.class);
 
     private PhoenixConnection connection;
 
@@ -92,7 +92,7 @@ public class PhoenixIndexPartialBuildMapper extends TableMapper<ImmutableBytesWr
                     services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB,
                         QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
             batchSize = Math.min(connection.getMutateBatchSize(), maxSize);
-            LOG.info("Mutation Batch Size = " + batchSize);
+            LOGGER.info("Mutation Batch Size = " + batchSize);
             this.mutations = Lists.newArrayListWithExpectedSize(batchSize);
             maintainers=new ImmutableBytesPtr(PhoenixConfigurationUtil.getIndexMaintainers(configuration));
         } catch (SQLException e) {
@@ -142,7 +142,7 @@ public class PhoenixIndexPartialBuildMapper extends TableMapper<ImmutableBytesWr
             // Make sure progress is reported to Application Master.
             context.progress();
         } catch (SQLException e) {
-            LOG.error(" Error {}  while read/write of a record ", e.getMessage());
+            LOGGER.error(" Error {}  while read/write of a record ", e.getMessage());
             context.getCounter(PhoenixJobCounters.FAILED_RECORDS).increment(1);
             throw new RuntimeException(e);
         }
@@ -167,7 +167,7 @@ public class PhoenixIndexPartialBuildMapper extends TableMapper<ImmutableBytesWr
                 new IntWritable(0));
             super.cleanup(context);
         } catch (SQLException e) {
-            LOG.error(" Error {}  while read/write of a record ", e.getMessage());
+            LOGGER.error(" Error {}  while read/write of a record ", e.getMessage());
             context.getCounter(PhoenixJobCounters.FAILED_RECORDS).increment(1);
             throw new RuntimeException(e);
         } finally {
@@ -180,7 +180,7 @@ public class PhoenixIndexPartialBuildMapper extends TableMapper<ImmutableBytesWr
             try {
                 this.connection.close();
             } catch (SQLException e) {
-                LOG.error("Error while closing connection in the PhoenixIndexMapper class ", e);
+                LOGGER.error("Error while closing connection in the PhoenixIndexMapper class ", e);
             }
         }
         if (this.writer != null) {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixServerBuildIndexMapper.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixServerBuildIndexMapper.java
index 34bcc9b..0544d02 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixServerBuildIndexMapper.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixServerBuildIndexMapper.java
@@ -52,7 +52,7 @@ import org.slf4j.LoggerFactory;
 public class PhoenixServerBuildIndexMapper extends
         Mapper<NullWritable, PhoenixIndexDBWritable, ImmutableBytesWritable, IntWritable> {
 
-    private static final Logger LOG = LoggerFactory.getLogger(PhoenixServerBuildIndexMapper.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixServerBuildIndexMapper.class);
 
     @Override
     protected void setup(final Context context) throws IOException, InterruptedException {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/automation/PhoenixMRJobSubmitter.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/automation/PhoenixMRJobSubmitter.java
index 31e657a..954ee23 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/automation/PhoenixMRJobSubmitter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/automation/PhoenixMRJobSubmitter.java
@@ -39,8 +39,6 @@ import java.util.concurrent.TimeoutException;
 
 import javax.security.auth.login.AppConfigurationEntry;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
@@ -60,6 +58,8 @@ import org.apache.phoenix.util.UpgradeUtil;
 import org.apache.phoenix.util.ZKBasedMasterElectionUtil;
 import org.codehaus.jettison.json.JSONArray;
 import org.codehaus.jettison.json.JSONObject;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.gson.Gson;
 import com.google.gson.GsonBuilder;
@@ -116,7 +116,7 @@ public class PhoenixMRJobSubmitter {
     private static final int JOB_SUBMIT_POOL_TIMEOUT = 5;
     private Configuration conf;
     private String zkQuorum;
-    private static final Log LOG = LogFactory.getLog(PhoenixMRJobSubmitter.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixMRJobSubmitter.class);
 
     public PhoenixMRJobSubmitter() throws IOException {
         this(null);
@@ -158,11 +158,11 @@ public class PhoenixMRJobSubmitter {
 
         switch (type) {
         case CAPACITY:
-            LOG.info("Applying the Capacity Scheduler Queue Configurations");
+            LOGGER.info("Applying the Capacity Scheduler Queue Configurations");
             PhoenixMRJobUtil.updateCapacityQueueInfo(conf);
             break;
         case FAIR:
-            LOG.warn("Fair Scheduler type is not yet supported");
+            LOGGER.warn("Fair Scheduler type is not yet supported");
             throw new IOException("Fair Scheduler is not yet supported");
         case NONE:
         default:
@@ -184,7 +184,7 @@ public class PhoenixMRJobSubmitter {
         AppConfigurationEntry entries[] =
                 javax.security.auth.login.Configuration.getConfiguration()
                         .getAppConfigurationEntry("Client");
-        LOG.info("Security - Fetched App Login Configuration Entries");
+        LOGGER.info("Security - Fetched App Login Configuration Entries");
         if (entries != null) {
             for (AppConfigurationEntry entry : entries) {
                 if (entry.getOptions().get(PRINCIPAL) != null) {
@@ -194,12 +194,12 @@ public class PhoenixMRJobSubmitter {
                     keyTabPath = (String) entry.getOptions().get(KEYTAB);
                 }
             }
-            LOG.info("Security - Got Principal = " + principal + "");
+            LOGGER.info("Security - Got Principal = " + principal + "");
             if (principal != null && keyTabPath != null) {
-                LOG.info("Security - Retreiving the TGT with principal:" + principal
+                LOGGER.info("Security - Retrieving the TGT with principal:" + principal
                         + " and keytab:" + keyTabPath);
                 UserGroupInformation.loginUserFromKeytab(principal, keyTabPath);
-                LOG.info("Security - Retrieved TGT with principal:" + principal + " and keytab:"
+                LOGGER.info("Security - Retrieved TGT with principal:" + principal + " and keytab:"
                         + keyTabPath);
             }
         }
@@ -237,7 +237,7 @@ public class PhoenixMRJobSubmitter {
 
         if (!ZKBasedMasterElectionUtil.acquireLock(zookeeperWatcher, PHOENIX_LOCKS_PARENT,
             AUTO_INDEX_BUILD_LOCK_NAME)) {
-            LOG.info("Some other node is already running Automated Index Build. Skipping execution!");
+            LOGGER.info("Some other node is already running Automated Index Build. Skipping execution!");
             return -1;
         }
         // 1) Query Phoenix SYSTEM.CATALOG table to get a list of all candidate indexes to be built
@@ -247,22 +247,22 @@ public class PhoenixMRJobSubmitter {
 
         // Get Candidate indexes to be built
         Map<String, PhoenixAsyncIndex> candidateJobs = getCandidateJobs();
-        LOG.info("Candidate Indexes to be built as seen from SYSTEM.CATALOG - " + candidateJobs);
+        LOGGER.info("Candidate Indexes to be built as seen from SYSTEM.CATALOG - " + candidateJobs);
 
         // Get already scheduled Jobs list from Yarn Resource Manager
         Set<String> submittedJobs = getSubmittedYarnApps();
-        LOG.info("Already Submitted/Running MR index build jobs - " + submittedJobs);
+        LOGGER.info("Already Submitted/Running MR index build jobs - " + submittedJobs);
 
         // Get final jobs to submit
         Set<PhoenixAsyncIndex> jobsToSchedule = getJobsToSubmit(candidateJobs, submittedJobs);
 
-        LOG.info("Final indexes to be built - " + jobsToSchedule);
+        LOGGER.info("Final indexes to be built - " + jobsToSchedule);
         List<Future<Boolean>> results = new ArrayList<Future<Boolean>>(jobsToSchedule.size());
 
         int failedJobSubmissionCount = 0;
         int timedoutJobSubmissionCount = 0;
         ExecutorService jobSubmitPool = Executors.newFixedThreadPool(10);
-        LOG.info("Attempt to submit MR index build jobs for - " + jobsToSchedule);
+        LOGGER.info("Attempt to submit MR index build jobs for - " + jobsToSchedule);
 
         try {
             for (PhoenixAsyncIndex indexToBuild : jobsToSchedule) {
@@ -285,7 +285,7 @@ public class PhoenixMRJobSubmitter {
             PhoenixMRJobUtil.shutdown(jobSubmitPool);
         }
 
-        LOG.info("Result of Attempt to Submit MR index build Jobs - Jobs attempted = "
+        LOGGER.info("Result of Attempt to Submit MR index build Jobs - Jobs attempted = "
                 + jobsToSchedule.size() + " ; Failed to Submit = " + failedJobSubmissionCount
                 + " ; Timed out = " + timedoutJobSubmissionCount);
         return failedJobSubmissionCount;
@@ -312,7 +312,7 @@ public class PhoenixMRJobSubmitter {
                 + "," + YarnApplication.state.RUNNING);
         int rmPort = PhoenixMRJobUtil.getRMPort(conf);
         String response = PhoenixMRJobUtil.getJobsInformationFromRM(rmHost, rmPort, urlParams);
-        LOG.debug("Already Submitted/Running Apps = " + response);
+        LOGGER.debug("Already Submitted/Running Apps = " + response);
         JSONObject jobsJson = new JSONObject(response);
         JSONObject appsJson = jobsJson.optJSONObject(YarnApplication.APPS_ELEMENT);
         Set<String> yarnApplicationSet = new HashSet<String>();
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java
index 0ff7904..c756aae 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java
@@ -28,8 +28,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Properties;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
@@ -56,6 +54,8 @@ import org.apache.phoenix.schema.TableNotFoundException;
 import org.apache.phoenix.util.ColumnInfo;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.QueryUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
@@ -68,7 +68,7 @@ import com.google.common.collect.Lists;
  */
 public final class PhoenixConfigurationUtil {
 
-    private static final Log LOG = LogFactory.getLog(PhoenixInputFormat.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixConfigurationUtil.class);
 
     public static final String SESSION_ID = "phoenix.sessionid";
     
@@ -307,7 +307,7 @@ public final class PhoenixConfigurationUtil {
             List<String> upsertColumnList =
                     PhoenixConfigurationUtil.getUpsertColumnNames(configuration);
             if(!upsertColumnList.isEmpty()) {
-                LOG.info(String.format("UseUpsertColumns=%s, upsertColumnList.size()=%s,"
+                LOGGER.info(String.format("UseUpsertColumns=%s, upsertColumnList.size()=%s,"
                                 + " upsertColumnList=%s ",!upsertColumnList.isEmpty(),
                         upsertColumnList.size(), Joiner.on(",").join(upsertColumnList)));
             }
@@ -332,11 +332,11 @@ public final class PhoenixConfigurationUtil {
         if (!upsertColumnNames.isEmpty()) {
             // Generating UPSERT statement without column name information.
             upsertStmt = QueryUtil.constructUpsertStatement(tableName, columnMetadataList);
-            LOG.info("Phoenix Custom Upsert Statement: "+ upsertStmt);
+            LOGGER.info("Phoenix Custom Upsert Statement: "+ upsertStmt);
         } else {
             // Generating UPSERT statement without column name information.
             upsertStmt = QueryUtil.constructGenericUpsertStatement(tableName, columnMetadataList.size());
-            LOG.info("Phoenix Generic Upsert Statement: " + upsertStmt);
+            LOGGER.info("Phoenix Generic Upsert Statement: " + upsertStmt);
         }
         configuration.set(UPSERT_STATEMENT, upsertStmt);
         return upsertStmt;
@@ -377,7 +377,7 @@ public final class PhoenixConfigurationUtil {
             final Configuration configuration) {
     	List<String> selectColumnList = PhoenixConfigurationUtil.getSelectColumnNames(configuration);
         if(!selectColumnList.isEmpty()) {
-            LOG.info(String.format("UseSelectColumns=%s, selectColumnList.size()=%s, selectColumnList=%s "
+            LOGGER.info(String.format("UseSelectColumns=%s, selectColumnList.size()=%s, selectColumnList=%s "
                     ,!selectColumnList.isEmpty(), selectColumnList.size(), Joiner.on(",").join(selectColumnList)
                     ));
         }
@@ -395,7 +395,7 @@ public final class PhoenixConfigurationUtil {
         final List<ColumnInfo> columnMetadataList = getSelectColumnMetadataList(configuration);
         final String conditions = configuration.get(INPUT_TABLE_CONDITIONS);
         selectStmt = QueryUtil.constructSelectStatement(tableName, columnMetadataList, conditions);
-        LOG.info("Select Statement: "+ selectStmt);
+        LOGGER.info("Select Statement: "+ selectStmt);
         configuration.set(SELECT_STATEMENT, selectStmt);
         return selectStmt;
     }
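
Besides the API switch, the hunk above also corrects which class the logger is created for: the old field used PhoenixInputFormat.class even though it lives in PhoenixConfigurationUtil. The logger name is what log configuration, filtering, and output attribution key on, so it should normally match the declaring class. A tiny sketch; class names are illustrative:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LoggerNamingSketch {
    // Misleading: events from this class would be attributed to some other logger,
    //   private static final Logger LOGGER = LoggerFactory.getLogger(SomeOtherClass.class);
    // Preferred: name the logger after the declaring class.
    private static final Logger LOGGER = LoggerFactory.getLogger(LoggerNamingSketch.class);

    public static void main(String[] args) {
        LOGGER.info("logger name is {}", LOGGER.getName());
    }
}
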
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/metrics/Metrics.java b/phoenix-core/src/main/java/org/apache/phoenix/metrics/Metrics.java
index 24950c4..86b54df 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/metrics/Metrics.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/metrics/Metrics.java
@@ -17,14 +17,14 @@
  */
 package org.apache.phoenix.metrics;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.metrics2.MetricsSystem;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class Metrics {
 
-    private static final Log LOG = LogFactory.getLog(Metrics.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(Metrics.class);
 
   private static volatile MetricsSystem manager = DefaultMetricsSystem.instance();
 
@@ -35,13 +35,13 @@ public class Metrics {
     public static MetricsSystem initialize() {
         // if the jars aren't on the classpath, then we don't start the metrics system
         if (manager == null) {
-            LOG.warn("Phoenix metrics could not be initialized - no MetricsManager found!");
+            LOGGER.warn("Phoenix metrics could not be initialized - no MetricsManager found!");
             return null;
         }
         // only initialize the metrics system once
         synchronized (Metrics.class) {
             if (!initialized) {
-                LOG.info("Initializing metrics system: " + Metrics.METRICS_SYSTEM_NAME);
+                LOGGER.info("Initializing metrics system: " + Metrics.METRICS_SYSTEM_NAME);
                 manager.init(Metrics.METRICS_SYSTEM_NAME);
                 initialized = true;
             }
@@ -60,7 +60,7 @@ public class Metrics {
 
     public static void ensureConfigured() {
         if (!sinkInitialized) {
-            LOG.warn("Phoenix metrics2/tracing sink was not started. Should be it be?");
+            LOGGER.warn("Phoenix metrics2/tracing sink was not started. Should it be?");
         }
     }
 }
\ No newline at end of file
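
Metrics.initialize() above first checks that a MetricsSystem is available, then performs a one-time init inside a synchronized block so repeated callers do not re-initialize it. A simplified, self-contained sketch of that initialize-once shape; the MetricsSystem is replaced by a plain Object so the example carries no Hadoop dependency:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class InitializeOnceSketch {
    private static final Logger LOGGER = LoggerFactory.getLogger(InitializeOnceSketch.class);

    private static volatile Object manager = new Object();   // stand-in for the metrics system
    private static boolean initialized = false;

    public static Object initialize() {
        if (manager == null) {
            LOGGER.warn("Metrics could not be initialized - no manager found!");
            return null;
        }
        synchronized (InitializeOnceSketch.class) {
            if (!initialized) {
                LOGGER.info("Initializing metrics system: {}", "sketch-metrics");
                initialized = true;
            }
        }
        return manager;
    }

    public static void main(String[] args) {
        initialize();
        initialize();   // second call skips the guarded init body
    }
}
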
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
index 788e2dd..3919a7c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
@@ -25,8 +25,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
@@ -80,7 +78,7 @@ class DefaultStatisticsCollector implements StatisticsCollector {
     private final RegionCoprocessorEnvironment env;
     private long guidePostDepth;
     private long maxTimeStamp = MetaDataProtocol.MIN_TABLE_TIMESTAMP;
-    private static final Log LOG = LogFactory.getLog(DefaultStatisticsCollector.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(DefaultStatisticsCollector.class);
     private ImmutableBytesWritable currentRow;
     private final long clientTimeStamp;
     private final String tableName;
@@ -186,7 +184,7 @@ class DefaultStatisticsCollector implements StatisticsCollector {
                     try {
                         htable.close();
                     } catch (IOException e) {
-                        LOG.warn("Failed to close " + htable.getName(), e);
+                        LOGGER.warn("Failed to close " + htable.getName(), e);
                     }
                 }
             }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java
index 2fb6f14..f0b7998 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java
@@ -25,8 +25,6 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.Callable;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -38,12 +36,14 @@ import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.hadoop.hbase.regionserver.ScannerContext;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * The scanner that does the scanning to collect the stats during major compaction.{@link DefaultStatisticsCollector}
  */
 public class StatisticsScanner implements InternalScanner {
-    private static final Log LOG = LogFactory.getLog(StatisticsScanner.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(StatisticsScanner.class);
     private InternalScanner delegate;
     private StatisticsWriter statsWriter;
     private Region region;
@@ -95,7 +95,7 @@ public class StatisticsScanner implements InternalScanner {
         StatisticsCollectionRunTracker collectionTracker = getStatsCollectionRunTracker(config);
         StatisticsScannerCallable callable = createCallable();
         if (getRegionServerServices().isStopping() || getRegionServerServices().isStopped()) {
-            LOG.debug("Not updating table statistics because the server is stopping/stopped");
+            LOGGER.debug("Not updating table statistics because the server is stopping/stopped");
             return;
         }
         if (!async) {
@@ -149,26 +149,26 @@ public class StatisticsScanner implements InternalScanner {
                 // Just verify if this if fine
                 ArrayList<Mutation> mutations = new ArrayList<Mutation>();
 
-                if (LOG.isDebugEnabled()) {
-                    LOG.debug("Deleting the stats for the region " + regionInfo.getRegionNameAsString()
+                if (LOGGER.isDebugEnabled()) {
+                    LOGGER.debug("Deleting the stats for the region " + regionInfo.getRegionNameAsString()
                             + " as part of major compaction");
                 }
                 getStatisticsWriter().deleteStatsForRegion(region, tracker, family, mutations);
-                if (LOG.isDebugEnabled()) {
-                    LOG.debug("Adding new stats for the region " + regionInfo.getRegionNameAsString()
+                if (LOGGER.isDebugEnabled()) {
+                    LOGGER.debug("Adding new stats for the region " + regionInfo.getRegionNameAsString()
                             + " as part of major compaction");
                 }
                 getStatisticsWriter().addStats(tracker, family, mutations);
-                if (LOG.isDebugEnabled()) {
-                    LOG.debug("Committing new stats for the region " + regionInfo.getRegionNameAsString()
+                if (LOGGER.isDebugEnabled()) {
+                    LOGGER.debug("Committing new stats for the region " + regionInfo.getRegionNameAsString()
                             + " as part of major compaction");
                 }
                 getStatisticsWriter().commitStats(mutations, tracker);
             } catch (IOException e) {
                 if (getRegionServerServices().isStopping() || getRegionServerServices().isStopped()) {
-                    LOG.debug("Ignoring error updating statistics because region is closing/closed");
+                    LOGGER.debug("Ignoring error updating statistics because region is closing/closed");
                 } else {
-                    LOG.error("Failed to update statistics table!", e);
+                    LOGGER.error("Failed to update statistics table!", e);
                     toThrow = e;
                 }
             } finally {
@@ -178,14 +178,14 @@ public class StatisticsScanner implements InternalScanner {
                     getTracker().close();// close the tracker
                 } catch (IOException e) {
                     if (toThrow == null) toThrow = e;
-                    LOG.error("Error while closing the stats table", e);
+                    LOGGER.error("Error while closing the stats table", e);
                 } finally {
                     // close the delegate scanner
                     try {
                         getDelegate().close();
                     } catch (IOException e) {
                         if (toThrow == null) toThrow = e;
-                        LOG.error("Error while closing the scanner", e);
+                        LOGGER.error("Error while closing the scanner", e);
                     } finally {
                         if (toThrow != null) { throw toThrow; }
                     }
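
StatisticsScanner above keeps its isDebugEnabled() guards around string concatenation. With slf4j the guard is only strictly needed when building the message itself is expensive; the {} placeholder form already skips the concatenation when DEBUG is off. A brief sketch with an illustrative region name:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class GuardedLoggingSketch {
    private static final Logger LOGGER = LoggerFactory.getLogger(GuardedLoggingSketch.class);

    public static void main(String[] args) {
        String regionName = "TEST_REGION";   // illustrative value

        // Guarded concatenation, as in the hunk above.
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug("Deleting the stats for the region " + regionName
                    + " as part of major compaction");
        }

        // Equivalent without the guard: the argument is only formatted in
        // when DEBUG is enabled for this logger.
        LOGGER.debug("Deleting the stats for the region {} as part of major compaction",
                regionName);
    }
}
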
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/tool/PhoenixCanaryTool.java b/phoenix-core/src/main/java/org/apache/phoenix/tool/PhoenixCanaryTool.java
index 865d210..ed35ec1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/tool/PhoenixCanaryTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/tool/PhoenixCanaryTool.java
@@ -234,7 +234,7 @@ public class PhoenixCanaryTool extends Configured implements Tool {
         }
     }
 
-    private static final Logger LOG = LoggerFactory.getLogger(PhoenixCanaryTool.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixCanaryTool.class);
 
     private static String getCurrentTimestamp() {
         return new SimpleDateFormat("yyyy.MM.dd.HH.mm.ss.ms").format(new Date());
@@ -289,7 +289,7 @@ public class PhoenixCanaryTool extends Configured implements Tool {
         try {
             Namespace cArgs = parseArgs(args);
             if (cArgs == null) {
-                LOG.error("Argument parsing failed.");
+                LOGGER.error("Argument parsing failed.");
                 throw new RuntimeException("Argument parsing failed");
             }
 
@@ -326,7 +326,7 @@ public class PhoenixCanaryTool extends Configured implements Tool {
             connection = getConnectionWithRetry(connectionURL);
 
             if (connection == null) {
-                LOG.error("Failed to get connection after multiple retries; the connection is null");
+                LOGGER.error("Failed to get connection after multiple retries; the connection is null");
             }
 
             SimpleTimeLimiter limiter = new SimpleTimeLimiter();
@@ -338,10 +338,10 @@ public class PhoenixCanaryTool extends Configured implements Tool {
                     sink.clearResults();
 
                     // Execute tests
-                    LOG.info("Starting UpsertTableTest");
+                    LOGGER.info("Starting UpsertTableTest");
                     sink.updateResults(new UpsertTableTest().runTest(connection));
 
-                    LOG.info("Starting ReadTableTest");
+                    LOGGER.info("Starting ReadTableTest");
                     sink.updateResults(new ReadTableTest().runTest(connection));
                     return null;
 
@@ -354,7 +354,7 @@ public class PhoenixCanaryTool extends Configured implements Tool {
             appInfo.setSuccessful(true);
 
         } catch (Exception e) {
-            LOG.error(Throwables.getStackTraceAsString(e));
+            LOGGER.error(Throwables.getStackTraceAsString(e));
             appInfo.setMessage(Throwables.getStackTraceAsString(e));
             appInfo.setSuccessful(false);
 
@@ -372,11 +372,11 @@ public class PhoenixCanaryTool extends Configured implements Tool {
         try{
             connection = getConnectionWithRetry(connectionURL, true);
         } catch (Exception e) {
-            LOG.info("Failed to get connection with namespace enabled", e);
+            LOGGER.info("Failed to get connection with namespace enabled", e);
             try {
                 connection = getConnectionWithRetry(connectionURL, false);
             } catch (Exception ex) {
-                LOG.info("Failed to get connection without namespace enabled", ex);
+                LOGGER.info("Failed to get connection without namespace enabled", ex);
             }
         }
         return connection;
@@ -392,7 +392,7 @@ public class PhoenixCanaryTool extends Configured implements Tool {
 
         RetryCounter retrier = new RetryCounter(MAX_CONNECTION_ATTEMPTS,
                 FIRST_TIME_RETRY_TIMEOUT, TimeUnit.MILLISECONDS);
-        LOG.info("Trying to get the connection with "
+        LOGGER.info("Trying to get the connection with "
                 + retrier.getMaxAttempts() + " attempts with "
                 + "connectionURL :" + connectionURL
                 + "connProps :" + connProps);
@@ -400,11 +400,11 @@ public class PhoenixCanaryTool extends Configured implements Tool {
             try {
                 connection = DriverManager.getConnection(connectionURL, connProps);
             } catch (SQLException e) {
-                LOG.info("Trying to establish connection with "
+                LOGGER.info("Trying to establish connection with "
                         + retrier.getAttemptTimes() + " attempts", e);
             }
             if (connection != null) {
-                LOG.info("Successfully established connection within "
+                LOGGER.info("Successfully established connection within "
                         + retrier.getAttemptTimes() + " attempts");
                 break;
             }
@@ -415,11 +415,11 @@ public class PhoenixCanaryTool extends Configured implements Tool {
 
     public static void main(final String[] args) {
         try {
-            LOG.info("Starting Phoenix Canary Test tool...");
+            LOGGER.info("Starting Phoenix Canary Test tool...");
             ToolRunner.run(new PhoenixCanaryTool(), args);
         } catch (Exception e) {
-            LOG.error("Error in running Phoenix Canary Test tool. " + e);
+            LOGGER.error("Error in running Phoenix Canary Test tool. " + e);
         }
-        LOG.info("Exiting Phoenix Canary Test tool...");
+        LOGGER.info("Exiting Phoenix Canary Test tool...");
     }
 }
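
getConnectionWithRetry above loops on DriverManager.getConnection under an HBase RetryCounter, logging each attempt. A simplified sketch of the same retry shape using a plain loop and a fixed sleep instead of RetryCounter; the URL and retry settings are illustrative, not the tool's defaults:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ConnectionRetrySketch {
    private static final Logger LOGGER = LoggerFactory.getLogger(ConnectionRetrySketch.class);

    static Connection connectWithRetry(String url, int maxAttempts, long sleepMillis)
            throws InterruptedException {
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
            try {
                Connection connection = DriverManager.getConnection(url);
                LOGGER.info("Successfully established connection within {} attempts", attempt);
                return connection;
            } catch (SQLException e) {
                // Last argument is a Throwable, so the stack trace is logged as well.
                LOGGER.info("Connection attempt {} of {} failed", attempt, maxAttempts, e);
            }
            Thread.sleep(sleepMillis);
        }
        return null;
    }

    public static void main(String[] args) throws InterruptedException {
        connectWithRetry("jdbc:phoenix:localhost", 5, 1000L);
    }
}
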
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/trace/PhoenixMetricsSink.java b/phoenix-core/src/main/java/org/apache/phoenix/trace/PhoenixMetricsSink.java
index fea6d61..a2b0e4c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/trace/PhoenixMetricsSink.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/trace/PhoenixMetricsSink.java
@@ -35,8 +35,6 @@ import java.util.List;
 import java.util.Properties;
 
 import org.apache.commons.configuration.SubsetConfiguration;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.metrics2.AbstractMetric;
@@ -56,6 +54,8 @@ import org.apache.phoenix.schema.TableNotFoundException;
 import org.apache.phoenix.trace.util.Tracing;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.QueryUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;
@@ -78,7 +78,7 @@ import com.google.common.base.Joiner;
  */
 public class PhoenixMetricsSink implements MetricsSink {
 
-    private static final Log LOG = LogFactory.getLog(PhoenixMetricsSink.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixMetricsSink.class);
 
     private static final String VARIABLE_VALUE = "?";
 
@@ -102,14 +102,14 @@ public class PhoenixMetricsSink implements MetricsSink {
     private String table;
     
     public PhoenixMetricsSink() {
-        LOG.info("Writing tracing metrics to phoenix table");
+        LOGGER.info("Writing tracing metrics to phoenix table");
 
     }
 
     @Override
     public void init(SubsetConfiguration config) {
         Metrics.markSinkInitialized();
-        LOG.info("Phoenix tracing writer started");
+        LOGGER.info("Phoenix tracing writer started");
     }
 
     /**
@@ -210,7 +210,7 @@ public class PhoenixMetricsSink implements MetricsSink {
         try {
             this.conn.commit();
         } catch (SQLException e) {
-            LOG.error("Failed to commit changes to table", e);
+            LOGGER.error("Failed to commit changes to table", e);
         }
     }
 
@@ -270,7 +270,7 @@ public class PhoenixMetricsSink implements MetricsSink {
             } else if (tag.name().equals("Context")) {
                 // ignored
             } else {
-                LOG.error("Got an unexpected tag: " + tag);
+                LOGGER.error("Got an unexpected tag: " + tag);
             }
         }
 
@@ -286,9 +286,9 @@ public class PhoenixMetricsSink implements MetricsSink {
         stmt += COMMAS.join(keys);
         stmt += ") VALUES (" + COMMAS.join(values) + ")";
 
-        if (LOG.isTraceEnabled()) {
-            LOG.trace("Logging metrics to phoenix table via: " + stmt);
-            LOG.trace("With tags: " + variableValues);
+        if (LOGGER.isTraceEnabled()) {
+            LOGGER.trace("Logging metrics to phoenix table via: " + stmt);
+            LOGGER.trace("With tags: " + variableValues);
         }
         try {
             PreparedStatement ps = conn.prepareStatement(stmt);
@@ -304,7 +304,7 @@ public class PhoenixMetricsSink implements MetricsSink {
             MutationState newState = plan.execute();
             state.join(newState);
         } catch (SQLException e) {
-            LOG.error("Could not write metric: \n" + record + " to prepared statement:\n" + stmt,
+            LOGGER.error("Could not write metric: \n" + record + " to prepared statement:\n" + stmt,
                     e);
         }
     }
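
The hunks above keep the existing message concatenation and the isTraceEnabled() guards unchanged. slf4j also offers {} placeholders, which defer assembling the message until the level is actually enabled; a small sketch under the assumption of a standalone helper class (PlaceholderLoggingExample is not part of the patch), reusing the stmt and variableValues names from the method above:

    import java.util.List;

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class PlaceholderLoggingExample {
        private static final Logger LOGGER =
                LoggerFactory.getLogger(PlaceholderLoggingExample.class);

        void logStatement(String stmt, List<String> variableValues) {
            // Style preserved by the patch: explicit guard plus concatenation.
            if (LOGGER.isTraceEnabled()) {
                LOGGER.trace("Logging metrics to phoenix table via: " + stmt);
                LOGGER.trace("With tags: " + variableValues);
            }
            // Equivalent placeholder form: the string is only built when TRACE
            // is enabled, so no separate guard is needed for cheap arguments.
            LOGGER.trace("Logging metrics to phoenix table via: {}", stmt);
            LOGGER.trace("With tags: {}", variableValues);
        }
    }
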
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceReader.java b/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceReader.java
index 68b945c..88cc642 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceReader.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceReader.java
@@ -28,8 +28,6 @@ import java.util.List;
 import java.util.Set;
 import java.util.TreeSet;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.htrace.Span;
 import org.apache.htrace.Trace;
 import org.apache.phoenix.jdbc.PhoenixConnection;
@@ -37,6 +35,8 @@ import org.apache.phoenix.metrics.MetricInfo;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.util.LogUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Joiner;
 import com.google.common.primitives.Longs;
@@ -46,7 +46,7 @@ import com.google.common.primitives.Longs;
  */
 public class TraceReader {
 
-    private static final Log LOG = LogFactory.getLog(TraceReader.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(TraceReader.class);
     private final Joiner comma = Joiner.on(',');
     private String knownColumns;
     {
@@ -146,7 +146,7 @@ public class TraceReader {
                     orphan.parent = spanInfo;
                     spanInfo.children.add(orphan);
                     // / its no longer an orphan
-                    LOG.trace(addCustomAnnotations("Found parent for span: " + span));
+                    LOGGER.trace(addCustomAnnotations("Found parent for span: " + span));
                     orphans.remove(i--);
                 }
             }
@@ -156,7 +156,7 @@ public class TraceReader {
                 parentSpan.children.add(spanInfo);
             } else if (parent != Span.ROOT_SPAN_ID) {
                 // add the span to the orphan pile to check for the remaining spans we see
-                LOG.info(addCustomAnnotations("No parent span found for span: " + span + " (root span id: "
+                LOGGER.info(addCustomAnnotations("No parent span found for span: " + span + " (root span id: "
                         + Span.ROOT_SPAN_ID + ")"));
                 orphans.add(spanInfo);
             }
@@ -213,7 +213,7 @@ public class TraceReader {
                         + MetricInfo.TRACE.columnName + "=" + traceid + " AND "
                         + MetricInfo.PARENT.columnName + "=" + parent + " AND "
                         + MetricInfo.SPAN.columnName + "=" + span;
-        LOG.trace(addCustomAnnotations("Requesting columns with: " + request));
+        LOGGER.trace(addCustomAnnotations("Requesting columns with: " + request));
         ResultSet results = conn.createStatement().executeQuery(request);
         List<String> cols = new ArrayList<String>();
         while (results.next()) {
@@ -222,7 +222,7 @@ public class TraceReader {
             }
         }
         if (cols.size() < count) {
-            LOG.error(addCustomAnnotations("Missing tags! Expected " + count + ", but only got " + cols.size()
+            LOGGER.error(addCustomAnnotations("Missing tags! Expected " + count + ", but only got " + cols.size()
                     + " tags from rquest " + request));
         }
         return cols;
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceSpanReceiver.java b/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceSpanReceiver.java
index 122ae28..a2b84b6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceSpanReceiver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceSpanReceiver.java
@@ -22,13 +22,13 @@ import java.util.concurrent.ArrayBlockingQueue;
 import java.util.concurrent.BlockingQueue;
 
 import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.htrace.Span;
 import org.apache.htrace.SpanReceiver;
 import org.apache.htrace.impl.MilliSpan;
 import org.apache.phoenix.metrics.MetricInfo;
 import org.apache.phoenix.query.QueryServicesOptions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Sink for request traces ({@link SpanReceiver}) that pushes writes to {@link TraceWriter} in a
@@ -64,7 +64,7 @@ import org.apache.phoenix.query.QueryServicesOptions;
  */
 public class TraceSpanReceiver implements SpanReceiver {
 
-    private static final Log LOG = LogFactory.getLog(TraceSpanReceiver.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(TraceSpanReceiver.class);
 
     private static final int CAPACITY = QueryServicesOptions.withDefaults().getTracingTraceBufferSize();
 
@@ -77,11 +77,11 @@ public class TraceSpanReceiver implements SpanReceiver {
     @Override
     public void receiveSpan(Span span) {
         if (span.getTraceId() != 0 && spanQueue.offer(span)) {
-            if (LOG.isTraceEnabled()) {
-                LOG.trace("Span buffered to queue " + span.toJson());
+            if (LOGGER.isTraceEnabled()) {
+                LOGGER.trace("Span buffered to queue " + span.toJson());
             }
-        } else if (span.getTraceId() != 0 && LOG.isDebugEnabled()) {
-                LOG.debug("Span NOT buffered due to overflow in queue " + span.toJson());
+        } else if (span.getTraceId() != 0 && LOGGER.isDebugEnabled()) {
+                LOGGER.debug("Span NOT buffered due to overflow in queue " + span.toJson());
         }
     }
 
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceWriter.java
index e823359..f8dc19e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceWriter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceWriter.java
@@ -38,8 +38,6 @@ import java.util.concurrent.Executors;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.util.Pair;
@@ -56,6 +54,8 @@ import org.apache.phoenix.schema.TableNotFoundException;
 import org.apache.phoenix.trace.util.Tracing;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.QueryUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;
@@ -68,7 +68,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
  * batch commit size.
  */
 public class TraceWriter {
-    private static final Log LOG = LogFactory.getLog(TraceWriter.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(TraceWriter.class);
 
     private static final String VARIABLE_VALUE = "?";
 
@@ -105,9 +105,9 @@ public class TraceWriter {
 
         traceSpanReceiver = getTraceSpanReceiver();
         if (traceSpanReceiver == null) {
-            LOG.warn(
+            LOGGER.warn(
                 "No receiver has been initialized for TraceWriter. Traces will not be written.");
-            LOG.warn("Restart Phoenix to try again.");
+            LOGGER.warn("Restart Phoenix to try again.");
             return;
         }
 
@@ -119,7 +119,7 @@ public class TraceWriter {
             executor.scheduleAtFixedRate(new FlushMetrics(), 0, 10, TimeUnit.SECONDS);
         }
 
-        LOG.info("Writing tracing metrics to phoenix table");
+        LOGGER.info("Writing tracing metrics to phoenix table");
     }
 
     @VisibleForTesting
@@ -142,8 +142,8 @@ public class TraceWriter {
             while (!traceSpanReceiver.isSpanAvailable()) {
                 Span span = traceSpanReceiver.getSpan();
                 if (null == span) break;
-                if (LOG.isTraceEnabled()) {
-                    LOG.trace("Span received: " + span.toJson());
+                if (LOGGER.isTraceEnabled()) {
+                    LOGGER.trace("Span received: " + span.toJson());
                 }
                 addToBatch(span);
                 counter++;
@@ -217,9 +217,9 @@ public class TraceWriter {
             stmt += COMMAS.join(keys);
             stmt += ") VALUES (" + COMMAS.join(values) + ")";
 
-            if (LOG.isTraceEnabled()) {
-                LOG.trace("Logging metrics to phoenix table via: " + stmt);
-                LOG.trace("With tags: " + variableValues);
+            if (LOGGER.isTraceEnabled()) {
+                LOGGER.trace("Logging metrics to phoenix table via: " + stmt);
+                LOGGER.trace("With tags: " + variableValues);
             }
             try {
                 PreparedStatement ps = conn.prepareStatement(stmt);
@@ -237,7 +237,7 @@ public class TraceWriter {
                 MutationState newState = plan.execute();
                 state.join(newState);
             } catch (SQLException e) {
-                LOG.error("Could not write metric: \n" + span + " to prepared statement:\n" + stmt,
+                LOGGER.error("Could not write metric: \n" + span + " to prepared statement:\n" + stmt,
                     e);
             }
         }
@@ -272,14 +272,14 @@ public class TraceWriter {
                 createTable(conn, tableName);
             }
 
-            LOG.info(
+            LOGGER.info(
                 "Created new connection for tracing " + conn.toString() + " Table: " + tableName);
             return conn;
         } catch (Exception e) {
-            LOG.error("Tracing will NOT be pursued. New connection failed for tracing Table: "
+            LOGGER.error("Tracing will NOT be pursued. New connection failed for tracing Table: "
                     + tableName,
                 e);
-            LOG.error("Restart Phoenix to retry.");
+            LOGGER.error("Restart Phoenix to retry.");
             return null;
         }
     }
@@ -324,7 +324,7 @@ public class TraceWriter {
         try {
             conn.commit();
         } catch (SQLException e) {
-            LOG.error(
+            LOGGER.error(
                 "Unable to commit traces on conn: " + conn.toString() + " to table: " + tableName,
                 e);
         }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/trace/util/Tracing.java b/phoenix-core/src/main/java/org/apache/phoenix/trace/util/Tracing.java
index 35cc6dc..c120b11 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/trace/util/Tracing.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/trace/util/Tracing.java
@@ -26,8 +26,6 @@ import java.util.concurrent.Callable;
 
 import javax.annotation.Nullable;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.htrace.HTraceConfiguration;
 import org.apache.phoenix.call.CallRunner;
@@ -46,6 +44,8 @@ import org.apache.htrace.impl.ProbabilitySampler;
 import org.apache.htrace.wrappers.TraceCallable;
 import org.apache.htrace.wrappers.TraceRunnable;
 import org.apache.phoenix.trace.TraceWriter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Function;
 import com.google.common.base.Preconditions;
@@ -56,7 +56,7 @@ import com.sun.istack.NotNull;
  */
 public class Tracing {
 
-    private static final Log LOG = LogFactory.getLog(Tracing.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(Tracing.class);
 
     private static final String SEPARATOR = ".";
     // Constants for tracing across the wire
@@ -275,14 +275,14 @@ public class Tracing {
                 traceWriter.start();
             }
         } catch (RuntimeException e) {
-            LOG.warn("Tracing will outputs will not be written to any metrics sink! No "
+            LOGGER.warn("Tracing will outputs will not be written to any metrics sink! No "
                     + "TraceMetricsSink found on the classpath", e);
         } catch (IllegalAccessError e) {
             // This is an issue when we have a class incompatibility error, such as when running
             // within SquirrelSQL which uses an older incompatible version of commons-collections.
             // Seeing as this only results in disabling tracing, we swallow this exception and just
             // continue on without tracing.
-            LOG.warn("Class incompatibility while initializing metrics, metrics will be disabled", e);
+            LOGGER.warn("Class incompatibility while initializing metrics, metrics will be disabled", e);
         }
         initialized = true;
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/CSVCommonsLoader.java b/phoenix-core/src/main/java/org/apache/phoenix/util/CSVCommonsLoader.java
index 23f123e..a5f0177 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/CSVCommonsLoader.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/CSVCommonsLoader.java
@@ -42,7 +42,7 @@ import com.google.common.collect.ImmutableMap;
  */
 public class CSVCommonsLoader {
 
-    private static final Logger LOG = LoggerFactory.getLogger(CSVCommonsLoader.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(CSVCommonsLoader.class);
 
     public static final String DEFAULT_ARRAY_ELEMENT_SEPARATOR = ":";
 
@@ -277,10 +277,10 @@ public class CSVCommonsLoader {
             totalUpserts = upsertCount;
             if (upsertCount % upsertBatchSize == 0) {
                 if (upsertCount % 1000 == 0) {
-                    LOG.info("Processed upsert #{}", upsertCount);
+                    LOGGER.info("Processed upsert #{}", upsertCount);
                 }
                 try {
-                    LOG.info("Committing after {} records", upsertCount);
+                    LOGGER.info("Committing after {} records", upsertCount);
                     conn.commit();
                 } catch (SQLException e) {
                     throw new RuntimeException(e);
@@ -290,7 +290,7 @@ public class CSVCommonsLoader {
 
         @Override
         public void errorOnRecord(CSVRecord csvRecord, Throwable throwable) {
-            LOG.error("Error upserting record " + csvRecord, throwable.getMessage());
+            LOGGER.error("Error upserting record " + csvRecord, throwable.getMessage());
             if (strict) {
                 Throwables.propagate(throwable);
             }
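
One slf4j-specific detail in the errorOnRecord hunk above: the message string contains no {} placeholder, so under slf4j's formatter the throwable.getMessage() argument is not appended to it, and because that argument is a String rather than a Throwable no stack trace is recorded either. A small illustrative sketch of the two usual alternatives (ErrorLoggingExample is not part of the patch):

    import org.apache.commons.csv.CSVRecord;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class ErrorLoggingExample {
        private static final Logger LOGGER =
                LoggerFactory.getLogger(ErrorLoggingExample.class);

        void errorOnRecord(CSVRecord csvRecord, Throwable throwable) {
            // Placeholder form: the record and the failure message both appear in the log line.
            LOGGER.error("Error upserting record {}: {}", csvRecord, throwable.getMessage());

            // Two-argument overload with a Throwable: also logs the full stack trace.
            LOGGER.error("Error upserting record " + csvRecord, throwable);
        }
    }
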
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/EquiDepthStreamHistogram.java b/phoenix-core/src/main/java/org/apache/phoenix/util/EquiDepthStreamHistogram.java
index 7649933..d042fac 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/EquiDepthStreamHistogram.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/EquiDepthStreamHistogram.java
@@ -22,10 +22,10 @@ import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
@@ -47,7 +47,7 @@ import com.google.common.base.Preconditions;
  *  comes out to basically O(log(T))
  */
 public class EquiDepthStreamHistogram {
-    private static final Log LOG = LogFactory.getLog(EquiDepthStreamHistogram.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(EquiDepthStreamHistogram.class);
 
     // used in maxSize calculation for each bar
     private static final double MAX_COEF = 1.7;
@@ -175,8 +175,8 @@ public class EquiDepthStreamHistogram {
         } else {
             smallerBar.incrementCount(countToDistribute);
         }
-        if (LOG.isTraceEnabled()) {
-            LOG.trace(String.format("Split orig=%s , newLeft=%s , newRight=%s", origBar, newLeft, newRight));
+        if (LOGGER.isTraceEnabled()) {
+            LOGGER.trace(String.format("Split orig=%s , newLeft=%s , newRight=%s", origBar, newLeft, newRight));
         }
         bars.remove(origBar);
         bars.add(newLeft);
@@ -230,8 +230,8 @@ public class EquiDepthStreamHistogram {
         bars.subList(currMinIdx, currMinIdx + 2).clear(); // remove minBars
         bars.add(newBar);
         Collections.sort(bars);
-        if (LOG.isTraceEnabled()) {
-            LOG.trace(String.format("Merged left=%s , right=%s , newBar=%s", leftBar, rightBar, newBar));
+        if (LOGGER.isTraceEnabled()) {
+            LOGGER.trace(String.format("Merged left=%s , right=%s , newBar=%s", leftBar, rightBar, newBar));
         }
         return true;
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixMRJobUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixMRJobUtil.java
index 13eae98..4422103 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixMRJobUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixMRJobUtil.java
@@ -31,8 +31,6 @@ import java.util.Map;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@@ -43,6 +41,8 @@ import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.ZooKeeper;
 import org.apache.zookeeper.data.Stat;
 import org.codehaus.jettison.json.JSONException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.protobuf.InvalidProtocolBufferException;
 
@@ -71,7 +71,7 @@ public class PhoenixMRJobUtil {
     public static final int RM_CONNECT_TIMEOUT_MILLIS = 10 * 1000;
     public static final int RM_READ_TIMEOUT_MILLIS = 10 * 60 * 1000;
 
-    private static final Log LOG = LogFactory.getLog(PhoenixMRJobUtil.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixMRJobUtil.class);
 
     public static final String PHOENIX_MR_SCHEDULER_TYPE_NAME = "phoenix.index.mr.scheduler.type";
 
@@ -101,11 +101,11 @@ public class PhoenixMRJobUtil {
                         byte[] data = zk.getData(path, zkw, new Stat());
                         ActiveRMInfoProto proto = ActiveRMInfoProto.parseFrom(data);
                         proto.getRmId();
-                        LOG.info("Active RmId : " + proto.getRmId());
+                        LOGGER.info("Active RmId : " + proto.getRmId());
 
                         activeRMHost =
                                 config.get(YarnConfiguration.RM_HOSTNAME + "." + proto.getRmId());
-                        LOG.info("activeResourceManagerHostname = " + activeRMHost);
+                        LOGGER.info("activeResourceManagerHostname = " + activeRMHost);
 
                     }
                 }
@@ -140,7 +140,7 @@ public class PhoenixMRJobUtil {
             }
 
             url = urlBuilder.toString();
-            LOG.info("Attempt to get running/submitted jobs information from RM URL = " + url);
+            LOGGER.info("Attempt to get running/submitted jobs information from RM URL = " + url);
 
             URL obj = new URL(url);
             con = (HttpURLConnection) obj.openConnection();
@@ -155,7 +155,7 @@ public class PhoenixMRJobUtil {
             if (con != null) con.disconnect();
         }
 
-        LOG.info("Result of attempt to get running/submitted jobs from RM - URL=" + url
+        LOGGER.info("Result of attempt to get running/submitted jobs from RM - URL=" + url
                 + ",ResponseCode=" + con.getResponseCode() + ",Response=" + response);
 
         return response;
@@ -182,16 +182,16 @@ public class PhoenixMRJobUtil {
 
     public static void shutdown(ExecutorService pool) throws InterruptedException {
         pool.shutdown();
-        LOG.debug("Shutdown called");
+        LOGGER.debug("Shutdown called");
         pool.awaitTermination(200, TimeUnit.MILLISECONDS);
-        LOG.debug("Await termination called to wait for 200 msec");
+        LOGGER.debug("Await termination called to wait for 200 msec");
         if (!pool.isShutdown()) {
             pool.shutdownNow();
-            LOG.debug("Await termination called to wait for 200 msec");
+            LOGGER.debug("Await termination called to wait for 200 msec");
             pool.awaitTermination(100, TimeUnit.MILLISECONDS);
         }
         if (!pool.isShutdown()) {
-            LOG.warn("Pool did not shutdown");
+            LOGGER.warn("Pool did not shutdown");
         }
     }
 
@@ -222,7 +222,7 @@ public class PhoenixMRJobUtil {
         conf.setInt(MRJobConfig.MAP_MEMORY_MB, mapMemoryMB);
         conf.set(MRJobConfig.MAP_JAVA_OPTS, XMX_OPT + ((int) (mapMemoryMB * 0.9)) + "m");
 
-        LOG.info("Queue Name=" + conf.get(MRJobConfig.QUEUE_NAME) + ";" + "Map Meory MB="
+        LOGGER.info("Queue Name=" + conf.get(MRJobConfig.QUEUE_NAME) + ";" + "Map Meory MB="
                 + conf.get(MRJobConfig.MAP_MEMORY_MB) + ";" + "Map Java Opts="
                 + conf.get(MRJobConfig.MAP_JAVA_OPTS));
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java
index 94cbfea..7c4b7e4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java
@@ -31,8 +31,6 @@ import java.util.Properties;
 import javax.annotation.Nullable;
 
 import org.apache.commons.lang.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HConstants;
@@ -50,6 +48,8 @@ import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.schema.types.PInteger;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Function;
 import com.google.common.base.Joiner;
@@ -59,7 +59,7 @@ import com.google.common.collect.Lists;
 
 public final class QueryUtil {
 
-    private static final Log LOG = LogFactory.getLog(QueryUtil.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(QueryUtil.class);
 
     /**
      *  Column family name index within ResultSet resulting from {@link DatabaseMetaData#getColumns(String, String, String, String)}
@@ -383,7 +383,7 @@ public final class QueryUtil {
             throws SQLException, ClassNotFoundException {
         setServerConnection(props);
         String url = getConnectionUrl(props, null, principal);
-        LOG.info("Creating connection with the jdbc url: " + url);
+        LOGGER.info("Creating connection with the jdbc url: " + url);
         return DriverManager.getConnection(url, props);
     }
 
@@ -395,7 +395,7 @@ public final class QueryUtil {
     private static Connection getConnection(Properties props, Configuration conf)
             throws ClassNotFoundException, SQLException {
         String url = getConnectionUrl(props, conf);
-        LOG.info("Creating connection with the jdbc url: " + url);
+        LOGGER.info("Creating connection with the jdbc url: " + url);
         props = PropertiesUtil.combineProperties(props, conf);
         return DriverManager.getConnection(url, props);
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
index a8170ce..ff1b917 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
@@ -31,8 +31,6 @@ import java.util.concurrent.ExecutorService;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
@@ -64,10 +62,12 @@ import org.apache.phoenix.hbase.index.write.IndexWriterUtils;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.schema.StaleRegionBoundaryCacheException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @SuppressWarnings("deprecation")
 public class ServerUtil {
-    private static final Log LOG = LogFactory.getLog(ServerUtil.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(ServerUtil.class);
     private static final int COPROCESSOR_SCAN_WORKS = VersionUtil.encodeVersion("0.98.6");
     
     private static final String FORMAT = "ERROR %d (%s): %s";
@@ -348,7 +348,7 @@ public class ServerUtil {
                     try {
                         connection.close();
                     } catch (IOException e) {
-                        LOG.warn("Unable to close coprocessor connection", e);
+                        LOGGER.warn("Unable to close coprocessor connection", e);
                     }
                 }
                 connections.clear();
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/UpsertExecutor.java b/phoenix-core/src/main/java/org/apache/phoenix/util/UpsertExecutor.java
index d9ce5f2..9ef7356 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/UpsertExecutor.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/UpsertExecutor.java
@@ -61,7 +61,7 @@ public abstract class UpsertExecutor<RECORD, FIELD> implements Closeable {
         void errorOnRecord(RECORD record, Throwable throwable);
     }
 
-    private static final Logger LOG = LoggerFactory.getLogger(UpsertExecutor.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(UpsertExecutor.class);
 
     protected final Connection conn;
     protected final List<ColumnInfo> columnInfos;
@@ -77,7 +77,7 @@ public abstract class UpsertExecutor<RECORD, FIELD> implements Closeable {
         PreparedStatement preparedStatement;
         try {
             String upsertSql = QueryUtil.constructUpsertStatement(tableName, columnInfoList);
-            LOG.info("Upserting SQL data with {}", upsertSql);
+            LOGGER.info("Upserting SQL data with {}", upsertSql);
             preparedStatement = conn.prepareStatement(upsertSql);
         } catch (SQLException e) {
             throw new RuntimeException(e);
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ZKBasedMasterElectionUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ZKBasedMasterElectionUtil.java
index 69ef0b5..9e6f649 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ZKBasedMasterElectionUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ZKBasedMasterElectionUtil.java
@@ -21,39 +21,39 @@ import java.net.InetAddress;
 import java.net.UnknownHostException;
 import java.util.UUID;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.zookeeper.CreateMode;
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.ZooDefs.Ids;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class ZKBasedMasterElectionUtil {
 
-    private static final Log LOG = LogFactory.getLog(ZKBasedMasterElectionUtil.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(ZKBasedMasterElectionUtil.class);
 
     public static boolean acquireLock(ZooKeeperWatcher zooKeeperWatcher, String parentNode,
             String lockName) throws KeeperException, InterruptedException {
         // Create the parent node as Persistent
-        LOG.info("Creating the parent lock node:" + parentNode);
+        LOGGER.info("Creating the parent lock node:" + parentNode);
         ZKUtil.createWithParents(zooKeeperWatcher, parentNode);
 
         // Create the ephemeral node
         String lockNode = parentNode + "/" + lockName;
         String nodeValue = getHostName() + "_" + UUID.randomUUID().toString();
-        LOG.info("Trying to acquire the lock by creating node:" + lockNode + " value:" + nodeValue);
+        LOGGER.info("Trying to acquire the lock by creating node:" + lockNode + " value:" + nodeValue);
         // Create the ephemeral node
         try {
             zooKeeperWatcher.getRecoverableZooKeeper().create(lockNode, Bytes.toBytes(nodeValue),
                 Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
         } catch (KeeperException.NodeExistsException e) {
-            LOG.info("Could not acquire lock. Another process had already acquired the lock on Node "
+            LOGGER.info("Could not acquire lock. Another process had already acquired the lock on Node "
                     + lockName);
             return false;
         }
-        LOG.info("Obtained the lock :" + lockNode);
+        LOGGER.info("Obtained the lock :" + lockNode);
         return true;
     }
 
@@ -62,7 +62,7 @@ public class ZKBasedMasterElectionUtil {
         try {
             host = InetAddress.getLocalHost().getCanonicalHostName();
         } catch (UnknownHostException e) {
-            LOG.error("UnknownHostException while trying to get the Local Host address : ", e);
+            LOGGER.error("UnknownHostException while trying to get the Local Host address : ", e);
         }
         return host;
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java b/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java
index cd40b44..fd58493 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java
@@ -51,7 +51,7 @@ import com.google.common.base.Function;
 /** {@link UpsertExecutor} over {@link CSVRecord}s. */
 public class CsvUpsertExecutor extends UpsertExecutor<CSVRecord, String> {
 
-    private static final Logger LOG = LoggerFactory.getLogger(CsvUpsertExecutor.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(CsvUpsertExecutor.class);
 
     protected final String arrayElementSeparator;
 
@@ -92,10 +92,10 @@ public class CsvUpsertExecutor extends UpsertExecutor<CSVRecord, String> {
             preparedStatement.execute();
             upsertListener.upsertDone(++upsertCount);
         } catch (Exception e) {
-            if (LOG.isDebugEnabled()) {
+            if (LOGGER.isDebugEnabled()) {
                 // Even though this is an error we only log it with debug logging because we're notifying the
                 // listener, and it can do its own logging if needed
-                LOG.debug("Error on CSVRecord " + csvRecord, e);
+                LOGGER.debug("Error on CSVRecord " + csvRecord, e);
             }
             upsertListener.errorOnRecord(csvRecord, e);
         }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/json/JsonUpsertExecutor.java b/phoenix-core/src/main/java/org/apache/phoenix/util/json/JsonUpsertExecutor.java
index ffa797d..87e3997 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/json/JsonUpsertExecutor.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/json/JsonUpsertExecutor.java
@@ -52,7 +52,7 @@ import com.google.common.base.Function;
 /** {@link UpsertExecutor} over {@link Map} objects, as parsed from JSON. */
 public class JsonUpsertExecutor extends UpsertExecutor<Map<?, ?>, Object> {
 
-    protected static final Logger LOG = LoggerFactory.getLogger(JsonUpsertExecutor.class);
+    protected static final Logger LOGGER = LoggerFactory.getLogger(JsonUpsertExecutor.class);
 
     /** Testing constructor. Do not use in prod. */
     @VisibleForTesting
@@ -106,10 +106,10 @@ public class JsonUpsertExecutor extends UpsertExecutor<Map<?, ?>, Object> {
             preparedStatement.execute();
             upsertListener.upsertDone(++upsertCount);
         } catch (Exception e) {
-            if (LOG.isDebugEnabled()) {
+            if (LOGGER.isDebugEnabled()) {
                 // Even though this is an error we only log it with debug logging because we're notifying the
                 // listener, and it can do its own logging if needed
-                LOG.debug("Error on record " + record + ", fieldIndex " + fieldIndex + ", colName " + colName, e);
+                LOGGER.debug("Error on record " + record + ", fieldIndex " + fieldIndex + ", colName " + colName, e);
             }
             upsertListener.errorOnRecord(record, new Exception("fieldIndex: " + fieldIndex + ", colName " + colName, e));
         }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/regex/RegexUpsertExecutor.java b/phoenix-core/src/main/java/org/apache/phoenix/util/regex/RegexUpsertExecutor.java
index 0388d9c..05d009c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/regex/RegexUpsertExecutor.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/regex/RegexUpsertExecutor.java
@@ -33,7 +33,7 @@ import com.google.common.annotations.VisibleForTesting;
 /** {@link UpsertExecutor} over {@link Map} objects, convert input record into {@link Map} objects by using regex. */
 public class RegexUpsertExecutor extends JsonUpsertExecutor {
 
-    protected static final Logger LOG = LoggerFactory.getLogger(RegexUpsertExecutor.class);
+    protected static final Logger LOGGER = LoggerFactory.getLogger(RegexUpsertExecutor.class);
 
     /** Testing constructor. Do not use in prod. */
     @VisibleForTesting
@@ -69,10 +69,10 @@ public class RegexUpsertExecutor extends JsonUpsertExecutor {
             preparedStatement.execute();
             upsertListener.upsertDone(++upsertCount);
         } catch (Exception e) {
-            if (LOG.isDebugEnabled()) {
+            if (LOGGER.isDebugEnabled()) {
                 // Even though this is an error we only log it with debug logging because we're notifying the
                 // listener, and it can do its own logging if needed
-                LOG.debug("Error on record " + record + ", fieldIndex " + fieldIndex + ", colName " + colName, e);
+                LOGGER.debug("Error on record " + record + ", fieldIndex " + fieldIndex + ", colName " + colName, e);
             }
             upsertListener.errorOnRecord(record, new Exception("fieldIndex: " + fieldIndex + ", colName " + colName, e));
         }
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/IndexTestingUtils.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/IndexTestingUtils.java
index 7fa9c8e..26e3561 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/IndexTestingUtils.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/IndexTestingUtils.java
@@ -23,8 +23,6 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
@@ -35,15 +33,15 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec;
 import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec;
 import org.apache.hadoop.hbase.util.Bytes;
-
-
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Utility class for testing indexing
  */
 public class IndexTestingUtils {
 
-  private static final Log LOG = LogFactory.getLog(IndexTestingUtils.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(IndexTestingUtils.class);
   private static final String MASTER_INFO_PORT_KEY = "hbase.master.info.port";
   private static final String RS_INFO_PORT_KEY = "hbase.regionserver.info.port";
   
@@ -65,7 +63,7 @@ public class IndexTestingUtils {
   @SuppressWarnings("javadoc")
   public static void verifyIndexTableAtTimestamp(HTable index1, List<KeyValue> expected,
       long start, long end, byte[] startKey, byte[] endKey) throws IOException {
-    LOG.debug("Scanning " + Bytes.toString(index1.getTableName()) + " between times (" + start
+    LOGGER.debug("Scanning " + Bytes.toString(index1.getTableName()) + " between times (" + start
         + ", " + end + "] and keys: [" + Bytes.toString(startKey) + ", " + Bytes.toString(endKey)
         + "].");
     Scan s = new Scan(startKey, endKey);
@@ -76,7 +74,7 @@ public class IndexTestingUtils {
     ResultScanner scanner = index1.getScanner(s);
     for (Result r : scanner) {
       received.addAll(r.list());
-      LOG.debug("Received: " + r.list());
+      LOGGER.debug("Received: " + r.list());
     }
     scanner.close();
     assertEquals("Didn't get the expected kvs from the index table!", expected, received);
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/StubAbortable.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/StubAbortable.java
index e9e025c..038fa06 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/StubAbortable.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/StubAbortable.java
@@ -17,20 +17,19 @@
  */
 package org.apache.phoenix.hbase.index;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Abortable;
-
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 /**
  * TEst helper to stub out an {@link Abortable} when needed.
  */
 public class StubAbortable implements Abortable {
-  private static final Log LOG = LogFactory.getLog(StubAbortable.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(StubAbortable.class);
   private boolean abort;
 
   @Override
   public void abort(String reason, Throwable e) {
-    LOG.info("Aborting: " + reason, e);
+    LOGGER.info("Aborting: " + reason, e);
     abort = true;
   }
 
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java
index 58050c1..d0bf4e3 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java
@@ -31,8 +31,6 @@ import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HTableDescriptor;
@@ -50,6 +48,9 @@ import org.apache.phoenix.hbase.index.TableName;
 import org.apache.phoenix.hbase.index.exception.IndexWriteException;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.util.ScanUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import org.junit.Rule;
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -57,7 +58,7 @@ import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
 public class TestIndexWriter {
-  private static final Log LOG = LogFactory.getLog(TestIndexWriter.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(TestIndexWriter.class);
   @Rule
   public TableName testName = new TableName();
   private final byte[] row = Bytes.toBytes("row");
@@ -90,8 +91,8 @@ public class TestIndexWriter {
   @SuppressWarnings({ "unchecked", "deprecation" })
   @Test
   public void testSynchronouslyCompletesAllWrites() throws Exception {
-    LOG.info("Starting " + testName.getTableNameString());
-    LOG.info("Current thread is interrupted: " + Thread.interrupted());
+    LOGGER.info("Starting " + testName.getTableNameString());
+    LOGGER.info("Current thread is interrupted: " + Thread.interrupted());
     Abortable abort = new StubAbortable();
     Stoppable stop = Mockito.mock(Stoppable.class);
     RegionCoprocessorEnvironment e =Mockito.mock(RegionCoprocessorEnvironment.class);
@@ -173,13 +174,13 @@ public class TestIndexWriter {
     Mockito.when(table.batch(Mockito.anyList())).thenAnswer(new Answer<Void>() {
       @Override
       public Void answer(InvocationOnMock invocation) throws Throwable {
-        LOG.info("Write started");
+        LOGGER.info("Write started");
         writeStartedLatch.countDown();
         // when we interrupt the thread for shutdown, we should see this throw an interrupt too
         try {
         waitOnAbortedLatch.await();
         } catch (InterruptedException e) {
-          LOG.info("Correctly interrupted while writing!");
+          LOGGER.info("Correctly interrupted while writing!");
           throw e;
         }
         return null;
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java
index 55c3fb3..9f63556 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java
@@ -26,8 +26,6 @@ import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HTableDescriptor;
@@ -50,13 +48,15 @@ import org.junit.Test;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.ArrayListMultimap;
 import com.google.common.collect.Multimap;
 
 public class TestParalleIndexWriter {
 
-  private static final Log LOG = LogFactory.getLog(TestParalleIndexWriter.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(TestParalleIndexWriter.class);
   @Rule
   public TableName test = new TableName();
   private final byte[] row = Bytes.toBytes("row");
@@ -85,8 +85,8 @@ public class TestParalleIndexWriter {
   @SuppressWarnings({ "unchecked", "deprecation" })
   @Test
   public void testSynchronouslyCompletesAllWrites() throws Exception {
-    LOG.info("Starting " + test.getTableNameString());
-    LOG.info("Current thread is interrupted: " + Thread.interrupted());
+    LOGGER.info("Starting " + test.getTableNameString());
+    LOGGER.info("Current thread is interrupted: " + Thread.interrupted());
     Abortable abort = new StubAbortable();
     Stoppable stop = Mockito.mock(Stoppable.class);
     ExecutorService exec = Executors.newFixedThreadPool(1);
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java
index 9767eae..59a8390 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java
@@ -26,8 +26,6 @@ import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HTableDescriptor;
@@ -49,13 +47,15 @@ import org.junit.Test;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.ArrayListMultimap;
 import com.google.common.collect.Multimap;
 
 public class TestParalleWriterIndexCommitter {
 
-  private static final Log LOG = LogFactory.getLog(TestParalleWriterIndexCommitter.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(TestParalleWriterIndexCommitter.class);
   @Rule
   public TableName test = new TableName();
   private final byte[] row = Bytes.toBytes("row");
@@ -84,8 +84,8 @@ public class TestParalleWriterIndexCommitter {
   @SuppressWarnings({ "unchecked", "deprecation" })
   @Test
   public void testSynchronouslyCompletesAllWrites() throws Exception {
-    LOG.info("Starting " + test.getTableNameString());
-    LOG.info("Current thread is interrupted: " + Thread.interrupted());
+    LOGGER.info("Starting " + test.getTableNameString());
+    LOGGER.info("Current thread is interrupted: " + Thread.interrupted());
     Abortable abort = new StubAbortable();
     RegionCoprocessorEnvironment e =Mockito.mock(RegionCoprocessorEnvironment.class);
     Configuration conf =new Configuration();
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
index b1e87e5..49d6513 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
@@ -30,8 +30,6 @@ import java.util.Set;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -71,6 +69,8 @@ import org.junit.Assert;
 import org.junit.Ignore;
 import org.junit.Rule;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Lists;
 import com.google.common.collect.Multimap;
@@ -83,7 +83,7 @@ import com.google.common.collect.Multimap;
 
 public class TestWALRecoveryCaching {
 
-  private static final Log LOG = LogFactory.getLog(TestWALRecoveryCaching.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(TestWALRecoveryCaching.class);
   private static final long ONE_SEC = 1000;
   private static final long ONE_MIN = 60 * ONE_SEC;
   private static final long TIMEOUT = ONE_MIN;
@@ -108,10 +108,10 @@ public class TestWALRecoveryCaching {
     public void preWALRestore(ObserverContext<RegionCoprocessorEnvironment> env, HRegionInfo info,
         HLogKey logKey, WALEdit logEdit) throws IOException {
       try {
-        LOG.debug("Restoring logs for index table");
+        LOGGER.debug("Restoring logs for index table");
         if (allowIndexTableToRecover != null) {
           allowIndexTableToRecover.await();
-          LOG.debug("Completed index table recovery wait latch");
+          LOGGER.debug("Completed index table recovery wait latch");
         }
       } catch (InterruptedException e) {
         Assert.fail("Should not be interrupted while waiting to allow the index to restore WALs.");
@@ -131,9 +131,9 @@ public class TestWALRecoveryCaching {
     @Override
     public void handleFailure(Multimap<HTableInterfaceReference, Mutation> attempted,
         Exception cause) throws IOException {
-      LOG.debug("Found index update failure!");
+      LOGGER.debug("Found index update failure!");
       if (allowIndexTableToRecover != null) {
-        LOG.info("failed index write on WAL recovery - allowing index table to be restored.");
+        LOGGER.info("failed index write on WAL recovery - allowing index table to be restored.");
         allowIndexTableToRecover.countDown();
       }
       super.handleFailure(attempted, cause);
@@ -209,24 +209,24 @@ public class TestWALRecoveryCaching {
       Bytes.toBytes(indexedTableName)));
 
     // log all the current state of the server
-    LOG.info("Current Server/Region paring: ");
+    LOGGER.info("Current Server/Region paring: ");
     for (RegionServerThread t : util.getMiniHBaseCluster().getRegionServerThreads()) {
       // check all the conditions for the server to be done
       HRegionServer server = t.getRegionServer();
       if (server.isStopping() || server.isStopped() || server.isAborted()) {
-        LOG.info("\t== Offline: " + server.getServerName());
+        LOGGER.info("\t== Offline: " + server.getServerName());
         continue;
       }
       List<HRegionInfo> regions = ProtobufUtil.getOnlineRegions(server.getRSRpcServices());
-      LOG.info("\t" + server.getServerName() + " regions: " + regions);
+      LOGGER.info("\t" + server.getServerName() + " regions: " + regions);
     }
 
-    LOG.debug("Killing server " + shared);
+    LOGGER.debug("Killing server " + shared);
     util.getMiniHBaseCluster().killRegionServer(shared);
-    LOG.debug("Waiting on server " + shared + "to die");
+    LOGGER.debug("Waiting on server " + shared + "to die");
     util.getMiniHBaseCluster().waitForRegionServerToStop(shared, TIMEOUT);
     // force reassign the regions from the table
-    // LOG.debug("Forcing region reassignment from the killed server: " + shared);
+    // LOGGER.debug("Forcing region reassignment from the killed server: " + shared);
     // for (HRegion region : online) {
     // util.getMiniHBaseCluster().getMaster().assign(region.getRegionName());
     // }
@@ -250,7 +250,7 @@ public class TestWALRecoveryCaching {
     ResultScanner scanner = index.getScanner(s);
     int count = 0;
     for (Result r : scanner) {
-      LOG.info("Got index table result:" + r);
+      LOGGER.info("Got index table result:" + r);
       count++;
     }
     assertEquals("Got an unexpected found of index rows", 1, count);
@@ -308,7 +308,7 @@ public class TestWALRecoveryCaching {
         // find the regionserver that matches the passed server
         List<Region> online = getRegionsFromServerForTable(cluster, server, table);
 
-        LOG.info("Shutting down and reassigning regions from " + server);
+        LOGGER.info("Shutting down and reassigning regions from " + server);
         cluster.stopRegionServer(server);
         cluster.waitForRegionServerToStop(server, TIMEOUT);
 
@@ -317,13 +317,13 @@ public class TestWALRecoveryCaching {
           cluster.getMaster().getAssignmentManager().assign(Lists.newArrayList(region.getRegionInfo()));
         }
 
-        LOG.info("Starting region server:" + server.getHostname());
+        LOGGER.info("Starting region server:" + server.getHostname());
         cluster.startRegionServer(server.getHostname(), server.getPort());
 
         cluster.waitForRegionServerToStart(server.getHostname(), server.getPort(), TIMEOUT);
 
         // start a server to get back to the base number of servers
-        LOG.info("STarting server to replace " + server);
+        LOGGER.info("STarting server to replace " + server);
         cluster.startRegionServer();
         break;
       }
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/SecureUserConnectionsTest.java b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/SecureUserConnectionsTest.java
index 5a99b69..bb6958c 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/SecureUserConnectionsTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/SecureUserConnectionsTest.java
@@ -31,8 +31,6 @@ import java.util.List;
 import java.util.Properties;
 
 import org.apache.commons.io.FileUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.hbase.security.User;
@@ -47,6 +45,8 @@ import org.apache.phoenix.util.ReadOnlyProps;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Tests ConnectionQueryServices caching when Kerberos authentication is enabled. It's not
@@ -55,7 +55,7 @@ import org.junit.Test;
  * collide and when they do not.
  */
 public class SecureUserConnectionsTest {
-    private static final Log LOG = LogFactory.getLog(SecureUserConnectionsTest.class); 
+    private static final Logger LOGGER = LoggerFactory.getLogger(SecureUserConnectionsTest.class);
     private static final int KDC_START_ATTEMPTS = 10;
 
     private static final File TEMP_DIR = new File(getClassTempDir());
@@ -84,7 +84,7 @@ public class SecureUserConnectionsTest {
                 KDC.start();
                 started = true;
             } catch (Exception e) {
-                LOG.warn("PHOENIX-3287: Failed to start KDC, retrying..", e);
+                LOGGER.warn("PHOENIX-3287: Failed to start KDC, retrying..", e);
             }
         }
         assertTrue("The embedded KDC failed to start successfully after " + KDC_START_ATTEMPTS
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/metrics/LoggingSink.java b/phoenix-core/src/test/java/org/apache/phoenix/metrics/LoggingSink.java
index 2cea684..bd6ef33 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/metrics/LoggingSink.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/metrics/LoggingSink.java
@@ -18,12 +18,12 @@
 package org.apache.phoenix.metrics;
 
 import org.apache.commons.configuration.SubsetConfiguration;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.metrics2.AbstractMetric;
 import org.apache.hadoop.metrics2.MetricsRecord;
 import org.apache.hadoop.metrics2.MetricsSink;
 import org.apache.phoenix.trace.TracingUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Simple sink that just logs the output of all the metrics that start with
@@ -31,7 +31,7 @@ import org.apache.phoenix.trace.TracingUtils;
  */
 public class LoggingSink implements MetricsSink {
 
-    private static final Log LOG = LogFactory.getLog(LoggingSink.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(LoggingSink.class);
 
     @Override
     public void init(SubsetConfiguration config) {
@@ -42,14 +42,14 @@ public class LoggingSink implements MetricsSink {
         // we could wait until flush, but this is a really lightweight process, so we just write
         // them
         // as soon as we get them
-        if (!LOG.isDebugEnabled()) {
+        if (!LOGGER.isDebugEnabled()) {
             return;
         }
-        LOG.debug("Found record:" + record.name());
+        LOGGER.debug("Found record:" + record.name());
         for (AbstractMetric metric : record.metrics()) {
             // just print the metric we care about
             if (metric.name().startsWith(TracingUtils.METRIC_SOURCE_KEY)) {
-                LOG.debug("\t metric:" + metric);
+                LOGGER.debug("\t metric:" + metric);
             }
         }
     }
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/tool/ParameterizedPhoenixCanaryToolIT.java b/phoenix-core/src/test/java/org/apache/phoenix/tool/ParameterizedPhoenixCanaryToolIT.java
index cefb456..456b038 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/tool/ParameterizedPhoenixCanaryToolIT.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/tool/ParameterizedPhoenixCanaryToolIT.java
@@ -19,8 +19,6 @@ package org.apache.phoenix.tool;
 
 import com.google.common.collect.Maps;
 import com.google.gson.Gson;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.phoenix.end2end.ChangePermissionsIT;
 import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
 import org.apache.phoenix.query.BaseTest;
@@ -49,6 +47,8 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static org.apache.phoenix.tool.PhoenixCanaryTool.propFileName;
 import static org.junit.Assert.assertFalse;
@@ -58,7 +58,7 @@ import static org.junit.Assert.assertTrue;
 @Category(NeedsOwnMiniClusterTest.class)
 public class ParameterizedPhoenixCanaryToolIT extends BaseTest {
 
-	private static final Log logger = LogFactory.getLog(ParameterizedPhoenixCanaryToolIT.class);
+	private static final Logger LOGGER = LoggerFactory.getLogger(ParameterizedPhoenixCanaryToolIT.class);
 	private static final String stdOutSink
 			= "org.apache.phoenix.tool.PhoenixCanaryTool$StdOutSink";
 	private static final String fileOutSink
@@ -108,7 +108,7 @@ public class ParameterizedPhoenixCanaryToolIT extends BaseTest {
 			tearDownMiniClusterAsync(1);
 			setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()),
 					new ReadOnlyProps(clientProps.entrySet().iterator()));
-			logger.info("New cluster is spinned up with test parameters " +
+			LOGGER.info("New cluster is spun up with test parameters " +
 					"isPositiveTestType" + this.isPositiveTestType +
 					"isNamespaceEnabled" + this.isNamespaceEnabled +
 					"resultSinkOption" + this.resultSinkOption);
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/CoprocessorHConnectionTableFactoryTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/CoprocessorHConnectionTableFactoryTest.java
index a757780..d4fa383 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/util/CoprocessorHConnectionTableFactoryTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/util/CoprocessorHConnectionTableFactoryTest.java
@@ -21,8 +21,7 @@ import java.sql.Statement;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicBoolean;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
@@ -33,6 +32,8 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.end2end.BaseUniqueNamesOwnClusterIT;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /*
  * This test is wrt to https://issues.apache.org/jira/browse/PHOENIX-4993.Test checks 1. region
@@ -41,7 +42,7 @@ import org.junit.Test;
  */
 public class CoprocessorHConnectionTableFactoryTest extends BaseUniqueNamesOwnClusterIT {
   private static String ORG_PREFIX = "ORG";
-  private static final Log LOG = LogFactory.getLog(CoprocessorHConnectionTableFactoryTest.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(CoprocessorHConnectionTableFactoryTest.class);
 
   @BeforeClass
   public static final void doSetup() throws Exception {
@@ -70,7 +71,7 @@ public class CoprocessorHConnectionTableFactoryTest extends BaseUniqueNamesOwnCl
       }
       conn.commit();
     } catch (Exception e) {
-      LOG.error("Client side exception:" + e);
+      LOGGER.error("Client side exception:" + e);
     }
 
   }
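
Note that the error call above still concatenates the exception into the message
string, which logs only Throwable.toString() and drops the stack trace. slf4j
accepts the throwable as a trailing argument instead; a minimal sketch under that
assumption (hypothetical class, not part of this patch):

    import java.sql.Connection;

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Hypothetical sketch: pass the exception as the last argument so slf4j
    // records the full stack trace rather than just its toString() value.
    public class ErrorLoggingSketch {
        private static final Logger LOGGER =
                LoggerFactory.getLogger(ErrorLoggingSketch.class);

        static void commitQuietly(Connection conn) {
            try {
                conn.commit();
            } catch (Exception e) {
                LOGGER.error("Client side exception", e);
            }
        }
    }
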
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java b/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
index a4b1d0a..811e209 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
@@ -51,8 +51,6 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Properties;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.HConstants;
@@ -124,6 +122,8 @@ import org.apache.phoenix.schema.stats.GuidePostsInfo;
 import org.apache.phoenix.schema.stats.GuidePostsKey;
 import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.schema.types.PDataType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Objects;
 import com.google.common.collect.Lists;
@@ -131,7 +131,7 @@ import com.google.common.collect.Lists;
 
 
 public class TestUtil {
-    private static final Log LOG = LogFactory.getLog(TestUtil.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(TestUtil.class);
     
     private static final Long ZERO = new Long(0);
     public static final String DEFAULT_SCHEMA_NAME = "";
@@ -814,11 +814,11 @@ public class TestUtil {
                 try (HTableInterface htableForRawScan = services.getTable(Bytes.toBytes(tableName))) {
                     ResultScanner scanner = htableForRawScan.getScanner(scan);
                     List<Result> results = Lists.newArrayList(scanner);
-                    LOG.info("Results: " + results);
+                    LOGGER.info("Results: " + results);
                     compactionDone = results.isEmpty();
                     scanner.close();
                 }
-                LOG.info("Compaction done: " + compactionDone);
+                LOGGER.info("Compaction done: " + compactionDone);
                 
                 // need to run compaction after the next txn snapshot has been written so that compaction can remove deleted rows
                 if (!compactionDone && table.isTransactional()) {
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultithreadedDiffer.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultithreadedDiffer.java
index 6e828bd..ef2e167 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultithreadedDiffer.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultithreadedDiffer.java
@@ -31,7 +31,8 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 class MultithreadedDiffer implements Callable<Void> {
-    private static final Logger logger = LoggerFactory.getLogger(MultiThreadedRunner.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(MultithreadedDiffer.class);
+
     private Thread t;
     private Query query;
     private ThreadTime threadTime;
@@ -82,7 +83,7 @@ class MultithreadedDiffer implements Callable<Void> {
      * Executes verification runs for a minimum of number of execution or execution duration
      */
     public Void call() throws Exception {
-        logger.info("\n\nThread Starting " + t.getName() + " ; " + query.getStatement() + " for "
+        LOGGER.info("\n\nThread Starting " + t.getName() + " ; " + query.getStatement() + " for "
                 + numberOfExecutions + "times\n\n");
         Long start = System.currentTimeMillis();
         for (long i = numberOfExecutions; (i > 0 && ((System.currentTimeMillis() - start)
@@ -93,7 +94,7 @@ class MultithreadedDiffer implements Callable<Void> {
                 e.printStackTrace();
             }
         }
-        logger.info("\n\nThread exiting." + t.getName() + "\n\n");
+        LOGGER.info("\n\nThread exiting." + t.getName() + "\n\n");
         return null;
     }
 }
\ No newline at end of file
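
Beyond the rename, the hunk above fixes a copy-pasted logger that was bound to
MultiThreadedRunner.class; declaring each logger against its own enclosing class
keeps the log category, and any per-class log level configuration, accurate. The
usual per-class pattern, as a minimal sketch with a hypothetical class name:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Hypothetical sketch: each class declares a logger bound to itself so
    // output is attributed to the right category.
    public class WorkloadTaskSketch {
        private static final Logger LOGGER =
                LoggerFactory.getLogger(WorkloadTaskSketch.class);

        void logStart(String statement, long executions) {
            LOGGER.info("Thread starting {} ; {} for {} times",
                    Thread.currentThread().getName(), statement, executions);
        }
    }
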
diff --git a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/XMLConfigParserTest.java b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/XMLConfigParserTest.java
index c5746f9..a4285f4 100644
--- a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/XMLConfigParserTest.java
+++ b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/XMLConfigParserTest.java
@@ -33,7 +33,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 public class XMLConfigParserTest {
-    private static final Logger LOG = LoggerFactory.getLogger(XMLConfigParserTest.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(XMLConfigParserTest.class);
   
     @Test
     public void testDTDInScenario() throws Exception {
@@ -45,7 +45,7 @@ public class XMLConfigParserTest {
             fail("The scenario should have failed to parse because it contains a DTD");
         } catch (UnmarshalException e) {
             // If we don't parse the DTD, the variable 'name' won't be defined in the XML
-            LOG.warn("Caught expected exception", e);
+            LOGGER.warn("Caught expected exception", e);
             Throwable cause = e.getLinkedException();
             assertTrue("Cause was a " + cause.getClass(), cause instanceof XMLStreamException);
         }
diff --git a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/result/impl/XMLResultHandlerTest.java b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/result/impl/XMLResultHandlerTest.java
index 98c492f..83a28e0 100644
--- a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/result/impl/XMLResultHandlerTest.java
+++ b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/result/impl/XMLResultHandlerTest.java
@@ -32,7 +32,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 public class XMLResultHandlerTest {
-    private static final Logger LOG = LoggerFactory.getLogger(XMLResultHandlerTest.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(XMLResultHandlerTest.class);
 
     @Test
     public void testDTDInResults() throws Exception {
@@ -45,7 +45,7 @@ public class XMLResultHandlerTest {
           fail("Expected to see an exception parsing the results with a DTD");
         } catch (UnmarshalException e) {
           // If we don't parse the DTD, the variable 'name' won't be defined in the XML
-          LOG.debug("Caught expected exception", e);
+          LOGGER.debug("Caught expected exception", e);
           Throwable cause = e.getLinkedException();
           assertTrue("Cause was a " + cause.getClass(), cause instanceof XMLStreamException);
         }
diff --git a/phoenix-tracing-webapp/src/main/java/org/apache/phoenix/tracingwebapp/http/Main.java b/phoenix-tracing-webapp/src/main/java/org/apache/phoenix/tracingwebapp/http/Main.java
index 249f8e6..d9de663 100755
--- a/phoenix-tracing-webapp/src/main/java/org/apache/phoenix/tracingwebapp/http/Main.java
+++ b/phoenix-tracing-webapp/src/main/java/org/apache/phoenix/tracingwebapp/http/Main.java
@@ -19,8 +19,6 @@ package org.apache.phoenix.tracingwebapp.http;
 import java.net.URL;
 import java.security.ProtectionDomain;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.util.Tool;
@@ -28,13 +26,14 @@ import org.apache.hadoop.util.ToolRunner;
 import org.apache.log4j.BasicConfigurator;
 import org.eclipse.jetty.server.Server;
 import org.eclipse.jetty.webapp.WebAppContext;
-
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 /**
  * tracing web app runner
  */
 public final class Main extends Configured implements Tool {
 
-    protected static final Log LOG = LogFactory.getLog(Main.class);
+    protected static final Logger LOGGER = LoggerFactory.getLogger(Main.class);
     public static final String PHONIX_DBSERVER_PORT_KEY =
         "phoenix.dbserver.port";
     public static final int DEFAULT_DBSERVER_PORT = 2181;