Posted to commits@phoenix.apache.org by mi...@apache.org on 2019/06/11 12:51:59 UTC

[phoenix] branch master updated: [PHOENIX-5228] use slf4j for logging in phoenix project

This is an automated email from the ASF dual-hosted git repository.

mihir6692 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
     new 950331a  [PHOENIX-5228] use slf4j for logging in phoenix project
950331a is described below

commit 950331a1e5f8ac16e4b49ed857fe78b2ab9dc6fb
Author: Xinyi <xy...@salesforce.com>
AuthorDate: Tue Jun 11 18:21:01 2019 +0530

    [PHOENIX-5228] use slf4j for logging in phoenix project
---
 .../wal/WALRecoveryRegionPostOpenIT.java           |  12 +-
 ...WALReplayWithIndexWritesAndCompressedWALIT.java |  13 ++-
 .../src/it/java/org/apache/phoenix/Sandbox.java    |   4 +-
 .../apache/phoenix/end2end/BasePermissionsIT.java  |  60 +++++-----
 .../org/apache/phoenix/end2end/BaseQueryIT.java    |   6 +-
 .../end2end/ConnectionQueryServicesTestImpl.java   |   5 +-
 .../apache/phoenix/end2end/End2EndTestDriver.java  |   4 +-
 .../apache/phoenix/end2end/OrphanViewToolIT.java   |   4 +-
 .../end2end/PartialScannerResultsDisabledIT.java   |   5 +-
 .../end2end/TableSnapshotReadsMapReduceIT.java     |   5 +-
 .../index/IndexRebuildIncrementDisableCountIT.java |  13 ++-
 .../index/InvalidIndexStateClientSideIT.java       |  11 +-
 .../end2end/index/MutableIndexReplicationIT.java   |  24 ++--
 .../end2end/index/PartialIndexRebuilderIT.java     |   4 +-
 .../execute/UpsertSelectOverlappingBatchesIT.java  |  19 ++--
 .../index/FailForUnsupportedHBaseVersionsIT.java   |   9 +-
 .../phoenix/jdbc/SecureUserConnectionsIT.java      |   8 +-
 .../phoenix/monitoring/PhoenixMetricsIT.java       |  13 ++-
 .../apache/phoenix/query/ConnectionCachingIT.java  |   6 +-
 .../phoenix/schema/stats/BaseStatsCollectorIT.java |   8 +-
 .../phoenix/schema/stats/NoOpStatsCollectorIT.java |  10 +-
 .../apache/phoenix/trace/BaseTracingTestIT.java    |  10 +-
 .../phoenix/trace/PhoenixTracingEndToEndIT.java    |  36 +++---
 .../hbase/ipc/PhoenixRpcSchedulerFactory.java      |  15 ++-
 .../IndexHalfStoreFileReaderGenerator.java         |  11 +-
 .../wal/BinaryCompatibleBaseDecoder.java           |  12 +-
 .../java/org/apache/phoenix/cache/GlobalCache.java |  13 ++-
 .../apache/phoenix/cache/ServerCacheClient.java    |  33 +++---
 .../org/apache/phoenix/cache/TenantCacheImpl.java  |   8 +-
 .../apache/phoenix/cache/aggcache/SpillFile.java   |  12 +-
 .../cache/aggcache/SpillableGroupByCache.java      |  16 +--
 .../java/org/apache/phoenix/call/CallRunner.java   |   8 +-
 .../org/apache/phoenix/compile/FromCompiler.java   |  10 +-
 .../GroupedAggregateRegionObserver.java            |  26 ++---
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  |  92 ++++++++--------
 .../coprocessor/MetaDataRegionObserver.java        |  20 ++--
 .../coprocessor/PhoenixAccessController.java       |  22 ++--
 .../phoenix/coprocessor/ScanRegionObserver.java    |  14 +--
 .../UngroupedAggregateRegionObserver.java          |  40 +++----
 .../coprocessor/tasks/DropChildViewsTask.java      |  14 +--
 .../coprocessor/tasks/IndexRebuildTask.java        |  14 +--
 .../org/apache/phoenix/execute/AggregatePlan.java  |   4 +-
 .../org/apache/phoenix/execute/BaseQueryPlan.java  |  14 +--
 .../org/apache/phoenix/execute/HashJoinPlan.java   |  12 +-
 .../org/apache/phoenix/execute/MutationState.java  |  26 ++---
 .../java/org/apache/phoenix/execute/ScanPlan.java  |   4 +-
 .../apache/phoenix/expression/LikeExpression.java  |  20 ++--
 .../aggregator/FirstLastValueServerAggregator.java |   6 +-
 .../aggregator/SizeTrackingServerAggregators.java  |   4 +-
 .../expression/function/CollationKeyFunction.java  |  26 ++---
 .../phoenix/filter/RowKeyComparisonFilter.java     |   6 +-
 .../org/apache/phoenix/hbase/index/Indexer.java    |  42 +++----
 .../apache/phoenix/hbase/index/LockManager.java    |   8 +-
 .../hbase/index/builder/BaseIndexBuilder.java      |   8 +-
 .../hbase/index/builder/IndexBuildManager.java     |   6 +-
 .../hbase/index/covered/NonTxIndexBuilder.java     |  10 +-
 .../hbase/index/covered/data/IndexMemStore.java    |  24 ++--
 .../hbase/index/parallel/BaseTaskRunner.java       |  10 +-
 .../index/parallel/QuickFailingTaskRunner.java     |   6 +-
 .../phoenix/hbase/index/parallel/TaskBatch.java    |   8 +-
 .../hbase/index/parallel/ThreadPoolBuilder.java    |  10 +-
 .../hbase/index/parallel/ThreadPoolManager.java    |  14 +--
 .../hbase/index/util/IndexManagementUtil.java      |  10 +-
 .../phoenix/hbase/index/write/IndexWriter.java     |  12 +-
 .../hbase/index/write/IndexWriterUtils.java        |   7 +-
 .../index/write/KillServerOnFailurePolicy.java     |   8 +-
 .../index/write/ParallelWriterIndexCommitter.java  |  20 ++--
 .../hbase/index/write/RecoveryIndexWriter.java     |  14 +--
 .../TrackingParallelWriterIndexCommitter.java      |  22 ++--
 .../phoenix/index/PhoenixIndexFailurePolicy.java   |  28 ++---
 .../phoenix/index/PhoenixTransactionalIndexer.java |  10 +-
 .../phoenix/iterate/BaseResultIterators.java       |  10 +-
 .../phoenix/iterate/ChunkedResultIterator.java     |   8 +-
 .../apache/phoenix/iterate/ParallelIterators.java  |   6 +-
 .../phoenix/iterate/RoundRobinResultIterator.java  |   6 +-
 .../apache/phoenix/iterate/SnapshotScanner.java    |  12 +-
 .../phoenix/iterate/TableResultIterator.java       |   4 +-
 .../iterate/TableSnapshotResultIterator.java       |   9 +-
 .../org/apache/phoenix/jdbc/PhoenixDriver.java     |  16 +--
 .../apache/phoenix/jdbc/PhoenixEmbeddedDriver.java |  26 +++--
 .../org/apache/phoenix/jdbc/PhoenixResultSet.java  |   8 +-
 .../org/apache/phoenix/jdbc/PhoenixStatement.java  |  22 ++--
 .../java/org/apache/phoenix/log/QueryLogger.java   |  12 +-
 .../apache/phoenix/log/QueryLoggerDisruptor.java   |  10 +-
 .../org/apache/phoenix/log/TableLogWriter.java     |   8 +-
 .../phoenix/mapreduce/AbstractBulkLoadTool.java    |  26 ++---
 .../mapreduce/FormatToBytesWritableMapper.java     |   4 +-
 .../phoenix/mapreduce/FormatToKeyValueReducer.java |   2 +-
 .../phoenix/mapreduce/MultiHfileOutputFormat.java  |  10 +-
 .../apache/phoenix/mapreduce/OrphanViewTool.java   |   8 +-
 .../phoenix/mapreduce/PhoenixInputFormat.java      |  20 ++--
 .../phoenix/mapreduce/PhoenixOutputFormat.java     |   8 +-
 .../phoenix/mapreduce/PhoenixRecordReader.java     |  18 +--
 .../phoenix/mapreduce/PhoenixRecordWriter.java     |  12 +-
 .../PhoenixServerBuildIndexInputFormat.java        |   9 +-
 .../phoenix/mapreduce/PhoenixTextInputFormat.java  |   6 +-
 .../phoenix/mapreduce/RegexToKeyValueMapper.java   |   6 +-
 .../mapreduce/index/DirectHTableWriter.java        |   8 +-
 .../mapreduce/index/IndexScrutinyMapper.java       |  10 +-
 .../phoenix/mapreduce/index/IndexScrutinyTool.java |  22 ++--
 .../apache/phoenix/mapreduce/index/IndexTool.java  |  28 ++---
 .../phoenix/mapreduce/index/IndexToolUtil.java     |   4 +-
 .../index/PhoenixIndexImportDirectMapper.java      |  10 +-
 .../index/PhoenixIndexImportDirectReducer.java     |   4 +-
 .../mapreduce/index/PhoenixIndexImportMapper.java  |   6 +-
 .../index/PhoenixIndexPartialBuildMapper.java      |  10 +-
 .../index/PhoenixServerBuildIndexMapper.java       |   2 +-
 .../index/automation/PhoenixMRJobSubmitter.java    |  32 +++---
 .../mapreduce/util/PhoenixConfigurationUtil.java   |  22 ++--
 .../apache/phoenix/memory/GlobalMemoryManager.java |   4 +-
 .../java/org/apache/phoenix/metrics/Metrics.java   |  12 +-
 .../phoenix/monitoring/GlobalClientMetrics.java    |   4 +-
 .../monitoring/GlobalMetricRegistriesAdapter.java  |  17 +--
 .../phoenix/query/ConnectionQueryServicesImpl.java |  98 ++++++++---------
 .../org/apache/phoenix/query/GuidePostsCache.java  |   2 +-
 .../phoenix/query/PhoenixStatsCacheLoader.java     |   4 +-
 .../org/apache/phoenix/schema/MetaDataClient.java  |  28 ++---
 .../schema/stats/DefaultStatisticsCollector.java   |  26 ++---
 .../phoenix/schema/stats/StatisticsScanner.java    |  28 ++---
 .../phoenix/schema/stats/UpdateStatisticsTool.java |  14 +--
 .../org/apache/phoenix/tool/PhoenixCanaryTool.java |  28 ++---
 .../apache/phoenix/trace/PhoenixMetricsSink.java   |  22 ++--
 .../java/org/apache/phoenix/trace/TraceReader.java |  14 +--
 .../apache/phoenix/trace/TraceSpanReceiver.java    |  14 +--
 .../java/org/apache/phoenix/trace/TraceWriter.java |  32 +++---
 .../org/apache/phoenix/trace/util/Tracing.java     |  10 +-
 .../transaction/OmidTransactionContext.java        |   6 +-
 .../transaction/TephraTransactionContext.java      |   7 +-
 .../org/apache/phoenix/util/CSVCommonsLoader.java  |   8 +-
 .../phoenix/util/EquiDepthStreamHistogram.java     |  14 +--
 .../java/org/apache/phoenix/util/MetaDataUtil.java |   6 +-
 .../org/apache/phoenix/util/PhoenixMRJobUtil.java  |  24 ++--
 .../java/org/apache/phoenix/util/QueryUtil.java    |  10 +-
 .../org/apache/phoenix/util/ReadOnlyProps.java     |   4 +-
 .../java/org/apache/phoenix/util/ServerUtil.java   |   9 +-
 .../java/org/apache/phoenix/util/UpgradeUtil.java  | 122 ++++++++++-----------
 .../org/apache/phoenix/util/UpsertExecutor.java    |   4 +-
 .../phoenix/util/ZKBasedMasterElectionUtil.java    |  16 +--
 .../apache/phoenix/util/csv/CsvUpsertExecutor.java |   6 +-
 .../phoenix/util/json/JsonUpsertExecutor.java      |   6 +-
 .../phoenix/util/regex/RegexUpsertExecutor.java    |   6 +-
 .../phoenix/hbase/index/IndexTestingUtils.java     |  12 +-
 .../apache/phoenix/hbase/index/StubAbortable.java  |   8 +-
 .../phoenix/hbase/index/write/TestIndexWriter.java |  14 +--
 .../hbase/index/write/TestParalleIndexWriter.java  |  10 +-
 .../write/TestParalleWriterIndexCommitter.java     |  10 +-
 .../hbase/index/write/TestWALRecoveryCaching.java  |  34 +++---
 .../org/apache/phoenix/metrics/LoggingSink.java    |  12 +-
 .../java/org/apache/phoenix/query/BaseTest.java    |  23 ++--
 .../tool/ParameterizedPhoenixCanaryToolIT.java     |  10 +-
 .../CoprocessorHConnectionTableFactoryTest.java    |   9 +-
 .../java/org/apache/phoenix/util/TestUtil.java     |  10 +-
 .../main/java/org/apache/phoenix/pherf/Pherf.java  |  28 ++---
 .../pherf/configuration/XMLConfigParser.java       |   6 +-
 .../apache/phoenix/pherf/result/ResultManager.java |   4 +-
 .../apache/phoenix/pherf/rules/RulesApplier.java   |  14 +--
 .../apache/phoenix/pherf/schema/SchemaReader.java  |   8 +-
 .../org/apache/phoenix/pherf/util/PhoenixUtil.java |  22 ++--
 .../apache/phoenix/pherf/util/ResourceList.java    |  18 +--
 .../pherf/workload/MultiThreadedRunner.java        |  10 +-
 .../pherf/workload/MultithreadedDiffer.java        |   6 +-
 .../phoenix/pherf/workload/QueryExecutor.java      |   8 +-
 .../phoenix/pherf/workload/QueryVerifier.java      |   6 +-
 .../phoenix/pherf/workload/WorkloadExecutor.java   |   4 +-
 .../phoenix/pherf/workload/WriteWorkload.java      |  24 ++--
 .../phoenix/pherf/ConfigurationParserTest.java     |   4 +-
 .../apache/phoenix/pherf/XMLConfigParserTest.java  |   4 +-
 .../pherf/result/impl/XMLResultHandlerTest.java    |   4 +-
 .../apache/phoenix/tracingwebapp/http/Main.java    |   7 +-
 169 files changed, 1239 insertions(+), 1207 deletions(-)
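
For context, every file in the list above receives the same mechanical change: the commons-logging Log/LogFactory pair is replaced with slf4j's Logger/LoggerFactory, and the logger field is renamed from LOG (or logger) to LOGGER. A minimal sketch of that pattern follows; ExampleService is a hypothetical class used only for illustration and is not part of this commit.

    // Illustrative sketch only: ExampleService is hypothetical, not part of this patch.
    //
    // Before (commons-logging, removed throughout this commit):
    //     import org.apache.commons.logging.Log;
    //     import org.apache.commons.logging.LogFactory;
    //     private static final Log LOG = LogFactory.getLog(ExampleService.class);
    //     LOG.info("Processing table: " + tableName);

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class ExampleService {

        // After: slf4j logger, with the field renamed to LOGGER as done across this commit
        private static final Logger LOGGER = LoggerFactory.getLogger(ExampleService.class);

        void process(String tableName) {
            // slf4j also supports {} placeholders, which avoid building the message string
            // when the log level is disabled; some call sites in this patch still concatenate.
            LOGGER.info("Processing table: {}", tableName);
        }
    }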

diff --git a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALRecoveryRegionPostOpenIT.java b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALRecoveryRegionPostOpenIT.java
index 674c70c..0fac35b 100644
--- a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALRecoveryRegionPostOpenIT.java
+++ b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALRecoveryRegionPostOpenIT.java
@@ -37,8 +37,6 @@ import java.util.Properties;
 import java.util.Set;
 import java.util.concurrent.CountDownLatch;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
@@ -75,6 +73,8 @@ import org.apache.phoenix.util.ReadOnlyProps;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Maps;
 import com.google.common.collect.Multimap;
@@ -82,7 +82,7 @@ import com.google.common.collect.Multimap;
 @Category(NeedsOwnMiniClusterTest.class)
 public class WALRecoveryRegionPostOpenIT extends BaseTest {
 
-    private static final Log LOG = LogFactory.getLog(WALRecoveryRegionPostOpenIT.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(WALRecoveryRegionPostOpenIT.class);
 
     private static final String DATA_TABLE_NAME="DATA_POST_OPEN";
 
@@ -145,10 +145,10 @@ public class WALRecoveryRegionPostOpenIT extends BaseTest {
         @Override
         public void handleFailure(Multimap<HTableInterfaceReference, Mutation> attempted, Exception cause) throws IOException
         {
-            LOG.info("Found index update failure!");
+            LOGGER.info("Found index update failure!");
             handleFailureCalledCount++;
             tableReferenceToMutation=attempted;
-            LOG.info("failed index update on WAL recovery - allowing index table can be write.");
+            LOGGER.info("failed index update on WAL recovery - allowing index table can be write.");
             failIndexTableWrite=false;
             super.handleFailure(attempted, cause);
 
@@ -264,7 +264,7 @@ public class WALRecoveryRegionPostOpenIT extends BaseTest {
             resultScanner = primaryTable.getScanner(scan);
             count = 0;
             for (Result result : resultScanner) {
-                LOG.info("Got data table result:" + result);
+                LOGGER.info("Got data table result:" + result);
                 count++;
             }
             assertEquals("Got an unexpected found of data rows", 1, count);
diff --git a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
index 52cdc0c..0314289 100644
--- a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
+++ b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
@@ -26,8 +26,6 @@ import static org.mockito.Mockito.when;
 import java.io.IOException;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -74,6 +72,8 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.mockito.Mockito;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * For pre-0.94.9 instances, this class tests correctly deserializing WALEdits w/o compression. Post
@@ -91,7 +91,8 @@ import org.mockito.Mockito;
 @Ignore
 public class WALReplayWithIndexWritesAndCompressedWALIT {
 
-  public static final Log LOG = LogFactory.getLog(WALReplayWithIndexWritesAndCompressedWALIT.class);
+  public static final Logger LOGGER =
+          LoggerFactory.getLogger(WALReplayWithIndexWritesAndCompressedWALIT.class);
   @Rule
   public IndexTableName table = new IndexTableName();
   private String INDEX_TABLE_NAME = table.getTableNameString() + "_INDEX";
@@ -147,7 +148,7 @@ public class WALReplayWithIndexWritesAndCompressedWALIT {
     UTIL.startMiniZKCluster();
 
     Path hbaseRootDir = UTIL.getDFSCluster().getFileSystem().makeQualified(new Path("/hbase"));
-    LOG.info("hbase.rootdir=" + hbaseRootDir);
+    LOGGER.info("hbase.rootdir=" + hbaseRootDir);
     UTIL.getConfiguration().set(HConstants.HBASE_DIR, hbaseRootDir.toString());
     UTIL.startMiniHBaseCluster(1, 1);
   }
@@ -292,7 +293,7 @@ public class WALReplayWithIndexWritesAndCompressedWALIT {
     assertEquals("splits=" + splits, 1, splits.size());
     // Make sure the file exists
     assertTrue(fs.exists(splits.get(0)));
-    LOG.info("Split file=" + splits.get(0));
+    LOGGER.info("Split file=" + splits.get(0));
     return splits.get(0);
   }
 
@@ -305,7 +306,7 @@ private int getKeyValueCount(Table table) throws IOException {
     int count = 0;
     for (Result res : results) {
       count += res.listCells().size();
-      LOG.debug(count + ") " + res);
+      LOGGER.debug(count + ") " + res);
     }
     results.close();
 
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/Sandbox.java b/phoenix-core/src/it/java/org/apache/phoenix/Sandbox.java
index b7bc107..ec4e920 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/Sandbox.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/Sandbox.java
@@ -32,7 +32,7 @@ import org.slf4j.LoggerFactory;
  */
 public class Sandbox {
 
-    private static final Logger LOG = LoggerFactory.getLogger(Sandbox.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(Sandbox.class);
 
     public static void main(String[] args) throws Exception {
         System.out.println("Starting Phoenix sandbox");
@@ -50,7 +50,7 @@ public class Sandbox {
                         testUtil.shutdownMiniCluster();
                     }
                 } catch (Exception e) {
-                    LOG.error("Exception caught when shutting down mini cluster", e);
+                    LOGGER.error("Exception caught when shutting down mini cluster", e);
                 }
             }
         });
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java
index 57a176b..5095d8a 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java
@@ -18,8 +18,30 @@ package org.apache.phoenix.end2end;
 
 import com.google.common.base.Joiner;
 import com.google.common.base.Throwables;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.lang.reflect.UndeclaredThrowableException;
+import java.security.PrivilegedExceptionAction;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.Arrays;
+
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Properties;
+import java.util.Set;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.AuthUtil;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -46,34 +68,14 @@ import org.junit.FixMethodOrder;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.runners.MethodSorters;
-
-import java.io.IOException;
-import java.lang.reflect.UndeclaredThrowableException;
-import java.security.PrivilegedExceptionAction;
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Properties;
-import java.util.Set;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @Category(NeedsOwnMiniClusterTest.class)
 @FixMethodOrder(MethodSorters.NAME_ASCENDING)
 public abstract class BasePermissionsIT extends BaseTest {
 
-    private static final Log LOG = LogFactory.getLog(BasePermissionsIT.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(BasePermissionsIT.class);
 
     static String SUPER_USER = System.getProperty("user.name");
 
@@ -279,7 +281,7 @@ public abstract class BasePermissionsIT extends BaseTest {
                     for(String tableOrSchema : tableOrSchemaList) {
                         String grantStmtSQL = "GRANT '" + actions + "' ON " + (isSchema ? " SCHEMA " : " TABLE ") + tableOrSchema + " TO "
                                 + ((ug instanceof String) ? (" GROUP " + "'" + ug + "'") : ("'" + ((User)ug).getShortName() + "'"));
-                        LOG.info("Grant Permissions SQL: " + grantStmtSQL);
+                        LOGGER.info("Grant Permissions SQL: " + grantStmtSQL);
                         assertFalse(stmt.execute(grantStmtSQL));
                     }
                 }
@@ -294,7 +296,7 @@ public abstract class BasePermissionsIT extends BaseTest {
             public Object run() throws Exception {
                 try (Connection conn = getConnection(); Statement stmt = conn.createStatement();) {
                     String grantStmtSQL = "GRANT '" + actions + "' TO " + " '" + user.getShortName() + "'";
-                    LOG.info("Grant Permissions SQL: " + grantStmtSQL);
+                    LOGGER.info("Grant Permissions SQL: " + grantStmtSQL);
                     assertFalse(stmt.execute(grantStmtSQL));
                 }
                 return null;
@@ -316,7 +318,7 @@ public abstract class BasePermissionsIT extends BaseTest {
                     for(String tableOrSchema : tableOrSchemaList) {
                         String revokeStmtSQL = "REVOKE ON " + (isSchema ? " SCHEMA " : " TABLE ") + tableOrSchema + " FROM "
                                 + ((ug instanceof String) ? (" GROUP " + "'" + ug + "'") : ("'" + ((User)ug).getShortName() + "'"));
-                        LOG.info("Revoke Permissions SQL: " + revokeStmtSQL);
+                        LOGGER.info("Revoke Permissions SQL: " + revokeStmtSQL);
                         assertFalse(stmt.execute(revokeStmtSQL));
                     }
                 }
@@ -332,7 +334,7 @@ public abstract class BasePermissionsIT extends BaseTest {
                 try (Connection conn = getConnection(); Statement stmt = conn.createStatement();) {
                     String revokeStmtSQL = "REVOKE FROM " +
                             ((ug instanceof String) ? (" GROUP " + "'" + ug + "'") : ("'" + ((User)ug).getShortName() + "'"));
-                    LOG.info("Revoke Permissions SQL: " + revokeStmtSQL);
+                    LOGGER.info("Revoke Permissions SQL: " + revokeStmtSQL);
                     assertFalse(stmt.execute(revokeStmtSQL));
                 }
                 return null;
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseQueryIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseQueryIT.java
index e88dc57..e7f3ad9 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseQueryIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseQueryIT.java
@@ -84,7 +84,7 @@ public abstract class BaseQueryIT extends ParallelStatsDisabledIT {
     protected String tableName;
     protected String indexName;
 
-    private static final Logger logger = LoggerFactory.getLogger(BaseQueryIT.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(BaseQueryIT.class);
 
     public BaseQueryIT(String idxDdl, boolean columnEncoded, boolean keepDeletedCells) throws Exception {
         StringBuilder optionBuilder = new StringBuilder();
@@ -102,7 +102,7 @@ public abstract class BaseQueryIT extends ParallelStatsDisabledIT {
                         date = new Date(System.currentTimeMillis()), null, getUrl(),
                         tableDDLOptions);
         } catch (Exception e) {
-            logger.error("Exception when creating aTable ", e);
+            LOGGER.error("Exception when creating aTable ", e);
             throw e;
         }
         this.indexName = generateUniqueName();
@@ -113,7 +113,7 @@ public abstract class BaseQueryIT extends ParallelStatsDisabledIT {
             try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
                 conn.createStatement().execute(this.indexDDL);
             } catch (Exception e) {
-                logger.error("Exception while creating index: " + indexDDL, e);
+                LOGGER.error("Exception while creating index: " + indexDDL, e);
                 throw e;
             }
         }
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConnectionQueryServicesTestImpl.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConnectionQueryServicesTestImpl.java
index 969e0f4..3bb99f6 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConnectionQueryServicesTestImpl.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConnectionQueryServicesTestImpl.java
@@ -50,7 +50,8 @@ import com.google.common.collect.Sets;
  * @since 0.1
  */
 public class ConnectionQueryServicesTestImpl extends ConnectionQueryServicesImpl {
-    private static final Logger logger = LoggerFactory.getLogger(ConnectionQueryServicesTestImpl.class);
+    private static final Logger LOGGER =
+            LoggerFactory.getLogger(ConnectionQueryServicesTestImpl.class);
     protected int NUM_SLAVES_BASE = 1; // number of slaves for the cluster
     // Track open connections to free them on close as unit tests don't always do this.
     private Set<PhoenixConnection> connections = Sets.newHashSet();
@@ -85,7 +86,7 @@ public class ConnectionQueryServicesTestImpl extends ConnectionQueryServicesImpl
                         try {
                             service.close();
                         } catch (IOException e) {
-                            logger.warn(e.getMessage(), e);
+                            LOGGER.warn(e.getMessage(), e);
                         }
                     }
                 }
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/End2EndTestDriver.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/End2EndTestDriver.java
index 60b01bd..76174f5 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/End2EndTestDriver.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/End2EndTestDriver.java
@@ -48,7 +48,7 @@ import org.slf4j.LoggerFactory;
  */
 public class End2EndTestDriver extends AbstractHBaseTool {
     
-    private static final Logger LOG = LoggerFactory.getLogger(End2EndTestDriver.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(End2EndTestDriver.class);
     private static final String SHORT_REGEX_ARG = "r";
     private static final String SKIP_TESTS = "n";
     
@@ -80,7 +80,7 @@ public class End2EndTestDriver extends AbstractHBaseTool {
         try {
           testFilterRe = Pattern.compile(pattern);
         } catch (PatternSyntaxException e) {
-          LOG.error("Failed to find tests using pattern '" + pattern
+          LOGGER.error("Failed to find tests using pattern '" + pattern
               + "'. Is it a valid Java regular expression?", e);
           throw e;
         }
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrphanViewToolIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrphanViewToolIT.java
index ab78ecd..44a8f67 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrphanViewToolIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrphanViewToolIT.java
@@ -54,7 +54,7 @@ import org.slf4j.LoggerFactory;
 
 @RunWith(Parameterized.class)
 public class OrphanViewToolIT extends ParallelStatsDisabledIT {
-    private static final Logger LOG = LoggerFactory.getLogger(OrphanViewToolIT.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(OrphanViewToolIT.class);
 
     private final boolean isMultiTenant;
     private final boolean columnEncoded;
@@ -211,7 +211,7 @@ public class OrphanViewToolIT extends ParallelStatsDisabledIT {
         }
         int count = reader.getLineNumber();
         if (count != lineCount)
-            LOG.debug(count + " != " + lineCount);
+            LOGGER.debug(count + " != " + lineCount);
         assertTrue(count == lineCount);
     }
 
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PartialScannerResultsDisabledIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PartialScannerResultsDisabledIT.java
index 671419e..f4b8108 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PartialScannerResultsDisabledIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PartialScannerResultsDisabledIT.java
@@ -78,7 +78,8 @@ public class PartialScannerResultsDisabledIT extends ParallelStatsDisabledIT {
     private String schemaName;
     private String dataTableFullName;
     private static String indexTableFullName;
-    private static final Logger logger = LoggerFactory.getLogger(PartialScannerResultsDisabledIT.class);
+    private static final Logger LOGGER =
+            LoggerFactory.getLogger(PartialScannerResultsDisabledIT.class);
     private static Random random = new Random(1);
     // background writer threads
     private static Random sourceOfRandomness = new Random(0);
@@ -99,7 +100,7 @@ public class PartialScannerResultsDisabledIT extends ParallelStatsDisabledIT {
             // TODO: it's likely that less data could be written if whatever
             // config parameters decide this are lowered.
             writeSingleBatch(conn, 100, 20, dataTableFullName);
-            logger.info("Running scrutiny");
+            LOGGER.info("Running scrutiny");
             // Scutunize index to see if partial results are silently returned
             // In that case we'll get a false positive on the scrutiny run.
             long rowCount = IndexScrutiny.scrutinizeIndex(conn, dataTableFullName, indexTableFullName);
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TableSnapshotReadsMapReduceIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TableSnapshotReadsMapReduceIT.java
index 4aaeef2..90e7b09 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TableSnapshotReadsMapReduceIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TableSnapshotReadsMapReduceIT.java
@@ -61,7 +61,7 @@ import org.slf4j.LoggerFactory;
 
 public class TableSnapshotReadsMapReduceIT extends BaseUniqueNamesOwnClusterIT {
 
-  private static final Logger logger = LoggerFactory.getLogger(TableSnapshotReadsMapReduceIT.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(TableSnapshotReadsMapReduceIT.class);
 
   private final static String SNAPSHOT_NAME = "FOO";
   private static final String FIELD1 = "FIELD1";
@@ -223,7 +223,8 @@ public class TableSnapshotReadsMapReduceIT extends BaseUniqueNamesOwnClusterIT {
       if (hRegionInfoList.size() >= expectedRegions) {
         break;
       }
-      logger.info("Sleeping for 1000 ms while waiting for " + hbaseTableName.getNameAsString() + " to split");
+      LOGGER.info("Sleeping for 1000 ms while waiting for "
+              + hbaseTableName.getNameAsString() + " to split");
       Thread.sleep(1000);
     }
   }
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexRebuildIncrementDisableCountIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexRebuildIncrementDisableCountIT.java
index 084bee2..9b7ba91 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexRebuildIncrementDisableCountIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexRebuildIncrementDisableCountIT.java
@@ -29,8 +29,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Random;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Result;
@@ -52,11 +50,14 @@ import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.TestUtil;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Maps;
 
 public class IndexRebuildIncrementDisableCountIT extends BaseUniqueNamesOwnClusterIT {
-    private static final Log LOG = LogFactory.getLog(IndexRebuildIncrementDisableCountIT.class);
+    private static final Logger LOGGER =
+            LoggerFactory.getLogger(IndexRebuildIncrementDisableCountIT.class);
     private static long pendingDisableCount = 0;
     private static String ORG_PREFIX = "ORG";
     private static Result pendingDisableCountResult = null;
@@ -124,7 +125,7 @@ public class IndexRebuildIncrementDisableCountIT extends BaseUniqueNamesOwnClust
             return Bytes.toLong(pendingDisableCountResult.getValue(TABLE_FAMILY_BYTES,
                 PhoenixDatabaseMetaData.PENDING_DISABLE_COUNT_BYTES));
         } catch (Exception e) {
-            LOG.error("Exception in getPendingDisableCount: " + e);
+            LOGGER.error("Exception in getPendingDisableCount: " + e);
             return 0;
         }
     }
@@ -148,7 +149,7 @@ public class IndexRebuildIncrementDisableCountIT extends BaseUniqueNamesOwnClust
                         Thread.sleep(100);
                     }
                 } catch (Exception e) {
-                    LOG.error("Error in checkPendingDisableCount : " + e);
+                    LOGGER.error("Error in checkPendingDisableCount : " + e);
                 }
             }
         };
@@ -175,7 +176,7 @@ public class IndexRebuildIncrementDisableCountIT extends BaseUniqueNamesOwnClust
             }
             conn.commit();
         } catch (Exception e) {
-            LOG.error("Client side exception:" + e);
+            LOGGER.error("Client side exception:" + e);
         }
     }
 
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/InvalidIndexStateClientSideIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/InvalidIndexStateClientSideIT.java
index 5c1b4b5..1b085c6 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/InvalidIndexStateClientSideIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/InvalidIndexStateClientSideIT.java
@@ -26,8 +26,6 @@ import java.sql.Connection;
 import java.sql.DriverManager;
 import java.util.Map;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.coprocessor.Batch;
@@ -52,9 +50,12 @@ import org.apache.phoenix.util.EnvironmentEdgeManager;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.SchemaUtil;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class InvalidIndexStateClientSideIT extends ParallelStatsDisabledIT {
-    private static final Log LOG = LogFactory.getLog(InvalidIndexStateClientSideIT.class);
+    private static final Logger LOGGER =
+            LoggerFactory.getLogger(InvalidIndexStateClientSideIT.class);
 
     @Test
     public void testCachedConnections() throws Throwable {
@@ -120,7 +121,7 @@ public class InvalidIndexStateClientSideIT extends ParallelStatsDisabledIT {
                     }
                 };
         int version = VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, 13, PHOENIX_PATCH_NUMBER);
-        LOG.info("Client version: " + version);
+        LOGGER.info("Client version: " + version);
         Table ht =
                 queryServices.getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
         try {
@@ -133,7 +134,7 @@ public class InvalidIndexStateClientSideIT extends ParallelStatsDisabledIT {
             assert (PIndexState.valueOf(result.getTable().getIndexes(0).getIndexState())
                     .equals(PIndexState.DISABLE));
         } catch (Exception e) {
-            LOG.error("Exception Occurred: " + e);
+            LOGGER.error("Exception Occurred: " + e);
 
         } finally {
             Closeables.closeQuietly(ht);
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexReplicationIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexReplicationIT.java
index 5cc4e05..c59a6de 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexReplicationIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexReplicationIT.java
@@ -33,8 +33,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Properties;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -64,6 +62,8 @@ import org.apache.phoenix.util.SchemaUtil;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Maps;
 
@@ -78,7 +78,7 @@ import com.google.common.collect.Maps;
 @Category(NeedsOwnMiniClusterTest.class)
 public class MutableIndexReplicationIT extends BaseTest {
 
-    private static final Log LOG = LogFactory.getLog(MutableIndexReplicationIT.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(MutableIndexReplicationIT.class);
 
     public static final String SCHEMA_NAME = "";
     public static final String DATA_TABLE_NAME = "T";
@@ -137,7 +137,7 @@ public class MutableIndexReplicationIT extends BaseTest {
         conf1 = utility1.getConfiguration();
         zkw1 = new ZKWatcher(conf1, "cluster1", null, true);
         admin=ConnectionFactory.createConnection(conf1).getAdmin();
-        LOG.info("Setup first Zk");
+        LOGGER.info("Setup first Zk");
 
         // Base conf2 on conf1 so it gets the right zk cluster, and general cluster configs
         conf2 = HBaseConfiguration.create(conf1);
@@ -150,7 +150,7 @@ public class MutableIndexReplicationIT extends BaseTest {
         utility2.setZkCluster(miniZK);
         zkw2 = new ZKWatcher(conf2, "cluster2", null, true);
 
-        LOG.info("Setup second Zk");
+        LOGGER.info("Setup second Zk");
         utility1.startMiniCluster(2);
         utility2.startMiniCluster(2);
       //replicate from cluster 1 -> cluster 2, but not back again
@@ -158,14 +158,14 @@ public class MutableIndexReplicationIT extends BaseTest {
     }
 
     private static void setupDriver() throws Exception {
-        LOG.info("Setting up phoenix driver");
+        LOGGER.info("Setting up phoenix driver");
         Map<String, String> props = Maps.newHashMapWithExpectedSize(3);
         // Forces server cache to be used
         props.put(QueryServices.INDEX_MUTATE_BATCH_SIZE_THRESHOLD_ATTRIB, Integer.toString(2));
         props.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.toString(true));
         // Must update config before starting server
         URL = getLocalClusterUrl(utility1);
-        LOG.info("Connecting driver to "+URL);
+        LOGGER.info("Connecting driver to "+URL);
         driver = initAndRegisterTestDriver(URL, new ReadOnlyProps(props.entrySet().iterator()));
     }
 
@@ -204,7 +204,7 @@ public class MutableIndexReplicationIT extends BaseTest {
             //create it as-is on the remote cluster
             admin2.createTable(desc);
 
-            LOG.info("Enabling replication on source table: "+tableName);
+            LOGGER.info("Enabling replication on source table: "+tableName);
             ColumnFamilyDescriptor[] cols = desc.getColumnFamilies();
             assertEquals(1, cols.length);
             // add the replication scope to the column
@@ -214,7 +214,7 @@ public class MutableIndexReplicationIT extends BaseTest {
             admin.disableTable(desc.getTableName());
             admin.modifyTable(desc);
             admin.enableTable(desc.getTableName());
-            LOG.info("Replication enabled on source table: "+tableName);
+            LOGGER.info("Replication enabled on source table: "+tableName);
         }
 
 
@@ -241,7 +241,7 @@ public class MutableIndexReplicationIT extends BaseTest {
 
         // other table can't be reached through Phoenix right now - would need to change how we
         // lookup tables. For right now, we just go through an HTable
-        LOG.info("Looking up tables in replication target");
+        LOGGER.info("Looking up tables in replication target");
         TableName[] tables = admin2.listTableNames();
         org.apache.hadoop.hbase.client.Connection hbaseConn = ConnectionFactory.createConnection(utility2.getConfiguration());
         Table remoteTable = hbaseConn.getTable(tables[0]);
@@ -253,7 +253,7 @@ public class MutableIndexReplicationIT extends BaseTest {
             if (ensureAnyRows(remoteTable)) {
                 break;
             }
-            LOG.info("Sleeping for " + REPLICATION_WAIT_TIME_MILLIS
+            LOGGER.info("Sleeping for " + REPLICATION_WAIT_TIME_MILLIS
                     + " for edits to get replicated");
             Thread.sleep(REPLICATION_WAIT_TIME_MILLIS);
         }
@@ -266,7 +266,7 @@ public class MutableIndexReplicationIT extends BaseTest {
         ResultScanner scanner = remoteTable.getScanner(scan);
         boolean found = false;
         for (Result r : scanner) {
-            LOG.info("got row: " + r);
+            LOGGER.info("got row: " + r);
             found = true;
         }
         scanner.close();
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java
index 5bd41fc..b09acd1 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java
@@ -80,7 +80,7 @@ import com.google.common.collect.Maps;
 @SuppressWarnings("deprecation")
 @RunWith(RunUntilFailure.class)
 public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
-    private static final Logger LOG = LoggerFactory.getLogger(PartialIndexRebuilderIT.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PartialIndexRebuilderIT.class);
     private static final Random RAND = new Random(5);
     private static final int WAIT_AFTER_DISABLED = 5000;
     private static final long REBUILD_PERIOD = 50000;
@@ -138,7 +138,7 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
                         Thread.interrupted();
                         throw new RuntimeException(e);
                     } catch (SQLException e) {
-                        LOG.error(e.getMessage(),e);
+                        LOGGER.error(e.getMessage(),e);
                     } finally {
                         runRebuildOnce = false;
                     }
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/execute/UpsertSelectOverlappingBatchesIT.java b/phoenix-core/src/it/java/org/apache/phoenix/execute/UpsertSelectOverlappingBatchesIT.java
index d18a090..2a115e0 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/execute/UpsertSelectOverlappingBatchesIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/execute/UpsertSelectOverlappingBatchesIT.java
@@ -67,7 +67,8 @@ import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 
 public class UpsertSelectOverlappingBatchesIT extends BaseUniqueNamesOwnClusterIT {
-    private static final Logger logger = LoggerFactory.getLogger(UpsertSelectOverlappingBatchesIT.class);
+    private static final Logger LOGGER =
+            LoggerFactory.getLogger(UpsertSelectOverlappingBatchesIT.class);
     private Properties props;
     private static volatile String dataTable;
     private String index;
@@ -130,11 +131,11 @@ public class UpsertSelectOverlappingBatchesIT extends BaseUniqueNamesOwnClusterI
                 }
                 catch (Exception e) {
                     if (ExceptionUtils.indexOfThrowable(e, InterruptedException.class) != -1) {
-                        logger.info("Interrupted, exiting", e);
+                        LOGGER.info("Interrupted, exiting", e);
                         Thread.currentThread().interrupt();
                         return;
                     }
-                    logger.error("Hit exception while writing", e);
+                    LOGGER.error("Hit exception while writing", e);
                 }
             }
         }};
@@ -215,17 +216,17 @@ public class UpsertSelectOverlappingBatchesIT extends BaseUniqueNamesOwnClusterI
                     try {
                         List<RegionInfo> regions = admin.getRegions(dataTN);
                         if (regions.size() > 1) {
-                            logger.info("Found region was split");
+                            LOGGER.info("Found region was split");
                             return true;
                         }
                         if (regions.size() == 0) {
                             // This happens when region in transition or closed
-                            logger.info("No region returned");
+                            LOGGER.info("No region returned");
                             return false;
                         }
                         ;
                         RegionInfo hRegion = regions.get(0);
-                        logger.info("Attempting to split region");
+                        LOGGER.info("Attempting to split region");
                         admin.splitRegionAsync(hRegion.getRegionName(), Bytes.toBytes(2));
                         return false;
                     } catch (NotServingRegionException | DoNotRetryRegionException re) {
@@ -264,7 +265,7 @@ public class UpsertSelectOverlappingBatchesIT extends BaseUniqueNamesOwnClusterI
             final Admin admin = utility.getAdmin();
             final RegionInfo dataRegion =
                     admin.getRegions(TableName.valueOf(dataTable)).get(0);
-            logger.info("Closing data table region");
+            LOGGER.info("Closing data table region");
             admin.unassign(dataRegion.getEncodedNameAsBytes(), true);
             // make sure the region is offline
             utility.waitFor(60000L, 1000, new Waiter.Predicate<Exception>() {
@@ -274,11 +275,11 @@ public class UpsertSelectOverlappingBatchesIT extends BaseUniqueNamesOwnClusterI
                             admin.getRegions(dataRs.getServerName());
                     for (RegionInfo onlineRegion : onlineRegions) {
                         if (onlineRegion.equals(dataRegion)) {
-                            logger.info("Data region still online");
+                            LOGGER.info("Data region still online");
                             return false;
                         }
                     }
-                    logger.info("Region is no longer online");
+                    LOGGER.info("Region is no longer online");
                     return true;
                 }
             });
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/FailForUnsupportedHBaseVersionsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/FailForUnsupportedHBaseVersionsIT.java
index beb4762..f36157a 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/FailForUnsupportedHBaseVersionsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/FailForUnsupportedHBaseVersionsIT.java
@@ -22,8 +22,6 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -42,13 +40,16 @@ import org.apache.phoenix.hbase.index.covered.CoveredColumn;
 import org.apache.phoenix.hbase.index.covered.CoveredColumnIndexSpecifierBuilder;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Test that we correctly fail for versions of HBase that don't support current properties
  */
 @Category(NeedsOwnMiniClusterTest.class)
 public class FailForUnsupportedHBaseVersionsIT {
-    private static final Log LOG = LogFactory.getLog(FailForUnsupportedHBaseVersionsIT.class);
+    private static final Logger LOGGER =
+            LoggerFactory.getLogger(FailForUnsupportedHBaseVersionsIT.class);
 
     /**
      * We don't support WAL Compression for HBase &lt; 0.94.9, so we shouldn't even allow the server
@@ -153,7 +154,7 @@ public class FailForUnsupportedHBaseVersionsIT {
                 // wait for the regionserver to abort - if this doesn't occur in the timeout, assume its
                 // broken.
                 while (!server.isAborted()) {
-                    LOG.debug("Waiting on regionserver to abort..");
+                    LOGGER.debug("Waiting on regionserver to abort..");
                 }
             }
 
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/jdbc/SecureUserConnectionsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/jdbc/SecureUserConnectionsIT.java
index 1ab54d2..2557ba3 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/jdbc/SecureUserConnectionsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/jdbc/SecureUserConnectionsIT.java
@@ -31,8 +31,6 @@ import java.util.List;
 import java.util.Properties;
 
 import org.apache.commons.io.FileUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.hbase.security.User;
@@ -49,6 +47,8 @@ import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Tests ConnectionQueryServices caching when Kerberos authentication is enabled. It's not
@@ -58,7 +58,7 @@ import org.junit.experimental.categories.Category;
  */
 @Category(NeedsOwnMiniClusterTest.class)
 public class SecureUserConnectionsIT {
-    private static final Log LOG = LogFactory.getLog(SecureUserConnectionsIT.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(SecureUserConnectionsIT.class);
     private static final int KDC_START_ATTEMPTS = 10;
 
     private static final File TEMP_DIR = new File(getClassTempDir());
@@ -87,7 +87,7 @@ public class SecureUserConnectionsIT {
                 KDC.start();
                 started = true;
             } catch (Exception e) {
-                LOG.warn("PHOENIX-3287: Failed to start KDC, retrying..", e);
+                LOGGER.warn("PHOENIX-3287: Failed to start KDC, retrying..", e);
             }
         }
         assertTrue("The embedded KDC failed to start successfully after " + KDC_START_ATTEMPTS
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
index 8f1abf0..48a02e6 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
@@ -61,8 +61,6 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.metrics2.AbstractMetric;
 import org.apache.phoenix.compile.StatementContext;
 import org.apache.phoenix.exception.SQLExceptionCode;
@@ -75,6 +73,8 @@ import org.apache.phoenix.util.PhoenixRuntime;
 import org.hamcrest.CoreMatchers;
 import org.junit.Test;
 import org.mockito.internal.util.reflection.Whitebox;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Joiner;
 import com.google.common.collect.Lists;
@@ -89,7 +89,7 @@ import com.google.common.collect.Sets;
  */
 public class PhoenixMetricsIT extends BasePhoenixMetricsIT {
 
-    private static final Log LOG = LogFactory.getLog(PhoenixMetricsIT.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixMetricsIT.class);
 
     @Test
     public void testResetGlobalPhoenixMetrics() throws Exception {
@@ -207,9 +207,9 @@ public class PhoenixMetricsIT extends BasePhoenixMetricsIT {
         }
 
         for (int i = 0; i < MAX_RETRIES; i++) {
-            LOG.info("Verifying Global Metrics from Hadoop Sink, Retry: " + (i + 1));
+            LOGGER.info("Verifying Global Metrics from Hadoop Sink, Retry: " + (i + 1));
             if (verifyMetricsFromSinkOnce(expectedMetrics)) {
-                LOG.info("Values from Hadoop Metrics Sink match actual values");
+                LOGGER.info("Values from Hadoop Metrics Sink match actual values");
                 return true;
             }
             try {
@@ -231,7 +231,8 @@ public class PhoenixMetricsIT extends BasePhoenixMetricsIT {
                         long expectedValue = value;
                         long actualValue = metric.value().longValue();
                         if (expectedValue != actualValue) {
-                            LOG.warn("Metric from Hadoop Sink: " + metric.name() + " didn't match expected.");
+                            LOGGER.warn("Metric from Hadoop Sink: "
+                                    + metric.name() + " didn't match expected.");
                             return false;
                         }
                         expectedMetrics.remove(metric.name());
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/query/ConnectionCachingIT.java b/phoenix-core/src/it/java/org/apache/phoenix/query/ConnectionCachingIT.java
index d1dda04..ec62a42 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/query/ConnectionCachingIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/query/ConnectionCachingIT.java
@@ -44,7 +44,7 @@ import org.slf4j.LoggerFactory;
 
 @RunWith(Parameterized.class)
 public class ConnectionCachingIT extends ParallelStatsEnabledIT {
-  private static final Logger LOG = LoggerFactory.getLogger(ConnectionCachingIT.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(ConnectionCachingIT.class);
 
   @Parameters(name= "phoenix.scanner.lease.renew.enabled={0}")
   public static Iterable<String> data() {
@@ -65,7 +65,7 @@ public class ConnectionCachingIT extends ParallelStatsEnabledIT {
     // The test driver works correctly, the real one doesn't.
     String url = getUrl();
     url = url.replace(";" + PhoenixRuntime.PHOENIX_TEST_DRIVER_URL_PARAM, "");
-    LOG.info("URL to use is: {}", url);
+    LOGGER.info("URL to use is: {}", url);
 
     Connection conn = DriverManager.getConnection(url, props);
     long before = getNumCachedConnections(conn);
@@ -76,7 +76,7 @@ public class ConnectionCachingIT extends ParallelStatsEnabledIT {
     Thread.sleep(QueryServicesOptions.DEFAULT_RUN_RENEW_LEASE_FREQUENCY_INTERVAL_MILLISECONDS / 2);
     long after = getNumCachedConnections(conn);
     for (int i = 0; i < 6; i++) {
-      LOG.info("Found {} connections cached", after);
+      LOGGER.info("Found {} connections cached", after);
       if (after <= before) {
         break;
       }
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/BaseStatsCollectorIT.java b/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/BaseStatsCollectorIT.java
index 26cd581..73e0d6c 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/BaseStatsCollectorIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/BaseStatsCollectorIT.java
@@ -45,8 +45,6 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import com.google.common.collect.Lists;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Result;
@@ -83,6 +81,8 @@ import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Maps;
 
@@ -97,7 +97,7 @@ import com.google.common.collect.Maps;
 @RunWith(Parameterized.class)
 public abstract class BaseStatsCollectorIT extends BaseUniqueNamesOwnClusterIT {
 
-    private static final Log LOG = LogFactory.getLog(BaseStatsCollectorIT.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(BaseStatsCollectorIT.class);
 
     private final String tableDDLOptions;
     private final boolean columnEncoded;
@@ -198,7 +198,7 @@ public abstract class BaseStatsCollectorIT extends BaseUniqueNamesOwnClusterIT {
             if (guidePostWidth != null) {
                 updateStatisticsSql += " SET \"" + QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB + "\" = " + guidePostWidth;
             }
-            LOG.info("Running SQL to collect stats: " + updateStatisticsSql);
+            LOGGER.info("Running SQL to collect stats: " + updateStatisticsSql);
             conn.createStatement().execute(updateStatisticsSql);
         }
     }
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/NoOpStatsCollectorIT.java b/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/NoOpStatsCollectorIT.java
index 87f58d7..a6852f4 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/NoOpStatsCollectorIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/NoOpStatsCollectorIT.java
@@ -18,8 +18,6 @@
 package org.apache.phoenix.schema.stats;
 
 import com.google.common.collect.Maps;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
 import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
 import org.apache.phoenix.query.QueryServices;
@@ -32,6 +30,8 @@ import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.sql.Array;
 import java.sql.Connection;
@@ -52,7 +52,7 @@ import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
 @Category(NeedsOwnMiniClusterTest.class)
 public class NoOpStatsCollectorIT extends ParallelStatsDisabledIT {
 
-    private static final Log LOG = LogFactory.getLog(NoOpStatsCollectorIT.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(NoOpStatsCollectorIT.class);
 
     private String fullTableName;
     private String physicalTableName;
@@ -89,7 +89,7 @@ public class NoOpStatsCollectorIT extends ParallelStatsDisabledIT {
     @Test
     public void testStatsCollectionViaSql() throws SQLException {
         String updateStatisticsSql = "UPDATE STATISTICS " + fullTableName;
-        LOG.info("Running SQL to collect stats: " + updateStatisticsSql);
+        LOGGER.info("Running SQL to collect stats: " + updateStatisticsSql);
         Statement stmt = conn.createStatement();
         try {
             stmt.execute(updateStatisticsSql);
@@ -107,7 +107,7 @@ public class NoOpStatsCollectorIT extends ParallelStatsDisabledIT {
      */
     @Test
     public void testStatsCollectionDuringMajorCompaction() throws Exception {
-        LOG.info("Running major compaction on table: " + physicalTableName);
+        LOGGER.info("Running major compaction on table: " + physicalTableName);
         TestUtil.doMajorCompaction(conn, physicalTableName);
 
         String q1 = "SELECT SUM(GUIDE_POSTS_ROW_COUNT) FROM SYSTEM.STATS WHERE PHYSICAL_NAME = '" + physicalTableName + "'";
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/trace/BaseTracingTestIT.java b/phoenix-core/src/it/java/org/apache/phoenix/trace/BaseTracingTestIT.java
index 708ecad..8a9f4e0 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/trace/BaseTracingTestIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/trace/BaseTracingTestIT.java
@@ -29,8 +29,6 @@ import java.util.Properties;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.htrace.Span;
 import org.apache.htrace.Trace;
 import org.apache.htrace.impl.MilliSpan;
@@ -42,6 +40,8 @@ import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.junit.After;
 import org.junit.Before;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Base test for tracing tests - helps manage getting tracing/non-tracing connections, as well as
@@ -50,7 +50,7 @@ import org.junit.Before;
 
 public class BaseTracingTestIT extends ParallelStatsDisabledIT {
 
-    private static final Log LOG = LogFactory.getLog(BaseTracingTestIT.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(BaseTracingTestIT.class);
 
     protected CountDownLatch latch;
     protected int defaultTracingThreadPoolForTest = 1;
@@ -154,7 +154,7 @@ public class BaseTracingTestIT extends ParallelStatsDisabledIT {
                 }
                 return connection;
             } catch (SQLException e) {
-                LOG.error("New connection failed for tracing Table: " + tableName, e);
+                LOGGER.error("New connection failed for tracing Table: " + tableName, e);
                 return null;
             }
         }
@@ -170,7 +170,7 @@ public class BaseTracingTestIT extends ParallelStatsDisabledIT {
                 executor.shutdownNow();
                 executor.awaitTermination(5, TimeUnit.SECONDS);
             } catch (InterruptedException e) {
-                LOG.error("Failed to stop the thread. ", e);
+                LOGGER.error("Failed to stop the thread. ", e);
             }
         }
 
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTracingEndToEndIT.java b/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTracingEndToEndIT.java
index 610195a..6557cec 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTracingEndToEndIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTracingEndToEndIT.java
@@ -31,8 +31,6 @@ import java.util.*;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.htrace.*;
 import org.apache.htrace.impl.ProbabilitySampler;
@@ -43,6 +41,8 @@ import org.apache.phoenix.trace.TraceReader.TraceHolder;
 import org.junit.Before;
 import org.junit.Ignore;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.ImmutableMap;
 
@@ -52,7 +52,7 @@ import com.google.common.collect.ImmutableMap;
 @Ignore("Will need to revisit for new HDFS/HBase/HTrace, broken on 5.x")
 public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
 
-    private static final Log LOG = LogFactory.getLog(PhoenixTracingEndToEndIT.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixTracingEndToEndIT.class);
     private static final int MAX_RETRIES = 10;
     private String enabledForLoggingTable;
     private String enableForLoggingIndex;
@@ -70,7 +70,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
     @Test
     public void testWriteSpans() throws Exception {
 
-        LOG.info("testWriteSpans TableName: " + tracingTableName);
+        LOGGER.info("testWriteSpans TableName: " + tracingTableName);
         // watch our sink so we know when commits happen
         latch = new CountDownLatch(1);
 
@@ -134,7 +134,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
     @Test
     public void testClientServerIndexingTracing() throws Exception {
 
-        LOG.info("testClientServerIndexingTracing TableName: " + tracingTableName);
+        LOGGER.info("testClientServerIndexingTracing TableName: " + tracingTableName);
         // one call for client side, one call for server side
         latch = new CountDownLatch(2);
         testTraceWriter.start();
@@ -145,7 +145,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
 
         // trace the requests we send
         Connection traceable = getTracingConnection();
-        LOG.debug("Doing dummy the writes to the tracked table");
+        LOGGER.debug("Doing dummy the writes to the tracked table");
         String insert = "UPSERT INTO " + enabledForLoggingTable + " VALUES (?, ?)";
         PreparedStatement stmt = traceable.prepareStatement(insert);
         stmt.setString(1, "key1");
@@ -160,7 +160,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
         traceable.commit();
 
         // wait for the latch to countdown, as the metrics system is time-based
-        LOG.debug("Waiting for latch to complete!");
+        LOGGER.debug("Waiting for latch to complete!");
         latch.await(200, TimeUnit.SECONDS);// should be way more than GC pauses
 
         // read the traces back out
@@ -213,7 +213,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
     @Test
     public void testScanTracing() throws Exception {
 
-        LOG.info("testScanTracing TableName: " + tracingTableName);
+        LOGGER.info("testScanTracing TableName: " + tracingTableName);
 
         // separate connections to minimize amount of traces that are generated
         Connection traceable = getTracingConnection();
@@ -227,7 +227,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
         createTestTable(conn, false);
 
         // update the table, but don't trace these, to simplify the traces we read
-        LOG.debug("Doing dummy the writes to the tracked table");
+        LOGGER.debug("Doing dummy the writes to the tracked table");
         String insert = "UPSERT INTO " + enabledForLoggingTable + " VALUES (?, ?)";
         PreparedStatement stmt = conn.prepareStatement(insert);
         stmt.setString(1, "key1");
@@ -266,7 +266,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
     @Test
     public void testScanTracingOnServer() throws Exception {
 
-        LOG.info("testScanTracingOnServer TableName: " + tracingTableName);
+        LOGGER.info("testScanTracingOnServer TableName: " + tracingTableName);
 
         // separate connections to minimize amount of traces that are generated
         Connection traceable = getTracingConnection();
@@ -280,7 +280,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
         createTestTable(conn, false);
 
         // update the table, but don't trace these, to simplify the traces we read
-        LOG.debug("Doing dummy the writes to the tracked table");
+        LOGGER.debug("Doing dummy the writes to the tracked table");
         String insert = "UPSERT INTO " + enabledForLoggingTable + " VALUES (?, ?)";
         PreparedStatement stmt = conn.prepareStatement(insert);
         stmt.setString(1, "key1");
@@ -318,7 +318,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
     @Test
     public void testCustomAnnotationTracing() throws Exception {
 
-        LOG.info("testCustomAnnotationTracing TableName: " + tracingTableName);
+        LOGGER.info("testCustomAnnotationTracing TableName: " + tracingTableName);
 
     	final String customAnnotationKey = "myannot";
     	final String customAnnotationValue = "a1";
@@ -335,7 +335,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
         createTestTable(conn, false);
 
         // update the table, but don't trace these, to simplify the traces we read
-        LOG.debug("Doing dummy the writes to the tracked table");
+        LOGGER.debug("Doing dummy the writes to the tracked table");
         String insert = "UPSERT INTO " + enabledForLoggingTable + " VALUES (?, ?)";
         PreparedStatement stmt = conn.prepareStatement(insert);
         stmt.setString(1, "key1");
@@ -421,7 +421,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
     @Test
     public void testSingleSpan() throws Exception {
 
-        LOG.info("testSingleSpan TableName: " + tracingTableName);
+        LOGGER.info("testSingleSpan TableName: " + tracingTableName);
 
         Properties props = new Properties(TEST_PROPERTIES);
         Connection conn = DriverManager.getConnection(getUrl(), props);
@@ -447,7 +447,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
     @Test
     public void testMultipleSpans() throws Exception {
 
-        LOG.info("testMultipleSpans TableName: " + tracingTableName);
+        LOGGER.info("testMultipleSpans TableName: " + tracingTableName);
 
         Connection conn = getConnectionWithoutTracing();
         latch = new CountDownLatch(4);
@@ -511,7 +511,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
         Iterator<SpanInfo> spanIter = trace.spans.iterator();
         for (Span span : spans) {
             SpanInfo spanInfo = spanIter.next();
-            LOG.info("Checking span:\n" + spanInfo);
+            LOGGER.info("Checking span:\n" + spanInfo);
 
             long parentId = span.getParentId();
             if(parentId == Span.ROOT_SPAN_ID) {
@@ -552,7 +552,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
         outer: while (retries < MAX_RETRIES) {
             Collection<TraceHolder> traces = reader.readAll(100);
             for (TraceHolder trace : traces) {
-                LOG.info("Got trace: " + trace);
+                LOGGER.info("Got trace: " + trace);
                 found = checker.foundTrace(trace);
                 if (found) {
                     break outer;
@@ -564,7 +564,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
                     }
                 }
             }
-            LOG.info("======  Waiting for tracing updates to be propagated ========");
+            LOGGER.info("======  Waiting for tracing updates to be propagated ========");
             Thread.sleep(1000);
             retries++;
         }
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcSchedulerFactory.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcSchedulerFactory.java
index 06cf708..fbec7b8 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcSchedulerFactory.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcSchedulerFactory.java
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hbase.ipc;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HConstants;
@@ -26,6 +24,10 @@ import org.apache.hadoop.hbase.regionserver.RpcSchedulerFactory;
 import org.apache.hadoop.hbase.regionserver.SimpleRpcSchedulerFactory;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.slf4j.Marker;
+import org.slf4j.MarkerFactory;
 
 import com.google.common.base.Preconditions;
 
@@ -35,8 +37,8 @@ import com.google.common.base.Preconditions;
  */
 public class PhoenixRpcSchedulerFactory implements RpcSchedulerFactory {
 
-    private static final Log LOG = LogFactory.getLog(PhoenixRpcSchedulerFactory.class);
-
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixRpcSchedulerFactory.class);
+    private static final Marker fatal = MarkerFactory.getMarker("FATAL");
     private static final String VERSION_TOO_OLD_FOR_INDEX_RPC =
             "Running an older version of HBase (less than 0.98.4), Phoenix index RPC handling cannot be enabled.";
 
@@ -48,7 +50,7 @@ public class PhoenixRpcSchedulerFactory implements RpcSchedulerFactory {
             // happens in <=0.98.4 where the scheduler factory is not visible
             delegate = new SimpleRpcSchedulerFactory().create(conf, priorityFunction, abortable);
         } catch (IllegalAccessError e) {
-            LOG.fatal(VERSION_TOO_OLD_FOR_INDEX_RPC);
+            LOGGER.error(fatal, VERSION_TOO_OLD_FOR_INDEX_RPC);
             throw e;
         }
 
@@ -61,7 +63,8 @@ public class PhoenixRpcSchedulerFactory implements RpcSchedulerFactory {
 
         // validate index and metadata priorities are not the same
         Preconditions.checkArgument(indexPriority != metadataPriority, "Index and Metadata priority must not be same "+ indexPriority);
-        LOG.info("Using custom Phoenix Index RPC Handling with index rpc priority " + indexPriority + " and metadata rpc priority " + metadataPriority);
+        LOGGER.info("Using custom Phoenix Index RPC Handling with index rpc priority "
+                + indexPriority + " and metadata rpc priority " + metadataPriority);
 
         PhoenixRpcScheduler scheduler =
                 new PhoenixRpcScheduler(conf, delegate, indexPriority, metadataPriority, priorityFunction,abortable);
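slf4j defines no FATAL level, so the hunk above replaces LOG.fatal(...) with an error-level call carrying a "FATAL" Marker. A minimal sketch of that idiom, assuming only slf4j-api; the FatalMarkerSketch class is hypothetical and not part of this commit:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;
    import org.slf4j.Marker;
    import org.slf4j.MarkerFactory;

    // Hypothetical sketch; not part of the commit.
    public class FatalMarkerSketch {
        private static final Logger LOGGER = LoggerFactory.getLogger(FatalMarkerSketch.class);
        // slf4j has no FATAL level, so the severity is carried as a Marker
        // that logging backends can match on for routing or filtering.
        private static final Marker FATAL = MarkerFactory.getMarker("FATAL");

        public void reportUnrecoverable(String message, Throwable cause) {
            LOGGER.error(FATAL, message, cause);
        }
    }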
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
index dfeae91..e578faa 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
@@ -25,8 +25,6 @@ import java.util.Map;
 import java.util.Optional;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CellUtil;
@@ -67,13 +65,16 @@ import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.RepairUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Lists;
 
 public class IndexHalfStoreFileReaderGenerator implements RegionObserver, RegionCoprocessor{
     
     private static final String LOCAL_INDEX_AUTOMATIC_REPAIR = "local.index.automatic.repair";
-    public static final Log LOG = LogFactory.getLog(IndexHalfStoreFileReaderGenerator.class);
+    public static final Logger LOGGER =
+            LoggerFactory.getLogger(IndexHalfStoreFileReaderGenerator.class);
 
     @Override
     public Optional<RegionObserver> getRegionObserver() {
@@ -200,10 +201,10 @@ public class IndexHalfStoreFileReaderGenerator implements RegionObserver, Region
         if (!store.hasReferences()) {
             InternalScanner repairScanner = null;
             if (request.isMajor() && (!RepairUtil.isLocalIndexStoreFilesConsistent(c.getEnvironment(), store))) {
-                LOG.info("we have found inconsistent data for local index for region:"
+                LOGGER.info("we have found inconsistent data for local index for region:"
                         + c.getEnvironment().getRegion().getRegionInfo());
                 if (c.getEnvironment().getConfiguration().getBoolean(LOCAL_INDEX_AUTOMATIC_REPAIR, true)) {
-                    LOG.info("Starting automatic repair of local Index for region:"
+                    LOGGER.info("Starting automatic repair of local Index for region:"
                             + c.getEnvironment().getRegion().getRegionInfo());
                     repairScanner = getRepairScanner(c.getEnvironment(), store);
                 }
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/BinaryCompatibleBaseDecoder.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/BinaryCompatibleBaseDecoder.java
index 80f2dd2..e30370f 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/BinaryCompatibleBaseDecoder.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/BinaryCompatibleBaseDecoder.java
@@ -24,10 +24,10 @@ import java.io.PushbackInputStream;
 
 import javax.annotation.Nonnull;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.codec.Codec;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This class is a copy paste version of org.apache.hadoop.hbase.codec.BaseDecoder class. 
@@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.codec.Codec;
  * HBASE-14501. See PHOENIX-2629 and PHOENIX-2636 for details.
  */
 public abstract class BinaryCompatibleBaseDecoder implements Codec.Decoder {
-  protected static final Log LOG = LogFactory.getLog(BinaryCompatibleBaseDecoder.class);
+  protected static final Logger LOGGER = LoggerFactory.getLogger(BinaryCompatibleBaseDecoder.class);
 
   protected final InputStream in;
   private Cell current = null;
@@ -79,11 +79,11 @@ public abstract class BinaryCompatibleBaseDecoder implements Codec.Decoder {
     try {
       isEof = this.in.available() == 0;
     } catch (Throwable t) {
-      LOG.trace("Error getting available for error message - ignoring", t);
+      LOGGER.trace("Error getting available for error message - ignoring", t);
     }
     if (!isEof) throw ioEx;
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("Partial cell read caused by EOF", ioEx);
+    if (LOGGER.isTraceEnabled()) {
+      LOGGER.trace("Partial cell read caused by EOF", ioEx);
     }
     EOFException eofEx = new EOFException("Partial cell read");
     eofEx.initCause(ioEx);
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/cache/GlobalCache.java b/phoenix-core/src/main/java/org/apache/phoenix/cache/GlobalCache.java
index 5f3e29b..a6512ea 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/cache/GlobalCache.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/cache/GlobalCache.java
@@ -54,7 +54,7 @@ import com.google.common.cache.Weigher;
  * @since 0.1
  */
 public class GlobalCache extends TenantCacheImpl {
-    private static final Logger logger = LoggerFactory.getLogger(GlobalCache.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(GlobalCache.class);
     private static volatile GlobalCache INSTANCE; 
     
     private final Configuration config;
@@ -65,16 +65,19 @@ public class GlobalCache extends TenantCacheImpl {
     
     public long clearTenantCache() {
         long unfreedBytes = getMemoryManager().getMaxMemory() - getMemoryManager().getAvailableMemory();
-        if (unfreedBytes != 0 && logger.isDebugEnabled()) {
-            logger.debug("Found " + (getMemoryManager().getMaxMemory() - getMemoryManager().getAvailableMemory()) + " bytes not freed from global cache");
+        if (unfreedBytes != 0 && LOGGER.isDebugEnabled()) {
+            LOGGER.debug("Found " + (getMemoryManager().getMaxMemory() - getMemoryManager()
+                    .getAvailableMemory()) + " bytes not freed from global cache");
         }
         removeAllServerCache();
         for (Map.Entry<ImmutableBytesWritable, TenantCache> entry : perTenantCacheMap.entrySet()) {
             TenantCache cache = entry.getValue();
             long unfreedTenantBytes = cache.getMemoryManager().getMaxMemory() - cache.getMemoryManager().getAvailableMemory();
-            if (unfreedTenantBytes != 0 && logger.isDebugEnabled()) {
+            if (unfreedTenantBytes != 0 && LOGGER.isDebugEnabled()) {
                 ImmutableBytesWritable cacheId = entry.getKey();
-                logger.debug("Found " + unfreedTenantBytes + " bytes not freed for tenant " + Bytes.toStringBinary(cacheId.get(), cacheId.getOffset(), cacheId.getLength()));
+                LOGGER.debug("Found " + unfreedTenantBytes + " bytes not freed for tenant "
+                        + Bytes.toStringBinary(cacheId.get(), cacheId.getOffset(),
+                        cacheId.getLength()));
             }
             unfreedBytes += unfreedTenantBytes;
             cache.removeAllServerCache();
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java b/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
index bb96637..8c12311 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
@@ -40,9 +40,6 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.codec.binary.Hex;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.client.Table;
@@ -78,8 +75,8 @@ import org.apache.phoenix.util.Closeables;
 import org.apache.phoenix.util.SQLCloseable;
 import org.apache.phoenix.util.SQLCloseables;
 import org.apache.phoenix.util.ScanUtil;
-
-import com.google.protobuf.ByteString;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * 
@@ -91,7 +88,7 @@ import com.google.protobuf.ByteString;
 public class ServerCacheClient {
     public static final int UUID_LENGTH = Bytes.SIZEOF_LONG;
     public static final byte[] KEY_IN_FIRST_REGION = new byte[]{0};
-    private static final Log LOG = LogFactory.getLog(ServerCacheClient.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(ServerCacheClient.class);
     private static final Random RANDOM = new Random();
 	public static final String HASH_JOIN_SERVER_CACHE_RESEND_PER_SERVER = "hash.join.server.cache.resend.per.server";
     private final PhoenixConnection connection;
@@ -284,7 +281,10 @@ public class ServerCacheClient {
                                 cacheUsingTable.getIndexType() == IndexType.LOCAL)) {
                     // Call RPC once per server
                     servers.add(entry);
-                    if (LOG.isDebugEnabled()) {LOG.debug(addCustomAnnotations("Adding cache entry to be sent for " + entry, connection));}
+                    if (LOGGER.isDebugEnabled()) {
+                        LOGGER.debug(addCustomAnnotations("Adding cache entry " +
+                            "to be sent for " + entry, connection));
+                    }
                     final byte[] key = getKeyInRegion(entry.getRegionInfo().getStartKey());
                     final Table htable = services.getTable(cacheUsingTable.getPhysicalName().getBytes());
                     closeables.add(htable);
@@ -311,7 +311,10 @@ public class ServerCacheClient {
                         }
                     }));
                 } else {
-                    if (LOG.isDebugEnabled()) {LOG.debug(addCustomAnnotations("NOT adding cache entry to be sent for " + entry + " since one already exists for that entry", connection));}
+                    if (LOGGER.isDebugEnabled()) {
+                        LOGGER.debug(addCustomAnnotations("NOT adding cache entry to be sent for "
+                                + entry + " since one already exists for that entry", connection));
+                    }
                 }
             }
             
@@ -350,7 +353,10 @@ public class ServerCacheClient {
                 }
             }
         }
-        if (LOG.isDebugEnabled()) {LOG.debug(addCustomAnnotations("Cache " + cacheId + " successfully added to servers.", connection));}
+        if (LOGGER.isDebugEnabled()) {
+            LOGGER.debug(addCustomAnnotations("Cache " + cacheId +
+                    " successfully added to servers.", connection));
+        }
         return hashCacheSpec;
     }
     
@@ -376,8 +382,9 @@ public class ServerCacheClient {
              * through the current metadata boundaries and remove the cache once for each server that we originally sent
              * to.
              */
-            if (LOG.isDebugEnabled()) {
-                LOG.debug(addCustomAnnotations("Removing Cache " + cacheId + " from servers.", connection));
+            if (LOGGER.isDebugEnabled()) {
+                LOGGER.debug(addCustomAnnotations(
+                        "Removing Cache " + cacheId + " from servers.", connection));
             }
             for (HRegionLocation entry : locations) {
              // Call once per server
@@ -420,13 +427,13 @@ public class ServerCacheClient {
                         remainingOnServers.remove(entry);
                     } catch (Throwable t) {
                         lastThrowable = t;
-                        LOG.error(addCustomAnnotations("Error trying to remove hash cache for " + entry, connection),
+                        LOGGER.error(addCustomAnnotations("Error trying to remove hash cache for " + entry, connection),
                                 t);
                     }
                 }
             }
             if (!remainingOnServers.isEmpty()) {
-                LOG.warn(addCustomAnnotations("Unable to remove hash cache for " + remainingOnServers, connection),
+                LOGGER.warn(addCustomAnnotations("Unable to remove hash cache for " + remainingOnServers, connection),
                         lastThrowable);
             }
         } finally {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/cache/TenantCacheImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/cache/TenantCacheImpl.java
index dc4c9e3..8038e9e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/cache/TenantCacheImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/cache/TenantCacheImpl.java
@@ -50,7 +50,7 @@ import com.google.common.cache.RemovalNotification;
  * @since 0.1
  */
 public class TenantCacheImpl implements TenantCache {
-    private static final Logger logger = LoggerFactory.getLogger(TenantCacheImpl.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(TenantCacheImpl.class);
     private final int maxTimeToLiveMs;
     private final int maxPersistenceTimeToLiveMs;
     private final MemoryManager memoryManager;
@@ -199,7 +199,7 @@ public class TenantCacheImpl implements TenantCache {
     }
 
     synchronized private void evictInactiveEntries(long bytesNeeded) {
-        logger.debug("Trying to evict inactive cache entries to free up " + bytesNeeded + " bytes");
+        LOGGER.debug("Trying to evict inactive cache entries to free up " + bytesNeeded + " bytes");
         CacheEntry[] entries = getPersistentServerCaches().asMap().values().toArray(new CacheEntry[]{});
         Arrays.sort(entries);
         long available = this.getMemoryManager().getAvailableMemory();
@@ -208,7 +208,7 @@ public class TenantCacheImpl implements TenantCache {
             ImmutableBytesPtr cacheId = entry.getCacheId();
             getPersistentServerCaches().invalidate(cacheId);
             available = this.getMemoryManager().getAvailableMemory();
-            logger.debug("Evicted cache ID " + Bytes.toLong(cacheId.get()) + ", we now have " + available + " bytes available");
+            LOGGER.debug("Evicted cache ID " + Bytes.toLong(cacheId.get()) + ", we now have " + available + " bytes available");
         }
     }
 
@@ -273,7 +273,7 @@ public class TenantCacheImpl implements TenantCache {
         }
         entry.decrementLiveQueryCount();
         if (!entry.isLive()) {
-            logger.debug("Cache ID " + Bytes.toLong(cacheId.get()) + " is no longer live, invalidate it");
+            LOGGER.debug("Cache ID " + Bytes.toLong(cacheId.get()) + " is no longer live, invalidate it");
             getServerCaches().invalidate(cacheId);
         }
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillFile.java b/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillFile.java
index a47cfdf..ec08ac3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillFile.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillFile.java
@@ -42,7 +42,7 @@ import java.util.UUID;
  */
 public class SpillFile implements Closeable {
 
-    private static final Logger logger = LoggerFactory.getLogger(SpillFile.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(SpillFile.class);
     // Default size for a single spillFile 2GB
     private static final int SPILL_FILE_SIZE = Integer.MAX_VALUE;
     // Page size for a spill file 4K
@@ -68,13 +68,13 @@ public class SpillFile implements Closeable {
             Closeables.closeQuietly(rndFile);
 
             if (file != null) {
-                if (logger.isDebugEnabled()) {
-                    logger.debug("Deleting tempFile: " + file.getAbsolutePath());
+                if (LOGGER.isDebugEnabled()) {
+                    LOGGER.debug("Deleting tempFile: " + file.getAbsolutePath());
                 }
                 try {
                     file.delete();
                 } catch (SecurityException e) {
-                    logger.warn("IOException thrown while closing Closeable." + e);
+                    LOGGER.warn("IOException thrown while closing Closeable." + e);
                 }
             }
         }
@@ -104,8 +104,8 @@ public class SpillFile implements Closeable {
         // Create temp file in temp dir or custom dir if provided
         File tempFile = File.createTempFile(UUID.randomUUID().toString(),
           null, spillFilesDirectory);
-        if (logger.isDebugEnabled()) {
-            logger.debug("Creating new SpillFile: " + tempFile.getAbsolutePath());
+        if (LOGGER.isDebugEnabled()) {
+            LOGGER.debug("Creating new SpillFile: " + tempFile.getAbsolutePath());
         }
         RandomAccessFile file = new RandomAccessFile(tempFile, "rw");
         file.setLength(SPILL_FILE_SIZE);
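A side note on the SpillFile hunk above, which still concatenates the caught exception into the warn message (so only its toString() is recorded): slf4j logs the full stack trace when the Throwable is passed as the final argument. A minimal sketch of that idiom, with hypothetical class and method names; it is not what this commit changes:

    import java.io.File;

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Hypothetical sketch; not part of the commit.
    public class ExceptionLoggingSketch {
        private static final Logger LOGGER = LoggerFactory.getLogger(ExceptionLoggingSketch.class);

        public void deleteQuietly(File file) {
            try {
                file.delete();
            } catch (SecurityException e) {
                // Passing the exception as the final argument (rather than
                // concatenating it) makes slf4j record the full stack trace;
                // the {} placeholder is still filled from the other argument.
                LOGGER.warn("Failed to delete temp file {}", file.getAbsolutePath(), e);
            }
        }
    }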
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java b/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java
index 69d5144..bb9e63b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java
@@ -93,7 +93,7 @@ import org.slf4j.LoggerFactory;
 
 public class SpillableGroupByCache implements GroupByCache {
 
-    private static final Logger logger = LoggerFactory.getLogger(SpillableGroupByCache.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(SpillableGroupByCache.class);
 
     // Min size of 1st level main memory cache in bytes --> lower bound
     private static final int SPGBY_CACHE_MIN_SIZE = 4096; // 4K
@@ -148,13 +148,13 @@ public class SpillableGroupByCache implements GroupByCache {
         try {
             this.chunk = tenantCache.getMemoryManager().allocate(estSize);
         } catch (InsufficientMemoryException ime) {
-            logger.error("Requested Map size exceeds memory limit, please decrease max size via config paramter: "
+            LOGGER.error("Requested Map size exceeds memory limit, please decrease max size via config paramter: "
                     + GROUPBY_MAX_CACHE_SIZE_ATTRIB);
             throw ime;
         }
 
-        if (logger.isDebugEnabled()) {
-            logger.debug("Instantiating LRU groupby cache of element size: " + maxCacheSize);
+        if (LOGGER.isDebugEnabled()) {
+            LOGGER.debug("Instantiating LRU groupby cache of element size: " + maxCacheSize);
         }
 
         // LRU cache implemented as LinkedHashMap with access order
@@ -240,8 +240,8 @@ public class SpillableGroupByCache implements GroupByCache {
             if (rowAggregators == null) {
                 // No, key never spilled before, create a new tuple
                 rowAggregators = aggregators.newAggregators(env.getConfiguration());
-                if (logger.isDebugEnabled()) {
-                    logger.debug("Adding new aggregate bucket for row key "
+                if (LOGGER.isDebugEnabled()) {
+                    LOGGER.debug("Adding new aggregate bucket for row key "
                             + Bytes.toStringBinary(key.get(), key.getOffset(), key.getLength()));
                 }
             }
@@ -359,8 +359,8 @@ public class SpillableGroupByCache implements GroupByCache {
                 ImmutableBytesWritable key = ce.getKey();
                 Aggregator[] aggs = ce.getValue();
                 byte[] value = aggregators.toBytes(aggs);
-                if (logger.isDebugEnabled()) {
-                    logger.debug("Adding new distinct group: "
+                if (LOGGER.isDebugEnabled()) {
+                    LOGGER.debug("Adding new distinct group: "
                             + Bytes.toStringBinary(key.get(), key.getOffset(), key.getLength()) + " with aggregators "
                             + aggs.toString() + " value = " + Bytes.toStringBinary(value));
                 }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/call/CallRunner.java b/phoenix-core/src/main/java/org/apache/phoenix/call/CallRunner.java
index 7dc90f8..face677 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/call/CallRunner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/call/CallRunner.java
@@ -19,8 +19,8 @@ package org.apache.phoenix.call;
 
 import java.util.concurrent.Callable;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Helper class to run a Call with a set of {@link CallWrapper}
@@ -38,7 +38,7 @@ public class CallRunner {
         public V call() throws E;
     }
 
-    private static final Log LOG = LogFactory.getLog(CallRunner.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(CallRunner.class);
 
     private CallRunner() {
         // no ctor for util class
@@ -57,7 +57,7 @@ public class CallRunner {
                 try {
                     wrappers[i].after();
                 } catch (Exception e) {
-                    LOG.error("Failed to complete wrapper " + wrappers[i], e);
+                    LOGGER.error("Failed to complete wrapper " + wrappers[i], e);
                 }
             }
         }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
index dbfc607..9f7bc8e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
@@ -107,7 +107,7 @@ import com.google.common.collect.Lists;
  * @since 0.1
  */
 public class FromCompiler {
-    private static final Logger logger = LoggerFactory.getLogger(FromCompiler.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(FromCompiler.class);
 
     public static final ColumnResolver EMPTY_TABLE_RESOLVER = new ColumnResolver() {
 
@@ -646,8 +646,8 @@ public class FromCompiler {
                 timeStamp += tsAddition;
             }
             TableRef tableRef = new TableRef(tableNode.getAlias(), theTable, timeStamp, !dynamicColumns.isEmpty());
-            if (logger.isDebugEnabled() && timeStamp != QueryConstants.UNSET_TIMESTAMP) {
-                logger.debug(LogUtil.addCustomAnnotations("Re-resolved stale table " + fullTableName + " with seqNum " + tableRef.getTable().getSequenceNumber() + " at timestamp " + tableRef.getTable().getTimeStamp() + " with " + tableRef.getTable().getColumns().size() + " columns: " + tableRef.getTable().getColumns(), connection));
+            if (LOGGER.isDebugEnabled() && timeStamp != QueryConstants.UNSET_TIMESTAMP) {
+                LOGGER.debug(LogUtil.addCustomAnnotations("Re-resolved stale table " + fullTableName + " with seqNum " + tableRef.getTable().getSequenceNumber() + " at timestamp " + tableRef.getTable().getTimeStamp() + " with " + tableRef.getTable().getColumns().size() + " columns: " + tableRef.getTable().getColumns(), connection));
             }
             return tableRef;
         }
@@ -695,8 +695,8 @@ public class FromCompiler {
                 timeStamp += tsAddition;
             }
             
-            if (logger.isDebugEnabled() && timeStamp != QueryConstants.UNSET_TIMESTAMP) {
-                logger.debug(LogUtil.addCustomAnnotations("Re-resolved stale function " + functionNames.toString() + "at timestamp " + timeStamp, connection));
+            if (LOGGER.isDebugEnabled() && timeStamp != QueryConstants.UNSET_TIMESTAMP) {
+                LOGGER.debug(LogUtil.addCustomAnnotations("Re-resolved stale function " + functionNames.toString() + "at timestamp " + timeStamp, connection));
             }
             return functionsFound;
         }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
index e58407f..78a1fc0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
@@ -90,7 +90,7 @@ import com.google.common.collect.Maps;
  * @since 0.1
  */
 public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver implements RegionCoprocessor {
-    private static final Logger logger = LoggerFactory
+    private static final Logger LOGGER = LoggerFactory
             .getLogger(GroupedAggregateRegionObserver.class);
     public static final int MIN_DISTINCT_VALUES = 100;
     
@@ -286,8 +286,8 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver im
                 // If Aggregators not found for this distinct
                 // value, clone our original one (we need one
                 // per distinct value)
-                if (logger.isDebugEnabled()) {
-                    logger.debug(LogUtil.addCustomAnnotations("Adding new aggregate bucket for row key "
+                if (LOGGER.isDebugEnabled()) {
+                    LOGGER.debug(LogUtil.addCustomAnnotations("Adding new aggregate bucket for row key "
                             + Bytes.toStringBinary(key.get(), key.getOffset(),
                                 key.getLength()), customAnnotations));
                 }
@@ -321,8 +321,8 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver im
                 // Generate byte array of Aggregators and set as value of row
                 byte[] value = aggregators.toBytes(rowAggregators);
 
-                if (logger.isDebugEnabled()) {
-                    logger.debug(LogUtil.addCustomAnnotations("Adding new distinct group: "
+                if (LOGGER.isDebugEnabled()) {
+                    LOGGER.debug(LogUtil.addCustomAnnotations("Adding new distinct group: "
                             + Bytes.toStringBinary(key.get(), key.getOffset(), key.getLength())
                             + " with aggregators " + Arrays.asList(rowAggregators).toString()
                             + " value = " + Bytes.toStringBinary(value), customAnnotations));
@@ -390,8 +390,8 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver im
     private RegionScanner scanUnordered(ObserverContext<RegionCoprocessorEnvironment> c, Scan scan,
             final RegionScanner scanner, final List<Expression> expressions,
             final ServerAggregators aggregators, long limit) throws IOException {
-        if (logger.isDebugEnabled()) {
-            logger.debug(LogUtil.addCustomAnnotations("Grouped aggregation over unordered rows with scan " + scan
+        if (LOGGER.isDebugEnabled()) {
+            LOGGER.debug(LogUtil.addCustomAnnotations("Grouped aggregation over unordered rows with scan " + scan
                     + ", group by " + expressions + ", aggregators " + aggregators, ScanUtil.getCustomAnnotations(scan)));
         }
         RegionCoprocessorEnvironment env = c.getEnvironment();
@@ -418,8 +418,8 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver im
         try {
             boolean hasMore;
             Tuple result = useQualifierAsIndex ? new PositionBasedMultiKeyValueTuple() : new MultiKeyValueTuple();
-            if (logger.isDebugEnabled()) {
-                logger.debug(LogUtil.addCustomAnnotations("Spillable groupby enabled: " + spillableEnabled, ScanUtil.getCustomAnnotations(scan)));
+            if (LOGGER.isDebugEnabled()) {
+                LOGGER.debug(LogUtil.addCustomAnnotations("Spillable groupby enabled: " + spillableEnabled, ScanUtil.getCustomAnnotations(scan)));
             }
             Region region = c.getEnvironment().getRegion();
             boolean acquiredLock = false;
@@ -474,8 +474,8 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver im
             final Scan scan, final RegionScanner scanner, final List<Expression> expressions,
             final ServerAggregators aggregators, final long limit) throws IOException {
 
-        if (logger.isDebugEnabled()) {
-            logger.debug(LogUtil.addCustomAnnotations("Grouped aggregation over ordered rows with scan " + scan + ", group by "
+        if (LOGGER.isDebugEnabled()) {
+            LOGGER.debug(LogUtil.addCustomAnnotations("Grouped aggregation over ordered rows with scan " + scan + ", group by "
                     + expressions + ", aggregators " + aggregators, ScanUtil.getCustomAnnotations(scan)));
         }
         final Pair<Integer, Integer> minMaxQualifiers = EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan);
@@ -516,8 +516,8 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver im
                                 aggBoundary = currentKey != null && currentKey.compareTo(key) != 0;
                                 if (!aggBoundary) {
                                     aggregators.aggregate(rowAggregators, result);
-                                    if (logger.isDebugEnabled()) {
-                                        logger.debug(LogUtil.addCustomAnnotations(
+                                    if (LOGGER.isDebugEnabled()) {
+                                        LOGGER.debug(LogUtil.addCustomAnnotations(
                                             "Row passed filters: " + kvs
                                             + ", aggregated values: "
                                             + Arrays.asList(rowAggregators),
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index dd47a7d..a3d44ba 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -301,7 +301,7 @@ import com.google.protobuf.Service;
  */
 @SuppressWarnings("deprecation")
 public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCoprocessor {
-    private static final Logger logger = LoggerFactory.getLogger(MetaDataEndpointImpl.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(MetaDataEndpointImpl.class);
 
     // Column to track tables that have been upgraded based on PHOENIX-2067
     public static final String ROW_KEY_ORDER_OPTIMIZABLE = "ROW_KEY_ORDER_OPTIMIZABLE";
@@ -574,7 +574,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr
         this.allowSystemCatalogRollback = config.getBoolean(QueryServices.ALLOW_SPLITTABLE_SYSTEM_CATALOG_ROLLBACK,
                 QueryServicesOptions.DEFAULT_ALLOW_SPLITTABLE_SYSTEM_CATALOG_ROLLBACK);
 
-        logger.info("Starting Tracing-Metrics Systems");
+        LOGGER.info("Starting Tracing-Metrics Systems");
         // Start the phoenix trace collection
         Tracing.addTraceMetricsSource();
         Metrics.ensureConfigured();
@@ -654,7 +654,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr
             }
             done.run(builder.build());
         } catch (Throwable t) {
-            logger.error("getTable failed", t);
+            LOGGER.error("getTable failed", t);
             ProtobufUtil.setControllerException(controller,
                 ServerUtil.createIOException(SchemaUtil.getTableName(schemaName, tableName), t));
         }
@@ -1029,8 +1029,8 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr
                     // the PTable added to the cache doesn't include parent columns as we always call 
                     // combine columns after looking up the PTable from the cache
                     && !skipAddingIndexes) {
-                if (logger.isDebugEnabled()) {
-                    logger.debug("Caching table "
+                if (LOGGER.isDebugEnabled()) {
+                    LOGGER.debug("Caching table "
                             + Bytes.toStringBinary(cacheKey.get(), cacheKey.getOffset(),
                                 cacheKey.getLength())
                             + " at seqNum " + newTable.getSequenceNumber()
@@ -2226,7 +2226,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr
                     if (tableType == PTableType.INDEX || allowSystemCatalogRollback) {
                         result = checkTableKeyInRegion(parentTableKey, region);
                         if (result != null) {
-                            logger.error("Unable to lock parentTableKey "+Bytes.toStringBinary(parentTableKey));
+                            LOGGER.error("Unable to lock parentTableKey "+Bytes.toStringBinary(parentTableKey));
                             // if allowSystemCatalogRollback is true and we can't lock the parentTableKey (because
                             // SYSTEM.CATALOG already split) return UNALLOWED_TABLE_MUTATION so that the client
                             // knows the create statement failed
@@ -2441,10 +2441,10 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr
                     }
                     else {
                         String msg = "Found unexpected mutations while creating "+fullTableName;
-                        logger.error(msg);
+                        LOGGER.error(msg);
                         for (Mutation m : remoteMutations) {
-                            logger.debug("Mutation rowkey : " + Bytes.toStringBinary(m.getRow()));
-                            logger.debug("Mutation family cell map : " + m.getFamilyCellMap());
+                            LOGGER.debug("Mutation rowkey : " + Bytes.toStringBinary(m.getRow()));
+                            LOGGER.debug("Mutation family cell map : " + m.getFamilyCellMap());
                         }
                         throw new IllegalStateException(msg);
                     }
@@ -2479,7 +2479,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr
                 ServerUtil.releaseRowLocks(locks);
             }
         } catch (Throwable t) {
-            logger.error("createTable failed", t);
+            LOGGER.error("createTable failed", t);
             ProtobufUtil.setControllerException(controller,
                     ServerUtil.createIOException(fullTableName, t));
         }
@@ -2528,8 +2528,8 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr
                 byte[] viewTenantId = viewInfo.getTenantId();
                 byte[] viewSchemaName = viewInfo.getSchemaName();
                 byte[] viewName = viewInfo.getTableName();
-                if (logger.isDebugEnabled()) {
-                    logger.debug("dropChildViews :" + Bytes.toString(schemaName) + "." + Bytes.toString(tableName) +
+                if (LOGGER.isDebugEnabled()) {
+                    LOGGER.debug("dropChildViews :" + Bytes.toString(schemaName) + "." + Bytes.toString(tableName) +
                             " -> " + Bytes.toString(viewSchemaName) + "." + Bytes.toString(viewName) +
                             "with tenant id :" + Bytes.toString(viewTenantId));
                 }
@@ -2546,7 +2546,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr
                                 new DropTableStatement(viewTableName, PTableType.VIEW, true, true, true));
                     }
                     catch (TableNotFoundException e) {
-                        logger.info("Ignoring view "+viewTableName+" as it has already been dropped");
+                        LOGGER.info("Ignoring view "+viewTableName+" as it has already been dropped");
                     }
                 }
             }
@@ -2708,10 +2708,10 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr
                 if (!remoteMutations.isEmpty()) {
                     // while dropping a table all the mutations should be local
                     String msg = "Found unexpected mutations while dropping table "+SchemaUtil.getTableName(schemaName, tableName);
-                    logger.error(msg);
+                    LOGGER.error(msg);
                     for (Mutation m : remoteMutations) {
-                        logger.debug("Mutation rowkey : " + Bytes.toStringBinary(m.getRow()));
-                        logger.debug("Mutation family cell map : " + m.getFamilyCellMap());
+                        LOGGER.debug("Mutation rowkey : " + Bytes.toStringBinary(m.getRow()));
+                        LOGGER.debug("Mutation family cell map : " + m.getFamilyCellMap());
                     }
                     throw new IllegalStateException(msg);
                 }
@@ -2745,7 +2745,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr
                 ServerUtil.releaseRowLocks(locks);
             }
         } catch (Throwable t) {
-          logger.error("dropTable failed", t);
+          LOGGER.error("dropTable failed", t);
             ProtobufUtil.setControllerException(controller,
                 ServerUtil.createIOException(SchemaUtil.getTableName(schemaName, tableName), t));
         }
@@ -2773,7 +2773,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr
                     SchemaUtil.getPhysicalTableName(systemTableName, env.getConfiguration()))) {
             hTable.batch(remoteMutations, null);
         } catch (Throwable t) {
-            logger.error("Unable to write mutations to " + Bytes.toString(systemTableName), t);
+            LOGGER.error("Unable to write mutations to " + Bytes.toString(systemTableName), t);
             builder.setReturnCode(mutationCode);
             builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
             return builder.build();
@@ -2851,7 +2851,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr
                             Task.addTask(conn, PTable.TaskType.DROP_CHILD_VIEWS, Bytes.toString(tenantId),
                                 Bytes.toString(schemaName), Bytes.toString(tableName), this.accessCheckEnabled);
                         } catch (Throwable t) {
-                            logger.error("Adding a task to drop child views failed!", t);
+                            LOGGER.error("Adding a task to drop child views failed!", t);
                         }
                     }
                 }
@@ -2942,12 +2942,12 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr
                 invalidateList.add(cacheKey);
                 long clientTimeStamp = MetaDataUtil.getClientTimeStamp(tableMetadata);
                 PTable table = getTableFromCache(cacheKey, clientTimeStamp, clientVersion, false, false, null);
-                if (logger.isDebugEnabled()) {
+                if (LOGGER.isDebugEnabled()) {
                     if (table == null) {
-                        logger.debug("Table " + Bytes.toStringBinary(key)
+                        LOGGER.debug("Table " + Bytes.toStringBinary(key)
                                 + " not found in cache. Will build through scan");
                     } else {
-                        logger.debug("Table " + Bytes.toStringBinary(key)
+                        LOGGER.debug("Table " + Bytes.toStringBinary(key)
                                 + " found in cache with timestamp " + table.getTimeStamp()
                                 + " seqNum " + table.getSequenceNumber());
                     }
@@ -2959,7 +2959,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr
                     // found
                     table = buildDeletedTable(key, cacheKey, region, clientTimeStamp);
                     if (table != null) {
-                        logger.info("Found newer table deleted as of " + table.getTimeStamp() + " versus client timestamp of " + clientTimeStamp);
+                        LOGGER.info("Found newer table deleted as of " + table.getTimeStamp() + " versus client timestamp of " + clientTimeStamp);
                         return new MetaDataMutationResult(MutationCode.NEWER_TABLE_FOUND,
                                 EnvironmentEdgeManager.currentTimeMillis(), null);
                     }
@@ -2967,7 +2967,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr
                             EnvironmentEdgeManager.currentTimeMillis(), null);
                 }
                 if (table.getTimeStamp() >= clientTimeStamp) {
-                    logger.info("Found newer table as of " + table.getTimeStamp() + " versus client timestamp of "
+                    LOGGER.info("Found newer table as of " + table.getTimeStamp() + " versus client timestamp of "
                             + clientTimeStamp);
                     return new MetaDataMutationResult(MutationCode.NEWER_TABLE_FOUND,
                             EnvironmentEdgeManager.currentTimeMillis(), table);
@@ -2976,15 +2976,15 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr
                 long expectedSeqNum = MetaDataUtil.getSequenceNumber(tableMetadata) - 1; // lookup TABLE_SEQ_NUM in
                                                                                          // tableMetaData
 
-                if (logger.isDebugEnabled()) {
-                    logger.debug("For table " + Bytes.toStringBinary(key) + " expecting seqNum "
+                if (LOGGER.isDebugEnabled()) {
+                    LOGGER.debug("For table " + Bytes.toStringBinary(key) + " expecting seqNum "
                             + expectedSeqNum + " and found seqNum " + table.getSequenceNumber()
                             + " with " + table.getColumns().size() + " columns: "
                             + table.getColumns());
                 }
                 if (expectedSeqNum != table.getSequenceNumber()) {
-                    if (logger.isDebugEnabled()) {
-                        logger.debug("For table " + Bytes.toStringBinary(key)
+                    if (LOGGER.isDebugEnabled()) {
+                        LOGGER.debug("For table " + Bytes.toStringBinary(key)
                                 + " returning CONCURRENT_TABLE_MUTATION due to unexpected seqNum");
                     }
                     return new MetaDataMutationResult(MutationCode.CONCURRENT_TABLE_MUTATION,
@@ -3043,10 +3043,10 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr
                     }
                     else {
                         String msg = "Found unexpected mutations while adding or dropping column to "+fullTableName;
-                        logger.error(msg);
+                        LOGGER.error(msg);
                         for (Mutation m : remoteMutations) {
-                            logger.debug("Mutation rowkey : " + Bytes.toStringBinary(m.getRow()));
-                            logger.debug("Mutation family cell map : " + m.getFamilyCellMap());
+                            LOGGER.debug("Mutation rowkey : " + Bytes.toStringBinary(m.getRow()));
+                            LOGGER.debug("Mutation family cell map : " + m.getFamilyCellMap());
                         }
                         throw new IllegalStateException(msg);
                     }
@@ -3184,7 +3184,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr
             // which will deadlock)
             // we also don't need to include indexes
             if (view == null) {
-                logger.warn("Found invalid tenant view row in SYSTEM.CATALOG with tenantId:"
+                LOGGER.warn("Found invalid tenant view row in SYSTEM.CATALOG with tenantId:"
                         + Bytes.toString(tenantId) + ", schema:" + Bytes.toString(schema)
                         + ", table:" + Bytes.toString(table));
                 continue;
@@ -3492,14 +3492,14 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr
                                         EnvironmentEdgeManager.currentTimeMillis(), null);
                             }
                             else if (request.getClientVersion()< MIN_SPLITTABLE_SYSTEM_CATALOG ) {
-                                logger.error(
+                                LOGGER.error(
                                     "Unable to add a column as the client is older than "
                                             + MIN_SPLITTABLE_SYSTEM_CATALOG);
                                 return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION,
                                         EnvironmentEdgeManager.currentTimeMillis(), null);
                             }
                             else if (allowSystemCatalogRollback) {
-                                logger.error("Unable to add a column as the "
+                                LOGGER.error("Unable to add a column as the "
                                         + QueryServices.ALLOW_SPLITTABLE_SYSTEM_CATALOG_ROLLBACK
                                         + " config is set to true");
                                 return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION,
@@ -3627,7 +3627,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr
                 done.run(MetaDataMutationResult.toProto(result));
             }
         } catch (Throwable e) {
-            logger.error("Add column failed: ", e);
+            LOGGER.error("Add column failed: ", e);
             ProtobufUtil.setControllerException(controller,
                 ServerUtil.createIOException("Error when adding column: ", e));
         }
@@ -3955,7 +3955,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr
                 done.run(MetaDataMutationResult.toProto(result));
             }
         } catch (Throwable e) {
-            logger.error("Drop column failed: ", e);
+            LOGGER.error("Drop column failed: ", e);
             ProtobufUtil.setControllerException(controller,
                 ServerUtil.createIOException("Error when dropping column: ", e));
         }
@@ -4065,7 +4065,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr
         Configuration config = env.getConfiguration();
         if (isTablesMappingEnabled
                 && MetaDataProtocol.MIN_NAMESPACE_MAPPED_PHOENIX_VERSION > request.getClientVersion()) {
-            logger.error("Old client is not compatible when" + " system tables are upgraded to map to namespace");
+            LOGGER.error("Old client is not compatible when" + " system tables are upgraded to map to namespace");
             ProtobufUtil.setControllerException(controller,
                     ServerUtil.createIOException(
                             SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES,
@@ -4082,7 +4082,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr
                             PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE_BYTES, HConstants.LATEST_TIMESTAMP, null,
                             request.getClientVersion(), false, false, null);
         } catch (Throwable t) {
-            logger.error("loading system catalog table inside getVersion failed", t);
+            LOGGER.error("loading system catalog table inside getVersion failed", t);
             ProtobufUtil.setControllerException(controller,
               ServerUtil.createIOException(
                 SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES,
@@ -4356,7 +4356,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr
                 rowLock.release();
             }
         } catch (Throwable t) {
-          logger.error("updateIndexState failed", t);
+          LOGGER.error("updateIndexState failed", t);
             ProtobufUtil.setControllerException(controller,
                 ServerUtil.createIOException(SchemaUtil.getTableName(schemaName, tableName), t));
         }
@@ -4370,7 +4370,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr
     private static MetaDataMutationResult checkTableKeyInRegion(byte[] key, Region region) {
         MetaDataMutationResult result = checkKeyInRegion(key, region, MutationCode.TABLE_NOT_IN_REGION);
         if (result!=null) {
-            logger.error("Table rowkey " + Bytes.toStringBinary(key)
+            LOGGER.error("Table rowkey " + Bytes.toStringBinary(key)
             + " is not in the current region " + region.getRegionInfo());
         }
         return result;
@@ -4424,7 +4424,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr
                     GlobalCache.getInstance(this.env).getMetaDataCache();
             metaDataCache.invalidate(cacheKey);
         } catch (Throwable t) {
-            logger.error("clearTableFromCache failed", t);
+            LOGGER.error("clearTableFromCache failed", t);
             ProtobufUtil.setControllerException(controller,
                 ServerUtil.createIOException(SchemaUtil.getTableName(schemaName, tableName), t));
         }
@@ -4521,7 +4521,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr
             done.run(builder.build());
             return;
         } catch (Throwable t) {
-            logger.error("getFunctions failed", t);
+            LOGGER.error("getFunctions failed", t);
             ProtobufUtil.setControllerException(controller,
                 ServerUtil.createIOException(functionNames.toString(), t));
         }
@@ -4595,7 +4595,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr
                 ServerUtil.releaseRowLocks(locks);
             }
         } catch (Throwable t) {
-          logger.error("createFunction failed", t);
+          LOGGER.error("createFunction failed", t);
             ProtobufUtil.setControllerException(controller,
                 ServerUtil.createIOException(Bytes.toString(functionName), t));
         }
@@ -4647,7 +4647,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr
                 ServerUtil.releaseRowLocks(locks);
             }
         } catch (Throwable t) {
-          logger.error("dropFunction failed", t);
+          LOGGER.error("dropFunction failed", t);
             ProtobufUtil.setControllerException(controller,
                 ServerUtil.createIOException(Bytes.toString(functionName), t));
         }
@@ -4762,7 +4762,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr
                 ServerUtil.releaseRowLocks(locks);
             }
         } catch (Throwable t) {
-            logger.error("Creating the schema" + schemaName + "failed", t);
+            LOGGER.error("Creating the schema" + schemaName + "failed", t);
             ProtobufUtil.setControllerException(controller, ServerUtil.createIOException(schemaName, t));
         }
     }
@@ -4806,7 +4806,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr
                 ServerUtil.releaseRowLocks(locks);
             }
         } catch (Throwable t) {
-            logger.error("drop schema failed:", t);
+            LOGGER.error("drop schema failed:", t);
             ProtobufUtil.setControllerException(controller, ServerUtil.createIOException(schemaName, t));
         }
     }
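
The MetaDataEndpointImpl hunks above rename the lowercase logger field to the constant-style LOGGER and keep the throwable as the last argument so slf4j records the stack trace. A minimal, self-contained sketch of that pattern; the class and method names below are illustrative, not taken from the commit:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class DropTableLoggingSketch {
        private static final Logger LOGGER = LoggerFactory.getLogger(DropTableLoggingSketch.class);

        void dropTable(String schemaName, String tableName) {
            try {
                // ... drop logic elided ...
            } catch (Throwable t) {
                // passing the exception as the final argument preserves its stack trace
                LOGGER.error("dropTable failed", t);
            }
        }
    }
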
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
index ad78f7e..2347c5e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
@@ -482,7 +482,7 @@ public class MetaDataRegionObserver implements RegionObserver,RegionCoprocessor
 								long disabledTimeStampVal = index.getIndexDisableTimestamp();
 								if (disabledTimeStampVal != 0) {
                                     if (signOfDisableTimeStamp != 0 && signOfDisableTimeStamp != Long.signum(disabledTimeStampVal)) {
-                                        LOG.warn("Found unexpected mix of signs with INDEX_DISABLE_TIMESTAMP for " + dataPTable.getName().getString() + " with " + indexesToPartiallyRebuild); 
+                                        LOG.warn("Found unexpected mix of signs with INDEX_DISABLE_TIMESTAMP for " + dataPTable.getName().getString() + " with " + indexesToPartiallyRebuild);
                                     }
 								    signOfDisableTimeStamp = Long.signum(disabledTimeStampVal);
 	                                disabledTimeStampVal = Math.abs(disabledTimeStampVal);
@@ -499,13 +499,13 @@ public class MetaDataRegionObserver implements RegionObserver,RegionCoprocessor
 							}
 							// No indexes are disabled, so skip this table
 							if (earliestDisableTimestamp == Long.MAX_VALUE) {
-		                        LOG.debug("No indexes are disabled so continuing");
+                                LOG.debug("No indexes are disabled so continuing");
 								continue;
 							}
 							long scanBeginTime = Math.max(0, earliestDisableTimestamp - backwardOverlapDurationMs);
                             long scanEndTime = Math.min(latestUpperBoundTimestamp,
                                     getTimestampForBatch(scanBeginTime,batchExecutedPerTableMap.get(dataPTable.getName())));
-							LOG.info("Starting to build " + dataPTable + " indexes " + indexesToPartiallyRebuild
+                            LOG.info("Starting to build " + dataPTable + " indexes " + indexesToPartiallyRebuild
 									+ " from timestamp=" + scanBeginTime + " until " + scanEndTime);
 							
 							TableRef tableRef = new TableRef(null, dataPTable, HConstants.LATEST_TIMESTAMP, false);
@@ -546,7 +546,7 @@ public class MetaDataRegionObserver implements RegionObserver,RegionCoprocessor
 								        IndexUtil.updateIndexState(conn, indexTableFullName, PIndexState.ACTIVE, 0L,
 								            latestUpperBoundTimestamp);
 								        batchExecutedPerTableMap.remove(dataPTable.getName());
-								        LOG.info("Making Index:" + indexPTable.getTableName() + " active after rebuilding");
+                                        LOG.info("Making Index:" + indexPTable.getTableName() + " active after rebuilding");
 								    } else {
 								        // Increment timestamp so that client sees updated disable timestamp
 								        IndexUtil.updateIndexState(conn, indexTableFullName, indexPTable.getIndexState(),
@@ -556,34 +556,34 @@ public class MetaDataRegionObserver implements RegionObserver,RegionCoprocessor
 								            noOfBatches = 0l;
 								        }
 								        batchExecutedPerTableMap.put(dataPTable.getName(), ++noOfBatches);
-								        LOG.info(
+                                        LOG.info(
 								            "During Round-robin build: Successfully updated index disabled timestamp  for "
 								                + indexTableFullName + " to " + scanEndTime);
 								    }
 								} catch (SQLException e) {
-								    LOG.error("Unable to rebuild " + dataPTable + " index " + indexTableFullName, e);
+                                    LOG.error("Unable to rebuild " + dataPTable + " index " + indexTableFullName, e);
 								}
 							}
 						} catch (Exception e) {
-							LOG.error("Unable to rebuild " + dataPTable + " indexes " + indexesToPartiallyRebuild, e);
+                            LOG.error("Unable to rebuild " + dataPTable + " indexes " + indexesToPartiallyRebuild, e);
 						}
 					}
 				}
 			} catch (Throwable t) {
-				LOG.warn("ScheduledBuildIndexTask failed!", t);
+                LOG.warn("ScheduledBuildIndexTask failed!", t);
 			} finally {
 				if (scanner != null) {
 					try {
 						scanner.close();
 					} catch (IOException ignored) {
-						LOG.debug("ScheduledBuildIndexTask can't close scanner.", ignored);
+                        LOG.debug("ScheduledBuildIndexTask can't close scanner.", ignored);
 					}
 				}
 				if (conn != null) {
 					try {
 						conn.close();
 					} catch (SQLException ignored) {
-						LOG.debug("ScheduledBuildIndexTask can't close connection", ignored);
+                        LOG.debug("ScheduledBuildIndexTask can't close connection", ignored);
 					}
 				}
 			}
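
MetaDataRegionObserver keeps the existing string-concatenated messages, guarded by level checks where the arguments are costly to build. slf4j also supports parameterized messages, which defer formatting until the level is known to be enabled; the commit does not switch to them, but a hedged sketch of both forms (names below are illustrative):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class RebuildLoggingSketch {
        private static final Logger LOG = LoggerFactory.getLogger(RebuildLoggingSketch.class);

        void report(String table, long scanBeginTime, long scanEndTime) {
            // style kept by this commit: concatenation, optionally behind a guard
            if (LOG.isInfoEnabled()) {
                LOG.info("Starting to build " + table + " from timestamp=" + scanBeginTime
                        + " until " + scanEndTime);
            }
            // equivalent slf4j parameterized form; no guard needed because the
            // message is only formatted when INFO is enabled
            LOG.info("Starting to build {} from timestamp={} until {}",
                    table, scanBeginTime, scanEndTime);
        }
    }
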
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
index dad663d..997f5a0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
@@ -28,8 +28,6 @@ import java.util.Optional;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicReference;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.AuthUtil;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
@@ -69,6 +67,8 @@ import org.apache.phoenix.schema.PIndexState;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.util.MetaDataUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.protobuf.ByteString;
 import com.google.protobuf.RpcCallback;
@@ -81,9 +81,9 @@ public class PhoenixAccessController extends BaseMetaDataEndpointObserver {
     AtomicReference<ArrayList<MasterObserver>> accessControllers = new AtomicReference<>();
     private boolean accessCheckEnabled;
     private UserProvider userProvider;
-    public static final Log LOG = LogFactory.getLog(PhoenixAccessController.class);
-    private static final Log AUDITLOG =
-            LogFactory.getLog("SecurityLogger."+PhoenixAccessController.class.getName());
+    public static final Logger LOGGER = LoggerFactory.getLogger(PhoenixAccessController.class);
+    private static final Logger AUDITLOG =
+            LoggerFactory.getLogger("SecurityLogger."+PhoenixAccessController.class.getName());
     
     @Override
     public Optional<MetaDataEndpointObserver> getPhoenixObserver() {
@@ -122,7 +122,7 @@ public class PhoenixAccessController extends BaseMetaDataEndpointObserver {
         this.accessCheckEnabled = conf.getBoolean(QueryServices.PHOENIX_ACLS_ENABLED,
                 QueryServicesOptions.DEFAULT_PHOENIX_ACLS_ENABLED);
         if (!this.accessCheckEnabled) {
-            LOG.warn("PhoenixAccessController has been loaded with authorization checks disabled.");
+            LOGGER.warn("PhoenixAccessController has been loaded with authorization checks disabled.");
         }
         if (env instanceof PhoenixMetaDataControllerEnvironment) {
             this.env = (PhoenixMetaDataControllerEnvironment)env;
@@ -535,8 +535,8 @@ public class PhoenixAccessController extends BaseMetaDataEndpointObserver {
                 }
               }
             }
-        } else if (LOG.isDebugEnabled()) {
-            LOG.debug("No permissions found for table=" + table + " or namespace=" + table.getNamespaceAsString());
+        } else if (LOGGER.isDebugEnabled()) {
+            LOGGER.debug("No permissions found for table=" + table + " or namespace=" + table.getNamespaceAsString());
         }
         return false;
     }
@@ -561,7 +561,7 @@ public class PhoenixAccessController extends BaseMetaDataEndpointObserver {
     }
 
     private static final class Superusers {
-        private static final Log LOG = LogFactory.getLog(Superusers.class);
+        private static final Logger LOGGER = LoggerFactory.getLogger(Superusers.class);
 
         /** Configuration key for superusers */
         public static final String SUPERUSER_CONF_KEY = org.apache.hadoop.hbase.security.Superusers.SUPERUSER_CONF_KEY; // Not getting a name
@@ -589,8 +589,8 @@ public class PhoenixAccessController extends BaseMetaDataEndpointObserver {
                     + "authorization checks for internal operations will not work correctly!");
             }
 
-            if (LOG.isTraceEnabled()) {
-                LOG.trace("Current user name is " + systemUser.getShortName());
+            if (LOGGER.isTraceEnabled()) {
+                LOGGER.trace("Current user name is " + systemUser.getShortName());
             }
             String currentUser = systemUser.getShortName();
             String[] superUserList = conf.getStrings(SUPERUSER_CONF_KEY, new String[0]);
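
In PhoenixAccessController the change is the commons-logging to slf4j swap itself, including the named "SecurityLogger." audit logger. A short sketch of the replacement declaration; the surrounding class name is illustrative:

    // before this commit (commons-logging):
    //   private static final Log AUDITLOG =
    //           LogFactory.getLog("SecurityLogger." + AccessControllerSketch.class.getName());

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class AccessControllerSketch {
        public static final Logger LOGGER =
                LoggerFactory.getLogger(AccessControllerSketch.class);
        // slf4j resolves a logger by name the same way, so the audit logger keeps
        // its "SecurityLogger." prefix after the migration
        private static final Logger AUDITLOG =
                LoggerFactory.getLogger("SecurityLogger." + AccessControllerSketch.class.getName());
    }
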
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
index 08fa321..d985395 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
@@ -24,8 +24,6 @@ import java.util.List;
 import java.util.NavigableMap;
 import java.util.Optional;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
@@ -46,6 +44,8 @@ import org.apache.phoenix.schema.PColumn;
 import org.apache.phoenix.schema.PColumnImpl;
 import org.apache.phoenix.util.ScanUtil;
 import org.apache.phoenix.util.ServerUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static org.apache.phoenix.schema.types.PDataType.TRUE_BYTES;
 
@@ -61,7 +61,7 @@ import static org.apache.phoenix.schema.types.PDataType.TRUE_BYTES;
  */
 public class ScanRegionObserver extends BaseScannerRegionObserver implements RegionCoprocessor {
 
-    private static final Log LOG = LogFactory.getLog(ScanRegionObserver.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(ScanRegionObserver.class);
     public static final byte[] DYN_COLS_METADATA_CELL_QUALIFIER = Bytes.toBytes("D#");
     public static final String DYNAMIC_COLUMN_METADATA_STORED_FOR_MUTATION =
             "_DynColsMetadataStoredForMutation";
@@ -132,8 +132,8 @@ public class ScanRegionObserver extends BaseScannerRegionObserver implements Reg
             Put dynColShadowCellsPut = null;
             if (m instanceof Put && Bytes.equals(m.getAttribute(
                     DYNAMIC_COLUMN_METADATA_STORED_FOR_MUTATION), TRUE_BYTES)) {
-                if (LOG.isDebugEnabled()) {
-                    LOG.debug("Adding dynamic column metadata for table: " + tableName + ". Put :" +
+                if (LOGGER.isDebugEnabled()) {
+                    LOGGER.debug("Adding dynamic column metadata for table: " + tableName + ". Put :" +
                             m.toString());
                 }
                 NavigableMap<byte[], List<Cell>> famCellMap = m.getFamilyCellMap();
@@ -181,8 +181,8 @@ public class ScanRegionObserver extends BaseScannerRegionObserver implements Reg
         ByteArrayOutputStream qual = new ByteArrayOutputStream();
         qual.write(DYN_COLS_METADATA_CELL_QUALIFIER);
         qual.write(dynCol.getColumnQualifierBytes());
-        if (LOG.isTraceEnabled()) {
-            LOG.trace("Storing shadow cell for dynamic column metadata for dynamic column : " +
+        if (LOGGER.isTraceEnabled()) {
+            LOGGER.trace("Storing shadow cell for dynamic column metadata for dynamic column : " +
                     dynCol.getFamilyName().getString() + "." + dynCol.getName().getString());
         }
         return qual.toByteArray();
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index f739d38..d45f047 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -214,7 +214,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
     private int scansReferenceCount = 0;
     @GuardedBy("lock")
     private boolean isRegionClosingOrSplitting = false;
-    private static final Logger logger = LoggerFactory.getLogger(UngroupedAggregateRegionObserver.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(UngroupedAggregateRegionObserver.class);
     private KeyValueBuilder kvBuilder;
     private Configuration upsertSelectConfig;
     private Configuration compactionConfig;
@@ -293,7 +293,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
           }
       }
       // TODO: should we use the one that is all or none?
-      logger.debug("Committing batch of " + mutations.size() + " mutations for " + region.getRegionInfo().getTable().getNameAsString());
+      LOGGER.debug("Committing batch of " + mutations.size() + " mutations for " + region.getRegionInfo().getTable().getNameAsString());
       region.batchMutate(mutations.toArray(mutationArray));
     }
 
@@ -321,7 +321,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
           return;
       }
 
-        logger.debug("Committing batch of " + mutations.size() + " mutations for " + table);
+        LOGGER.debug("Committing batch of " + mutations.size() + " mutations for " + table);
         try {
             table.batch(mutations, null);
         } catch (InterruptedException e) {
@@ -448,7 +448,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
         byte[] descRowKeyTableBytes = scan.getAttribute(UPGRADE_DESC_ROW_KEY);
         boolean isDescRowKeyOrderUpgrade = descRowKeyTableBytes != null;
         if (isDescRowKeyOrderUpgrade) {
-            logger.debug("Upgrading row key for " + region.getRegionInfo().getTable().getNameAsString());
+            LOGGER.debug("Upgrading row key for " + region.getRegionInfo().getTable().getNameAsString());
             projectedTable = deserializeTable(descRowKeyTableBytes);
             try {
                 writeToTable = PTableImpl.builderWithColumns(projectedTable,
@@ -570,8 +570,8 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
             rowAggregators = aggregators.getAggregators();
             Pair<Integer, Integer> minMaxQualifiers = EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan);
             Tuple result = useQualifierAsIndex ? new PositionBasedMultiKeyValueTuple() : new MultiKeyValueTuple();
-            if (logger.isDebugEnabled()) {
-                logger.debug(LogUtil.addCustomAnnotations("Starting ungrouped coprocessor scan " + scan + " "+region.getRegionInfo(), ScanUtil.getCustomAnnotations(scan)));
+            if (LOGGER.isDebugEnabled()) {
+                LOGGER.debug(LogUtil.addCustomAnnotations("Starting ungrouped coprocessor scan " + scan + " "+region.getRegionInfo(), ScanUtil.getCustomAnnotations(scan)));
             }
             boolean useIndexProto = true;
             byte[] indexMaintainersPtr = scan.getAttribute(PhoenixIndexCodec.INDEX_PROTO_MD);
@@ -850,7 +850,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
                 synchronized (lock) {
                     scansReferenceCount--;
                     if (scansReferenceCount < 0) {
-                        logger.warn(
+                        LOGGER.warn(
                             "Scan reference count went below zero. Something isn't correct. Resetting it back to zero");
                         scansReferenceCount = 0;
                     }
@@ -868,8 +868,8 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
                 }
             }
         }
-        if (logger.isDebugEnabled()) {
-            logger.debug(LogUtil.addCustomAnnotations("Finished scanning " + rowCount + " rows for ungrouped coprocessor scan " + scan, ScanUtil.getCustomAnnotations(scan)));
+        if (LOGGER.isDebugEnabled()) {
+            LOGGER.debug(LogUtil.addCustomAnnotations("Finished scanning " + rowCount + " rows for ungrouped coprocessor scan " + scan, ScanUtil.getCustomAnnotations(scan)));
         }
 
         final boolean hadAny = hasAny;
@@ -910,7 +910,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
             try {
                 res.close();
             } catch (IOException e) {
-                logger.error("Closing resource: " + res + " failed: ", e);
+                LOGGER.error("Closing resource: " + res + " failed: ", e);
             }
         }
     }
@@ -1068,8 +1068,8 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
               } catch (Exception e) {
                 // If we can't reach the stats table, don't interrupt the normal
                 // compaction operation, just log a warning.
-                if (logger.isWarnEnabled()) {
-                  logger.warn("Unable to collect stats for " + table, e);
+                if (LOGGER.isWarnEnabled()) {
+                  LOGGER.warn("Unable to collect stats for " + table, e);
                 }
               }
               return internalScanner;
@@ -1161,7 +1161,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
                 }
             }
         } catch (IOException e) {
-            logger.error("IOException during rebuilding: " + Throwables.getStackTraceAsString(e));
+            LOGGER.error("IOException during rebuilding: " + Throwables.getStackTraceAsString(e));
             throw e;
         } finally {
             region.closeRegionOperation();
@@ -1221,7 +1221,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
             }
         } else {
             rowCount = CONCURRENT_UPDATE_STATS_ROW_COUNT;
-            logger.info("UPDATE STATISTICS didn't run because another UPDATE STATISTICS command was already running on the region "
+            LOGGER.info("UPDATE STATISTICS didn't run because another UPDATE STATISTICS command was already running on the region "
                     + region.getRegionInfo().getRegionNameAsString());
         }
         byte[] rowCountBytes = PLong.INSTANCE.toBytes(Long.valueOf(rowCount));
@@ -1318,18 +1318,18 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
                 }
                 return compactionRunning ? COMPACTION_UPDATE_STATS_ROW_COUNT : rowCount;
             } catch (IOException e) {
-                logger.error("IOException in update stats: " + Throwables.getStackTraceAsString(e));
+                LOGGER.error("IOException in update stats: " + Throwables.getStackTraceAsString(e));
                 throw e;
             } finally {
                 try {
                     if (noErrors && !compactionRunning) {
                         statsCollector.updateStatistics(region, scan);
-                        logger.info("UPDATE STATISTICS finished successfully for scanner: "
+                        LOGGER.info("UPDATE STATISTICS finished successfully for scanner: "
                                 + innerScanner + ". Number of rows scanned: " + rowCount
                                 + ". Time: " + (System.currentTimeMillis() - startTime));
                     }
                     if (compactionRunning) {
-                        logger.info("UPDATE STATISTICS stopped in between because major compaction was running for region "
+                        LOGGER.info("UPDATE STATISTICS stopped in between because major compaction was running for region "
                                 + region.getRegionInfo().getRegionNameAsString());
                     }
                 } finally {
@@ -1454,7 +1454,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
                         // FIXME need to handle views and indexes on views as well
                         for (PTable index : indexes) {
                             if (index.getIndexDisableTimestamp() != 0) {
-                                logger.info(
+                                LOGGER.info(
                                     "Modifying major compaction scanner to retain deleted cells for a table with disabled index: "
                                             + fullTableName);
                                 options.setKeepDeletedCells(KeepDeletedCells.TRUE);
@@ -1464,10 +1464,10 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
                         }
                     } catch (Exception e) {
                         if (e instanceof TableNotFoundException) {
-                            logger.debug("Ignoring HBase table that is not a Phoenix table: " + fullTableName);
+                            LOGGER.debug("Ignoring HBase table that is not a Phoenix table: " + fullTableName);
                             // non-Phoenix HBase tables won't be found, do nothing
                         } else {
-                            logger.error("Unable to modify compaction scanner to retain deleted cells for a table with disabled Index; "
+                            LOGGER.error("Unable to modify compaction scanner to retain deleted cells for a table with disabled Index; "
                                     + fullTableName,
                                     e);
                         }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/tasks/DropChildViewsTask.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/tasks/DropChildViewsTask.java
index f00e1f6..121efd4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/tasks/DropChildViewsTask.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/tasks/DropChildViewsTask.java
@@ -1,7 +1,5 @@
 package org.apache.phoenix.coprocessor.tasks;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.phoenix.coprocessor.MetaDataEndpointImpl;
 import org.apache.phoenix.coprocessor.MetaDataProtocol;
 import org.apache.phoenix.coprocessor.TaskRegionObserver;
@@ -10,6 +8,8 @@ import org.apache.phoenix.schema.MetaDataClient;
 import org.apache.phoenix.schema.task.Task;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.QueryUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.sql.SQLException;
 import java.sql.Timestamp;
@@ -20,7 +20,7 @@ import java.util.Properties;
  *
  */
 public class DropChildViewsTask extends BaseTask {
-    public static final Log LOG = LogFactory.getLog(DropChildViewsTask.class);
+    public static final Logger LOGGER = LoggerFactory.getLogger(DropChildViewsTask.class);
 
     public TaskRegionObserver.TaskResult run(Task.TaskRecord taskRecord) {
         PhoenixConnection pconn = null;
@@ -44,14 +44,14 @@ public class DropChildViewsTask extends BaseTask {
                 return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.SUCCESS, "");
             } else if (System.currentTimeMillis() < timeMaxInterval + timestamp.getTime()) {
                 // skip this task as it has not been expired and its parent table has not been dropped yet
-                LOG.info("Skipping a child view drop task. The parent table has not been dropped yet : " +
+                LOGGER.info("Skipping a child view drop task. The parent table has not been dropped yet : " +
                         taskRecord.getSchemaName() + "." + taskRecord.getTableName() +
                         " with tenant id " + (tenantId == null ? " IS NULL" : tenantId) +
                         " and timestamp " + timestamp.toString());
                 return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.SKIPPED, "");
             }
             else {
-                LOG.warn(" A drop child view task has expired and will be marked as failed : " +
+                LOGGER.warn(" A drop child view task has expired and will be marked as failed : " +
                         taskRecord.getSchemaName() + "." + taskRecord.getTableName() +
                         " with tenant id " + (tenantId == null ? " IS NULL" : tenantId) +
                         " and timestamp " + timestamp.toString());
@@ -59,7 +59,7 @@ public class DropChildViewsTask extends BaseTask {
             }
         }
         catch (Throwable t) {
-            LOG.warn("Exception while dropping a child view task. " +
+            LOGGER.warn("Exception while dropping a child view task. " +
                     taskRecord.getSchemaName()  + "." + taskRecord.getTableName() +
                     " with tenant id " + (taskRecord.getTenantId() == null ? " IS NULL" : taskRecord.getTenantId()) +
                     " and timestamp " + timestamp.toString(), t);
@@ -69,7 +69,7 @@ public class DropChildViewsTask extends BaseTask {
                 try {
                     pconn.close();
                 } catch (SQLException ignored) {
-                    LOG.debug("DropChildViewsTask can't close connection", ignored);
+                    LOGGER.debug("DropChildViewsTask can't close connection", ignored);
                 }
             }
         }
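
DropChildViewsTask logs ignored close failures at DEBUG with the exception attached, so the trace is still available when debugging. A minimal sketch of that cleanup pattern; the helper and class names are illustrative:

    import java.sql.Connection;
    import java.sql.SQLException;

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class CloseLoggingSketch {
        private static final Logger LOGGER = LoggerFactory.getLogger(CloseLoggingSketch.class);

        void closeQuietly(Connection conn) {
            if (conn == null) {
                return;
            }
            try {
                conn.close();
            } catch (SQLException ignored) {
                // attach the exception so the stack trace shows up at DEBUG
                LOGGER.debug("Unable to close connection", ignored);
            }
        }
    }
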
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/tasks/IndexRebuildTask.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/tasks/IndexRebuildTask.java
index 754ea8e..559c28c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/tasks/IndexRebuildTask.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/tasks/IndexRebuildTask.java
@@ -3,8 +3,6 @@ package org.apache.phoenix.coprocessor.tasks;
 import com.google.common.base.Strings;
 import com.google.gson.JsonObject;
 import com.google.gson.JsonParser;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.mapreduce.Cluster;
@@ -16,6 +14,8 @@ import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.task.Task;
 import org.apache.phoenix.util.QueryUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.sql.Connection;
 import java.sql.SQLException;
@@ -31,7 +31,7 @@ public class IndexRebuildTask extends BaseTask  {
     public static final String DISABLE_BEFORE = "DisableBefore";
     public static final String REBUILD_ALL = "RebuildAll";
 
-    public static final Log LOG = LogFactory.getLog(IndexRebuildTask.class);
+    public static final Logger LOGGER = LoggerFactory.getLogger(IndexRebuildTask.class);
 
     @Override
     public TaskRegionObserver.TaskResult run(Task.TaskRecord taskRecord) {
@@ -54,7 +54,7 @@ public class IndexRebuildTask extends BaseTask  {
             if (Strings.isNullOrEmpty(indexName)) {
                 String str = "Index name is not found. Index rebuild cannot continue " +
                         "Data : " + data;
-                LOG.warn(str);
+                LOGGER.warn(str);
                 return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.FAIL, str);
             }
 
@@ -94,7 +94,7 @@ public class IndexRebuildTask extends BaseTask  {
             return null;
         }
         catch (Throwable t) {
-            LOG.warn("Exception while running index rebuild task. " +
+            LOGGER.warn("Exception while running index rebuild task. " +
                     "It will be retried in the next system task table scan : " +
                     taskRecord.getSchemaName() + "." + taskRecord.getTableName() +
                     " with tenant id " + (taskRecord.getTenantId() == null ? " IS NULL" : taskRecord.getTenantId()) +
@@ -105,7 +105,7 @@ public class IndexRebuildTask extends BaseTask  {
                 try {
                     conn.close();
                 } catch (SQLException e) {
-                    LOG.debug("IndexRebuildTask can't close connection");
+                    LOGGER.debug("IndexRebuildTask can't close connection");
                 }
             }
         }
@@ -148,7 +148,7 @@ public class IndexRebuildTask extends BaseTask  {
 
             if (job != null && job.isComplete()) {
                 if (job.isSuccessful()) {
-                    LOG.warn("IndexRebuildTask checkCurrentResult job is successful " + taskRecord.getTableName());
+                    LOGGER.warn("IndexRebuildTask checkCurrentResult job is successful " + taskRecord.getTableName());
                     return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.SUCCESS, "");
                 } else {
                     return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.FAIL,
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java
index fb722d3..5d4d1d3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java
@@ -90,7 +90,7 @@ public class AggregatePlan extends BaseQueryPlan {
     private final Expression having;
     private List<KeyRange> splits;
     private List<List<Scan>> scans;
-    private static final Logger logger = LoggerFactory.getLogger(AggregatePlan.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(AggregatePlan.class);
     private boolean isSerial;
     private OrderBy actualOutputOrderBy;
 
@@ -112,7 +112,7 @@ public class AggregatePlan extends BaseQueryPlan {
         boolean hasSerialHint = statement.getHint().hasHint(HintNode.Hint.SERIAL);
         boolean canBeExecutedSerially = ScanUtil.canQueryBeExecutedSerially(table.getTable(), orderBy, context); 
         if (hasSerialHint && !canBeExecutedSerially) {
-            logger.warn("This query cannot be executed serially. Ignoring the hint");
+            LOGGER.warn("This query cannot be executed serially. Ignoring the hint");
         }
         this.isSerial = hasSerialHint && canBeExecutedSerially;
         this.actualOutputOrderBy = convertActualOutputOrderBy(orderBy, groupBy, context);
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
index 7674e4c..bf28029 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
@@ -27,8 +27,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -81,6 +79,8 @@ import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.LogUtil;
 import org.apache.phoenix.util.SQLCloseables;
 import org.apache.phoenix.util.ScanUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Lists;
@@ -95,7 +95,7 @@ import com.google.common.collect.Lists;
  * @since 0.1
  */
 public abstract class BaseQueryPlan implements QueryPlan {
-	private static final Log LOG = LogFactory.getLog(BaseQueryPlan.class);
+	private static final Logger LOGGER = LoggerFactory.getLogger(BaseQueryPlan.class);
     protected static final long DEFAULT_ESTIMATED_SIZE = 10 * 1024; // 10 K
     
     protected final TableRef tableRef;
@@ -357,13 +357,13 @@ public abstract class BaseQueryPlan implements QueryPlan {
             }
         }
         
-        if (LOG.isDebugEnabled()) {
-        	LOG.debug(LogUtil.addCustomAnnotations("Scan ready for iteration: " + scan, connection));
+        if (LOGGER.isDebugEnabled()) {
+            LOGGER.debug(LogUtil.addCustomAnnotations("Scan ready for iteration: " + scan, connection));
         }
         
         ResultIterator iterator =  newIterator(scanGrouper, scan, caches);
-        if (LOG.isDebugEnabled()) {
-        	LOG.debug(LogUtil.addCustomAnnotations("Iterator ready: " + iterator, connection));
+        if (LOGGER.isDebugEnabled()) {
+            LOGGER.debug(LogUtil.addCustomAnnotations("Iterator ready: " + iterator, connection));
         }
 
         // wrap the iterator so we start/end tracing as we expect
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
index b5cd6b1..2117d22 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
@@ -34,8 +34,6 @@ import java.util.concurrent.Future;
 import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.commons.codec.binary.Hex;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -83,6 +81,8 @@ import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.types.PVarbinary;
 import org.apache.phoenix.util.CostUtil;
 import org.apache.phoenix.util.SQLCloseables;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
@@ -90,7 +90,7 @@ import com.google.common.collect.Sets;
 import org.apache.phoenix.util.ServerUtil;
 
 public class HashJoinPlan extends DelegateQueryPlan {
-    private static final Log LOG = LogFactory.getLog(HashJoinPlan.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(HashJoinPlan.class);
     private static final Random RANDOM = new Random();
 
     private final SelectStatement statement;
@@ -553,9 +553,9 @@ public class HashJoinPlan extends DelegateQueryPlan {
                     } else {
                         cacheId = Bytes.toBytes(RANDOM.nextLong());
                     }
-                    LOG.debug("Using cache ID " + Hex.encodeHexString(cacheId) + " for " + queryString);
+                    LOGGER.debug("Using cache ID " + Hex.encodeHexString(cacheId) + " for " + queryString);
                     if (cache == null) {
-                        LOG.debug("Making RPC to add cache " + Hex.encodeHexString(cacheId));
+                        LOGGER.debug("Making RPC to add cache " + Hex.encodeHexString(cacheId));
                         cache = parent.hashClient.addHashCache(ranges, cacheId, iterator,
                                 plan.getEstimatedSize(), hashExpressions, singleValueOnly, usePersistentCache,
                                 parent.delegate.getTableRef().getTable(), keyRangeRhsExpression,
@@ -564,7 +564,7 @@ public class HashJoinPlan extends DelegateQueryPlan {
                         boolean isSet = parent.firstJobEndTime.compareAndSet(0, endTime);
                         if (!isSet && (endTime
                                 - parent.firstJobEndTime.get()) > parent.maxServerCacheTimeToLive) {
-                            LOG.warn(addCustomAnnotations(
+                            LOGGER.warn(addCustomAnnotations(
                                 "Hash plan [" + index
                                         + "] execution seems too slow. Earlier hash cache(s) might have expired on servers.",
                                 parent.delegate.getContext().getConnection()));
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
index df084a3..2769ca1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
@@ -115,7 +115,7 @@ import com.google.common.collect.Sets;
  * Tracks the uncommitted state
  */
 public class MutationState implements SQLCloseable {
-    private static final Logger logger = LoggerFactory.getLogger(MutationState.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(MutationState.class);
     private static final int[] EMPTY_STATEMENT_INDEX_ARRAY = new int[0];
     private static final int MAX_COMMIT_RETRIES = 3;
 
@@ -1052,8 +1052,8 @@ public class MutationState implements SQLCloseable {
                             itrListMutation.remove();
 
                             batchCount++;
-                            if (logger.isDebugEnabled())
-                                logger.debug("Sent batch of " + mutationBatch.size() + " for "
+                            if (LOGGER.isDebugEnabled())
+                                LOGGER.debug("Sent batch of " + mutationBatch.size() + " for "
                                         + Bytes.toString(htableName));
                         }
                         child.stop();
@@ -1086,7 +1086,7 @@ public class MutationState implements SQLCloseable {
                                 // If it fails again, we don't retry.
                                 String msg = "Swallowing exception and retrying after clearing meta cache on connection. "
                                         + inferredE;
-                                logger.warn(LogUtil.addCustomAnnotations(msg, connection));
+                                LOGGER.warn(LogUtil.addCustomAnnotations(msg, connection));
                                 connection.getQueryServices().clearTableRegionCache(TableName.valueOf(htableName));
 
                                 // add a new child span as this one failed
@@ -1265,8 +1265,8 @@ public class MutationState implements SQLCloseable {
                             finishSuccessful = true;
                         }
                     } catch (SQLException e) {
-                        if (logger.isInfoEnabled())
-                            logger.info(e.getClass().getName() + " at timestamp " + getInitialWritePointer()
+                        if (LOGGER.isInfoEnabled())
+                            LOGGER.info(e.getClass().getName() + " at timestamp " + getInitialWritePointer()
                                     + " with retry count of " + retryCount);
                         retryCommit = (e.getErrorCode() == SQLExceptionCode.TRANSACTION_CONFLICT_EXCEPTION
                                 .getErrorCode() && retryCount < MAX_COMMIT_RETRIES);
@@ -1280,9 +1280,9 @@ public class MutationState implements SQLCloseable {
                         if (!finishSuccessful) {
                             try {
                                 phoenixTransactionContext.abort();
-                                if (logger.isInfoEnabled()) logger.info("Abort successful");
+                                if (LOGGER.isInfoEnabled()) LOGGER.info("Abort successful");
                             } catch (SQLException e) {
-                                if (logger.isInfoEnabled()) logger.info("Abort failed with " + e);
+                                if (LOGGER.isInfoEnabled()) LOGGER.info("Abort failed with " + e);
                                 if (sqlE == null) {
                                     sqlE = e;
                                 } else {
@@ -1336,7 +1336,7 @@ public class MutationState implements SQLCloseable {
      * @throws SQLException
      */
     private boolean shouldResubmitTransaction(Set<TableRef> txTableRefs) throws SQLException {
-        if (logger.isInfoEnabled()) logger.info("Checking for index updates as of " + getInitialWritePointer());
+        if (LOGGER.isInfoEnabled()) LOGGER.info("Checking for index updates as of " + getInitialWritePointer());
         MetaDataClient client = new MetaDataClient(connection);
         PMetaData cache = connection.getMetaDataCache();
         boolean addedAnyIndexes = false;
@@ -1363,13 +1363,13 @@ public class MutationState implements SQLCloseable {
                 // that an index was dropped and recreated with the same name but different
                 // indexed/covered columns.
                 addedAnyIndexes = (!oldIndexes.equals(updatedDataTable.getIndexes()));
-                if (logger.isInfoEnabled())
-                    logger.info((addedAnyIndexes ? "Updates " : "No updates ") + "as of " + timestamp + " to "
+                if (LOGGER.isInfoEnabled())
+                    LOGGER.info((addedAnyIndexes ? "Updates " : "No updates ") + "as of " + timestamp + " to "
                             + updatedDataTable.getName().getString() + " with indexes " + updatedDataTable.getIndexes());
             }
         }
-        if (logger.isInfoEnabled())
-            logger.info((addedAnyIndexes ? "Updates " : "No updates ") + "to indexes as of " + getInitialWritePointer()
+        if (LOGGER.isInfoEnabled())
+            LOGGER.info((addedAnyIndexes ? "Updates " : "No updates ") + "to indexes as of " + getInitialWritePointer()
                     + " over " + (allImmutableTables ? " all immutable tables" : " some mutable tables"));
         // If all tables are immutable, we know the conflict we got was due to our DDL/DML fence.
         // If any indexes were added, then the conflict might be due to DDL/DML fence.
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/ScanPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/ScanPlan.java
index 38d47c9..d2019fc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/ScanPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/ScanPlan.java
@@ -88,7 +88,7 @@ import org.slf4j.LoggerFactory;
  * @since 0.1
  */
 public class ScanPlan extends BaseQueryPlan {
-    private static final Logger logger = LoggerFactory.getLogger(ScanPlan.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(ScanPlan.class);
     private List<KeyRange> splits;
     private List<List<Scan>> scans;
     private boolean allowPageFilter;
@@ -139,7 +139,7 @@ public class ScanPlan extends BaseQueryPlan {
             boolean canBeExecutedSerially = ScanUtil.canQueryBeExecutedSerially(table, orderBy, context); 
             if (!canBeExecutedSerially) { 
                 if (hasSerialHint) {
-                    logger.warn("This query cannot be executed serially. Ignoring the hint");
+                    LOGGER.warn("This query cannot be executed serially. Ignoring the hint");
                 }
                 return false;
             }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/LikeExpression.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/LikeExpression.java
index 532012f..fa72c83 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/LikeExpression.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/LikeExpression.java
@@ -51,7 +51,7 @@ import com.google.common.collect.Lists;
  * @since 0.1
  */
 public abstract class LikeExpression extends BaseCompoundExpression {
-    private static final Logger logger = LoggerFactory.getLogger(LikeExpression.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(LikeExpression.class);
 
     private static final String ZERO_OR_MORE = "\\E.*\\Q";
     private static final String ANY_ONE = "\\E.\\Q";
@@ -267,15 +267,15 @@ public abstract class LikeExpression extends BaseCompoundExpression {
         AbstractBasePattern pattern = this.pattern;
         if (pattern == null) { // TODO: don't allow? this is going to be slooowwww
             if (!getPatternExpression().evaluate(tuple, ptr)) {
-                if (logger.isTraceEnabled()) {
-                    logger.trace("LIKE is FALSE: pattern is null");
+                if (LOGGER.isTraceEnabled()) {
+                    LOGGER.trace("LIKE is FALSE: pattern is null");
                 }
                 return false;
             }
             String value = (String) PVarchar.INSTANCE.toObject(ptr, getPatternExpression().getSortOrder());
             pattern = compilePattern(value);
-            if (logger.isTraceEnabled()) {
-                logger.trace("LIKE pattern is expression: " + pattern.pattern());
+            if (LOGGER.isTraceEnabled()) {
+                LOGGER.trace("LIKE pattern is expression: " + pattern.pattern());
             }
         }
 
@@ -283,21 +283,21 @@ public abstract class LikeExpression extends BaseCompoundExpression {
         SortOrder strSortOrder = strExpression.getSortOrder();
         PVarchar strDataType = PVarchar.INSTANCE;
         if (!strExpression.evaluate(tuple, ptr)) {
-            if (logger.isTraceEnabled()) {
-                logger.trace("LIKE is FALSE: child expression is null");
+            if (LOGGER.isTraceEnabled()) {
+                LOGGER.trace("LIKE is FALSE: child expression is null");
             }
             return false;
         }
 
         String value = null;
-        if (logger.isTraceEnabled()) {
+        if (LOGGER.isTraceEnabled()) {
             value = (String) strDataType.toObject(ptr, strSortOrder);
         }
         strDataType.coerceBytes(ptr, strDataType, strSortOrder, SortOrder.ASC);
         pattern.matches(ptr);
-        if (logger.isTraceEnabled()) {
+        if (LOGGER.isTraceEnabled()) {
             boolean matched = ((Boolean) PBoolean.INSTANCE.toObject(ptr)).booleanValue();
-            logger.trace("LIKE(value='" + value + "'pattern='" + pattern.pattern() + "' is " + matched);
+            LOGGER.trace("LIKE(value='" + value + "'pattern='" + pattern.pattern() + "' is " + matched);
         }
         return true;
     }
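
    (Editorial aside, not part of the diff above: the isTraceEnabled() guards in LikeExpression are kept through the migration because the logged value is produced by a relatively expensive conversion. A minimal sketch of that pattern, with hypothetical names rather than Phoenix code, might look like:)

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Illustrative sketch only: the isTraceEnabled() guard keeps the expensive
    // value conversion off the hot path when TRACE logging is disabled, which is
    // why such guards survive the commons-logging -> slf4j migration unchanged.
    public class GuardedTraceExample {
        private static final Logger LOGGER = LoggerFactory.getLogger(GuardedTraceExample.class);

        void logDecodedInput(byte[] rowKey) {
            if (LOGGER.isTraceEnabled()) {
                // expensiveDecode() stands in for work such as PVarchar.toObject(...)
                LOGGER.trace("LIKE input decoded to: " + expensiveDecode(rowKey));
            }
        }

        private String expensiveDecode(byte[] bytes) {
            return new String(bytes, java.nio.charset.StandardCharsets.UTF_8);
        }
    }
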
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/aggregator/FirstLastValueServerAggregator.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/aggregator/FirstLastValueServerAggregator.java
index b41c6c6..f647c45 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/aggregator/FirstLastValueServerAggregator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/aggregator/FirstLastValueServerAggregator.java
@@ -42,7 +42,7 @@ import org.slf4j.LoggerFactory;
  */
 public class FirstLastValueServerAggregator extends BaseAggregator {
 
-    private static final Logger logger = LoggerFactory.getLogger(FirstLastValueServerAggregator.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(FirstLastValueServerAggregator.class);
     protected List<Expression> children;
     protected BinaryComparator topOrder = new BinaryComparator(ByteUtil.EMPTY_BYTE_ARRAY);
     protected byte[] topValue;
@@ -88,7 +88,7 @@ public class FirstLastValueServerAggregator extends BaseAggregator {
                 try {
                     addFlag = true;
                 } catch (Exception e) {
-                    logger.error(e.getMessage());
+                    LOGGER.error(e.getMessage());
                 }
             } else {
                 if (isAscending) {
@@ -180,7 +180,7 @@ public class FirstLastValueServerAggregator extends BaseAggregator {
         try {
             ptr.set(payload.getPayload());
         } catch (IOException ex) {
-            logger.error(ex.getMessage());
+            LOGGER.error(ex.getMessage());
             return false;
         }
         return true;
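
    (Aside on the hunk above, not a change made by this commit: slf4j, like commons-logging, treats a trailing Throwable argument specially and emits its stack trace, whereas logging only ex.getMessage() records the message text alone. A small sketch of the difference, under that assumption:)

    import java.io.IOException;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Hypothetical sketch: contrast logging only the message with passing the
    // exception itself as the final argument, which also logs the stack trace.
    public class ThrowableLoggingExample {
        private static final Logger LOGGER = LoggerFactory.getLogger(ThrowableLoggingExample.class);

        void report(IOException e) {
            LOGGER.error(e.getMessage());                          // message only
            LOGGER.error("Failed to read aggregator payload", e);  // message plus stack trace
        }
    }
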
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/aggregator/SizeTrackingServerAggregators.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/aggregator/SizeTrackingServerAggregators.java
index 983968b..e057173 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/aggregator/SizeTrackingServerAggregators.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/aggregator/SizeTrackingServerAggregators.java
@@ -25,7 +25,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 public class SizeTrackingServerAggregators extends ServerAggregators {
-    private static final Logger logger = LoggerFactory.getLogger(SizeTrackingServerAggregators.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(SizeTrackingServerAggregators.class);
 
     private final MemoryChunk chunk;
     private final int sizeIncrease;
@@ -50,7 +50,7 @@ public class SizeTrackingServerAggregators extends ServerAggregators {
             expressions[i].reset();
         }
         while(dsize > chunk.getSize()) {
-            logger.info("Request: {}, resizing {} by 1024*1024", dsize, chunk.getSize());
+            LOGGER.info("Request: {}, resizing {} by 1024*1024", dsize, chunk.getSize());
             chunk.resize(chunk.getSize() + sizeIncrease);
         }
         memoryUsed = dsize;
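
    (Aside on the parameterized message above: slf4j substitutes the {} placeholders only when the level is enabled, so no isInfoEnabled() guard is needed for cheap arguments. A minimal, self-contained sketch with made-up values:)

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Sketch only: {} placeholders defer message formatting until the logger
    // confirms INFO is enabled, so passing plain longs costs almost nothing
    // when the level is off.
    public class ParameterizedLoggingExample {
        private static final Logger LOGGER = LoggerFactory.getLogger(ParameterizedLoggingExample.class);

        public static void main(String[] args) {
            long requested = 4_194_304L;
            long current = 1_048_576L;
            LOGGER.info("Requested {} bytes, current chunk {} bytes", requested, current);
        }
    }
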
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/CollationKeyFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/CollationKeyFunction.java
index 570e30d..b2fd132 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/CollationKeyFunction.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/CollationKeyFunction.java
@@ -25,8 +25,6 @@ import java.util.List;
 import java.util.Locale;
 
 import org.apache.commons.lang3.BooleanUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.phoenix.expression.Expression;
 import org.apache.phoenix.parse.FunctionParseNode;
@@ -37,6 +35,8 @@ import org.apache.phoenix.schema.types.PInteger;
 import org.apache.phoenix.schema.types.PVarbinary;
 import org.apache.phoenix.schema.types.PVarchar;
 import org.apache.phoenix.util.VarBinaryFormatter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.force.db.i18n.LinguisticSort;
 import com.force.i18n.LocaleUtils;
@@ -87,7 +87,7 @@ import com.force.i18n.LocaleUtils;
 		@FunctionParseNode.Argument(allowedTypes = { PInteger.class }, defaultValue = "null", isConstant = true) })
 public class CollationKeyFunction extends ScalarFunction {
 
-	private static final Log LOG = LogFactory.getLog(CollationKeyFunction.class);
+	private static final Logger LOGGER = LoggerFactory.getLogger(CollationKeyFunction.class);
 
 	public static final String NAME = "COLLATION_KEY";
 
@@ -114,8 +114,8 @@ public class CollationKeyFunction extends ScalarFunction {
 			return false;
 		}
 		String inputString = (String) PVarchar.INSTANCE.toObject(ptr, expression.getSortOrder());
-		if (LOG.isTraceEnabled()) {
-			LOG.trace("CollationKey inputString: " + inputString);
+		if (LOGGER.isTraceEnabled()) {
+			LOGGER.trace("CollationKey inputString: " + inputString);
 		}
 
 		if (inputString == null) {
@@ -124,8 +124,8 @@ public class CollationKeyFunction extends ScalarFunction {
 
 		byte[] collationKeyByteArray = collator.getCollationKey(inputString).toByteArray();
 
-		if (LOG.isTraceEnabled()) {
-			LOG.trace("CollationKey bytes: " + VarBinaryFormatter.INSTANCE.format(collationKeyByteArray));
+		if (LOGGER.isTraceEnabled()) {
+			LOGGER.trace("CollationKey bytes: " + VarBinaryFormatter.INSTANCE.format(collationKeyByteArray));
 		}
 
 		ptr.set(collationKeyByteArray);
@@ -138,19 +138,19 @@ public class CollationKeyFunction extends ScalarFunction {
 		Integer collatorStrength = getLiteralValue(3, Integer.class);
 		Integer collatorDecomposition = getLiteralValue(4, Integer.class);
 
-		if (LOG.isTraceEnabled()) {
+		if (LOGGER.isTraceEnabled()) {
 			StringBuilder logInputsMessage = new StringBuilder();
 			logInputsMessage.append("Input (literal) arguments:").append("localeISOCode: " + localeISOCode)
 					.append(", useSpecialUpperCaseCollator: " + useSpecialUpperCaseCollator)
 					.append(", collatorStrength: " + collatorStrength)
 					.append(", collatorDecomposition: " + collatorDecomposition);
-			LOG.trace(logInputsMessage);
+			LOGGER.trace(logInputsMessage.toString());
 		}
 
 		Locale locale = LocaleUtils.get().getLocaleByIsoCode(localeISOCode);
 
-		if (LOG.isTraceEnabled()) {
-			LOG.trace(String.format("Locale: " + locale.toLanguageTag()));
+		if (LOGGER.isTraceEnabled()) {
+			LOGGER.trace(String.format("Locale: " + locale.toLanguageTag()));
 		}
 
 		LinguisticSort linguisticSort = LinguisticSort.get(locale);
@@ -166,8 +166,8 @@ public class CollationKeyFunction extends ScalarFunction {
 			collator.setDecomposition(collatorDecomposition);
 		}
 
-		if (LOG.isTraceEnabled()) {
-			LOG.trace(String.format("Collator: [strength: %d, decomposition: %d], Special-Upper-Case: %s",
+		if (LOGGER.isTraceEnabled()) {
+			LOGGER.trace(String.format("Collator: [strength: %d, decomposition: %d], Special-Upper-Case: %s",
 					collator.getStrength(), collator.getDecomposition(),
 					BooleanUtils.isTrue(useSpecialUpperCaseCollator)));
 		}
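
    (Aside on the .toString() added above: commons-logging's Log.trace(Object) accepted a StringBuilder directly, while slf4j's Logger.trace(String) requires an explicit String, which is why the migration has to call toString() on the builder. A short sketch of the slf4j side, with simplified arguments:)

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Sketch of the API difference: the StringBuilder must be converted to a
    // String before it can be handed to slf4j's trace(String).
    public class StringBuilderTraceExample {
        private static final Logger LOGGER = LoggerFactory.getLogger(StringBuilderTraceExample.class);

        void logInputs(String localeISOCode, Integer collatorStrength) {
            if (LOGGER.isTraceEnabled()) {
                StringBuilder msg = new StringBuilder("Input (literal) arguments:")
                        .append("localeISOCode: ").append(localeISOCode)
                        .append(", collatorStrength: ").append(collatorStrength);
                LOGGER.trace(msg.toString());
            }
        }
    }
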
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/filter/RowKeyComparisonFilter.java b/phoenix-core/src/main/java/org/apache/phoenix/filter/RowKeyComparisonFilter.java
index 2eb69bd..df81957 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/filter/RowKeyComparisonFilter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/filter/RowKeyComparisonFilter.java
@@ -39,7 +39,7 @@ import org.slf4j.LoggerFactory;
  *
  */
 public class RowKeyComparisonFilter extends BooleanExpressionFilter {
-    private static final Logger logger = LoggerFactory.getLogger(RowKeyComparisonFilter.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(RowKeyComparisonFilter.class);
 
     private boolean evaluate = true;
     private boolean keepRow = false;
@@ -70,8 +70,8 @@ public class RowKeyComparisonFilter extends BooleanExpressionFilter {
         if (evaluate) {
             inputTuple.setKey(v.getRowArray(), v.getRowOffset(), v.getRowLength());
             this.keepRow = Boolean.TRUE.equals(evaluate(inputTuple));
-            if (logger.isTraceEnabled()) {
-                logger.trace("RowKeyComparisonFilter: " + (this.keepRow ? "KEEP" : "FILTER")
+            if (LOGGER.isTraceEnabled()) {
+                LOGGER.trace("RowKeyComparisonFilter: " + (this.keepRow ? "KEEP" : "FILTER")
                         + " row " + inputTuple);
             }
             evaluate = false;
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
index c34ffd2..668d0a6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
@@ -29,8 +29,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Optional;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
@@ -86,6 +84,8 @@ import org.apache.phoenix.util.EnvironmentEdgeManager;
 import org.apache.phoenix.util.ScanUtil;
 import org.apache.phoenix.util.ServerUtil;
 import org.apache.phoenix.util.ServerUtil.ConnectionType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Lists;
 import com.google.common.collect.Multimap;
@@ -113,7 +113,7 @@ import com.google.common.collect.Multimap;
  */
 public class Indexer implements RegionObserver, RegionCoprocessor {
 
-  private static final Log LOG = LogFactory.getLog(Indexer.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(Indexer.class);
   private static final OperationStatus IGNORE = new OperationStatus(OperationStatusCode.SUCCESS);
   private static final OperationStatus NOWRITE = new OperationStatus(OperationStatusCode.SUCCESS);
   
@@ -234,7 +234,7 @@ public class Indexer implements RegionObserver, RegionCoprocessor {
                 StoreFailuresInCachePolicy.class, IndexFailurePolicy.class);
           IndexFailurePolicy policy =
               policyClass.getConstructor(PerRegionIndexWriteCache.class).newInstance(failedIndexEdits);
-          LOG.debug("Setting up recovery writter with failure policy: " + policy.getClass());
+          LOGGER.debug("Setting up recovery writter with failure policy: " + policy.getClass());
           recoveryWriter =
               new RecoveryIndexWriter(policy, indexWriterEnv, serverName + "-recovery-writer");
         } catch (Exception ex) {
@@ -242,7 +242,7 @@ public class Indexer implements RegionObserver, RegionCoprocessor {
         }
       } catch (NoSuchMethodError ex) {
           disabled = true;
-          LOG.error("Must be too early a version of HBase. Disabled coprocessor ", ex);
+          LOGGER.error("Must be too early a version of HBase. Disabled coprocessor ", ex);
       }
   }
 
@@ -321,8 +321,8 @@ public class Indexer implements RegionObserver, RegionCoprocessor {
       } finally {
           long duration = EnvironmentEdgeManager.currentTimeMillis() - start;
           if (duration >= slowIndexPrepareThreshold) {
-              if (LOG.isDebugEnabled()) {
-                  LOG.debug(getCallTooSlowMessage("preIncrementAfterRowLock", duration, slowPreIncrementThreshold));
+              if (LOGGER.isDebugEnabled()) {
+                  LOGGER.debug(getCallTooSlowMessage("preIncrementAfterRowLock", duration, slowPreIncrementThreshold));
               }
               metricSource.incrementSlowDuplicateKeyCheckCalls();
           }
@@ -345,8 +345,8 @@ public class Indexer implements RegionObserver, RegionCoprocessor {
       } finally {
           long duration = EnvironmentEdgeManager.currentTimeMillis() - start;
           if (duration >= slowIndexPrepareThreshold) {
-              if (LOG.isDebugEnabled()) {
-                  LOG.debug(getCallTooSlowMessage("preBatchMutate", duration, slowIndexPrepareThreshold));
+              if (LOGGER.isDebugEnabled()) {
+                  LOGGER.debug(getCallTooSlowMessage("preBatchMutate", duration, slowIndexPrepareThreshold));
               }
               metricSource.incrementNumSlowIndexPrepareCalls();
           }
@@ -497,8 +497,8 @@ public class Indexer implements RegionObserver, RegionCoprocessor {
 
           long duration = EnvironmentEdgeManager.currentTimeMillis() - start;
           if (duration >= slowIndexPrepareThreshold) {
-              if (LOG.isDebugEnabled()) {
-                  LOG.debug(getCallTooSlowMessage("indexPrepare", duration, slowIndexPrepareThreshold));
+              if (LOGGER.isDebugEnabled()) {
+                  LOGGER.debug(getCallTooSlowMessage("indexPrepare", duration, slowIndexPrepareThreshold));
               }
               metricSource.incrementNumSlowIndexPrepareCalls();
           }
@@ -568,8 +568,8 @@ public class Indexer implements RegionObserver, RegionCoprocessor {
            removeBatchMutateContext(c);
            long duration = EnvironmentEdgeManager.currentTimeMillis() - start;
            if (duration >= slowIndexWriteThreshold) {
-               if (LOG.isDebugEnabled()) {
-                   LOG.debug(getCallTooSlowMessage("postBatchMutateIndispensably", duration, slowIndexWriteThreshold));
+               if (LOGGER.isDebugEnabled()) {
+                   LOGGER.debug(getCallTooSlowMessage("postBatchMutateIndispensably", duration, slowIndexWriteThreshold));
                }
                metricSource.incrementNumSlowIndexWriteCalls();
            }
@@ -608,8 +608,8 @@ public class Indexer implements RegionObserver, RegionCoprocessor {
 
           long duration = EnvironmentEdgeManager.currentTimeMillis() - start;
           if (duration >= slowIndexWriteThreshold) {
-              if (LOG.isDebugEnabled()) {
-                  LOG.debug(getCallTooSlowMessage("indexWrite", duration, slowIndexWriteThreshold));
+              if (LOGGER.isDebugEnabled()) {
+                  LOGGER.debug(getCallTooSlowMessage("indexWrite", duration, slowIndexWriteThreshold));
               }
               metricSource.incrementNumSlowIndexWriteCalls();
           }
@@ -666,7 +666,7 @@ public class Indexer implements RegionObserver, RegionCoprocessor {
           return;
         }
 
-        LOG.info("Found some outstanding index updates that didn't succeed during"
+        LOGGER.info("Found some outstanding index updates that didn't succeed during"
                 + " WAL replay - attempting to replay now.");
 
         // do the usual writer stuff, killing the server again, if we can't manage to make the index
@@ -674,14 +674,14 @@ public class Indexer implements RegionObserver, RegionCoprocessor {
         try {
             writer.writeAndKillYourselfOnFailure(updates, true, ScanUtil.UNKNOWN_CLIENT_VERSION);
         } catch (IOException e) {
-                LOG.error("During WAL replay of outstanding index updates, "
+                LOGGER.error("During WAL replay of outstanding index updates, "
                         + "Exception is thrown instead of killing server during index writing", e);
         }
     } finally {
          long duration = EnvironmentEdgeManager.currentTimeMillis() - start;
          if (duration >= slowPostOpenThreshold) {
-             if (LOG.isDebugEnabled()) {
-                 LOG.debug(getCallTooSlowMessage("postOpen", duration, slowPostOpenThreshold));
+             if (LOGGER.isDebugEnabled()) {
+                 LOGGER.debug(getCallTooSlowMessage("postOpen", duration, slowPostOpenThreshold));
              }
              metricSource.incrementNumSlowPostOpenCalls();
          }
@@ -716,8 +716,8 @@ public class Indexer implements RegionObserver, RegionCoprocessor {
       } finally {
           long duration = EnvironmentEdgeManager.currentTimeMillis() - start;
           if (duration >= slowPreWALRestoreThreshold) {
-              if (LOG.isDebugEnabled()) {
-                  LOG.debug(getCallTooSlowMessage("preWALRestore", duration, slowPreWALRestoreThreshold));
+              if (LOGGER.isDebugEnabled()) {
+                  LOGGER.debug(getCallTooSlowMessage("preWALRestore", duration, slowPreWALRestoreThreshold));
               }
               metricSource.incrementNumSlowPreWALRestoreCalls();
           }
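
    (Aside on the Indexer hunks above: the repeated slow-call pattern guards only the message construction with isDebugEnabled(); the slow-call metric is incremented regardless of log level. A hedged sketch with stand-in names, not the real Phoenix helpers:)

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Sketch only: the DEBUG guard avoids building the "call too slow" message,
    // while the metric update happens whether or not DEBUG is enabled.
    public class SlowCallLoggingExample {
        private static final Logger LOGGER = LoggerFactory.getLogger(SlowCallLoggingExample.class);
        private long slowCallCount;

        void recordTiming(String op, long durationMs, long thresholdMs) {
            if (durationMs >= thresholdMs) {
                if (LOGGER.isDebugEnabled()) {
                    LOGGER.debug(op + " took " + durationMs + "ms (threshold " + thresholdMs + "ms)");
                }
                slowCallCount++; // metric is recorded regardless of log level
            }
        }
    }
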
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/LockManager.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/LockManager.java
index 02e4c3c..c65fc9b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/LockManager.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/LockManager.java
@@ -25,12 +25,12 @@ import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
 import org.apache.htrace.Trace;
 import org.apache.htrace.TraceScope;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * 
@@ -41,7 +41,7 @@ import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
  *
  */
 public class LockManager {
-    private static final Log LOG = LogFactory.getLog(LockManager.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(LockManager.class);
 
     private final ConcurrentHashMap<ImmutableBytesPtr, RowLockContext> lockedRows =
             new ConcurrentHashMap<ImmutableBytesPtr, RowLockContext>();
@@ -99,7 +99,7 @@ public class LockManager {
             success = true;
             return result;
         } catch (InterruptedException ie) {
-            LOG.warn("Thread interrupted waiting for lock on row: " + rowKey);
+            LOGGER.warn("Thread interrupted waiting for lock on row: " + rowKey);
             InterruptedIOException iie = new InterruptedIOException();
             iie.initCause(ie);
             if (traceScope != null) {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/BaseIndexBuilder.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/BaseIndexBuilder.java
index 0b7d9ef..86867ad 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/BaseIndexBuilder.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/BaseIndexBuilder.java
@@ -14,8 +14,6 @@ import java.lang.reflect.Constructor;
 import java.util.Collection;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.client.Increment;
@@ -27,6 +25,8 @@ import org.apache.phoenix.coprocessor.BaseScannerRegionObserver.ReplayWrite;
 import org.apache.phoenix.hbase.index.covered.IndexCodec;
 import org.apache.phoenix.hbase.index.covered.IndexMetaData;
 import org.apache.phoenix.hbase.index.covered.NonTxIndexBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Basic implementation of the {@link IndexBuilder} that doesn't do any actual work of indexing.
@@ -38,7 +38,7 @@ import org.apache.phoenix.hbase.index.covered.NonTxIndexBuilder;
  */
 public abstract class BaseIndexBuilder implements IndexBuilder {
     public static final String CODEC_CLASS_NAME_KEY = "org.apache.hadoop.hbase.index.codec.class";
-    private static final Log LOG = LogFactory.getLog(BaseIndexBuilder.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(BaseIndexBuilder.class);
 
     protected boolean stopped;
     protected RegionCoprocessorEnvironment env;
@@ -120,7 +120,7 @@ public abstract class BaseIndexBuilder implements IndexBuilder {
 
     @Override
     public void stop(String why) {
-        LOG.debug("Stopping because: " + why);
+        LOGGER.debug("Stopping because: " + why);
         this.stopped = true;
     }
 
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/IndexBuildManager.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/IndexBuildManager.java
index 4b6df89..326f1f4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/IndexBuildManager.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/IndexBuildManager.java
@@ -22,8 +22,6 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.Stoppable;
@@ -37,13 +35,15 @@ import org.apache.phoenix.coprocessor.BaseScannerRegionObserver.ReplayWrite;
 import org.apache.phoenix.hbase.index.Indexer;
 import org.apache.phoenix.hbase.index.covered.IndexMetaData;
 import org.apache.phoenix.index.PhoenixIndexMetaData;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Manage the building of index updates from primary table updates.
  */
 public class IndexBuildManager implements Stoppable {
 
-  private static final Log LOG = LogFactory.getLog(IndexBuildManager.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(IndexBuildManager.class);
   private final IndexBuilder delegate;
   private boolean stopped;
 
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilder.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilder.java
index 1e4da87..645f2c4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilder.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilder.java
@@ -13,8 +13,6 @@ import java.io.IOException;
 import java.util.Collection;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Mutation;
@@ -25,6 +23,8 @@ import org.apache.phoenix.hbase.index.covered.data.LocalHBaseState;
 import org.apache.phoenix.hbase.index.covered.data.LocalTable;
 import org.apache.phoenix.hbase.index.covered.update.ColumnTracker;
 import org.apache.phoenix.hbase.index.covered.update.IndexUpdateManager;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Build covered indexes for phoenix updates.
@@ -36,7 +36,7 @@ import org.apache.phoenix.hbase.index.covered.update.IndexUpdateManager;
  * bloated index that needs to be cleaned up by a background process.
  */
 public class NonTxIndexBuilder extends BaseIndexBuilder {
-    private static final Log LOG = LogFactory.getLog(NonTxIndexBuilder.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(NonTxIndexBuilder.class);
 
     protected LocalHBaseState localTable;
 
@@ -55,8 +55,8 @@ public class NonTxIndexBuilder extends BaseIndexBuilder {
 
         batchMutationAndAddUpdates(manager, state, mutation, indexMetaData);
 
-        if (LOG.isTraceEnabled()) {
-            LOG.trace("Found index updates for Mutation: " + mutation + "\n" + manager);
+        if (LOGGER.isTraceEnabled()) {
+            LOGGER.trace("Found index updates for Mutation: " + mutation + "\n" + manager);
         }
 
         return manager.toMap();
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java
index 301d825..e38e0b1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java
@@ -20,8 +20,6 @@ package org.apache.phoenix.hbase.index.covered.data;
 import java.util.Iterator;
 import java.util.SortedSet;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.CellComparatorImpl;
@@ -34,6 +32,8 @@ import org.apache.phoenix.hbase.index.covered.KeyValueStore;
 import org.apache.phoenix.hbase.index.covered.LocalTableState;
 import org.apache.phoenix.hbase.index.scanner.ReseekableScanner;
 import org.apache.phoenix.util.PhoenixKeyValueUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Like the HBase {@link MemStore}, but without all that extra work around maintaining snapshots and
@@ -74,7 +74,7 @@ import org.apache.phoenix.util.PhoenixKeyValueUtil;
  */
 public class IndexMemStore implements KeyValueStore {
 
-  private static final Log LOG = LogFactory.getLog(IndexMemStore.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(IndexMemStore.class);
   private IndexKeyValueSkipListSet kvset;
   private CellComparator comparator;
 
@@ -101,8 +101,8 @@ public class IndexMemStore implements KeyValueStore {
 
   @Override
   public void add(Cell kv, boolean overwrite) {
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("Inserting: " + toString(kv));
+    if (LOGGER.isTraceEnabled()) {
+      LOGGER.trace("Inserting: " + toString(kv));
     }
     // if overwriting, we will always update
     if (!overwrite) {
@@ -112,17 +112,17 @@ public class IndexMemStore implements KeyValueStore {
       kvset.add(kv);
     }
 
-    if (LOG.isTraceEnabled()) {
+    if (LOGGER.isTraceEnabled()) {
       dump();
     }
   }
 
   private void dump() {
-    LOG.trace("Current kv state:\n");
+    LOGGER.trace("Current kv state:\n");
     for (Cell kv : this.kvset) {
-      LOG.trace("KV: " + toString(kv));
+      LOGGER.trace("KV: " + toString(kv));
     }
-    LOG.trace("========== END MemStore Dump ==================\n");
+    LOGGER.trace("========== END MemStore Dump ==================\n");
   }
 
   private String toString(Cell kv) {
@@ -132,12 +132,12 @@ public class IndexMemStore implements KeyValueStore {
 
   @Override
   public void rollback(Cell kv) {
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("Rolling back: " + toString(kv));
+    if (LOGGER.isTraceEnabled()) {
+      LOGGER.trace("Rolling back: " + toString(kv));
     }
     // If the key is in the store, delete it
     this.kvset.remove(kv);
-    if (LOG.isTraceEnabled()) {
+    if (LOGGER.isTraceEnabled()) {
       dump();
     }
   }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/BaseTaskRunner.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/BaseTaskRunner.java
index 5cd3fcb..145c95b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/BaseTaskRunner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/BaseTaskRunner.java
@@ -23,9 +23,9 @@ import java.util.concurrent.CancellationException;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Abortable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.ListeningExecutorService;
@@ -41,7 +41,7 @@ import com.google.common.util.concurrent.MoreExecutors;
  */
 public abstract class BaseTaskRunner implements TaskRunner {
 
-  private static final Log LOG = LogFactory.getLog(BaseTaskRunner.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(BaseTaskRunner.class);
   protected ListeningExecutorService writerPool;
   private boolean stopped;
 
@@ -77,7 +77,7 @@ public abstract class BaseTaskRunner implements TaskRunner {
 
   private void logAndNotifyAbort(Exception e, Abortable abort) {
     String msg = "Found a failed task because: " + e.getMessage();
-    LOG.error(msg, e);
+    LOGGER.error(msg, e);
     abort.abort(msg, e.getCause());
   }
 
@@ -118,7 +118,7 @@ public abstract class BaseTaskRunner implements TaskRunner {
     if (this.stopped) {
       return;
     }
-    LOG.info("Shutting down task runner because " + why);
+    LOGGER.info("Shutting down task runner because " + why);
     this.writerPool.shutdownNow();
   }
 
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/QuickFailingTaskRunner.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/QuickFailingTaskRunner.java
index 5b9717e..720ad98 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/QuickFailingTaskRunner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/QuickFailingTaskRunner.java
@@ -20,8 +20,8 @@ package org.apache.phoenix.hbase.index.parallel;
 import java.util.List;
 import java.util.concurrent.ExecutorService;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
@@ -32,7 +32,7 @@ import com.google.common.util.concurrent.ListenableFuture;
  */
 public class QuickFailingTaskRunner extends BaseTaskRunner {
 
-  static final Log LOG = LogFactory.getLog(QuickFailingTaskRunner.class);
+  static final Logger LOGGER = LoggerFactory.getLogger(QuickFailingTaskRunner.class);
 
   /**
    * @param service thread pool to which {@link Task}s are submitted. This service is then 'owned'
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/TaskBatch.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/TaskBatch.java
index 62e4522..208464e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/TaskBatch.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/TaskBatch.java
@@ -22,9 +22,9 @@ import java.util.Collection;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicBoolean;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Abortable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A group of {@link Task}s. The tasks are all bound together using the same {@link Abortable} (
@@ -32,7 +32,7 @@ import org.apache.hadoop.hbase.Abortable;
  * @param <V> expected result type from all the tasks
  */
 public class TaskBatch<V> implements Abortable {
-  private static final Log LOG = LogFactory.getLog(TaskBatch.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(TaskBatch.class);
   private AtomicBoolean aborted = new AtomicBoolean();
   private List<Task<V>> tasks;
 
@@ -57,7 +57,7 @@ public class TaskBatch<V> implements Abortable {
     if (this.aborted.getAndSet(true)) {
       return;
     }
-    LOG.info("Aborting batch of tasks because " + why);
+    LOGGER.info("Aborting batch of tasks because " + why);
   }
 
   @Override
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/ThreadPoolBuilder.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/ThreadPoolBuilder.java
index 58a976a..bedd495 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/ThreadPoolBuilder.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/ThreadPoolBuilder.java
@@ -17,10 +17,10 @@
  */
 package org.apache.phoenix.hbase.index.parallel;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.util.Pair;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Helper utility to make a thread pool from a configuration based on reasonable defaults and passed
@@ -28,7 +28,7 @@ import org.apache.hadoop.hbase.util.Pair;
  */
 public class ThreadPoolBuilder {
 
-  private static final Log LOG = LogFactory.getLog(ThreadPoolBuilder.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(ThreadPoolBuilder.class);
   private static final long DEFAULT_TIMEOUT = 60;
   private static final int DEFAULT_MAX_THREADS = 1;// is there a better default?
   private Pair<String, Long> timeout;
@@ -72,7 +72,7 @@ public class ThreadPoolBuilder {
       maxThreads =
           key == null ? this.maxThreads.getSecond() : conf.getInt(key, this.maxThreads.getSecond());
     }
-    LOG.trace("Creating pool builder with max " + maxThreads + " threads ");
+    LOGGER.trace("Creating pool builder with max " + maxThreads + " threads ");
     return maxThreads;
   }
 
@@ -84,7 +84,7 @@ public class ThreadPoolBuilder {
           key == null ? this.timeout.getSecond() : conf.getLong(key, this.timeout.getSecond());
     }
 
-    LOG.trace("Creating pool builder with core thread timeout of " + timeout + " seconds ");
+    LOGGER.trace("Creating pool builder with core thread timeout of " + timeout + " seconds ");
     return timeout;
   }
 }
\ No newline at end of file
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/ThreadPoolManager.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/ThreadPoolManager.java
index db3b845..3dbe439 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/ThreadPoolManager.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/ThreadPoolManager.java
@@ -26,9 +26,9 @@ import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.util.Threads;
@@ -38,7 +38,7 @@ import org.apache.hadoop.hbase.util.Threads;
  */
 public class ThreadPoolManager {
 
-  private static final Log LOG = LogFactory.getLog(ThreadPoolManager.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(ThreadPoolManager.class);
 
   /**
    * Get an executor for the given name, based on the passed {@link Configuration}. If a thread pool
@@ -62,7 +62,7 @@ public class ThreadPoolManager {
     ThreadPoolExecutor pool = (ThreadPoolExecutor) poolCache.get(builder.getName());
     if (pool == null || pool.isTerminating() || pool.isShutdown()) {
       pool = getDefaultExecutor(builder);
-      LOG.info("Creating new pool for " + builder.getName());
+      LOGGER.info("Creating new pool for " + builder.getName());
       poolCache.put(builder.getName(), pool);
     }
     ((ShutdownOnUnusedThreadPoolExecutor) pool).addReference();
@@ -120,14 +120,14 @@ public class ThreadPoolManager {
     @Override
     protected void finalize() {
       // override references counter if we go out of scope - ensures the pool gets cleaned up
-      LOG.info("Shutting down pool '" + poolName + "' because no more references");
+      LOGGER.info("Shutting down pool '" + poolName + "' because no more references");
       super.finalize();
     }
 
     @Override
     public void shutdown() {
       if (references.decrementAndGet() <= 0) {
-        LOG.debug("Shutting down pool " + this.poolName);
+        LOGGER.debug("Shutting down pool " + this.poolName);
         super.shutdown();
       }
     }
@@ -135,7 +135,7 @@ public class ThreadPoolManager {
     @Override
     public List<Runnable> shutdownNow() {
       if (references.decrementAndGet() <= 0) {
-        LOG.debug("Shutting down pool " + this.poolName + " NOW!");
+        LOGGER.debug("Shutting down pool " + this.poolName + " NOW!");
         return super.shutdownNow();
       }
       return Collections.emptyList();
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
index bcde1a0..a39782e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
@@ -26,8 +26,6 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
@@ -46,6 +44,8 @@ import org.apache.phoenix.hbase.index.covered.Batch;
 import org.apache.phoenix.hbase.index.covered.data.LazyValueGetter;
 import org.apache.phoenix.hbase.index.covered.update.ColumnReference;
 import org.apache.phoenix.hbase.index.scanner.ScannerBuilder.CoveredDeleteScanner;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Lists;
 import com.google.common.primitives.Longs;
@@ -66,7 +66,7 @@ public class IndexManagementUtil {
     public static final String WAL_EDIT_CODEC_CLASS_KEY = "hbase.regionserver.wal.codec";
 
     private static final String INDEX_HLOG_READER_CLASS_NAME = "org.apache.hadoop.hbase.regionserver.wal.IndexedHLogReader";
-    private static final Log LOG = LogFactory.getLog(IndexManagementUtil.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(IndexManagementUtil.class);
 
     public static boolean isWALEditCodecSet(Configuration conf) {
         // check to see if the WALEditCodec is installed
@@ -198,11 +198,11 @@ public class IndexManagementUtil {
         try {
             throw e;
         } catch (IOException | FatalIndexBuildingFailureException e1) {
-            LOG.info("Rethrowing " + e);
+            LOGGER.info("Rethrowing " + e);
             throw e1;
         }
         catch (Throwable e1) {
-            LOG.info("Rethrowing " + e1 + " as a " + IndexBuildingFailureException.class.getSimpleName());
+            LOGGER.info("Rethrowing " + e1 + " as a " + IndexBuildingFailureException.class.getSimpleName());
             throw new IndexBuildingFailureException("Failed to build index for unexpected reason!", e1);
         }
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriter.java
index c28288c..86624fa 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriter.java
@@ -23,8 +23,6 @@ import java.util.HashMap;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicBoolean;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.client.Mutation;
@@ -34,6 +32,8 @@ import org.apache.phoenix.hbase.index.exception.IndexWriteException;
 import org.apache.phoenix.hbase.index.table.HTableInterfaceReference;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.index.PhoenixIndexFailurePolicy;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.ArrayListMultimap;
 import com.google.common.collect.Multimap;
@@ -47,7 +47,7 @@ import com.google.common.collect.Multimap;
  */
 public class IndexWriter implements Stoppable {
 
-  private static final Log LOG = LogFactory.getLog(IndexWriter.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(IndexWriter.class);
   public static final String INDEX_COMMITTER_CONF_KEY = "index.writer.commiter.class";
   public static final String INDEX_FAILURE_POLICY_CONF_KEY = "index.writer.failurepolicy.class";
   private AtomicBoolean stopped = new AtomicBoolean(false);
@@ -154,8 +154,8 @@ public class IndexWriter implements Stoppable {
             boolean allowLocalUpdates, int clientVersion) throws IOException {
     try {
       write(toWrite, allowLocalUpdates, clientVersion);
-      if (LOG.isTraceEnabled()) {
-        LOG.trace("Done writing all index updates!\n\t" + toWrite);
+      if (LOGGER.isTraceEnabled()) {
+        LOGGER.trace("Done writing all index updates!\n\t" + toWrite);
       }
     } catch (Exception e) {
       this.failurePolicy.handleFailure(toWrite, e);
@@ -227,7 +227,7 @@ public class IndexWriter implements Stoppable {
       // already stopped
       return;
     }
-    LOG.debug("Stopping because " + why);
+    LOGGER.debug("Stopping because " + why);
     this.writer.stop(why);
     this.failurePolicy.stop(why);
   }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriterUtils.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriterUtils.java
index ffece49..73110a2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriterUtils.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriterUtils.java
@@ -22,9 +22,6 @@ import java.util.concurrent.ExecutorService;
 
 import javax.annotation.concurrent.GuardedBy;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Table;
@@ -36,10 +33,12 @@ import org.apache.phoenix.hbase.index.util.IndexManagementUtil;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.ServerUtil.ConnectionFactory;
 import org.apache.phoenix.util.ServerUtil.ConnectionType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class IndexWriterUtils {
 
-  private static final Log LOG = LogFactory.getLog(IndexWriterUtils.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(IndexWriterUtils.class);
 
   /**
    * Maximum number of threads to allow per-table when writing. Each writer thread (from
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/KillServerOnFailurePolicy.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/KillServerOnFailurePolicy.java
index 6257bea..610c108 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/KillServerOnFailurePolicy.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/KillServerOnFailurePolicy.java
@@ -19,13 +19,13 @@ package org.apache.phoenix.hbase.index.write;
 
 import java.io.IOException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.phoenix.hbase.index.builder.FatalIndexBuildingFailureException;
 import org.apache.phoenix.hbase.index.table.HTableInterfaceReference;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Multimap;
 
@@ -34,7 +34,7 @@ import com.google.common.collect.Multimap;
  */
 public class KillServerOnFailurePolicy implements IndexFailurePolicy {
 
-  private static final Log LOG = LogFactory.getLog(KillServerOnFailurePolicy.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(KillServerOnFailurePolicy.class);
   private Stoppable stoppable;
 
   @Override
@@ -64,7 +64,7 @@ public class KillServerOnFailurePolicy implements IndexFailurePolicy {
     // notify the regionserver of the failure
     String msg =
         "Could not update the index table, killing server region because couldn't write to an index table";
-    LOG.error(msg, cause);
+    LOGGER.error(msg, cause);
     throw new FatalIndexBuildingFailureException(msg,cause);
   }
 
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
index 0d8b90a..7a29276 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
@@ -17,8 +17,6 @@ import java.util.Set;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.client.Mutation;
@@ -37,6 +35,8 @@ import org.apache.phoenix.hbase.index.table.HTableInterfaceReference;
 import org.apache.phoenix.hbase.index.util.KeyValueBuilder;
 import org.apache.phoenix.index.PhoenixIndexFailurePolicy;
 import org.apache.phoenix.util.IndexUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Multimap;
 
@@ -55,7 +55,7 @@ public class ParallelWriterIndexCommitter implements IndexCommitter {
     public static final String NUM_CONCURRENT_INDEX_WRITER_THREADS_CONF_KEY = "index.writer.threads.max";
     private static final int DEFAULT_CONCURRENT_INDEX_WRITER_THREADS = 10;
     public static final String INDEX_WRITER_KEEP_ALIVE_TIME_CONF_KEY = "index.writer.threads.keepalivetime";
-    private static final Log LOG = LogFactory.getLog(ParallelWriterIndexCommitter.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(ParallelWriterIndexCommitter.class);
 
     private HTableFactory retryingFactory;
     private HTableFactory noRetriesfactory;
@@ -143,8 +143,8 @@ public class ParallelWriterIndexCommitter implements IndexCommitter {
                     // early exit, if that's the case
                     throwFailureIfDone();
 
-                    if (LOG.isTraceEnabled()) {
-                        LOG.trace("Writing index update:" + mutations + " to table: " + tableReference);
+                    if (LOGGER.isTraceEnabled()) {
+                        LOGGER.trace("Writing index update:" + mutations + " to table: " + tableReference);
                     }
                     Table table = null;
                     try {
@@ -158,9 +158,9 @@ public class ParallelWriterIndexCommitter implements IndexCommitter {
                                 return null;
                             } catch (IOException ignord) {
                                 // when it's failed we fall back to the standard & slow way
-                                if (LOG.isDebugEnabled()) {
-                                    LOG.debug("indexRegion.batchMutate failed and fall back to HTable.batch(). Got error="
-                                            + ignord);
+                                if (LOGGER.isDebugEnabled()) {
+                                    LOGGER.debug("indexRegion.batchMutate failed and fall back " +
+                                            "to HTable.batch(). Got error=" + ignord);
                                 }
                             }
                         }
@@ -200,7 +200,7 @@ public class ParallelWriterIndexCommitter implements IndexCommitter {
         } catch (EarlyExitFailure e) {
             propagateFailure(e);
         } catch (ExecutionException e) {
-            LOG.error("Found a failed index update!");
+            LOGGER.error("Found a failed index update!");
             propagateFailure(e.getCause());
         }
 
@@ -228,7 +228,7 @@ public class ParallelWriterIndexCommitter implements IndexCommitter {
      */
     @Override
     public void stop(String why) {
-        LOG.info("Shutting down " + this.getClass().getSimpleName() + " because " + why);
+        LOGGER.info("Shutting down " + this.getClass().getSimpleName() + " because " + why);
         this.pool.stop(why);
         this.retryingFactory.shutdown();
         this.noRetriesfactory.shutdown();
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java
index fefb812..d7ffd33 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java
@@ -24,8 +24,6 @@ import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
@@ -36,6 +34,8 @@ import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.hbase.index.exception.MultiIndexWriteFailureException;
 import org.apache.phoenix.hbase.index.table.HTableInterfaceReference;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.ArrayListMultimap;
 import com.google.common.collect.Multimap;
@@ -48,7 +48,7 @@ import com.google.common.collect.Multimap;
  */
 public class RecoveryIndexWriter extends IndexWriter {
 
-    private static final Log LOG = LogFactory.getLog(RecoveryIndexWriter.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(RecoveryIndexWriter.class);
     private Set<HTableInterfaceReference> nonExistingTablesList = new HashSet<HTableInterfaceReference>();
     private Admin admin;
 
@@ -84,7 +84,7 @@ public class RecoveryIndexWriter extends IndexWriter {
         } catch (MultiIndexWriteFailureException e) {
             for (HTableInterfaceReference table : e.getFailedTables()) {
                 if (!admin.tableExists(TableName.valueOf(table.getTableName()))) {
-                    LOG.warn("Failure due to non existing table: " + table.getTableName());
+                    LOGGER.warn("Failure due to non existing table: " + table.getTableName());
                     nonExistingTablesList.add(table);
                 } else {
                     throw e;
@@ -114,7 +114,7 @@ public class RecoveryIndexWriter extends IndexWriter {
             ImmutableBytesPtr ptr = new ImmutableBytesPtr(tableName);
             HTableInterfaceReference table = tables.get(ptr);
             if (nonExistingTablesList.contains(table)) {
-                LOG.debug("Edits found for non existing table: " + table.getTableName() + " so skipping it!!");
+                LOGGER.debug("Edits found for non existing table: " + table.getTableName() + " so skipping it!!");
                 continue;
             }
             if (table == null) {
@@ -135,13 +135,13 @@ public class RecoveryIndexWriter extends IndexWriter {
                 try {
                     admin.getConnection().close();
                 } catch (IOException e) {
-                    LOG.error("Closing the connection failed: ", e);
+                    LOGGER.error("Closing the connection failed: ", e);
                 }
             }
             try {
                 admin.close();
             } catch (IOException e) {
-                LOG.error("Closing the admin failed: ", e);
+                LOGGER.error("Closing the admin failed: ", e);
             }
         }
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
index 6ed6c5b..c502d89 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
@@ -19,8 +19,6 @@ import java.util.Set;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.Stoppable;
@@ -42,6 +40,8 @@ import org.apache.phoenix.hbase.index.table.HTableInterfaceReference;
 import org.apache.phoenix.hbase.index.util.KeyValueBuilder;
 import org.apache.phoenix.index.PhoenixIndexFailurePolicy;
 import org.apache.phoenix.util.IndexUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Multimap;
 
@@ -67,7 +67,8 @@ import com.google.common.collect.Multimap;
  * client.
  */
 public class TrackingParallelWriterIndexCommitter implements IndexCommitter {
-    private static final Log LOG = LogFactory.getLog(TrackingParallelWriterIndexCommitter.class);
+    private static final Logger LOGGER =
+            LoggerFactory.getLogger(TrackingParallelWriterIndexCommitter.class);
 
     public static final String NUM_CONCURRENT_INDEX_WRITER_THREADS_CONF_KEY = "index.writer.threads.max";
     private static final int DEFAULT_CONCURRENT_INDEX_WRITER_THREADS = 10;
@@ -165,15 +166,16 @@ public class TrackingParallelWriterIndexCommitter implements IndexCommitter {
                                 return Boolean.TRUE;
                             } catch (IOException ignord) {
                                 // when it's failed we fall back to the standard & slow way
-                                if (LOG.isTraceEnabled()) {
-                                    LOG.trace("indexRegion.batchMutate failed and fall back to HTable.batch(). Got error="
-                                            + ignord);
+                                if (LOGGER.isTraceEnabled()) {
+                                    LOGGER.trace("indexRegion.batchMutate failed and fall " +
+                                            "back to HTable.batch(). Got error=" + ignord);
                                 }
                             }
                         }
 
-                        if (LOG.isTraceEnabled()) {
-                            LOG.trace("Writing index update:" + mutations + " to table: " + tableReference);
+                        if (LOGGER.isTraceEnabled()) {
+                            LOGGER.trace("Writing index update:" + mutations + " to table: "
+                                    + tableReference);
                         }
                         // if the client can retry index writes, then we don't need to retry here
                         HTableFactory factory = clientVersion < MetaDataProtocol.MIN_CLIENT_RETRY_INDEX_WRITES ? retryingFactory : noRetriesFactory;
@@ -207,7 +209,7 @@ public class TrackingParallelWriterIndexCommitter implements IndexCommitter {
 
         List<Boolean> results = null;
         try {
-            LOG.debug("Waiting on index update tasks to complete...");
+            LOGGER.debug("Waiting on index update tasks to complete...");
             results = this.pool.submitUninterruptible(tasks);
         } catch (ExecutionException e) {
             throw new RuntimeException("Should not fail on the results while using a WaitForCompletionTaskRunner", e);
@@ -240,7 +242,7 @@ public class TrackingParallelWriterIndexCommitter implements IndexCommitter {
 
     @Override
     public void stop(String why) {
-        LOG.info("Shutting down " + this.getClass().getSimpleName());
+        LOGGER.info("Shutting down " + this.getClass().getSimpleName());
         this.pool.stop(why);
         this.retryingFactory.shutdown();
         this.noRetriesFactory.shutdown();
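
The hunks above follow the declaration swap used throughout this commit: the commons-logging Log/LogFactory pair is replaced by slf4j's Logger/LoggerFactory while the existing trace/debug/info/warn/error call sites are kept as they were. A minimal standalone sketch of that pattern (the class name here is hypothetical, not Phoenix code):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Hypothetical writer class showing the shape of the migration:
    // only the logger declaration changes; call sites keep the same method names.
    public class ExampleIndexWriter {
        // Previously: private static final Log LOG = LogFactory.getLog(ExampleIndexWriter.class);
        private static final Logger LOGGER = LoggerFactory.getLogger(ExampleIndexWriter.class);

        public void stop(String why) {
            LOGGER.info("Shutting down " + getClass().getSimpleName() + " because: " + why);
        }
    }
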
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
index f13616a..9cf7d96 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
@@ -29,8 +29,6 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
@@ -71,6 +69,8 @@ import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.ServerUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Function;
 import com.google.common.collect.Iterables;
@@ -85,7 +85,7 @@ import com.google.common.collect.Multimap;
  *
  */
 public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
-    private static final Log LOG = LogFactory.getLog(PhoenixIndexFailurePolicy.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixIndexFailurePolicy.class);
     public static final String THROW_INDEX_WRITE_FAILURE = "THROW_INDEX_WRITE_FAILURE";
     public static final String DISABLE_INDEX_ON_WRITE_FAILURE = "DISABLE_INDEX_ON_WRITE_FAILURE";
     public static final String REBUILD_INDEX_ON_WRITE_FAILURE = "REBUILD_INDEX_ON_WRITE_FAILURE";
@@ -174,7 +174,7 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
             timestamp = handleFailureWithExceptions(attempted, cause);
             throwing = false;
         } catch (Throwable t) {
-            LOG.warn("handleFailure failed", t);
+            LOGGER.warn("handleFailure failed", t);
             super.handleFailure(attempted, cause);
             throwing = false;
         } finally {
@@ -188,7 +188,7 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
                 if (throwIndexWriteFailure) {
             		throw ioException;
             	} else {
-                    LOG.warn("Swallowing index write failure", ioException);
+                    LOGGER.warn("Swallowing index write failure", ioException);
             	}
             }
         }
@@ -282,24 +282,24 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
                         MetaDataMutationResult result = IndexUtil.updateIndexState(indexTableName, minTimeStamp,
                                 systemTable, newState);
                         if (result.getMutationCode() == MutationCode.TABLE_NOT_FOUND) {
-                            LOG.info("Index " + indexTableName + " has been dropped. Ignore uncommitted mutations");
+                            LOGGER.info("Index " + indexTableName + " has been dropped. Ignore uncommitted mutations");
                             continue;
                         }
                         if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) {
                             if (leaveIndexActive) {
-                                LOG.warn("Attempt to update INDEX_DISABLE_TIMESTAMP " + " failed with code = "
+                                LOGGER.warn("Attempt to update INDEX_DISABLE_TIMESTAMP " + " failed with code = "
                                         + result.getMutationCode());
                                 // If we're not disabling the index, then we don't want to throw as throwing
                                 // will lead to the RS being shutdown.
                                 if (blockDataTableWritesOnFailure) { throw new DoNotRetryIOException(
                                         "Attempt to update INDEX_DISABLE_TIMESTAMP failed."); }
                             } else {
-                                LOG.warn("Attempt to disable index " + indexTableName + " failed with code = "
+                                LOGGER.warn("Attempt to disable index " + indexTableName + " failed with code = "
                                         + result.getMutationCode() + ". Will use default failure policy instead.");
                                 throw new DoNotRetryIOException("Attempt to disable " + indexTableName + " failed.");
                             }
                         }
-                        LOG.info("Successfully update INDEX_DISABLE_TIMESTAMP for " + indexTableName
+                        LOGGER.info("Successfully update INDEX_DISABLE_TIMESTAMP for " + indexTableName
                             + " due to an exception while writing updates. indexState=" + newState,
                         cause);
                     } catch (Throwable t) {
@@ -351,7 +351,7 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
                                         mutation.getRow().length - offset));
                 String indexTableName = localIndexNames.get(new ImmutableBytesWritable(viewId));
                 if (indexTableName == null) {
-                    LOG.error("Unable to find local index on " + ref.getTableName() + " with viewID of " + Bytes.toStringBinary(viewId));
+                    LOGGER.error("Unable to find local index on " + ref.getTableName() + " with viewID of " + Bytes.toStringBinary(viewId));
                 } else {
                     indexTableNames.add(indexTableName);
                 }
@@ -437,7 +437,7 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
                 }
             }
         } catch (Exception handleE) {
-            LOG.warn("Error while trying to handle index write exception", indexWriteException);
+            LOGGER.warn("Error while trying to handle index write exception", indexWriteException);
         }
     }
 
@@ -532,7 +532,7 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
                 }
             }
         } catch (Exception handleE) {
-            LOG.warn("Error while trying to handle index write exception", indexWriteException);
+            LOGGER.warn("Error while trying to handle index write exception", indexWriteException);
         }
     }
 
@@ -571,11 +571,11 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
         decrementCounterForIndex(conn,indexFullName);
         Long indexDisableTimestamp = null;
         if (PIndexState.DISABLE.equals(indexState)) {
-            LOG.info("Disabling index after hitting max number of index write retries: "
+            LOGGER.info("Disabling index after hitting max number of index write retries: "
                     + indexFullName);
             IndexUtil.updateIndexState(conn, indexFullName, indexState, indexDisableTimestamp);
         } else if (PIndexState.ACTIVE.equals(indexState)) {
-            LOG.debug("Resetting index to active after subsequent success " + indexFullName);
+            LOGGER.debug("Resetting index to active after subsequent success " + indexFullName);
             //At server disabled timestamp will be reset only if there is no other client is in PENDING_DISABLE state
             indexDisableTimestamp = 0L;
             try {
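
As in the handleFailure hunks above, slf4j prints the full stack trace when the Throwable is passed as the final argument rather than concatenated into the message. A small sketch of that usage (class and method names are illustrative only):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Illustrative only: passing the exception as the last argument preserves the stack trace.
    public class ExampleFailureLogging {
        private static final Logger LOGGER = LoggerFactory.getLogger(ExampleFailureLogging.class);

        void onIndexWriteFailure(Exception cause) {
            // The message and the Throwable's stack trace are logged together.
            LOGGER.warn("Error while trying to handle index write exception", cause);
        }
    }
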
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java
index 73737b4..29f2fe3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java
@@ -30,8 +30,6 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Optional;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.HConstants;
@@ -62,6 +60,8 @@ import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.ServerUtil;
 import org.apache.phoenix.util.ServerUtil.ConnectionType;
 import org.apache.phoenix.util.TransactionUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Do all the work of managing local index updates for a transactional table from a single coprocessor. Since the transaction
@@ -71,7 +71,7 @@ import org.apache.phoenix.util.TransactionUtil;
  */
 public class PhoenixTransactionalIndexer implements RegionObserver, RegionCoprocessor {
 
-    private static final Log LOG = LogFactory.getLog(PhoenixTransactionalIndexer.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixTransactionalIndexer.class);
 
     // Hack to get around not being able to save any state between
     // coprocessor calls. TODO: remove after HBASE-18127 when available
@@ -203,7 +203,7 @@ public class PhoenixTransactionalIndexer implements RegionObserver, RegionCoproc
             TracingUtils.addAnnotation(current, "index update count", context.indexUpdates.size());
         } catch (Throwable t) {
             String msg = "Failed to update index with entries:" + indexUpdates;
-            LOG.error(msg, t);
+            LOGGER.error(msg, t);
             ServerUtil.throwIOException(msg, t);
         }
     }
@@ -230,7 +230,7 @@ public class PhoenixTransactionalIndexer implements RegionObserver, RegionCoproc
             }
         } catch (Throwable t) {
             String msg = "Failed to write index updates:" + context.indexUpdates;
-            LOG.error(msg, t);
+            LOGGER.error(msg, t);
             ServerUtil.throwIOException(msg, t);
          } finally {
              removeBatchMutateContext(c);
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
index a562b8d..c9899a9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
@@ -143,7 +143,7 @@ import com.google.common.collect.Lists;
  * @since 0.1
  */
 public abstract class BaseResultIterators extends ExplainTable implements ResultIterators {
-	public static final Logger logger = LoggerFactory.getLogger(BaseResultIterators.class);
+	public static final Logger LOGGER = LoggerFactory.getLogger(BaseResultIterators.class);
     private static final int ESTIMATED_GUIDEPOSTS_PER_REGION = 20;
     private static final int MIN_SEEK_TO_COLUMN_VERSION = VersionUtil.encodeVersion("0", "98", "12");
     private final List<List<Scan>> scans;
@@ -1226,8 +1226,8 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
      */
     @Override
     public List<PeekingResultIterator> getIterators() throws SQLException {
-        if (logger.isDebugEnabled()) {
-            logger.debug(LogUtil.addCustomAnnotations("Getting iterators for " + this,
+        if (LOGGER.isDebugEnabled()) {
+            LOGGER.debug(LogUtil.addCustomAnnotations("Getting iterators for " + this,
                     ScanUtil.getCustomAnnotations(scan)));
         }
         boolean isReverse = ScanUtil.isReversed(scan);
@@ -1313,7 +1313,7 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
                             Scan oldScan = scanPair.getFirst();
                             byte[] startKey = oldScan.getAttribute(SCAN_ACTUAL_START_ROW);
                             if(e2 instanceof HashJoinCacheNotFoundException){
-                                logger.debug(
+                                LOGGER.debug(
                                         "Retrying when Hash Join cache is not found on the server ,by sending the cache again");
                                 if(retryCount<=0){
                                     throw e2;
@@ -1454,7 +1454,7 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
                     Thread.currentThread().interrupt();
                     throw new RuntimeException(e);
                 } catch (ExecutionException e) {
-                    logger.info("Failed to execute task during cancel", e);
+                    LOGGER.info("Failed to execute task during cancel", e);
                     continue;
                 }
             }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java
index 1aab2d5..2fb7b72 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java
@@ -54,7 +54,7 @@ import com.google.common.base.Preconditions;
  */
 @Deprecated
 public class ChunkedResultIterator implements PeekingResultIterator {
-    private static final Logger logger = LoggerFactory.getLogger(ChunkedResultIterator.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(ChunkedResultIterator.class);
 
     private final ParallelIteratorFactory delegateIteratorFactory;
     private ImmutableBytesWritable lastKey = new ImmutableBytesWritable();
@@ -89,7 +89,7 @@ public class ChunkedResultIterator implements PeekingResultIterator {
 
         @Override
         public PeekingResultIterator newIterator(StatementContext context, ResultIterator scanner, Scan scan, String tableName, QueryPlan plan) throws SQLException {
-            if (logger.isDebugEnabled()) logger.debug(LogUtil.addCustomAnnotations("ChunkedResultIteratorFactory.newIterator over " + tableRef.getTable().getPhysicalName().getString() + " with " + scan, ScanUtil.getCustomAnnotations(scan)));
+            if (LOGGER.isDebugEnabled()) LOGGER.debug(LogUtil.addCustomAnnotations("ChunkedResultIteratorFactory.newIterator over " + tableRef.getTable().getPhysicalName().getString() + " with " + scan, ScanUtil.getCustomAnnotations(scan)));
             return new ChunkedResultIterator(delegateFactory, mutationState, context, tableRef, scan, 
                     mutationState.getConnection().getQueryServices().getProps().getLong(
                                 QueryServices.SCAN_RESULT_CHUNK_SIZE,
@@ -110,7 +110,7 @@ public class ChunkedResultIterator implements PeekingResultIterator {
         // Instantiate single chunk iterator and the delegate iterator in constructor
         // to get parallel scans kicked off in separate threads. If we delay this,
         // we'll get serialized behavior (see PHOENIX-
-        if (logger.isDebugEnabled()) logger.debug(LogUtil.addCustomAnnotations("Get first chunked result iterator over " + tableRef.getTable().getPhysicalName().getString() + " with " + scan, ScanUtil.getCustomAnnotations(scan)));
+        if (LOGGER.isDebugEnabled()) LOGGER.debug(LogUtil.addCustomAnnotations("Get first chunked result iterator over " + tableRef.getTable().getPhysicalName().getString() + " with " + scan, ScanUtil.getCustomAnnotations(scan)));
         ResultIterator singleChunkResultIterator = new SingleChunkResultIterator(scanner, chunkSize);
         String tableName = tableRef.getTable().getPhysicalName().getString();
         resultIterator = delegateIteratorFactory.newIterator(context, singleChunkResultIterator, scan, tableName, plan);
@@ -149,7 +149,7 @@ public class ChunkedResultIterator implements PeekingResultIterator {
             } else {
                 scan.setStartRow(ByteUtil.copyKeyBytesIfNecessary(lastKey));
             }
-            if (logger.isDebugEnabled()) logger.debug(LogUtil.addCustomAnnotations("Get next chunked result iterator over " + tableRef.getTable().getPhysicalName().getString() + " with " + scan, ScanUtil.getCustomAnnotations(scan)));
+            if (LOGGER.isDebugEnabled()) LOGGER.debug(LogUtil.addCustomAnnotations("Get next chunked result iterator over " + tableRef.getTable().getPhysicalName().getString() + " with " + scan, ScanUtil.getCustomAnnotations(scan)));
             String tableName = tableRef.getTable().getPhysicalName().getString();
             ReadMetricQueue readMetrics = context.getReadMetricsQueue();
             ScanMetricsHolder scanMetricsHolder = ScanMetricsHolder.getInstance(readMetrics, tableName, scan,
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java
index 262ae44..3d5c96b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java
@@ -52,7 +52,7 @@ import com.google.common.collect.Lists;
  * @since 0.1
  */
 public class ParallelIterators extends BaseResultIterators {
-	private static final Logger logger = LoggerFactory.getLogger(ParallelIterators.class);
+	private static final Logger LOGGER = LoggerFactory.getLogger(ParallelIterators.class);
 	private static final String NAME = "PARALLEL";
     private final ParallelIteratorFactory iteratorFactory;
     private final boolean initFirstScanOnly;
@@ -122,8 +122,8 @@ public class ParallelIterators extends BaseResultIterators {
                 @Override
                 public PeekingResultIterator call() throws Exception {
                     long startTime = System.currentTimeMillis();
-                    if (logger.isDebugEnabled()) {
-                        logger.debug(LogUtil.addCustomAnnotations("Id: " + scanId + ", Time: " + (System.currentTimeMillis() - startTime) + "ms, Scan: " + scan, ScanUtil.getCustomAnnotations(scan)));
+                    if (LOGGER.isDebugEnabled()) {
+                        LOGGER.debug(LogUtil.addCustomAnnotations("Id: " + scanId + ", Time: " + (System.currentTimeMillis() - startTime) + "ms, Scan: " + scan, ScanUtil.getCustomAnnotations(scan)));
                     }
                     PeekingResultIterator iterator = iteratorFactory.newIterator(context, tableResultItr, scan, physicalTableName, ParallelIterators.this.plan);
                     if (initFirstScanOnly) {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/RoundRobinResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/RoundRobinResultIterator.java
index 5624f5f..bc77c98 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/RoundRobinResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/RoundRobinResultIterator.java
@@ -49,7 +49,7 @@ import com.google.common.base.Throwables;
  */
 public class RoundRobinResultIterator implements ResultIterator {
 
-    private static final Logger logger = LoggerFactory.getLogger(RoundRobinResultIterator.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(RoundRobinResultIterator.class);
 
     private final int threshold;
 
@@ -223,8 +223,8 @@ public class RoundRobinResultIterator implements ResultIterator {
             final ConnectionQueryServices services = context.getConnection().getQueryServices();
             ExecutorService executor = services.getExecutor();
             numParallelFetches++;
-            if (logger.isDebugEnabled()) {
-                logger.debug("Performing parallel fetch for " + openIterators.size() + " iterators. ");
+            if (LOGGER.isDebugEnabled()) {
+                LOGGER.debug("Performing parallel fetch for " + openIterators.size() + " iterators. ");
             }
             for (final RoundRobinIterator itr : openIterators) {
                 Future<Tuple> future = executor.submit(new Callable<Tuple>() {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/SnapshotScanner.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/SnapshotScanner.java
index c25b516..f15c570 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/SnapshotScanner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/SnapshotScanner.java
@@ -22,8 +22,6 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.ConcurrentMap;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -57,6 +55,8 @@ import org.apache.phoenix.schema.stats.StatisticsCollector;
 import org.apache.phoenix.schema.stats.StatisticsWriter;
 import org.apache.phoenix.util.ScanUtil;
 import org.apache.phoenix.util.SchemaUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.util.Properties;
 
@@ -65,7 +65,7 @@ import java.util.Properties;
  */
 public class SnapshotScanner extends AbstractClientScanner {
 
-  private static final Log LOG = LogFactory.getLog(SnapshotScanner.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(SnapshotScanner.class);
   private final Scan scan;
   private RegionScanner scanner;
   private HRegion region;
@@ -75,7 +75,7 @@ public class SnapshotScanner extends AbstractClientScanner {
   public SnapshotScanner(Configuration conf, FileSystem fs, Path rootDir,
       TableDescriptor htd, RegionInfo hri,  Scan scan) throws Throwable{
 
-    LOG.info("Creating SnapshotScanner for region: " + hri);
+    LOGGER.info("Creating SnapshotScanner for region: " + hri);
 
     scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
     values = new ArrayList<>();
@@ -129,7 +129,7 @@ public class SnapshotScanner extends AbstractClientScanner {
         this.scanner.close();
         this.scanner = null;
       } catch (IOException e) {
-        LOG.warn("Exception while closing scanner", e);
+        LOGGER.warn("Exception while closing scanner", e);
       }
     }
     if (this.region != null) {
@@ -138,7 +138,7 @@ public class SnapshotScanner extends AbstractClientScanner {
         this.region.close(true);
         this.region = null;
       } catch (IOException e) {
-        LOG.warn("Exception while closing scanner", e);
+        LOGGER.warn("Exception while closing scanner", e);
       }
     }
   }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableResultIterator.java
index 267aa1d..3456ff2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableResultIterator.java
@@ -79,7 +79,7 @@ public class TableResultIterator implements ResultIterator {
     private final long renewLeaseThreshold;
     private final QueryPlan plan;
     private final ParallelScanGrouper scanGrouper;
-    private static final Logger logger = LoggerFactory.getLogger(TableResultIterator.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(TableResultIterator.class);
     private Tuple lastTuple = null;
     private ImmutableBytesWritable ptr = new ImmutableBytesWritable();
     @GuardedBy("renewLeaseLock")
@@ -187,7 +187,7 @@ public class TableResultIterator implements ResultIterator {
                             }
                         }
                         plan.getContext().getConnection().getQueryServices().clearTableRegionCache(htable.getName());
-                        logger.debug(
+                        LOGGER.debug(
                                 "Retrying when Hash Join cache is not found on the server ,by sending the cache again");
                         if (retry <= 0) {
                             throw e1;
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableSnapshotResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableSnapshotResultIterator.java
index 99d71b5..17c8e36 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableSnapshotResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableSnapshotResultIterator.java
@@ -24,8 +24,6 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.UUID;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -40,7 +38,8 @@ import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
 import org.apache.phoenix.monitoring.ScanMetricsHolder;
 import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.util.ServerUtil;
-
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Iterator to scan over a HBase snapshot based on input HBase Scan object.
@@ -52,7 +51,7 @@ import org.apache.phoenix.util.ServerUtil;
  */
 public class TableSnapshotResultIterator implements ResultIterator {
 
-  private static final Log LOG = LogFactory.getLog(TableSnapshotResultIterator.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(TableSnapshotResultIterator.class);
 
   private final Scan scan;
   private ResultIterator scanIterator;
@@ -101,7 +100,7 @@ public class TableSnapshotResultIterator implements ResultIterator {
     }
 
     this.regions.sort(RegionInfo.COMPARATOR);
-    LOG.info("Initialization complete with " + regions.size() + " valid regions");
+    LOGGER.info("Initialization complete with " + regions.size() + " valid regions");
   }
 
   /**
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
index 67ac9c9..8ac5375 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
@@ -61,7 +61,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
  * @since 0.1
  */
 public final class PhoenixDriver extends PhoenixEmbeddedDriver {
-    private static final Logger logger = LoggerFactory.getLogger(PhoenixDriver.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixDriver.class);
     public static final PhoenixDriver INSTANCE;
     private static volatile String driverShutdownMsg;
     static {
@@ -100,11 +100,11 @@ public final class PhoenixDriver extends PhoenixEmbeddedDriver {
                             // policy). We don't care about any exceptions, we're going down anyways.
                             future.get(millisBeforeShutdown, TimeUnit.MILLISECONDS);
                         } catch (ExecutionException e) {
-                            logger.warn("Failed to close instance", e);
+                            LOGGER.warn("Failed to close instance", e);
                         } catch (InterruptedException e) {
-                            logger.warn("Interrupted waiting to close instance", e);
+                            LOGGER.warn("Interrupted waiting to close instance", e);
                         } catch (TimeoutException e) {
-                            logger.warn("Timed out waiting to close instance", e);
+                            LOGGER.warn("Timed out waiting to close instance", e);
                         } finally {
                             // We're going down, but try to clean up.
                             svc.shutdownNow();
@@ -116,7 +116,7 @@ public final class PhoenixDriver extends PhoenixEmbeddedDriver {
                 // Don't want to register it if we're already in the process of going down.
                 DriverManager.registerDriver(INSTANCE);
             } catch (IllegalStateException e) {
-                logger.warn("Failed to register PhoenixDriver shutdown hook as the JVM is already shutting down");
+                LOGGER.warn("Failed to register PhoenixDriver shutdown hook as the JVM is already shutting down");
 
                 // Close the instance now because we don't have the shutdown hook
                 closeInstance(INSTANCE);
@@ -132,7 +132,7 @@ public final class PhoenixDriver extends PhoenixEmbeddedDriver {
         try {
             instance.close();
         } catch (SQLException e) {
-            logger.warn("Unable to close PhoenixDriver on shutdown", e);
+            LOGGER.warn("Unable to close PhoenixDriver on shutdown", e);
         } finally {
             driverShutdownMsg = "Phoenix driver closed because server is shutting down";
         }
@@ -156,14 +156,14 @@ public final class PhoenixDriver extends PhoenixEmbeddedDriver {
                 @Override
                 public void onRemoval(RemovalNotification<ConnectionInfo, ConnectionQueryServices> notification) {
                     String connInfoIdentifier = notification.getKey().toString();
-                    logger.debug("Expiring " + connInfoIdentifier + " because of "
+                    LOGGER.debug("Expiring " + connInfoIdentifier + " because of "
                         + notification.getCause().name());
 
                     try {
                         notification.getValue().close();
                     }
                     catch (SQLException se) {
-                        logger.error("Error while closing expired cache connection " + connInfoIdentifier, se);
+                        LOGGER.error("Error while closing expired cache connection " + connInfoIdentifier, se);
                     }
                 }
             };
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java
index 2669360..0ed9ecd 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java
@@ -33,8 +33,6 @@ import java.util.logging.Logger;
 
 import javax.annotation.concurrent.Immutable;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.security.User;
@@ -69,7 +67,7 @@ public abstract class PhoenixEmbeddedDriver implements Driver, SQLCloseable {
     /**
      * The protocol for Phoenix Network Client 
      */ 
-    private static final Log LOG = LogFactory.getLog(PhoenixEmbeddedDriver.class);
+    private static final org.slf4j.Logger LOGGER = LoggerFactory.getLogger(PhoenixEmbeddedDriver.class);
     private final static String DNC_JDBC_PROTOCOL_SUFFIX = "//";
     private final static String DRIVER_NAME = "PhoenixEmbeddedDriver";
     private static final String TERMINATOR = "" + PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR;
@@ -197,7 +195,8 @@ public abstract class PhoenixEmbeddedDriver implements Driver, SQLCloseable {
      * @since 0.1.1
      */
     public static class ConnectionInfo {
-        private static final org.slf4j.Logger logger = LoggerFactory.getLogger(ConnectionInfo.class);
+        private static final org.slf4j.Logger LOGGER =
+                LoggerFactory.getLogger(ConnectionInfo.class);
         private static final Object KERBEROS_LOGIN_LOCK = new Object();
         private static final char WINDOWS_SEPARATOR_CHAR = '\\';
         private static final String REALM_EQUIVALENCY_WARNING_MSG = "Provided principal does not contan a realm and the default realm cannot be determined. Ignoring realm equivalency check.";
@@ -378,23 +377,25 @@ public abstract class PhoenixEmbeddedDriver implements Driver, SQLCloseable {
                                 currentUser = UserGroupInformation.getCurrentUser();
                                 if (!currentUser.hasKerberosCredentials() || !isSameName(currentUser.getUserName(), principal)) {
                                     final Configuration config = getConfiguration(props, info, principal, keytab);
-                                    logger.info("Trying to connect to a secure cluster as {} with keytab {}", config.get(QueryServices.HBASE_CLIENT_PRINCIPAL),
+                                    LOGGER.info("Trying to connect to a secure cluster as {} " +
+                                                    "with keytab {}",
+                                            config.get(QueryServices.HBASE_CLIENT_PRINCIPAL),
                                             config.get(QueryServices.HBASE_CLIENT_KEYTAB));
                                     UserGroupInformation.setConfiguration(config);
                                     User.login(config, QueryServices.HBASE_CLIENT_KEYTAB, QueryServices.HBASE_CLIENT_PRINCIPAL, null);
-                                    logger.info("Successful login to secure cluster");
+                                    LOGGER.info("Successful login to secure cluster");
                                 }
                             }
                         } else {
                             // The user already has Kerberos creds, so there isn't anything to change in the ConnectionInfo.
-                            logger.debug("Already logged in as {}", currentUser);
+                            LOGGER.debug("Already logged in as {}", currentUser);
                         }
                     } catch (IOException e) {
                         throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ESTABLISH_CONNECTION)
                             .setRootCause(e).build().buildException();
                     }
                 } else {
-                    logger.debug("Principal and keytab not provided, not attempting Kerberos login");
+                    LOGGER.debug("Principal and keytab not provided, not attempting Kerberos login");
                 }
             } // else, no connection, no need to login
             // Will use the current User from UGI
@@ -416,12 +417,12 @@ public abstract class PhoenixEmbeddedDriver implements Driver, SQLCloseable {
             try {
                 return KerberosUtil.getDefaultRealm();
             } catch (Exception e) {
-                if (LOG.isDebugEnabled()) {
+                if (LOGGER.isDebugEnabled()) {
                     // Include the stacktrace at DEBUG
-                    LOG.debug(REALM_EQUIVALENCY_WARNING_MSG, e);
+                    LOGGER.debug(REALM_EQUIVALENCY_WARNING_MSG, e);
                 } else {
                     // Limit the content at WARN
-                    LOG.warn(REALM_EQUIVALENCY_WARNING_MSG);
+                    LOGGER.warn(REALM_EQUIVALENCY_WARNING_MSG);
                 }
             }
             return null;
@@ -633,7 +634,8 @@ public abstract class PhoenixEmbeddedDriver implements Driver, SQLCloseable {
                 throw getMalFormedUrlException(url);
             }
             String znodeParent = config.get(HConstants.ZOOKEEPER_ZNODE_PARENT);
-            LOG.debug("Getting default jdbc connection url " + quorum + ":" + port + ":" + znodeParent);
+            LOGGER.debug("Getting default jdbc connection url " + quorum + ":" +
+                    port + ":" + znodeParent);
             return new ConnectionInfo(quorum, port, znodeParent);
         }
     }
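
The Kerberos login hunk above uses slf4j's parameterized form ("... as {} with keytab {}", ...), which fills in the placeholders only when the level is enabled, so simple messages need no isDebugEnabled() guard. A sketch of that style, with hypothetical names:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Hypothetical sketch: {} placeholders defer message assembly until the level is enabled.
    public class ExampleConnectionLogging {
        private static final Logger LOGGER = LoggerFactory.getLogger(ExampleConnectionLogging.class);

        void logLogin(String principal, String keytab) {
            LOGGER.info("Trying to connect to a secure cluster as {} with keytab {}", principal, keytab);
        }

        void logNoCreds() {
            LOGGER.debug("Principal and keytab not provided, not attempting Kerberos login");
        }
    }
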
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixResultSet.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixResultSet.java
index 016aa8f..3a82af2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixResultSet.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixResultSet.java
@@ -51,8 +51,6 @@ import java.util.Map;
 import com.google.common.primitives.Bytes;
 import com.google.protobuf.InvalidProtocolBufferException;
 import org.apache.commons.lang.ArrayUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -94,6 +92,8 @@ import org.apache.phoenix.schema.types.PTinyint;
 import org.apache.phoenix.schema.types.PVarbinary;
 import org.apache.phoenix.schema.types.PVarchar;
 import org.apache.phoenix.util.SQLCloseable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Throwables;
@@ -127,7 +127,7 @@ import org.apache.phoenix.util.SchemaUtil;
  */
 public class PhoenixResultSet implements ResultSet, SQLCloseable {
 
-    private static final Log LOG = LogFactory.getLog(PhoenixResultSet.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixResultSet.class);
 
     private final static String STRING_FALSE = "0";
     private final static BigDecimal BIG_DECIMAL_FALSE = BigDecimal.valueOf(0);
@@ -922,7 +922,7 @@ public class PhoenixResultSet implements ResultSet, SQLCloseable {
 
     @Override
     public void setFetchSize(int rows) throws SQLException {
-        LOG.warn("Ignoring setFetchSize(" + rows + ")");
+        LOGGER.warn("Ignoring setFetchSize(" + rows + ")");
     }
 
     @Override
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
index b628a00..95ae1e3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
@@ -214,7 +214,7 @@ import com.google.common.math.IntMath;
  */
 public class PhoenixStatement implements Statement, SQLCloseable {
 	
-    private static final Logger logger = LoggerFactory.getLogger(PhoenixStatement.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixStatement.class);
     
     public enum Operation {
         QUERY("queried", false),
@@ -311,9 +311,9 @@ public class PhoenixStatement implements Statement, SQLCloseable {
                          // this will create its own trace internally, so we don't wrap this
                          // whole thing in tracing
                         ResultIterator resultIterator = plan.iterator();
-                        if (logger.isDebugEnabled()) {
+                        if (LOGGER.isDebugEnabled()) {
                             String explainPlan = QueryUtil.getExplainPlan(resultIterator);
-                            logger.debug(LogUtil.addCustomAnnotations("Explain plan: " + explainPlan, connection));
+                            LOGGER.debug(LogUtil.addCustomAnnotations("Explain plan: " + explainPlan, connection));
                         }
                         StatementContext context = plan.getContext();
                         context.setQueryLogger(queryLogger);
@@ -338,8 +338,8 @@ public class PhoenixStatement implements Statement, SQLCloseable {
                     //Force update cache and retry if meta not found error occurs
                     catch (MetaDataEntityNotFoundException e) {
                         if(doRetryOnMetaNotFoundError && e.getTableName()!=null){
-                            if(logger.isDebugEnabled())
-                                logger.debug("Reloading table "+ e.getTableName()+" data from server");
+                            if(LOGGER.isDebugEnabled())
+                                LOGGER.debug("Reloading table "+ e.getTableName()+" data from server");
                             if(new MetaDataClient(connection).updateCache(connection.getTenantId(),
                                 e.getSchemaName(), e.getTableName(), true).wasUpdated()){
                                 //TODO we can log retry count and error for debugging in LOG table
@@ -424,8 +424,8 @@ public class PhoenixStatement implements Statement, SQLCloseable {
                             //Force update cache and retry if meta not found error occurs
                             catch (MetaDataEntityNotFoundException e) {
                                 if(doRetryOnMetaNotFoundError && e.getTableName()!=null){
-                                    if(logger.isDebugEnabled())
-                                        logger.debug("Reloading table "+ e.getTableName()+" data from server");
+                                    if(LOGGER.isDebugEnabled())
+                                        LOGGER.debug("Reloading table "+ e.getTableName()+" data from server");
                                     if(new MetaDataClient(connection).updateCache(connection.getTenantId(),
                                         e.getSchemaName(), e.getTableName(), true).wasUpdated()){
                                         return executeMutation(stmt, false);
@@ -1762,8 +1762,8 @@ public class PhoenixStatement implements Statement, SQLCloseable {
     }
 
     public MutationPlan compileMutation(String sql) throws SQLException {
-        if (logger.isDebugEnabled()) {
-            logger.debug(LogUtil.addCustomAnnotations("Execute update: " + sql, connection));
+        if (LOGGER.isDebugEnabled()) {
+            LOGGER.debug(LogUtil.addCustomAnnotations("Execute update: " + sql, connection));
         }
         CompilableStatement stmt = parseStatement(sql);
         return compileMutation(stmt, sql);
@@ -1795,8 +1795,8 @@ public class PhoenixStatement implements Statement, SQLCloseable {
     
     @Override
     public ResultSet executeQuery(String sql) throws SQLException {
-        if (logger.isDebugEnabled()) {
-            logger.debug(LogUtil.addCustomAnnotations("Execute query: " + sql, connection));
+        if (LOGGER.isDebugEnabled()) {
+            LOGGER.debug(LogUtil.addCustomAnnotations("Execute query: " + sql, connection));
         }
         
         CompilableStatement stmt = parseStatement(sql);
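
The PhoenixStatement hunks above keep their isDebugEnabled() guards because the message is built by LogUtil.addCustomAnnotations(...), work that is worth skipping when DEBUG is off. A sketch of that guarded style; the annotate() helper below is a stand-in, not a Phoenix API:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Sketch only: keep the guard when assembling the message is itself non-trivial.
    public class ExampleGuardedLogging {
        private static final Logger LOGGER = LoggerFactory.getLogger(ExampleGuardedLogging.class);

        void logQuery(String sql) {
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug(annotate("Execute query: " + sql));
            }
        }

        // Stand-in for per-connection annotation logic (hypothetical helper).
        private String annotate(String msg) {
            return "[connection-annotations] " + msg;
        }
    }
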
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/log/QueryLogger.java b/phoenix-core/src/main/java/org/apache/phoenix/log/QueryLogger.java
index ef5559c..27d4ba4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/log/QueryLogger.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/log/QueryLogger.java
@@ -20,11 +20,11 @@ package org.apache.phoenix.log;
 import java.util.Map;
 import java.util.UUID;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.monitoring.MetricType;
 import org.apache.phoenix.util.EnvironmentEdgeManager;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.ImmutableMap.Builder;
@@ -41,7 +41,7 @@ public class QueryLogger {
     private LogLevel logLevel;
     private Builder<QueryLogInfo, Object> queryLogBuilder = ImmutableMap.builder();
     private boolean isSynced;
-    private static final Log LOG = LogFactory.getLog(QueryLoggerDisruptor.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(QueryLogger.class);
     
     private QueryLogger(PhoenixConnection connection) {
         this.queryId = UUID.randomUUID().toString();
@@ -105,15 +105,15 @@ public class QueryLogger {
         try {
             queryLogBuilder.put(queryLogInfo, info);
         } catch (Exception e) {
-            LOG.warn("Unable to add log info because of " + e.getMessage());
+            LOGGER.warn("Unable to add log info because of " + e.getMessage());
         }
     }
     
     private boolean publishLogs(RingBufferEventTranslator translator) {
         if (queryDisruptor == null) { return false; }
         boolean isLogged = queryDisruptor.tryPublish(translator);
-        if (!isLogged && LOG.isDebugEnabled()) {
-            LOG.debug("Unable to write query log in table as ring buffer queue is full!!");
+        if (!isLogged && LOGGER.isDebugEnabled()) {
+            LOGGER.debug("Unable to write query log in table as ring buffer queue is full!!");
         }
         return isLogged;
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/log/QueryLoggerDisruptor.java b/phoenix-core/src/main/java/org/apache/phoenix/log/QueryLoggerDisruptor.java
index 1f2240e..c4f227a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/log/QueryLoggerDisruptor.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/log/QueryLoggerDisruptor.java
@@ -24,10 +24,10 @@ import java.util.concurrent.Executors;
 import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.phoenix.query.QueryServices;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import com.lmax.disruptor.BlockingWaitStrategy;
@@ -44,7 +44,7 @@ public class QueryLoggerDisruptor implements Closeable{
     private boolean isClosed = false;
     //number of elements to create within the ring buffer.
     private static final int RING_BUFFER_SIZE = 8 * 1024;
-    private static final Log LOG = LogFactory.getLog(QueryLoggerDisruptor.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(QueryLoggerDisruptor.class);
     private static final String DEFAULT_WAIT_STRATEGY = BlockingWaitStrategy.class.getName();
     
     public QueryLoggerDisruptor(Configuration configuration) throws SQLException{
@@ -76,7 +76,7 @@ public class QueryLoggerDisruptor implements Closeable{
 
         final QueryLogDetailsEventHandler[] handlers = { new QueryLogDetailsEventHandler(configuration) };
         disruptor.handleEventsWith(handlers);
-        LOG.info("Starting  QueryLoggerDisruptor for with ringbufferSize=" + disruptor.getRingBuffer().getBufferSize()
+        LOGGER.info("Starting  QueryLoggerDisruptor for with ringbufferSize=" + disruptor.getRingBuffer().getBufferSize()
                 + ", waitStrategy=" + waitStrategy.getClass().getSimpleName() + ", " + "exceptionHandler="
                 + errorHandler + "...");
         disruptor.start();
@@ -103,7 +103,7 @@ public class QueryLoggerDisruptor implements Closeable{
     @Override
     public void close() throws IOException {
         isClosed = true;
-        LOG.info("Shutting down QueryLoggerDisruptor..");
+        LOGGER.info("Shutting down QueryLoggerDisruptor..");
         try {
             //we can wait for 2 seconds, so that backlog can be committed
             disruptor.shutdown(2, TimeUnit.SECONDS);
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/log/TableLogWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/log/TableLogWriter.java
index 0209951..6a7c0b6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/log/TableLogWriter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/log/TableLogWriter.java
@@ -27,11 +27,11 @@ import java.sql.SQLException;
 import java.util.HashMap;
 import java.util.Map;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.phoenix.monitoring.MetricType;
 import org.apache.phoenix.util.QueryUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.ImmutableMap;
 
@@ -40,7 +40,7 @@ import com.google.common.collect.ImmutableMap;
  * 
  */
 public class TableLogWriter implements LogWriter {
-    private static final Log LOG = LogFactory.getLog(LogWriter.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(LogWriter.class);
     private Connection connection;
     private boolean isClosed;
     private PreparedStatement upsertStatement;
@@ -84,7 +84,7 @@ public class TableLogWriter implements LogWriter {
     @Override
     public void write(RingBufferEvent event) throws SQLException, IOException, ClassNotFoundException {
         if (isClosed()) {
-            LOG.warn("Unable to commit query log as Log committer is already closed");
+            LOGGER.warn("Unable to commit query log as Log committer is already closed");
             return;
         }
         if (connection == null) {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
index cc6feb3..18e0305 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
@@ -76,7 +76,7 @@ import com.google.common.collect.Lists;
  */
 public abstract class AbstractBulkLoadTool extends Configured implements Tool {
 
-    protected static final Logger LOG = LoggerFactory.getLogger(AbstractBulkLoadTool.class);
+    protected static final Logger LOGGER = LoggerFactory.getLogger(AbstractBulkLoadTool.class);
 
     static final Option ZK_QUORUM_OPT = new Option("z", "zookeeper", true, "Supply zookeeper connection details (optional)");
     static final Option INPUT_PATH_OPT = new Option("i", "input", true, "Input path(s) (comma-separated, mandatory)");
@@ -195,10 +195,10 @@ public abstract class AbstractBulkLoadTool extends Configured implements Tool {
             // ZK_QUORUM_OPT is optional, but if it's there, use it for both the conn and the job.
             String zkQuorum = cmdLine.getOptionValue(ZK_QUORUM_OPT.getOpt());
             PhoenixDriver.ConnectionInfo info = PhoenixDriver.ConnectionInfo.create(zkQuorum);
-            LOG.info("Configuring HBase connection to {}", info);
+            LOGGER.info("Configuring HBase connection to {}", info);
             for (Map.Entry<String,String> entry : info.asProps()) {
-                if (LOG.isDebugEnabled()) {
-                    LOG.debug("Setting {} = {}", entry.getKey(), entry.getValue());
+                if (LOGGER.isDebugEnabled()) {
+                    LOGGER.debug("Setting {} = {}", entry.getKey(), entry.getValue());
                 }
                 conf.set(entry.getKey(), entry.getValue());
             }
@@ -209,8 +209,8 @@ public abstract class AbstractBulkLoadTool extends Configured implements Tool {
         }
 
         final Connection conn = QueryUtil.getConnection(conf);
-        if (LOG.isDebugEnabled()) {
-            LOG.debug("Reading columns from {} :: {}", ((PhoenixConnection) conn).getURL(),
+        if (LOGGER.isDebugEnabled()) {
+            LOGGER.debug("Reading columns from {} :: {}", ((PhoenixConnection) conn).getURL(),
                     qualifiedTableName);
         }
         List<ColumnInfo> importColumns = buildImportColumns(conn, cmdLine, qualifiedTableName);
@@ -318,7 +318,7 @@ public abstract class AbstractBulkLoadTool extends Configured implements Tool {
             // give subclasses their hook
             setupJob(job);
 
-            LOG.info("Running MapReduce import job from {} to {}", inputPaths, outputPath);
+            LOGGER.info("Running MapReduce import job from {} to {}", inputPaths, outputPath);
             boolean success = job.waitForCompletion(true);
 
             if (success) {
@@ -328,7 +328,7 @@ public abstract class AbstractBulkLoadTool extends Configured implements Tool {
                                 TableName.valueOf(qualifiedTableName));
                         if(!IndexUtil.matchingSplitKeys(splitKeysBeforeJob,
                                 regionLocator.getStartKeys())) {
-                            LOG.error("The table " + qualifiedTableName + " has local indexes and"
+                            LOGGER.error("The table " + qualifiedTableName + " has local indexes and"
                                     + " there is split key mismatch before and after running"
                                     + " bulkload job. Please rerun the job otherwise there may be"
                                     + " inconsistencies between actual data and index data.");
@@ -338,11 +338,11 @@ public abstract class AbstractBulkLoadTool extends Configured implements Tool {
                         if (regionLocator != null) regionLocator.close();
                     }
                 }
-                LOG.info("Loading HFiles from {}", outputPath);
+                LOGGER.info("Loading HFiles from {}", outputPath);
                 completebulkload(conf,outputPath,tablesToBeLoaded);
-                LOG.info("Removing output directory {}", outputPath);
+                LOGGER.info("Removing output directory {}", outputPath);
                 if(!outputPath.getFileSystem(conf).delete(outputPath, true)) {
-                    LOG.error("Failed to delete the output directory {}", outputPath);
+                    LOGGER.error("Failed to delete the output directory {}", outputPath);
                 }
                 return 0;
             } else {
@@ -364,10 +364,10 @@ public abstract class AbstractBulkLoadTool extends Configured implements Tool {
             try(org.apache.hadoop.hbase.client.Connection hbaseConn =
                     ConnectionFactory.createConnection(conf);
                     Table htable = hbaseConn.getTable(TableName.valueOf(tableName))) {
-                LOG.info("Loading HFiles for {} from {}", tableName , tableOutputPath);
+                LOGGER.info("Loading HFiles for {} from {}", tableName , tableOutputPath);
                 loader.doBulkLoad(tableOutputPath, hbaseConn.getAdmin(), htable,
                         hbaseConn.getRegionLocator(TableName.valueOf(tableName)));
-                LOG.info("Incremental load complete for table=" + tableName);
+                LOGGER.info("Incremental load complete for table=" + tableName);
             }
         }
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapper.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapper.java
index 6d2f719..2940672 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapper.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapper.java
@@ -76,7 +76,7 @@ import com.google.common.collect.Lists;
 public abstract class FormatToBytesWritableMapper<RECORD> extends Mapper<LongWritable, Text, TableRowkeyPair,
         ImmutableBytesWritable> {
 
-    protected static final Logger LOG = LoggerFactory.getLogger(FormatToBytesWritableMapper.class);
+    protected static final Logger LOGGER = LoggerFactory.getLogger(FormatToBytesWritableMapper.class);
 
     protected static final String COUNTER_GROUP_NAME = "Phoenix MapReduce Import";
 
@@ -394,7 +394,7 @@ public abstract class FormatToBytesWritableMapper<RECORD> extends Mapper<LongWri
 
         @Override
         public void errorOnRecord(T record, Throwable throwable) {
-            LOG.error("Error on record " + record, throwable);
+            LOGGER.error("Error on record " + record, throwable);
             context.getCounter(COUNTER_GROUP_NAME, "Errors on records").increment(1L);
             if (!ignoreRecordErrors) {
                 Throwables.propagate(throwable);
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueReducer.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueReducer.java
index eb417a7..0cc89f0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueReducer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueReducer.java
@@ -59,7 +59,7 @@ import org.slf4j.LoggerFactory;
 public class FormatToKeyValueReducer
         extends Reducer<TableRowkeyPair, ImmutableBytesWritable, TableRowkeyPair, KeyValue> {
 
-    protected static final Logger LOG = LoggerFactory.getLogger(FormatToKeyValueReducer.class);
+    protected static final Logger LOGGER = LoggerFactory.getLogger(FormatToKeyValueReducer.class);
 
 
     protected List<String> tableNames;
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
index 1103f3e..f4458ce 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
@@ -90,7 +90,7 @@ import com.google.common.collect.Sets;
  */
 public class MultiHfileOutputFormat extends FileOutputFormat<TableRowkeyPair, Cell> {
 
-    private static final Logger LOG = LoggerFactory.getLogger(MultiHfileOutputFormat.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(MultiHfileOutputFormat.class);
 
     private static final String COMPRESSION_FAMILIES_CONF_KEY =
         "hbase.hfileoutputformat.families.compression";
@@ -204,7 +204,7 @@ public class MultiHfileOutputFormat extends FileOutputFormat<TableRowkeyPair, Ce
           private void rollWriters() throws IOException {
               for (WriterLength wl : this.writers.values()) {
                   if (wl.writer != null) {
-                      LOG.info("Writer=" + wl.writer.getPath() +
+                      LOGGER.info("Writer=" + wl.writer.getPath() +
                               ((wl.written == 0)? "": ", wrote=" + wl.written));
                       close(wl.writer);
                   }
@@ -478,7 +478,7 @@ public class MultiHfileOutputFormat extends FileOutputFormat<TableRowkeyPair, Ce
     private static void writePartitions(Configuration conf, Path partitionsPath,
             Set<TableRowkeyPair> tablesStartKeys) throws IOException {
         
-        LOG.info("Writing partition information to " + partitionsPath);
+        LOGGER.info("Writing partition information to " + partitionsPath);
         if (tablesStartKeys.isEmpty()) {
           throw new IllegalArgumentException("No regions passed");
         }
@@ -700,11 +700,11 @@ public class MultiHfileOutputFormat extends FileOutputFormat<TableRowkeyPair, Ce
                conf.set(tableName, tableDefns);
                
                TargetTableRef tbl = TargetTableRefFunctions.FROM_JSON.apply(tableDefns);
-               LOG.info(" the table logical name is "+ tbl.getLogicalName());
+               LOGGER.info(" the table logical name is "+ tbl.getLogicalName());
            }
        }
     
-       LOG.info("Configuring " + tablesStartKeys.size() + " reduce partitions to match current region count");
+       LOGGER.info("Configuring " + tablesStartKeys.size() + " reduce partitions to match current region count");
        job.setNumReduceTasks(tablesStartKeys.size());
 
        configurePartitioner(job, tablesStartKeys);
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/OrphanViewTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/OrphanViewTool.java
index fba01a3..db58deb 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/OrphanViewTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/OrphanViewTool.java
@@ -79,7 +79,7 @@ import org.slf4j.LoggerFactory;
  *
  */
 public class OrphanViewTool extends Configured implements Tool {
-    private static final Logger LOG = LoggerFactory.getLogger(OrphanViewTool.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(OrphanViewTool.class);
     // Query all the views that are not "MAPPED" views
     private static final String viewQuery = "SELECT " +
             TENANT_ID + ", " +
@@ -416,7 +416,7 @@ public class OrphanViewTool extends Configured implements Tool {
                         new DropTableStatement(pTableName, PTableType.VIEW, false, true, true));
             }
             catch (TableNotFoundException e) {
-                LOG.info("Ignoring view " + pTableName + " as it has already been dropped");
+                LOGGER.info("Ignoring view " + pTableName + " as it has already been dropped");
             }
         } finally {
             if (newConn) {
@@ -805,7 +805,7 @@ public class OrphanViewTool extends Configured implements Tool {
                 connection.close();
             }
         } catch (SQLException sqlE) {
-            LOG.error("Failed to close connection: ", sqlE);
+            LOGGER.error("Failed to close connection: ", sqlE);
             throw new RuntimeException("Failed to close connection with exception: ", sqlE);
         }
     }
@@ -883,7 +883,7 @@ public class OrphanViewTool extends Configured implements Tool {
             }
             return 0;
         } catch (Exception ex) {
-            LOG.error("Orphan View Tool : An exception occurred " + ExceptionUtils.getMessage(ex) + " at:\n" +
+            LOGGER.error("Orphan View Tool : An exception occurred " + ExceptionUtils.getMessage(ex) + " at:\n" +
                     ExceptionUtils.getStackTrace(ex));
             return -1;
         } finally {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
index 60e6370..732aaf8 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
@@ -19,8 +19,6 @@ package org.apache.phoenix.mapreduce;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.TableName;
@@ -46,6 +44,8 @@ import org.apache.phoenix.query.HBaseFactoryProvider;
 import org.apache.phoenix.query.KeyRange;
 import org.apache.phoenix.schema.stats.StatisticsUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.sql.Connection;
@@ -61,7 +61,7 @@ import java.util.Properties;
  */
 public class PhoenixInputFormat<T extends DBWritable> extends InputFormat<NullWritable,T> {
 
-    private static final Log LOG = LogFactory.getLog(PhoenixInputFormat.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixInputFormat.class);
        
     /**
      * instantiated by framework
@@ -120,8 +120,8 @@ public class PhoenixInputFormat<T extends DBWritable> extends InputFormat<NullWr
 
             if(splitByStats) {
                 for(Scan aScan: scans) {
-                    if (LOG.isDebugEnabled()) {
-                        LOG.debug("Split for  scan : " + aScan + "with scanAttribute : " + aScan
+                    if (LOGGER.isDebugEnabled()) {
+                        LOGGER.debug("Split for scan : " + aScan + " with scanAttribute : " + aScan
                                 .getAttributesMap() + " [scanCache, cacheBlock, scanBatch] : [" +
                                 aScan.getCaching() + ", " + aScan.getCacheBlocks() + ", " + aScan
                                 .getBatch() + "] and  regionLocation : " + regionLocation);
@@ -130,18 +130,18 @@ public class PhoenixInputFormat<T extends DBWritable> extends InputFormat<NullWr
                     psplits.add(new PhoenixInputSplit(Collections.singletonList(aScan), regionSize, regionLocation));
                 }
                 } else {
-                if (LOG.isDebugEnabled()) {
-                    LOG.debug("Scan count[" + scans.size() + "] : " + Bytes.toStringBinary(scans
+                if (LOGGER.isDebugEnabled()) {
+                    LOGGER.debug("Scan count[" + scans.size() + "] : " + Bytes.toStringBinary(scans
                             .get(0).getStartRow()) + " ~ " + Bytes.toStringBinary(scans.get(scans
                             .size() - 1).getStopRow()));
-                    LOG.debug("First scan : " + scans.get(0) + "with scanAttribute : " + scans
+                    LOGGER.debug("First scan : " + scans.get(0) + " with scanAttribute : " + scans
                             .get(0).getAttributesMap() + " [scanCache, cacheBlock, scanBatch] : " +
                             "[" + scans.get(0).getCaching() + ", " + scans.get(0).getCacheBlocks()
                             + ", " + scans.get(0).getBatch() + "] and  regionLocation : " +
                             regionLocation);
 
                     for (int i = 0, limit = scans.size(); i < limit; i++) {
-                        LOG.debug("EXPECTED_UPPER_REGION_KEY[" + i + "] : " + Bytes
+                        LOGGER.debug("EXPECTED_UPPER_REGION_KEY[" + i + "] : " + Bytes
                                 .toStringBinary(scans.get(i).getAttribute
                                         (BaseScannerRegionObserver.EXPECTED_UPPER_REGION_KEY)));
                     }
@@ -217,7 +217,7 @@ public class PhoenixInputFormat<T extends DBWritable> extends InputFormat<NullWr
               return queryPlan;
             }
         } catch (Exception exception) {
-            LOG.error(String.format("Failed to get the query plan with error [%s]",
+            LOGGER.error(String.format("Failed to get the query plan with error [%s]",
                 exception.getMessage()));
             throw new RuntimeException(exception);
         }
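
The PhoenixInputFormat hunk above shows the mechanical swap applied across the module: commons-logging declarations are replaced by their slf4j equivalents. A hedged before/after sketch with a hypothetical class name:

    // Before: commons-logging
    //   import org.apache.commons.logging.Log;
    //   import org.apache.commons.logging.LogFactory;
    //   private static final Log LOG = LogFactory.getLog(SomeMapReduceClass.class);

    // After: slf4j
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class SomeMapReduceClass {
        private static final Logger LOGGER = LoggerFactory.getLogger(SomeMapReduceClass.class);
    }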
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixOutputFormat.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixOutputFormat.java
index 4217e40..055ce1f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixOutputFormat.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixOutputFormat.java
@@ -22,8 +22,6 @@ import java.sql.SQLException;
 import java.util.Collections;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.OutputCommitter;
@@ -31,13 +29,15 @@ import org.apache.hadoop.mapreduce.OutputFormat;
 import org.apache.hadoop.mapreduce.RecordWriter;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.mapreduce.lib.db.DBWritable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * {@link OutputFormat} implementation for Phoenix.
  *
  */
 public class PhoenixOutputFormat <T extends DBWritable> extends OutputFormat<NullWritable,T> {
-    private static final Log LOG = LogFactory.getLog(PhoenixOutputFormat.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixOutputFormat.class);
     private final Set<String> propsToIgnore;
     
     public PhoenixOutputFormat() {
@@ -65,7 +65,7 @@ public class PhoenixOutputFormat <T extends DBWritable> extends OutputFormat<Nul
         try {
             return new PhoenixRecordWriter<T>(context.getConfiguration(), propsToIgnore);
         } catch (SQLException e) {
-            LOG.error("Error calling PhoenixRecordWriter "  + e.getMessage());
+            LOGGER.error("Error calling PhoenixRecordWriter "  + e.getMessage());
             throw new RuntimeException(e);
         }
     }
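
Where only e.getMessage() is logged, as in the PhoenixOutputFormat hunk above, the stack trace is lost. A small sketch of the slf4j idiom that keeps it (hypothetical class, not part of the commit):

    import java.sql.SQLException;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class RecordWriterErrorSketch {
        private static final Logger LOGGER = LoggerFactory.getLogger(RecordWriterErrorSketch.class);

        void handle(SQLException e) {
            // Message only, stack trace discarded:
            //   LOGGER.error("Error calling PhoenixRecordWriter " + e.getMessage());
            // Passing the exception as the last argument logs the full stack trace:
            LOGGER.error("Error calling PhoenixRecordWriter", e);
        }
    }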
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordReader.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordReader.java
index 4b4fd1b..41f6a18 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordReader.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordReader.java
@@ -22,8 +22,6 @@ import java.sql.SQLException;
 import java.util.Arrays;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Scan;
@@ -51,6 +49,8 @@ import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
 import org.apache.phoenix.monitoring.ReadMetricQueue;
 import org.apache.phoenix.monitoring.ScanMetricsHolder;
 import org.apache.phoenix.query.ConnectionQueryServices;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Preconditions;
 import com.google.common.base.Throwables;
@@ -61,7 +61,7 @@ import com.google.common.collect.Lists;
  */
 public class PhoenixRecordReader<T extends DBWritable> extends RecordReader<NullWritable,T> {
     
-    private static final Log LOG = LogFactory.getLog(PhoenixRecordReader.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixRecordReader.class);
     protected final Configuration  configuration;
     protected final QueryPlan queryPlan;
     private NullWritable key =  NullWritable.get();
@@ -84,7 +84,7 @@ public class PhoenixRecordReader<T extends DBWritable> extends RecordReader<Null
            try {
                resultIterator.close();
         } catch (SQLException e) {
-           LOG.error(" Error closing resultset.");
+           LOGGER.error(" Error closing resultset.");
            throw new RuntimeException(e);
         }
        }
@@ -110,7 +110,7 @@ public class PhoenixRecordReader<T extends DBWritable> extends RecordReader<Null
         final PhoenixInputSplit pSplit = (PhoenixInputSplit)split;
         final List<Scan> scans = pSplit.getScans();
         try {
-            LOG.info("Generating iterators for " + scans.size() + " scans in keyrange: " + pSplit.getKeyRange());
+            LOGGER.info("Generating iterators for " + scans.size() + " scans in keyrange: " + pSplit.getKeyRange());
             List<PeekingResultIterator> iterators = Lists.newArrayListWithExpectedSize(scans.size());
             StatementContext ctx = queryPlan.getContext();
             ReadMetricQueue readMetrics = ctx.getReadMetricsQueue();
@@ -136,7 +136,7 @@ public class PhoenixRecordReader<T extends DBWritable> extends RecordReader<Null
                   final TableSnapshotResultIterator tableSnapshotResultIterator = new TableSnapshotResultIterator(configuration, scan,
                       scanMetricsHolder);
                     peekingResultIterator = LookAheadResultIterator.wrap(tableSnapshotResultIterator);
-                    LOG.info("Adding TableSnapshotResultIterator for scan: " + scan);
+                    LOGGER.info("Adding TableSnapshotResultIterator for scan: " + scan);
                 } else {
                   final TableResultIterator tableResultIterator =
                       new TableResultIterator(
@@ -144,7 +144,7 @@ public class PhoenixRecordReader<T extends DBWritable> extends RecordReader<Null
                           scanMetricsHolder, renewScannerLeaseThreshold, queryPlan,
                           MapReduceParallelScanGrouper.getInstance());
                   peekingResultIterator = LookAheadResultIterator.wrap(tableResultIterator);
-                  LOG.info("Adding TableResultIterator for scan: " + scan);
+                  LOGGER.info("Adding TableResultIterator for scan: " + scan);
                 }
                 iterators.add(peekingResultIterator);
             }
@@ -158,7 +158,7 @@ public class PhoenixRecordReader<T extends DBWritable> extends RecordReader<Null
 
             this.resultSet = new PhoenixResultSet(this.resultIterator, queryPlan.getProjector().cloneIfNecessary(), queryPlan.getContext());
         } catch (SQLException e) {
-            LOG.error(String.format(" Error [%s] initializing PhoenixRecordReader. ",e.getMessage()));
+            LOGGER.error(String.format(" Error [%s] initializing PhoenixRecordReader. ",e.getMessage()));
             Throwables.propagate(e);
         }
    }
@@ -179,7 +179,7 @@ public class PhoenixRecordReader<T extends DBWritable> extends RecordReader<Null
             value.readFields(resultSet);
             return true;
         } catch (SQLException e) {
-            LOG.error(String.format(" Error [%s] occurred while iterating over the resultset. ",e.getMessage()));
+            LOGGER.error(String.format(" Error [%s] occurred while iterating over the resultset. ",e.getMessage()));
             throw new RuntimeException(e);
         }
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordWriter.java
index b67ba74..6f5b84e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordWriter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordWriter.java
@@ -24,8 +24,6 @@ import java.sql.SQLException;
 import java.util.Collections;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapreduce.RecordWriter;
@@ -33,6 +31,8 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.mapreduce.lib.db.DBWritable;
 import org.apache.phoenix.mapreduce.util.ConnectionUtil;
 import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Default {@link RecordWriter} implementation from Phoenix
@@ -40,7 +40,7 @@ import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
  */
 public class PhoenixRecordWriter<T extends DBWritable>  extends RecordWriter<NullWritable, T> {
     
-    private static final Log LOG = LogFactory.getLog(PhoenixRecordWriter.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixRecordWriter.class);
     
     private final Connection conn;
     private final PreparedStatement statement;
@@ -73,7 +73,7 @@ public class PhoenixRecordWriter<T extends DBWritable>  extends RecordWriter<Nul
         try {
             conn.commit();
          } catch (SQLException e) {
-             LOG.error("SQLException while performing the commit for the task.");
+             LOGGER.error("SQLException while performing the commit for the task.");
              throw new RuntimeException(e);
           } finally {
             try {
@@ -81,7 +81,7 @@ public class PhoenixRecordWriter<T extends DBWritable>  extends RecordWriter<Nul
               conn.close();
             }
             catch (SQLException ex) {
-              LOG.error("SQLException while closing the connection for the task.");
+              LOGGER.error("SQLException while closing the connection for the task.");
               throw new RuntimeException(ex);
             }
           }
@@ -94,7 +94,7 @@ public class PhoenixRecordWriter<T extends DBWritable>  extends RecordWriter<Nul
             numRecords++;
             statement.execute();
             if (numRecords % batchSize == 0) {
-                LOG.debug("commit called on a batch of size : " + batchSize);
+                LOGGER.debug("commit called on a batch of size : " + batchSize);
                 conn.commit();
             }
         } catch (SQLException e) {
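
The unguarded debug call in the hunk above still pays for string concatenation even when DEBUG is off. Two equivalent ways to avoid that cost, sketched here under the same assumptions (hypothetical class):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class BatchDebugLoggingSketch {
        private static final Logger LOGGER = LoggerFactory.getLogger(BatchDebugLoggingSketch.class);

        void onBatch(long batchSize) {
            // Option 1: guard the concatenating call.
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug("commit called on a batch of size : " + batchSize);
            }
            // Option 2: use a placeholder; the argument is only formatted when DEBUG is enabled.
            LOGGER.debug("commit called on a batch of size : {}", batchSize);
        }
    }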
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixServerBuildIndexInputFormat.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixServerBuildIndexInputFormat.java
index f8ec393..2beb10c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixServerBuildIndexInputFormat.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixServerBuildIndexInputFormat.java
@@ -22,8 +22,6 @@ import java.sql.Connection;
 import java.sql.SQLException;
 import java.util.Properties;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -38,6 +36,8 @@ import org.apache.phoenix.mapreduce.util.ConnectionUtil;
 import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
 import org.apache.phoenix.schema.*;
 import org.apache.phoenix.util.*;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Preconditions;
 import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.getIndexToolDataTableName;
@@ -50,7 +50,8 @@ import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.getInde
 public class PhoenixServerBuildIndexInputFormat<T extends DBWritable> extends PhoenixInputFormat {
     QueryPlan queryPlan = null;
 
-    private static final Log LOG = LogFactory.getLog(PhoenixServerBuildIndexInputFormat.class);
+    private static final Logger LOGGER =
+            LoggerFactory.getLogger(PhoenixServerBuildIndexInputFormat.class);
 
     /**
      * instantiated by framework
@@ -103,7 +104,7 @@ public class PhoenixServerBuildIndexInputFormat<T extends DBWritable> extends Ph
             queryPlan.iterator(MapReduceParallelScanGrouper.getInstance());
             return queryPlan;
         } catch (Exception exception) {
-            LOG.error(String.format("Failed to get the query plan with error [%s]",
+            LOGGER.error(String.format("Failed to get the query plan with error [%s]",
                     exception.getMessage()));
             throw new RuntimeException(exception);
         }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixTextInputFormat.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixTextInputFormat.java
index cc170f5..9f8080f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixTextInputFormat.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixTextInputFormat.java
@@ -50,7 +50,7 @@ public class PhoenixTextInputFormat extends TextInputFormat {
   }
 
   public static class PhoenixLineRecordReader extends RecordReader<LongWritable,Text> {
-    private static final Logger LOG = LoggerFactory.getLogger(PhoenixLineRecordReader.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixLineRecordReader.class);
     private final LineRecordReader rr;
     private PhoenixLineRecordReader(LineRecordReader rr) {
       this.rr = rr;
@@ -62,10 +62,10 @@ public class PhoenixTextInputFormat extends TextInputFormat {
       final Configuration conf = context.getConfiguration();
       final FileSplit split = (FileSplit) genericSplit;
       if (conf.getBoolean(SKIP_HEADER_KEY, false) && split.getStart() == 0) {
-        LOG.trace("Consuming first key-value from {}", genericSplit);
+        LOGGER.trace("Consuming first key-value from {}", genericSplit);
         nextKeyValue();
       } else {
-        LOG.trace("Not configured to skip header or not the first input split: {}", split);
+        LOGGER.trace("Not configured to skip header or not the first input split: {}", split);
       }
     }
 
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/RegexToKeyValueMapper.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/RegexToKeyValueMapper.java
index f63923d..38cff26 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/RegexToKeyValueMapper.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/RegexToKeyValueMapper.java
@@ -46,7 +46,7 @@ import com.google.common.base.Preconditions;
  */
 public class RegexToKeyValueMapper extends FormatToBytesWritableMapper<Map<?, ?>> {
 
-    protected static final Logger LOG = LoggerFactory.getLogger(RegexToKeyValueMapper.class);
+    protected static final Logger LOGGER = LoggerFactory.getLogger(RegexToKeyValueMapper.class);
 
     /** Configuration key for the regex */
     public static final String REGEX_CONFKEY = "phoenix.mapreduce.import.regex";
@@ -110,7 +110,9 @@ public class RegexToKeyValueMapper extends FormatToBytesWritableMapper<Map<?, ?>
 			Map<String, Object> data = new HashMap<>();
 			Matcher m = inputPattern.matcher(input);
 			if (m.groupCount() != columnInfoList.size()) {
-				LOG.debug(String.format("based on the regex and input, input fileds %s size doesn't match the table columns %s size", m.groupCount(), columnInfoList.size()));
+                LOGGER.debug(String.format("based on the regex and input, input fields %s size " +
+                        "doesn't match the table columns %s size",
+                        m.groupCount(), columnInfoList.size()));
 				return data;
 			}
 			
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/DirectHTableWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/DirectHTableWriter.java
index b85a049..0a14627 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/DirectHTableWriter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/DirectHTableWriter.java
@@ -42,7 +42,7 @@ import com.google.common.collect.Lists;
  * Writes mutations directly to HBase using HBase front-door APIs.
  */
 public class DirectHTableWriter {
-    private static final Logger LOG = LoggerFactory.getLogger(DirectHTableWriter.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(DirectHTableWriter.class);
 
     private Configuration conf = null;
     private Table table;
@@ -63,9 +63,9 @@ public class DirectHTableWriter {
         try {
             this.conn = ConnectionFactory.createConnection(this.conf);
             this.table = conn.getTable(TableName.valueOf(tableName));
-            LOG.info("Created table instance for " + tableName);
+            LOGGER.info("Created table instance for " + tableName);
         } catch (IOException e) {
-            LOG.error("IOException : ", e);
+            LOGGER.error("IOException : ", e);
             tryClosingResourceSilently(this.conn);
             throw new RuntimeException(e);
         }
@@ -106,7 +106,7 @@ public class DirectHTableWriter {
             try {
                 res.close();
             } catch (IOException e) {
-                LOG.error("Closing resource: " + res + " failed with error: ", e);
+                LOGGER.error("Closing resource: " + res + " failed with error: ", e);
             }
         }
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyMapper.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyMapper.java
index 98d6bac..70183ad 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyMapper.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyMapper.java
@@ -59,7 +59,7 @@ import com.google.common.base.Joiner;
  */
 public class IndexScrutinyMapper extends Mapper<NullWritable, PhoenixIndexDBWritable, Text, Text> {
 
-    private static final Logger LOG = LoggerFactory.getLogger(IndexScrutinyMapper.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(IndexScrutinyMapper.class);
     private Connection connection;
     private List<ColumnInfo> targetTblColumnMetadata;
     private long batchSize;
@@ -147,7 +147,7 @@ public class IndexScrutinyMapper extends Mapper<NullWritable, PhoenixIndexDBWrit
                     PhoenixRuntime.generateColumnInfo(connection, qTargetTable, targetColNames);
             sourceTblColumnMetadata =
                     PhoenixRuntime.generateColumnInfo(connection, qSourceTable, sourceColNames);
-            LOG.info("Target table base query: " + targetTableQuery);
+            LOGGER.info("Target table base query: " + targetTableQuery);
             md5 = MessageDigest.getInstance("MD5");
         } catch (SQLException | NoSuchAlgorithmException e) {
             tryClosingResourceSilently(this.outputUpsertStmt);
@@ -162,7 +162,7 @@ public class IndexScrutinyMapper extends Mapper<NullWritable, PhoenixIndexDBWrit
             try {
                 res.close();
             } catch (Exception e) {
-                LOG.error("Closing resource: " + res + " failed :", e);
+                LOGGER.error("Closing resource: " + res + " failed :", e);
             }
         }
     }
@@ -185,7 +185,7 @@ public class IndexScrutinyMapper extends Mapper<NullWritable, PhoenixIndexDBWrit
             }
             context.progress(); // Make sure progress is reported to Application Master.
         } catch (SQLException | IllegalArgumentException e) {
-            LOG.error(" Error while read/write of a record ", e);
+            LOGGER.error(" Error while read/write of a record ", e);
             context.getCounter(PhoenixJobCounters.FAILED_RECORDS).increment(1);
             throw new IOException(e);
         }
@@ -201,7 +201,7 @@ public class IndexScrutinyMapper extends Mapper<NullWritable, PhoenixIndexDBWrit
                 processBatch(context);
                 connection.close();
             } catch (SQLException e) {
-                LOG.error("Error while closing connection in the PhoenixIndexMapper class ", e);
+                LOGGER.error("Error while closing connection in the PhoenixIndexMapper class ", e);
                 throwException = new IOException(e);
             }
         }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTool.java
index 39df6ac..9bde1f6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTool.java
@@ -78,7 +78,7 @@ import static org.apache.phoenix.util.MetaDataUtil.VIEW_INDEX_TABLE_PREFIX;
  */
 public class IndexScrutinyTool extends Configured implements Tool {
 
-    private static final Logger LOG = LoggerFactory.getLogger(IndexScrutinyTool.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(IndexScrutinyTool.class);
 
     private static final Option SCHEMA_NAME_OPTION =
             new Option("s", "schema", true, "Phoenix schema name (optional)");
@@ -278,7 +278,7 @@ public class IndexScrutinyTool extends Configured implements Tool {
             final String selectQuery =
                     QueryUtil.constructSelectStatement(qSourceTable, sourceColumnNames, null,
                         Hint.NO_INDEX, true);
-            LOG.info("Query used on source table to feed the mapper: " + selectQuery);
+            LOGGER.info("Query used on source table to feed the mapper: " + selectQuery);
 
             PhoenixConfigurationUtil.setScrutinyOutputFormat(configuration, outputFormat);
             // if outputting to table, setup the upsert to the output table
@@ -287,7 +287,7 @@ public class IndexScrutinyTool extends Configured implements Tool {
                         IndexScrutinyTableOutput.constructOutputTableUpsert(sourceDynamicCols,
                             targetDynamicCols, connection);
                 PhoenixConfigurationUtil.setUpsertStatement(configuration, upsertStmt);
-                LOG.info("Upsert statement used for output table: " + upsertStmt);
+                LOGGER.info("Upsert statement used for output table: " + upsertStmt);
             }
 
             final String jobName =
@@ -381,7 +381,7 @@ public class IndexScrutinyTool extends Configured implements Tool {
             if (useTenantId) {
                 tenantId = cmdLine.getOptionValue(TENANT_ID_OPTION.getOpt());
                 configuration.set(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId);
-                LOG.info(String.format("IndexScrutinyTool uses a tenantId %s", tenantId));
+                LOGGER.info(String.format("IndexScrutinyTool uses a tenantId %s", tenantId));
             }
             connection = ConnectionUtil.getInputConnection(configuration);
             final String schemaName = cmdLine.getOptionValue(SCHEMA_NAME_OPTION.getOpt());
@@ -437,7 +437,7 @@ public class IndexScrutinyTool extends Configured implements Tool {
                 }
             }
 
-            LOG.info(String.format(
+            LOGGER.info(String.format(
                 "Running scrutiny [schemaName=%s, dataTable=%s, indexTable=%s, useSnapshot=%s, timestamp=%s, batchSize=%s, outputBasePath=%s, outputFormat=%s, outputMaxRows=%s]",
                 schemaName, dataTable, indexTable, useSnapshot, ts, batchSize, basePath,
                 outputFormat, outputMaxRows));
@@ -457,13 +457,13 @@ public class IndexScrutinyTool extends Configured implements Tool {
             }
 
             if (!isForeground) {
-                LOG.info("Running Index Scrutiny in Background - Submit async and exit");
+                LOGGER.info("Running Index Scrutiny in Background - Submit async and exit");
                 for (Job job : jobs) {
                     job.submit();
                 }
                 return 0;
             }
-            LOG.info(
+            LOGGER.info(
                 "Running Index Scrutiny in Foreground. Waits for the build to complete. This may take a long time!.");
             boolean result = true;
             for (Job job : jobs) {
@@ -472,7 +472,7 @@ public class IndexScrutinyTool extends Configured implements Tool {
 
             // write the results to the output metadata table
             if (outputInvalidRows && OutputFormat.TABLE.equals(outputFormat)) {
-                LOG.info("Writing results of jobs to output table "
+                LOGGER.info("Writing results of jobs to output table "
                         + IndexScrutinyTableOutput.OUTPUT_METADATA_TABLE_NAME);
                 IndexScrutinyTableOutput.writeJobResults(connection, args, jobs);
             }
@@ -480,11 +480,11 @@ public class IndexScrutinyTool extends Configured implements Tool {
             if (result) {
                 return 0;
             } else {
-                LOG.error("IndexScrutinyTool job failed! Check logs for errors..");
+                LOGGER.error("IndexScrutinyTool job failed! Check logs for errors..");
                 return -1;
             }
         } catch (Exception ex) {
-            LOG.error("An exception occurred while performing the indexing job: "
+            LOGGER.error("An exception occurred while performing the indexing job: "
                     + ExceptionUtils.getMessage(ex) + " at:\n" + ExceptionUtils.getStackTrace(ex));
             return -1;
         } finally {
@@ -493,7 +493,7 @@ public class IndexScrutinyTool extends Configured implements Tool {
                     connection.close();
                 }
             } catch (SQLException sqle) {
-                LOG.error("Failed to close connection ", sqle.getMessage());
+                LOGGER.error("Failed to close connection ", sqle.getMessage());
                 throw new RuntimeException("Failed to close connection");
             }
         }
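
One slf4j subtlety worth noting for calls like LOGGER.error("Failed to close connection ", sqle.getMessage()) in the hunk above: without a {} placeholder the extra argument is simply ignored in the output. A hedged sketch of two working alternatives (hypothetical class):

    import java.sql.SQLException;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class CloseFailureLoggingSketch {
        private static final Logger LOGGER = LoggerFactory.getLogger(CloseFailureLoggingSketch.class);

        void onCloseFailure(SQLException sqle) {
            // Include the message via a placeholder:
            LOGGER.error("Failed to close connection: {}", sqle.getMessage());
            // Or, preferably, pass the exception itself so the stack trace is logged:
            LOGGER.error("Failed to close connection", sqle);
        }
    }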
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
index 10b57f4..baae79c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
@@ -115,7 +115,7 @@ import com.google.common.collect.Lists;
  */
 public class IndexTool extends Configured implements Tool {
 
-    private static final Logger LOG = LoggerFactory.getLogger(IndexTool.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(IndexTool.class);
 
     private String schemaName;
     private String dataTable;
@@ -665,7 +665,7 @@ public class IndexTool extends Configured implements Tool {
                     int autosplitNumRegions = nOpt == null ? DEFAULT_AUTOSPLIT_NUM_REGIONS : Integer.parseInt(nOpt);
                     String rateOpt = cmdLine.getOptionValue(SPLIT_INDEX_OPTION.getOpt());
                     double samplingRate = rateOpt == null ? DEFAULT_SPLIT_SAMPLING_RATE : Double.parseDouble(rateOpt);
-                    LOG.info(String.format("Will split index %s , autosplit=%s , autoSplitNumRegions=%s , samplingRate=%s", indexTable, autosplit, autosplitNumRegions, samplingRate));
+                    LOGGER.info(String.format("Will split index %s , autosplit=%s , autoSplitNumRegions=%s , samplingRate=%s", indexTable, autosplit, autosplitNumRegions, samplingRate));
                     splitIndexTable(connection.unwrap(PhoenixConnection.class), autosplit, autosplitNumRegions, samplingRate, configuration);
                 }
             }
@@ -682,11 +682,11 @@ public class IndexTool extends Configured implements Tool {
             job = jobFactory.getJob();
 
             if (!isForeground && useDirectApi) {
-                LOG.info("Running Index Build in Background - Submit async and exit");
+                LOGGER.info("Running Index Build in Background - Submit async and exit");
                 job.submit();
                 return 0;
             }
-            LOG.info("Running Index Build in Foreground. Waits for the build to complete. This may take a long time!.");
+            LOGGER.info("Running Index Build in Foreground. Waits for the build to complete. This may take a long time!.");
             boolean result = job.waitForCompletion(true);
             
             if (result) {
@@ -694,7 +694,7 @@ public class IndexTool extends Configured implements Tool {
                     if (isLocalIndexBuild) {
                         validateSplitForLocalIndex(splitKeysBeforeJob, regionLocator);
                     }
-                    LOG.info("Loading HFiles from {}", outputPath);
+                    LOGGER.info("Loading HFiles from {}", outputPath);
                     LoadIncrementalHFiles loader = new LoadIncrementalHFiles(configuration);
                     loader.doBulkLoad(outputPath, connection.unwrap(PhoenixConnection.class)
                             .getQueryServices().getAdmin(), htable, regionLocator);
@@ -705,11 +705,11 @@ public class IndexTool extends Configured implements Tool {
                 }
                 return 0;
             } else {
-                LOG.error("IndexTool job failed! Check logs for errors..");
+                LOGGER.error("IndexTool job failed! Check logs for errors..");
                 return -1;
             }
         } catch (Exception ex) {
-            LOG.error("An exception occurred while performing the indexing job: "
+            LOGGER.error("An exception occurred while performing the indexing job: "
                     + ExceptionUtils.getMessage(ex) + " at:\n" + ExceptionUtils.getStackTrace(ex));
             return -1;
         } finally {
@@ -719,7 +719,7 @@ public class IndexTool extends Configured implements Tool {
                     try {
                         connection.close();
                     } catch (SQLException e) {
-                        LOG.error("Failed to close connection ", e);
+                        LOGGER.error("Failed to close connection ", e);
                         rethrowException = true;
                     }
                 }
@@ -727,7 +727,7 @@ public class IndexTool extends Configured implements Tool {
                     try {
                         htable.close();
                     } catch (IOException e) {
-                        LOG.error("Failed to close htable ", e);
+                        LOGGER.error("Failed to close htable ", e);
                         rethrowException = true;
                     }
                 }
@@ -735,7 +735,7 @@ public class IndexTool extends Configured implements Tool {
                     try {
                         hConn.close();
                     } catch (IOException e) {
-                        LOG.error("Failed to close hconnection ", e);
+                        LOGGER.error("Failed to close hconnection ", e);
                         rethrowException = true;
                     }
                 }
@@ -743,7 +743,7 @@ public class IndexTool extends Configured implements Tool {
                     try {
                         regionLocator.close();
                     } catch (IOException e) {
-                        LOG.error("Failed to close regionLocator ", e);
+                        LOGGER.error("Failed to close regionLocator ", e);
                         rethrowException = true;
                     }
                 }
@@ -751,7 +751,7 @@ public class IndexTool extends Configured implements Tool {
                     try {
                         jobFactory.closeConnection();
                     } catch (SQLException e) {
-                        LOG.error("Failed to close jobFactory ", e);
+                        LOGGER.error("Failed to close jobFactory ", e);
                         rethrowException = true;
                     }
                 }
@@ -793,7 +793,7 @@ public class IndexTool extends Configured implements Tool {
                         tempHConn.getRegionLocator(TableName.valueOf(qDataTable))) {
             numRegions = regionLocator.getStartKeys().length;
             if (autosplit && !(numRegions > autosplitNumRegions)) {
-                LOG.info(String.format(
+                LOGGER.info(String.format(
                     "Will not split index %s because the data table only has %s regions, autoSplitNumRegions=%s",
                     pIndexTable.getPhysicalName(), numRegions, autosplitNumRegions));
                 return; // do nothing if # of regions is too low
@@ -879,7 +879,7 @@ public class IndexTool extends Configured implements Tool {
             String errMsg = "The index to build is local index and the split keys are not matching"
                     + " before and after running the job. Please rerun the job otherwise"
                     + " there may be inconsistencies between actual data and index data";
-            LOG.error(errMsg);
+            LOGGER.error(errMsg);
             throw new Exception(errMsg);
         }
         return true;
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexToolUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexToolUtil.java
index 2dc7551..98ac5e9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexToolUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexToolUtil.java
@@ -38,7 +38,7 @@ public class IndexToolUtil {
 
 	private static final String ALTER_INDEX_QUERY_TEMPLATE = "ALTER INDEX IF EXISTS %s ON %s %s";  
     
-	private static final Logger LOG = LoggerFactory.getLogger(IndexToolUtil.class);
+	private static final Logger LOGGER = LoggerFactory.getLogger(IndexToolUtil.class);
 	
 	/**
 	 * Updates the index state.
@@ -74,7 +74,7 @@ public class IndexToolUtil {
         Preconditions.checkNotNull(connection);
         final String alterQuery = String.format(ALTER_INDEX_QUERY_TEMPLATE,indexTable,masterTable,state.name());
         connection.createStatement().execute(alterQuery);
-        LOG.info(" Updated the status of the index {} to {} " , indexTable , state.name());
+        LOGGER.info(" Updated the status of the index {} to {} " , indexTable , state.name());
     }
 	
 }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectMapper.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectMapper.java
index e148f67..eca3a9e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectMapper.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectMapper.java
@@ -52,7 +52,7 @@ import org.slf4j.LoggerFactory;
 public class PhoenixIndexImportDirectMapper extends
         Mapper<NullWritable, PhoenixIndexDBWritable, ImmutableBytesWritable, IntWritable> {
 
-    private static final Logger LOG = LoggerFactory.getLogger(PhoenixIndexImportDirectMapper.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixIndexImportDirectMapper.class);
 
     private final PhoenixIndexDBWritable indxWritable = new PhoenixIndexDBWritable();
 
@@ -100,7 +100,7 @@ public class PhoenixIndexImportDirectMapper extends
             //Get batch size in terms of bytes
             batchSizeBytes = ((PhoenixConnection) connection).getMutateBatchSizeBytes();
 
-            LOG.info("Mutation Batch Size = " + batchSize);
+            LOGGER.info("Mutation Batch Size = " + batchSize);
 
             final String upsertQuery = PhoenixConfigurationUtil.getUpsertStatement(configuration);
             this.pStatement = connection.prepareStatement(upsertQuery);
@@ -138,7 +138,7 @@ public class PhoenixIndexImportDirectMapper extends
             // Make sure progress is reported to Application Master.
             context.progress();
         } catch (SQLException e) {
-            LOG.error(" Error {}  while read/write of a record ", e.getMessage());
+            LOGGER.error(" Error {}  while read/write of a record ", e.getMessage());
             context.getCounter(PhoenixJobCounters.FAILED_RECORDS).increment(currentBatchCount);
             throw new RuntimeException(e);
         }
@@ -176,7 +176,7 @@ public class PhoenixIndexImportDirectMapper extends
                 new IntWritable(0));
             super.cleanup(context);
         } catch (SQLException e) {
-            LOG.error(" Error {}  while read/write of a record ", e.getMessage());
+            LOGGER.error(" Error {}  while read/write of a record ", e.getMessage());
             context.getCounter(PhoenixJobCounters.FAILED_RECORDS).increment(currentBatchCount);
             throw new RuntimeException(e);
         } finally {
@@ -189,7 +189,7 @@ public class PhoenixIndexImportDirectMapper extends
             try {
                 this.connection.close();
             } catch (SQLException e) {
-                LOG.error("Error while closing connection in the PhoenixIndexMapper class ", e);
+                LOGGER.error("Error while closing connection in the PhoenixIndexMapper class ", e);
             }
         }
         if (this.writer != null) {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectReducer.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectReducer.java
index 57688fd..0813620 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectReducer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectReducer.java
@@ -49,7 +49,7 @@ import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.MAPREDU
 public class PhoenixIndexImportDirectReducer extends
         Reducer<ImmutableBytesWritable, IntWritable, NullWritable, NullWritable> {
 
-    private static final Logger LOG = LoggerFactory.getLogger(PhoenixIndexImportDirectReducer.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixIndexImportDirectReducer.class);
 
     @Override
     protected void cleanup(Context context) throws IOException, InterruptedException{
@@ -58,7 +58,7 @@ public class PhoenixIndexImportDirectReducer extends
 
             updateTasksTable(context);
         } catch (SQLException e) {
-            LOG.error(" Failed to update the status to Active");
+            LOGGER.error(" Failed to update the status to Active");
             throw new RuntimeException(e.getMessage());
         }
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportMapper.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportMapper.java
index 5253bfd..567a642 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportMapper.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportMapper.java
@@ -58,7 +58,7 @@ import com.google.common.collect.Lists;
  */
 public class PhoenixIndexImportMapper extends Mapper<NullWritable, PhoenixIndexDBWritable, ImmutableBytesWritable, KeyValue> {
 
-    private static final Logger LOG = LoggerFactory.getLogger(PhoenixIndexImportMapper.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixIndexImportMapper.class);
     
     private final PhoenixIndexDBWritable indxWritable = new PhoenixIndexDBWritable();
     
@@ -155,7 +155,7 @@ public class PhoenixIndexImportMapper extends Mapper<NullWritable, PhoenixIndexD
             }
             connection.rollback();
        } catch (SQLException e) {
-           LOG.error("Error {}  while read/write of a record ",e.getMessage());
+           LOGGER.error("Error {}  while read/write of a record ",e.getMessage());
            context.getCounter(PhoenixJobCounters.FAILED_RECORDS).increment(1);
            throw new RuntimeException(e);
         } 
@@ -172,7 +172,7 @@ public class PhoenixIndexImportMapper extends Mapper<NullWritable, PhoenixIndexD
             try {
                 connection.close();
             } catch (SQLException e) {
-                LOG.error("Error while closing connection in the PhoenixIndexMapper class ", e);
+                LOGGER.error("Error while closing connection in the PhoenixIndexMapper class ", e);
             }
         }
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexPartialBuildMapper.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexPartialBuildMapper.java
index c79359d..aa7ea95 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexPartialBuildMapper.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexPartialBuildMapper.java
@@ -59,7 +59,7 @@ import com.google.common.collect.Lists;
  */
 public class PhoenixIndexPartialBuildMapper extends TableMapper<ImmutableBytesWritable, IntWritable> {
 
-    private static final Logger LOG = LoggerFactory.getLogger(PhoenixIndexPartialBuildMapper.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixIndexPartialBuildMapper.class);
 
     private PhoenixConnection connection;
 
@@ -92,7 +92,7 @@ public class PhoenixIndexPartialBuildMapper extends TableMapper<ImmutableBytesWr
                     services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB,
                         QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
             batchSize = Math.min(connection.getMutateBatchSize(), maxSize);
-            LOG.info("Mutation Batch Size = " + batchSize);
+            LOGGER.info("Mutation Batch Size = " + batchSize);
             this.mutations = Lists.newArrayListWithExpectedSize(batchSize);
             maintainers=new ImmutableBytesPtr(PhoenixConfigurationUtil.getIndexMaintainers(configuration));
         } catch (SQLException e) {
@@ -142,7 +142,7 @@ public class PhoenixIndexPartialBuildMapper extends TableMapper<ImmutableBytesWr
             // Make sure progress is reported to Application Master.
             context.progress();
         } catch (SQLException e) {
-            LOG.error(" Error {}  while read/write of a record ", e.getMessage());
+            LOGGER.error(" Error {}  while read/write of a record ", e.getMessage());
             context.getCounter(PhoenixJobCounters.FAILED_RECORDS).increment(1);
             throw new RuntimeException(e);
         }
@@ -167,7 +167,7 @@ public class PhoenixIndexPartialBuildMapper extends TableMapper<ImmutableBytesWr
                 new IntWritable(0));
             super.cleanup(context);
         } catch (SQLException e) {
-            LOG.error(" Error {}  while read/write of a record ", e.getMessage());
+            LOGGER.error(" Error {}  while read/write of a record ", e.getMessage());
             context.getCounter(PhoenixJobCounters.FAILED_RECORDS).increment(1);
             throw new RuntimeException(e);
         } finally {
@@ -180,7 +180,7 @@ public class PhoenixIndexPartialBuildMapper extends TableMapper<ImmutableBytesWr
             try {
                 this.connection.close();
             } catch (SQLException e) {
-                LOG.error("Error while closing connection in the PhoenixIndexMapper class ", e);
+                LOGGER.error("Error while closing connection in the PhoenixIndexMapper class ", e);
             }
         }
         if (this.writer != null) {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixServerBuildIndexMapper.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixServerBuildIndexMapper.java
index 34bcc9b..0544d02 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixServerBuildIndexMapper.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixServerBuildIndexMapper.java
@@ -52,7 +52,7 @@ import org.slf4j.LoggerFactory;
 public class PhoenixServerBuildIndexMapper extends
         Mapper<NullWritable, PhoenixIndexDBWritable, ImmutableBytesWritable, IntWritable> {
 
-    private static final Logger LOG = LoggerFactory.getLogger(PhoenixServerBuildIndexMapper.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixServerBuildIndexMapper.class);
 
     @Override
     protected void setup(final Context context) throws IOException, InterruptedException {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/automation/PhoenixMRJobSubmitter.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/automation/PhoenixMRJobSubmitter.java
index 886da45..e00d6db 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/automation/PhoenixMRJobSubmitter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/automation/PhoenixMRJobSubmitter.java
@@ -39,8 +39,6 @@ import java.util.concurrent.TimeoutException;
 
 import javax.security.auth.login.AppConfigurationEntry;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
@@ -60,6 +58,8 @@ import org.apache.phoenix.util.UpgradeUtil;
 import org.apache.phoenix.util.ZKBasedMasterElectionUtil;
 import org.codehaus.jettison.json.JSONArray;
 import org.codehaus.jettison.json.JSONObject;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.gson.Gson;
 import com.google.gson.GsonBuilder;
@@ -116,7 +116,7 @@ public class PhoenixMRJobSubmitter {
     private static final int JOB_SUBMIT_POOL_TIMEOUT = 5;
     private Configuration conf;
     private String zkQuorum;
-    private static final Log LOG = LogFactory.getLog(PhoenixMRJobSubmitter.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixMRJobSubmitter.class);
 
     public PhoenixMRJobSubmitter() throws IOException {
         this(null);
@@ -158,11 +158,11 @@ public class PhoenixMRJobSubmitter {
 
         switch (type) {
         case CAPACITY:
-            LOG.info("Applying the Capacity Scheduler Queue Configurations");
+            LOGGER.info("Applying the Capacity Scheduler Queue Configurations");
             PhoenixMRJobUtil.updateCapacityQueueInfo(conf);
             break;
         case FAIR:
-            LOG.warn("Fair Scheduler type is not yet supported");
+            LOGGER.warn("Fair Scheduler type is not yet supported");
             throw new IOException("Fair Scheduler is not yet supported");
         case NONE:
         default:
@@ -184,7 +184,7 @@ public class PhoenixMRJobSubmitter {
         AppConfigurationEntry entries[] =
                 javax.security.auth.login.Configuration.getConfiguration()
                         .getAppConfigurationEntry("Client");
-        LOG.info("Security - Fetched App Login Configuration Entries");
+        LOGGER.info("Security - Fetched App Login Configuration Entries");
         if (entries != null) {
             for (AppConfigurationEntry entry : entries) {
                 if (entry.getOptions().get(PRINCIPAL) != null) {
@@ -194,12 +194,12 @@ public class PhoenixMRJobSubmitter {
                     keyTabPath = (String) entry.getOptions().get(KEYTAB);
                 }
             }
-            LOG.info("Security - Got Principal = " + principal + "");
+            LOGGER.info("Security - Got Principal = " + principal + "");
             if (principal != null && keyTabPath != null) {
-                LOG.info("Security - Retreiving the TGT with principal:" + principal
+                LOGGER.info("Security - Retrieving the TGT with principal:" + principal
                         + " and keytab:" + keyTabPath);
                 UserGroupInformation.loginUserFromKeytab(principal, keyTabPath);
-                LOG.info("Security - Retrieved TGT with principal:" + principal + " and keytab:"
+                LOGGER.info("Security - Retrieved TGT with principal:" + principal + " and keytab:"
                         + keyTabPath);
             }
         }
@@ -237,7 +237,7 @@ public class PhoenixMRJobSubmitter {
 
         if (!ZKBasedMasterElectionUtil.acquireLock(zookeeperWatcher, PHOENIX_LOCKS_PARENT,
             AUTO_INDEX_BUILD_LOCK_NAME)) {
-            LOG.info("Some other node is already running Automated Index Build. Skipping execution!");
+            LOGGER.info("Some other node is already running Automated Index Build. Skipping execution!");
             return -1;
         }
         // 1) Query Phoenix SYSTEM.CATALOG table to get a list of all candidate indexes to be built
@@ -247,22 +247,22 @@ public class PhoenixMRJobSubmitter {
 
         // Get Candidate indexes to be built
         Map<String, PhoenixAsyncIndex> candidateJobs = getCandidateJobs();
-        LOG.info("Candidate Indexes to be built as seen from SYSTEM.CATALOG - " + candidateJobs);
+        LOGGER.info("Candidate Indexes to be built as seen from SYSTEM.CATALOG - " + candidateJobs);
 
         // Get already scheduled Jobs list from Yarn Resource Manager
         Set<String> submittedJobs = getSubmittedYarnApps();
-        LOG.info("Already Submitted/Running MR index build jobs - " + submittedJobs);
+        LOGGER.info("Already Submitted/Running MR index build jobs - " + submittedJobs);
 
         // Get final jobs to submit
         Set<PhoenixAsyncIndex> jobsToSchedule = getJobsToSubmit(candidateJobs, submittedJobs);
 
-        LOG.info("Final indexes to be built - " + jobsToSchedule);
+        LOGGER.info("Final indexes to be built - " + jobsToSchedule);
         List<Future<Boolean>> results = new ArrayList<Future<Boolean>>(jobsToSchedule.size());
 
         int failedJobSubmissionCount = 0;
         int timedoutJobSubmissionCount = 0;
         ExecutorService jobSubmitPool = Executors.newFixedThreadPool(10);
-        LOG.info("Attempt to submit MR index build jobs for - " + jobsToSchedule);
+        LOGGER.info("Attempt to submit MR index build jobs for - " + jobsToSchedule);
 
         try {
             for (PhoenixAsyncIndex indexToBuild : jobsToSchedule) {
@@ -285,7 +285,7 @@ public class PhoenixMRJobSubmitter {
             PhoenixMRJobUtil.shutdown(jobSubmitPool);
         }
 
-        LOG.info("Result of Attempt to Submit MR index build Jobs - Jobs attempted = "
+        LOGGER.info("Result of Attempt to Submit MR index build Jobs - Jobs attempted = "
                 + jobsToSchedule.size() + " ; Failed to Submit = " + failedJobSubmissionCount
                 + " ; Timed out = " + timedoutJobSubmissionCount);
         return failedJobSubmissionCount;
@@ -312,7 +312,7 @@ public class PhoenixMRJobSubmitter {
                 + "," + YarnApplication.state.RUNNING);
         int rmPort = PhoenixMRJobUtil.getRMPort(conf);
         String response = PhoenixMRJobUtil.getJobsInformationFromRM(rmHost, rmPort, urlParams);
-        LOG.debug("Already Submitted/Running Apps = " + response);
+        LOGGER.debug("Already Submitted/Running Apps = " + response);
         JSONObject jobsJson = new JSONObject(response);
         JSONObject appsJson = jobsJson.optJSONObject(YarnApplication.APPS_ELEMENT);
         Set<String> yarnApplicationSet = new HashSet<String>();
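
The hunks above show the pattern this commit applies file by file: the commons-logging Log/LogFactory pair is replaced by the slf4j API and the static field is renamed from LOG to LOGGER, while the messages themselves are left as-is. A minimal, self-contained sketch of the resulting pattern (class, method, and variable names here are illustrative, not taken from the commit):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class ExampleJobSubmitter {
        // slf4j logger bound to the enclosing class, replacing
        // LogFactory.getLog(ExampleJobSubmitter.class)
        private static final Logger LOGGER =
                LoggerFactory.getLogger(ExampleJobSubmitter.class);

        public void submit(String indexName) {
            // concatenation-style message, as kept by this commit
            LOGGER.info("Attempt to submit MR index build jobs for - " + indexName);
            // equivalent slf4j parameterized form; the message is only assembled
            // when INFO is enabled
            LOGGER.info("Attempt to submit MR index build jobs for - {}", indexName);
        }
    }
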
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java
index f09cf0b..9a25681 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java
@@ -29,8 +29,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Properties;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
@@ -57,6 +55,8 @@ import org.apache.phoenix.schema.TableNotFoundException;
 import org.apache.phoenix.util.ColumnInfo;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.QueryUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
@@ -69,7 +69,7 @@ import com.google.common.collect.Lists;
  */
 public final class PhoenixConfigurationUtil {
 
-    private static final Log LOG = LogFactory.getLog(PhoenixInputFormat.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixConfigurationUtil.class);
 
     public static final String SESSION_ID = "phoenix.sessionid";
     
@@ -333,7 +333,7 @@ public final class PhoenixConfigurationUtil {
             List<String> upsertColumnList =
                     PhoenixConfigurationUtil.getUpsertColumnNames(configuration);
             if(!upsertColumnList.isEmpty()) {
-                LOG.info(String.format("UseUpsertColumns=%s, upsertColumnList.size()=%s,"
+                LOGGER.info(String.format("UseUpsertColumns=%s, upsertColumnList.size()=%s,"
                                 + " upsertColumnList=%s ",!upsertColumnList.isEmpty(),
                         upsertColumnList.size(), Joiner.on(",").join(upsertColumnList)));
             }
@@ -358,11 +358,11 @@ public final class PhoenixConfigurationUtil {
         if (!upsertColumnNames.isEmpty()) {
             // Generating UPSERT statement without column name information.
             upsertStmt = QueryUtil.constructUpsertStatement(tableName, columnMetadataList);
-            LOG.info("Phoenix Custom Upsert Statement: "+ upsertStmt);
+            LOGGER.info("Phoenix Custom Upsert Statement: "+ upsertStmt);
         } else {
             // Generating UPSERT statement without column name information.
             upsertStmt = QueryUtil.constructGenericUpsertStatement(tableName, columnMetadataList.size());
-            LOG.info("Phoenix Generic Upsert Statement: " + upsertStmt);
+            LOGGER.info("Phoenix Generic Upsert Statement: " + upsertStmt);
         }
         configuration.set(UPSERT_STATEMENT, upsertStmt);
         return upsertStmt;
@@ -403,9 +403,9 @@ public final class PhoenixConfigurationUtil {
             final Configuration configuration) {
     	List<String> selectColumnList = PhoenixConfigurationUtil.getSelectColumnNames(configuration);
         if(!selectColumnList.isEmpty()) {
-            LOG.info(String.format("UseSelectColumns=%s, selectColumnList.size()=%s, selectColumnList=%s "
-                    ,!selectColumnList.isEmpty(), selectColumnList.size(), Joiner.on(",").join(selectColumnList)
-                    ));
+            LOGGER.info(String.format("UseSelectColumns=%s, selectColumnList.size()=%s, " +
+                            "selectColumnList=%s ",!selectColumnList.isEmpty(),
+                    selectColumnList.size(), Joiner.on(",").join(selectColumnList)));
         }
         return selectColumnList;
     }
@@ -421,7 +421,7 @@ public final class PhoenixConfigurationUtil {
         final List<ColumnInfo> columnMetadataList = getSelectColumnMetadataList(configuration);
         final String conditions = configuration.get(INPUT_TABLE_CONDITIONS);
         selectStmt = QueryUtil.constructSelectStatement(tableName, columnMetadataList, conditions);
-        LOG.info("Select Statement: "+ selectStmt);
+        LOGGER.info("Select Statement: "+ selectStmt);
         configuration.set(SELECT_STATEMENT, selectStmt);
         return selectStmt;
     }
@@ -721,7 +721,7 @@ public final class PhoenixConfigurationUtil {
 					if (tenantId != null) {
 						tenantId = null;
 					} else {
-						BaseResultIterators.logger.warn(
+						BaseResultIterators.LOGGER.warn(
 								"Unable to find parent table \"" + parentTableName + "\" of table \""
 										+ table.getName().getString() + "\" to determine USE_STATS_FOR_PARALLELIZATION",
 								e);
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/memory/GlobalMemoryManager.java b/phoenix-core/src/main/java/org/apache/phoenix/memory/GlobalMemoryManager.java
index f557089..48fb374 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/memory/GlobalMemoryManager.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/memory/GlobalMemoryManager.java
@@ -32,7 +32,7 @@ import org.slf4j.LoggerFactory;
  * @since 0.1
  */
 public class GlobalMemoryManager implements MemoryManager {
-    private static final Logger logger = LoggerFactory.getLogger(GlobalMemoryManager.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(GlobalMemoryManager.class);
 
     private final Object sync = new Object();
     private final long maxMemoryBytes;
@@ -151,7 +151,7 @@ public class GlobalMemoryManager implements MemoryManager {
         protected void finalize() throws Throwable {
             try {
                 if (size > 0) {
-                    logger.warn("Orphaned chunk of " + size + " bytes found during finalize");
+                    LOGGER.warn("Orphaned chunk of " + size + " bytes found during finalize");
                     //logger.warn("Orphaned chunk of " + size + " bytes found during finalize allocated here:\n" + stack);
                 }
                 freeMemory();
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/metrics/Metrics.java b/phoenix-core/src/main/java/org/apache/phoenix/metrics/Metrics.java
index 24950c4..86b54df 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/metrics/Metrics.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/metrics/Metrics.java
@@ -17,14 +17,14 @@
  */
 package org.apache.phoenix.metrics;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.metrics2.MetricsSystem;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class Metrics {
 
-    private static final Log LOG = LogFactory.getLog(Metrics.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(Metrics.class);
 
   private static volatile MetricsSystem manager = DefaultMetricsSystem.instance();
 
@@ -35,13 +35,13 @@ public class Metrics {
     public static MetricsSystem initialize() {
         // if the jars aren't on the classpath, then we don't start the metrics system
         if (manager == null) {
-            LOG.warn("Phoenix metrics could not be initialized - no MetricsManager found!");
+            LOGGER.warn("Phoenix metrics could not be initialized - no MetricsManager found!");
             return null;
         }
         // only initialize the metrics system once
         synchronized (Metrics.class) {
             if (!initialized) {
-                LOG.info("Initializing metrics system: " + Metrics.METRICS_SYSTEM_NAME);
+                LOGGER.info("Initializing metrics system: " + Metrics.METRICS_SYSTEM_NAME);
                 manager.init(Metrics.METRICS_SYSTEM_NAME);
                 initialized = true;
             }
@@ -60,7 +60,7 @@ public class Metrics {
 
     public static void ensureConfigured() {
         if (!sinkInitialized) {
-            LOG.warn("Phoenix metrics2/tracing sink was not started. Should be it be?");
+            LOGGER.warn("Phoenix metrics2/tracing sink was not started. Should it be?");
         }
     }
 }
\ No newline at end of file
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalClientMetrics.java b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalClientMetrics.java
index e7c7bae..810278d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalClientMetrics.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalClientMetrics.java
@@ -114,7 +114,7 @@ public enum GlobalClientMetrics {
     GLOBAL_HBASE_COUNT_ROWS_SCANNED(COUNT_ROWS_SCANNED),
     GLOBAL_HBASE_COUNT_ROWS_FILTERED(COUNT_ROWS_FILTERED);
 
-    private static final Logger LOG = LoggerFactory.getLogger(GlobalClientMetrics.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(GlobalClientMetrics.class);
     private static final boolean isGlobalMetricsEnabled = QueryServicesOptions.withDefaults().isGlobalMetricsEnabled();
     private MetricType metricType;
     private GlobalMetric metric;
@@ -143,7 +143,7 @@ public enum GlobalClientMetrics {
     }
 
     private static MetricRegistry createMetricRegistry() {
-        LOG.info("Creating Metric Registry for Phoenix Global Metrics");
+        LOGGER.info("Creating Metric Registry for Phoenix Global Metrics");
         MetricRegistryInfo registryInfo = new MetricRegistryInfo("PHOENIX", "Phoenix Client Metrics",
                 "phoenix", "Phoenix,sub=CLIENT", true);
         return MetricRegistries.global().create(registryInfo);
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalMetricRegistriesAdapter.java b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalMetricRegistriesAdapter.java
index 19d68cc..111a9df 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalMetricRegistriesAdapter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalMetricRegistriesAdapter.java
@@ -21,8 +21,6 @@ import java.util.Iterator;
 import java.util.Map;
 import java.util.Map.Entry;
 import org.apache.commons.lang3.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.metrics.Counter;
 import org.apache.hadoop.hbase.metrics.Gauge;
 import org.apache.hadoop.hbase.metrics.Histogram;
@@ -40,6 +38,8 @@ import org.apache.hadoop.metrics2.lib.Interns;
 import org.apache.hadoop.metrics2.lib.MutableHistogram;
 import org.apache.hadoop.metrics2.source.JvmMetrics;
 import org.apache.phoenix.query.QueryServicesOptions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Contents mostly copied from GlobalMetricRegistriesAdapter class from hbase-hadoop2-compat
@@ -48,7 +48,7 @@ import org.apache.phoenix.query.QueryServicesOptions;
  */
 public class GlobalMetricRegistriesAdapter {
 
-    private static final Log LOG = LogFactory.getLog(GlobalMetricRegistriesAdapter.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(GlobalMetricRegistriesAdapter.class);
     private static GlobalMetricRegistriesAdapter INSTANCE = new GlobalMetricRegistriesAdapter();
 
     private GlobalMetricRegistriesAdapter() {
@@ -62,7 +62,7 @@ public class GlobalMetricRegistriesAdapter {
 
     public void registerMetricRegistry(MetricRegistry registry) {
         if (registry == null) {
-            LOG.warn("Registry cannot be registered with Hadoop Metrics 2 since it is null.");
+            LOGGER.warn("Registry cannot be registered with Hadoop Metrics 2 since it is null.");
             return;
         }
 
@@ -74,7 +74,8 @@ public class GlobalMetricRegistriesAdapter {
      * Class to convert HBase Metric Objects to Hadoop Metrics2 Metric Objects
      */
     private static class HBaseMetrics2HadoopMetricsAdapter implements MetricsSource {
-        private static final Log LOG = LogFactory.getLog(HBaseMetrics2HadoopMetricsAdapter.class);
+        private static final Logger LOGGER =
+                LoggerFactory.getLogger(HBaseMetrics2HadoopMetricsAdapter.class);
         private final MetricRegistry registry;
         private final String metricTag;
 
@@ -85,7 +86,7 @@ public class GlobalMetricRegistriesAdapter {
 
         private void registerToDefaultMetricsSystem() {
             MetricRegistryInfo info = registry.getMetricRegistryInfo();
-            LOG.info("Registering " + info.getMetricsJmxContext() + " " + info.getMetricsDescription() + " into DefaultMetricsSystem");
+            LOGGER.info("Registering " + info.getMetricsJmxContext() + " " + info.getMetricsDescription() + " into DefaultMetricsSystem");
             DefaultMetricsSystem.instance().register(info.getMetricsJmxContext(), info.getMetricsDescription(), this);
         }
 
@@ -117,7 +118,7 @@ public class GlobalMetricRegistriesAdapter {
                 } else if (metric instanceof Timer) {
                     this.addTimer(name, (Timer)metric, builder);
                 } else {
-                    LOG.info("Ignoring unknown Metric class " + metric.getClass().getName());
+                    LOGGER.info("Ignoring unknown Metric class " + metric.getClass().getName());
                 }
             }
         }
@@ -134,7 +135,7 @@ public class GlobalMetricRegistriesAdapter {
             } else if (o instanceof Double) {
                 builder.addGauge(info, (Double)o);
             } else {
-                LOG.warn("Ignoring Gauge (" + name + ") with unhandled type: " + o.getClass());
+                LOGGER.warn("Ignoring Gauge (" + name + ") with unhandled type: " + o.getClass());
             }
 
         }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index c5d47dd..49f5420 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -288,7 +288,7 @@ import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
 
 public class ConnectionQueryServicesImpl extends DelegateQueryServices implements ConnectionQueryServices {
-    private static final Logger logger = LoggerFactory.getLogger(ConnectionQueryServicesImpl.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(ConnectionQueryServicesImpl.class);
     private static final int INITIAL_CHILD_SERVICES_CAPACITY = 100;
     private static final int DEFAULT_OUT_OF_ORDER_MUTATIONS_WAIT_TIME_MS = 1000;
     private static final int TTL_FOR_MUTEX = 15 * 60; // 15min
@@ -439,7 +439,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
             try {
                 this.queryDisruptor = new QueryLoggerDisruptor(this.config);
             } catch (SQLException e) {
-                logger.warn("Unable to initiate qeuery logging service !!");
+                LOGGER.warn("Unable to initiate query logging service !!");
                 e.printStackTrace();
             }
         }
@@ -450,7 +450,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
         try {
             this.connection = HBaseFactoryProvider.getHConnectionFactory().createConnection(this.config);
             GLOBAL_HCONNECTIONS_COUNTER.increment();
-            logger.info("HConnection established. Stacktrace for informational purposes: " + connection + " " +  LogUtil.getCallerStackTrace());
+            LOGGER.info("HConnection established. Stacktrace for informational purposes: " + connection + " " +  LogUtil.getCallerStackTrace());
         } catch (IOException e) {
             throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ESTABLISH_CONNECTION)
             .setRootCause(e).build().buildException();
@@ -685,7 +685,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                             mutator.mutate(metaData);
                             break;
                         } else if (table.getSequenceNumber() >= tableSeqNum) {
-                            logger.warn("Attempt to cache older version of " + tableName + ": current= " + table.getSequenceNumber() + ", new=" + tableSeqNum);
+                            LOGGER.warn("Attempt to cache older version of " + tableName + ": current= " + table.getSequenceNumber() + ", new=" + tableSeqNum);
                             break;
                         }
                     } catch (TableNotFoundException e) {
@@ -694,7 +694,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                     // We waited long enough - just remove the table from the cache
                     // and the next time it's used it'll be pulled over from the server.
                     if (waitTime <= 0) {
-                        logger.warn("Unable to update meta data repo within " + (DEFAULT_OUT_OF_ORDER_MUTATIONS_WAIT_TIME_MS/1000) + " seconds for " + tableName);
+                        LOGGER.warn("Unable to update meta data repo within " + (DEFAULT_OUT_OF_ORDER_MUTATIONS_WAIT_TIME_MS/1000) + " seconds for " + tableName);
                         // There will never be a parentTableName here, as that would only
                         // be non null for an index an we never add/remove columns from an index.
                         metaData.removeTable(tenantId, tableName, null, HConstants.LATEST_TIMESTAMP);
@@ -1076,8 +1076,8 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                     + watch.elapsedMillis() + " ms "
                     + (numTries > 1 ? ("after trying " + numTries + (numTries > 1 ? "times." : "time.")) : ""));
         } else {
-            if (logger.isDebugEnabled()) {
-                logger.debug("Operation "
+            if (LOGGER.isDebugEnabled()) {
+                LOGGER.debug("Operation "
                         + op.getOperationName()
                         + " completed within "
                         + watch.elapsedMillis()
@@ -1137,7 +1137,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
         try (Admin admin = getAdmin()) {
             final String quorum = ZKConfig.getZKQuorumServersString(config);
             final String znode = this.getProps().get(HConstants.ZOOKEEPER_ZNODE_PARENT);
-            logger.debug("Found quorum: " + quorum + ":" + znode);
+            LOGGER.debug("Found quorum: " + quorum + ":" + znode);
 
             if (isMetaTable) {
                 if(SchemaUtil.isNamespaceMappingEnabled(PTableType.SYSTEM, this.getProps())) {
@@ -1424,7 +1424,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                 try {
                     ht.close();
                 } catch (IOException e) {
-                    logger.warn("Could not close HTable", e);
+                    LOGGER.warn("Could not close HTable", e);
                 }
             }
         }
@@ -2734,9 +2734,9 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
             stmt.executeUpdate();
             metaConnection.commit();
         } catch (NewerTableAlreadyExistsException e) {
-            logger.warn("Table already modified at this timestamp, so assuming column already nullable: " + columnName);
+            LOGGER.warn("Table already modified at this timestamp, so assuming column already nullable: " + columnName);
         } catch (SQLException e) {
-            logger.warn("Add column failed due to:" + e);
+            LOGGER.warn("Add column failed due to:" + e);
             sqlE = e;
         } finally {
             try {
@@ -2766,9 +2766,9 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
         try {
             metaConnection.createStatement().executeUpdate("ALTER TABLE " + tableName + " ADD " + (addIfNotExists ? " IF NOT EXISTS " : "") + columns );
         } catch (NewerTableAlreadyExistsException e) {
-            logger.warn("Table already modified at this timestamp, so assuming add of these columns already done: " + columns);
+            LOGGER.warn("Table already modified at this timestamp, so assuming add of these columns already done: " + columns);
         } catch (SQLException e) {
-            logger.warn("Add column failed due to:" + e);
+            LOGGER.warn("Add column failed due to:" + e);
             sqlE = e;
         } finally {
             try {
@@ -2886,7 +2886,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                         boolean success = false;
                         try {
                             GLOBAL_QUERY_SERVICES_COUNTER.increment();
-                            logger.info("An instance of ConnectionQueryServices was created.");
+                            LOGGER.info("An instance of ConnectionQueryServices was created.");
                             openConnection();
                             hConnectionEstablished = true;
                             boolean isDoNotUpgradePropSet = UpgradeUtil.isNoUpgradeSet(props);
@@ -2917,7 +2917,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                                     if (inspectIfAnyExceptionInChain(e, Collections
                                             .<Class<? extends Exception>> singletonList(AccessDeniedException.class))) {
                                         // Pass
-                                        logger.warn("Could not check for Phoenix SYSTEM tables, assuming they exist and are properly configured");
+                                        LOGGER.warn("Could not check for Phoenix SYSTEM tables, assuming they exist and are properly configured");
                                         checkClientServerCompatibility(SchemaUtil.getPhysicalName(SYSTEM_CATALOG_NAME_BYTES, getProps()).getName());
                                         success = true;
                                     } else if (inspectIfAnyExceptionInChain(e,
@@ -2952,7 +2952,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                                     upgradeSystemTables(url, props);
                                 } else {
                                     // We expect the user to manually run the "EXECUTE UPGRADE" command first.
-                                    logger.error("Upgrade is required. Must run 'EXECUTE UPGRADE' "
+                                    LOGGER.error("Upgrade is required. Must run 'EXECUTE UPGRADE' "
                                             + "before any other command");
                                 }
                             }
@@ -3008,7 +3008,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                     || admin.tableExists(TableName.valueOf(
                         PhoenixDatabaseMetaData.SYSTEM_SCHEMA_NAME,
                         PhoenixDatabaseMetaData.SYSTEM_MUTEX_TABLE_NAME))) {
-                logger.debug("System mutex table already appears to exist, not creating it");
+                LOGGER.debug("System mutex table already appears to exist, not creating it");
                 return;
             }
             final TableName mutexTableName = SchemaUtil.getPhysicalTableName(
@@ -3026,7 +3026,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                 // Ignore TableExistsException as another client might beat us during upgrade.
                 // Ignore AccessDeniedException, as it may be possible underpriviliged user trying to use the connection
                 // which doesn't required upgrade.
-                logger.debug("Ignoring exception while creating mutex table during connection initialization: "
+                LOGGER.debug("Ignoring exception while creating mutex table during connection initialization: "
                         + Throwables.getStackTraceAsString(e));
             } else {
                 throw e;
@@ -3196,7 +3196,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                  * column BASE_COLUMN_COUNT is already part of the meta-data schema as the
                  * signal that the server side upgrade has finished or is in progress.
                  */
-                logger.debug("No need to run 4.5 upgrade");
+                LOGGER.debug("No need to run 4.5 upgrade");
             }
             Properties p = PropertiesUtil.deepCopy(metaConnection.getClientInfo());
             p.remove(PhoenixRuntime.CURRENT_SCN_ATTRIB);
@@ -3208,18 +3208,18 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                 List<String> tablesNeedingUpgrade = UpgradeUtil
                   .getPhysicalTablesWithDescRowKey(conn);
                 if (!tablesNeedingUpgrade.isEmpty()) {
-                    logger.warn("The following tables require upgrade due to a bug causing the row key to be incorrect for descending columns and ascending BINARY columns (PHOENIX-2067 and PHOENIX-2120):\n"
+                    LOGGER.warn("The following tables require upgrade due to a bug causing the row key to be incorrect for descending columns and ascending BINARY columns (PHOENIX-2067 and PHOENIX-2120):\n"
                       + Joiner.on(' ').join(tablesNeedingUpgrade)
                       + "\nTo upgrade issue the \"bin/psql.py -u\" command.");
                 }
                 List<String> unsupportedTables = UpgradeUtil
                   .getPhysicalTablesWithDescVarbinaryRowKey(conn);
                 if (!unsupportedTables.isEmpty()) {
-                    logger.warn("The following tables use an unsupported VARBINARY DESC construct and need to be changed:\n"
+                    LOGGER.warn("The following tables use an unsupported VARBINARY DESC construct and need to be changed:\n"
                       + Joiner.on(' ').join(unsupportedTables));
                 }
             } catch (Exception ex) {
-                logger.error(
+                LOGGER.error(
                   "Unable to determine tables requiring upgrade due to PHOENIX-2067",
                   ex);
             } finally {
@@ -3420,7 +3420,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                 if (SchemaUtil.isNamespaceMappingEnabled(PTableType.SYSTEM, ConnectionQueryServicesImpl.this.getProps())) {
                     // Try acquiring a lock in SYSMUTEX table before migrating the tables since it involves disabling the table.
                     if (acquiredMutexLock = acquireUpgradeMutex(MetaDataProtocol.MIN_SYSTEM_TABLE_MIGRATION_TIMESTAMP)) {
-                        logger.debug("Acquired lock in SYSMUTEX table for migrating SYSTEM tables to SYSTEM namespace "
+                        LOGGER.debug("Acquired lock in SYSMUTEX table for migrating SYSTEM tables to SYSTEM namespace "
                           + "and/or upgrading " + sysCatalogTableName);
                     }
                     // We will not reach here if we fail to acquire the lock, since it throws UpgradeInProgressException
@@ -3428,7 +3428,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                     // If SYSTEM tables exist, they are migrated to HBase SYSTEM namespace
                     // If they don't exist or they're already migrated, this method will return immediately
                     ensureSystemTablesMigratedToSystemNamespace();
-                    logger.debug("Migrated SYSTEM tables to SYSTEM namespace");
+                    LOGGER.debug("Migrated SYSTEM tables to SYSTEM namespace");
                     metaConnection = upgradeSystemCatalogIfRequired(metaConnection, e.getSystemCatalogTimeStamp());
                 }
             } catch (TableAlreadyExistsException e) {
@@ -3439,11 +3439,11 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                     // it means some old client is either migrating SYSTEM tables or trying to upgrade the schema of
                     // SYSCAT table and hence it should not be interrupted
                     if (acquiredMutexLock = acquireUpgradeMutex(currentServerSideTableTimeStamp)) {
-                        logger.debug("Acquired lock in SYSMUTEX table for upgrading " + sysCatalogTableName);
+                        LOGGER.debug("Acquired lock in SYSMUTEX table for upgrading " + sysCatalogTableName);
                         snapshotName = getSysCatalogSnapshotName(currentServerSideTableTimeStamp);
                         createSnapshot(snapshotName, sysCatalogTableName);
                         snapshotCreated = true;
-                        logger.debug("Created snapshot for SYSCAT");
+                        LOGGER.debug("Created snapshot for SYSCAT");
                     }
                     // We will not reach here if we fail to acquire the lock, since it throws UpgradeInProgressException
                 }
@@ -3618,7 +3618,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                         try {
                             releaseUpgradeMutex();
                         } catch (IOException e) {
-                            logger.warn("Release of upgrade mutex failed ", e);
+                            LOGGER.warn("Release of upgrade mutex failed ", e);
                         }
                     }
                 }
@@ -3712,7 +3712,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
         try {
             admin = getAdmin();
             admin.snapshot(snapshotName, TableName.valueOf(tableName));
-            logger.info("Successfully created snapshot " + snapshotName + " for "
+            LOGGER.info("Successfully created snapshot " + snapshotName + " for "
                     + tableName);
         } catch (Exception e) {
             sqlE = new SQLException(e);
@@ -3744,14 +3744,14 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
             SQLException sqlE = null;
             Admin admin = null;
             try {
-                logger.warn("Starting restore of " + tableName + " using snapshot "
+                LOGGER.warn("Starting restore of " + tableName + " using snapshot "
                         + snapshotName + " because upgrade failed");
                 admin = getAdmin();
                 admin.disableTable(TableName.valueOf(tableName));
                 tableDisabled = true;
                 admin.restoreSnapshot(snapshotName);
                 snapshotRestored = true;
-                logger.warn("Successfully restored " + tableName + " using snapshot "
+                LOGGER.warn("Successfully restored " + tableName + " using snapshot "
                         + snapshotName);
             } catch (Exception e) {
                 sqlE = new SQLException(e);
@@ -3760,10 +3760,10 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                     try {
                         admin.enableTable(TableName.valueOf(tableName));
                         if (snapshotRestored) {
-                            logger.warn("Successfully restored and enabled " + tableName + " using snapshot "
+                            LOGGER.warn("Successfully restored and enabled " + tableName + " using snapshot "
                                     + snapshotName);
                         } else {
-                            logger.warn("Successfully enabled " + tableName + " after restoring using snapshot "
+                            LOGGER.warn("Successfully enabled " + tableName + " after restoring using snapshot "
                                     + snapshotName + " failed. ");
                         }
                     } catch (Exception e1) {
@@ -3773,7 +3773,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                         } else {
                             sqlE.setNextException(enableTableEx);
                         }
-                        logger.error("Failure in enabling "
+                        LOGGER.error("Failure in enabling "
                                 + tableName
                                 + (snapshotRestored ? " after successfully restoring using snapshot"
                                         + snapshotName
@@ -3811,7 +3811,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
             if (tableNames.size() == 0) { return; }
             // Try to move any remaining tables matching "SYSTEM\..*" into "SYSTEM:"
             if (tableNames.size() > 8) {
-                logger.warn("Expected 8 system tables but found " + tableNames.size() + ":" + tableNames);
+                LOGGER.warn("Expected 8 system tables but found " + tableNames.size() + ":" + tableNames);
             }
 
             byte[] mappedSystemTable = SchemaUtil
@@ -3819,7 +3819,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
             metatable = getTable(mappedSystemTable);
             if (tableNames.contains(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME)) {
                 if (!admin.tableExists(TableName.valueOf(mappedSystemTable))) {
-                    logger.info("Migrating SYSTEM.CATALOG table to SYSTEM namespace.");
+                    LOGGER.info("Migrating SYSTEM.CATALOG table to SYSTEM namespace.");
                     // Actual migration of SYSCAT table
                     UpgradeUtil.mapTableToNamespace(admin, metatable,
                             PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME, this.getProps(), null, PTableType.SYSTEM,
@@ -3832,7 +3832,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                 tableNames.remove(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME);
             }
             for (TableName table : tableNames) {
-                logger.info(String.format("Migrating %s table to SYSTEM namespace.", table.getNameAsString()));
+                LOGGER.info(String.format("Migrating %s table to SYSTEM namespace.", table.getNameAsString()));
                 UpgradeUtil.mapTableToNamespace(admin, metatable, table.getNameAsString(), this.getProps(), null, PTableType.SYSTEM,
                         null);
                 ConnectionQueryServicesImpl.this.removeTable(null, table.getNameAsString(), null,
@@ -3900,10 +3900,10 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                                 + tableName + " columnName : " + columnName + " familyName : "
                                 + familyName;
                 if (!checkAndPut) {
-                    logger.error(processName + " failed to acquire mutex for "+ msg);
+                    LOGGER.error(processName + " failed to acquire mutex for "+ msg);
                 }
                 else {
-                    logger.debug(processName + " acquired mutex for "+ msg);
+                    LOGGER.debug(processName + " acquired mutex for "+ msg);
                 }
                 return checkAndPut;
             }
@@ -3941,7 +3941,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                         " tenantId : " + tenantId + " schemaName : " + schemaName + " tableName : "
                                 + tableName + " columnName : " + columnName + " familyName : "
                                 + familyName;
-                logger.debug(processName + " released mutex for "+ msg);
+                LOGGER.debug(processName + " released mutex for "+ msg);
             }
         } catch (IOException e) {
             throw ServerUtil.parseServerException(e);
@@ -3996,7 +3996,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                             " B.COLUMN_FAMILY IS NOT NULL AND\n" +
                             " A.IMMUTABLE_ROWS = TRUE");
         } catch (SQLException e) {
-            logger.warn("exception during upgrading stats table:" + e);
+            LOGGER.warn("exception during upgrading stats table:" + e);
             sqlE = e;
         } finally {
             try {
@@ -4037,7 +4037,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                     "UPSERT INTO SYSTEM.CATALOG(TENANT_ID, TABLE_SCHEM, TABLE_NAME, COLUMN_NAME, COLUMN_FAMILY, DISABLE_WAL)\n" +
                             "VALUES (NULL, '" + QueryConstants.SYSTEM_SCHEMA_NAME + "','" + PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE + "', NULL, NULL, FALSE)");
         } catch (SQLException e) {
-            logger.warn("exception during upgrading stats table:" + e);
+            LOGGER.warn("exception during upgrading stats table:" + e);
             sqlE = e;
         } finally {
             try {
@@ -4072,7 +4072,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                     + "' AND " + PhoenixDatabaseMetaData.TABLE_SCHEM + "='"
                     + PhoenixDatabaseMetaData.SYSTEM_SCHEMA_NAME + "'");
         } catch (SQLException e) {
-            logger.warn("exception during upgrading stats table:" + e);
+            LOGGER.warn("exception during upgrading stats table:" + e);
             sqlE = e;
         } finally {
             try {
@@ -4893,7 +4893,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                                     // add it back at the tail
                                     scannerQueue.offer(new WeakReference<TableResultIterator>(
                                             scanningItr));
-                                    logger.info("Lease renewed for scanner: " + scanningItr);
+                                    LOGGER.info("Lease renewed for scanner: " + scanningItr);
                                     break;
                                 // Scanner not initialized probably because next() hasn't been called on it yet. Enqueue it back to attempt lease renewal later.
                                 case UNINITIALIZED:
@@ -4915,7 +4915,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                             numScanners--;
                         }
                         if (renewed > 0) {
-                            logger.info("Renewed leases for " + renewed + " scanner/s in "
+                            LOGGER.info("Renewed leases for " + renewed + " scanner/s in "
                                     + (System.currentTimeMillis() - start) + " ms ");
                         }
                         connectionsQueue.offer(connRef);
@@ -4923,7 +4923,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                     numConnections--;
                 }
             } catch (InternalRenewLeaseTaskException e) {
-                logger.error("Exception thrown when renewing lease. Draining the queue of scanners ", e);
+                LOGGER.error("Exception thrown when renewing lease. Draining the queue of scanners ", e);
                 // clear up the queue since the task is about to be unscheduled.
                 connectionsQueue.clear();
                 // throw an exception since we want the task execution to be suppressed because we just encountered an
@@ -4931,13 +4931,13 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                 throw new RuntimeException(e);
             } catch (InterruptedException e) {
                 Thread.currentThread().interrupt(); // restore the interrupt status
-                logger.error("Thread interrupted when renewing lease.", e);
+                LOGGER.error("Thread interrupted when renewing lease.", e);
             } catch (Exception e) {
-                logger.error("Exception thrown when renewing lease ", e);
+                LOGGER.error("Exception thrown when renewing lease ", e);
                 // don't drain the queue and swallow the exception in this case since we don't want the task
                 // execution to be suppressed because renewing lease of a scanner failed.
             } catch (Throwable e) {
-                logger.error("Exception thrown when renewing lease. Draining the queue of scanners ", e);
+                LOGGER.error("Exception thrown when renewing lease. Draining the queue of scanners ", e);
                 connectionsQueue.clear(); // clear up the queue since the task is about to be unscheduled.
                 throw new RuntimeException(e);
             }
@@ -5060,7 +5060,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
         try (Admin admin = getAdmin()) {
             final String quorum = ZKConfig.getZKQuorumServersString(config);
             final String znode = this.props.get(HConstants.ZOOKEEPER_ZNODE_PARENT);
-            logger.debug("Found quorum: " + quorum + ":" + znode);
+            LOGGER.debug("Found quorum: " + quorum + ":" + znode);
             boolean nameSpaceExists = true;
             try {
                 admin.getNamespaceDescriptor(schemaName);
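
Both exception-logging styles appear in the file above: some call sites concatenate the exception into the message ("Add column failed due to:" + e), while others pass it as the last argument ("Could not close HTable", e). With slf4j only the latter records the stack trace. A short sketch illustrating the difference (the class name is illustrative, not from the commit):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class UpgradeLoggingSketch {
        private static final Logger LOGGER =
                LoggerFactory.getLogger(UpgradeLoggingSketch.class);

        public void addColumn() {
            try {
                // stand-in for the real DDL work
                throw new IllegalStateException("simulated failure");
            } catch (Exception e) {
                // concatenation: logs only e.toString(), no stack trace
                LOGGER.warn("Add column failed due to:" + e);
                // Throwable as the final argument: message plus full stack trace
                LOGGER.warn("Add column failed", e);
            }
        }
    }
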
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCache.java b/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCache.java
index 6679a75..5eb2b97 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCache.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCache.java
@@ -32,4 +32,4 @@ public interface GuidePostsCache {
 
     void invalidateAll();
 
-}
+}
\ No newline at end of file
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/PhoenixStatsCacheLoader.java b/phoenix-core/src/main/java/org/apache/phoenix/query/PhoenixStatsCacheLoader.java
index 8a6baf2..9174bc4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/PhoenixStatsCacheLoader.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/PhoenixStatsCacheLoader.java
@@ -35,7 +35,7 @@ import java.util.concurrent.Executors;
  * {@link CacheLoader} asynchronous implementation for the Phoenix Table Stats cache.
  */
 public class PhoenixStatsCacheLoader extends CacheLoader<GuidePostsKey, GuidePostsInfo> {
-    private static final Logger logger = LoggerFactory.getLogger(PhoenixStatsCacheLoader.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixStatsCacheLoader.class);
 
     final private PhoenixStatsLoader statsLoader;
     private static volatile ExecutorService executor;
@@ -75,7 +75,7 @@ public class PhoenixStatsCacheLoader extends CacheLoader<GuidePostsKey, GuidePos
                             try {
                                 return statsLoader.loadStats(key, prevGuidepostInfo);
                             } catch (Exception e) {
-                                logger.warn("Unable to load stats from table: " + key.toString(), e);
+                                LOGGER.warn("Unable to load stats from table: " + key.toString(), e);
                                 return prevGuidepostInfo;
                             }
                         }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index 3b6a77e..d0f0d23 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -261,7 +261,7 @@ import com.google.common.collect.Sets;
 import com.google.common.primitives.Ints;
 
 public class MetaDataClient {
-    private static final Logger logger = LoggerFactory.getLogger(MetaDataClient.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(MetaDataClient.class);
 
     private static final ParseNodeFactory FACTORY = new ParseNodeFactory();
     private static final String SET_ASYNC_CREATED_DATE =
@@ -1730,7 +1730,7 @@ public class MetaDataClient {
             return new MutationState(0, 0, connection);
         }
 
-        if (logger.isInfoEnabled()) logger.info("Created index " + table.getName().getString() + " at " + table.getTimeStamp());
+        if (LOGGER.isInfoEnabled()) LOGGER.info("Created index " + table.getName().getString() + " at " + table.getTimeStamp());
         boolean asyncIndexBuildEnabled = connection.getQueryServices().getProps().getBoolean(
                 QueryServices.INDEX_ASYNC_BUILD_ENABLED,
                 QueryServicesOptions.DEFAULT_INDEX_ASYNC_BUILD_ENABLED);
@@ -3335,8 +3335,8 @@ public class MetaDataClient {
             break;
         case CONCURRENT_TABLE_MUTATION:
             addTableToCache(result);
-            if (logger.isDebugEnabled()) {
-                logger.debug(LogUtil.addCustomAnnotations("CONCURRENT_TABLE_MUTATION for table " + SchemaUtil.getTableName(schemaName, tableName), connection));
+            if (LOGGER.isDebugEnabled()) {
+                LOGGER.debug(LogUtil.addCustomAnnotations("CONCURRENT_TABLE_MUTATION for table " + SchemaUtil.getTableName(schemaName, tableName), connection));
             }
             throw new ConcurrentTableMutationException(schemaName, tableName);
         case NEWER_TABLE_FOUND:
@@ -3587,8 +3587,8 @@ public class MetaDataClient {
                 int nNewColumns = numCols;
                 List<Mutation> tableMetaData = Lists.newArrayListWithExpectedSize((1 + nNewColumns) * (nIndexes + 1));
                 List<Mutation> columnMetaData = Lists.newArrayListWithExpectedSize(nNewColumns * (nIndexes + 1));
-                if (logger.isDebugEnabled()) {
-                    logger.debug(LogUtil.addCustomAnnotations("Resolved table to " + table.getName().getString() + " with seqNum " + table.getSequenceNumber() + " at timestamp " + table.getTimeStamp() + " with " + table.getColumns().size() + " columns: " + table.getColumns(), connection));
+                if (LOGGER.isDebugEnabled()) {
+                    LOGGER.debug(LogUtil.addCustomAnnotations("Resolved table to " + table.getName().getString() + " with seqNum " + table.getSequenceNumber() + " at timestamp " + table.getTimeStamp() + " with " + table.getColumns().size() + " columns: " + table.getColumns(), connection));
                 }
 
                 int position = table.getColumns().size();
@@ -3917,8 +3917,8 @@ public class MetaDataClient {
                     if (retried) {
                         throw e;
                     }
-                    if (logger.isDebugEnabled()) {
-                        logger.debug(LogUtil.addCustomAnnotations("Caught ConcurrentTableMutationException for table " + SchemaUtil.getTableName(schemaName, tableName) + ". Will try again...", connection));
+                    if (LOGGER.isDebugEnabled()) {
+                        LOGGER.debug(LogUtil.addCustomAnnotations("Caught ConcurrentTableMutationException for table " + SchemaUtil.getTableName(schemaName, tableName) + ". Will try again...", connection));
                     }
                     retried = true;
                 }
@@ -4969,7 +4969,7 @@ public class MetaDataClient {
      */
     public MutationState changePermissions(ChangePermsStatement changePermsStatement) throws SQLException {
 
-        logger.info(changePermsStatement.toString());
+        LOGGER.info(changePermsStatement.toString());
 
         try(Admin admin = connection.getQueryServices().getAdmin()) {
             ClusterConnection clusterConnection = (ClusterConnection) admin.getConnection();
@@ -5040,7 +5040,7 @@ public class MetaDataClient {
                 inconsistentTables.add(indexTable);
                 continue;
             }
-            logger.info("Updating permissions for Index Table: " +
+            LOGGER.info("Updating permissions for Index Table: " +
                     indexTable.getName() + " Base Table: " + inputTable.getName());
             tableName = SchemaUtil.getPhysicalTableName(indexTable.getPhysicalName().getBytes(), indexTable.isNamespaceMapped());
             changePermsOnTable(clusterConnection, changePermsStatement, tableName);
@@ -5048,7 +5048,7 @@ public class MetaDataClient {
 
         if(schemaInconsistency) {
             for(PTable table : inconsistentTables) {
-                logger.error("Fail to propagate permissions to Index Table: " + table.getName());
+                LOGGER.error("Failed to propagate permissions to Index Table: " + table.getName());
             }
             throw new TablesNotInSyncException(inputTable.getTableName().getString(),
                     inconsistentTables.get(0).getTableName().getString(), "Namespace properties");
@@ -5059,13 +5059,13 @@ public class MetaDataClient {
         tableName = org.apache.hadoop.hbase.TableName.valueOf(viewIndexTableBytes);
         boolean viewIndexTableExists = admin.tableExists(tableName);
         if(viewIndexTableExists) {
-            logger.info("Updating permissions for View Index Table: " +
+            LOGGER.info("Updating permissions for View Index Table: " +
                     Bytes.toString(viewIndexTableBytes) + " Base Table: " + inputTable.getName());
             changePermsOnTable(clusterConnection, changePermsStatement, tableName);
         } else {
             if(inputTable.isMultiTenant()) {
-                logger.error("View Index Table not found for MultiTenant Table: " + inputTable.getName());
-                logger.error("Fail to propagate permissions to view Index Table: " + tableName.getNameAsString());
+                LOGGER.error("View Index Table not found for MultiTenant Table: " + inputTable.getName());
+                LOGGER.error("Failed to propagate permissions to view Index Table: " + tableName.getNameAsString());
                 throw new TablesNotInSyncException(inputTable.getTableName().getString(),
                         Bytes.toString(viewIndexTableBytes), " View Index table should exist for MultiTenant tables");
             }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
index 70379ee..58b09bf 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
@@ -25,8 +25,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
@@ -58,6 +56,8 @@ import org.apache.phoenix.util.MetaDataUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.SchemaUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Maps;
 
@@ -66,7 +66,7 @@ import com.google.common.collect.Maps;
  */
 public class DefaultStatisticsCollector implements StatisticsCollector {
 
-    private static final Log LOG = LogFactory.getLog(DefaultStatisticsCollector.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(DefaultStatisticsCollector.class);
     
     final Map<ImmutableBytesPtr, Pair<Long, GuidePostsInfoBuilder>> guidePostsInfoWriterMap = Maps.newHashMap();
     private final Table htable;
@@ -122,7 +122,7 @@ public class DefaultStatisticsCollector implements StatisticsCollector {
         } catch (SQLException e) {
             throw new IOException(e);
         }
-        LOG.info("Initialization complete for " +
+        LOGGER.info("Initialization complete for " +
                 this.getClass() + " statistics collector for table " + tableName);
     }
 
@@ -141,12 +141,12 @@ public class DefaultStatisticsCollector implements StatisticsCollector {
     private void initGuidepostDepth() throws IOException, SQLException {
         if (guidePostPerRegionBytes != null || guidePostWidthBytes != null) {
             getGuidePostDepthFromStatement();
-            LOG.info("Guide post depth determined from SQL statement: " + guidePostDepth);
+            LOGGER.info("Guide post depth determined from SQL statement: " + guidePostDepth);
         } else {
             long guidepostWidth = getGuidePostDepthFromSystemCatalog();
             if (guidepostWidth >= 0) {
                 this.guidePostDepth = guidepostWidth;
-                LOG.info("Guide post depth determined from SYSTEM.CATALOG: " + guidePostDepth);
+                LOGGER.info("Guide post depth determined from SYSTEM.CATALOG: " + guidePostDepth);
             } else {
                 this.guidePostDepth = StatisticsUtil.getGuidePostDepth(
                         configuration.getInt(
@@ -156,7 +156,7 @@ public class DefaultStatisticsCollector implements StatisticsCollector {
                                 QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB,
                                 QueryServicesOptions.DEFAULT_STATS_GUIDEPOST_WIDTH_BYTES),
                         region.getTableDescriptor());
-                LOG.info("Guide post depth determined from global configuration: " + guidePostDepth);
+                LOGGER.info("Guide post depth determined from global configuration: " + guidePostDepth);
             }
         }
 
@@ -211,7 +211,7 @@ public class DefaultStatisticsCollector implements StatisticsCollector {
                 try {
                     htable.close();
                 } catch (IOException e) {
-                    LOG.warn("Failed to close " + htable.getName(), e);
+                    LOGGER.warn("Failed to close " + htable.getName(), e);
                 }
             }
         }
@@ -250,7 +250,7 @@ public class DefaultStatisticsCollector implements StatisticsCollector {
                     EnvironmentEdgeManager.currentTimeMillis(), scan);
             commitStats(mutations);
         } catch (IOException e) {
-            LOG.error("Unable to update SYSTEM.STATS table.", e);
+            LOGGER.error("Unable to update SYSTEM.STATS table.", e);
         }
     }
 
@@ -284,21 +284,21 @@ public class DefaultStatisticsCollector implements StatisticsCollector {
         for (ImmutableBytesPtr fam : fams) {
             if (delete) {
                 statsWriter.deleteStatsForRegion(region, this, fam, mutations);
-                LOG.info("Generated " + mutations.size() + " mutations to delete existing stats");
+                LOGGER.info("Generated " + mutations.size() + " mutations to delete existing stats");
             }
 
             // If we've disabled stats, don't write any, just delete them
             if (this.guidePostDepth > 0) {
                 int oldSize = mutations.size();
                 statsWriter.addStats(this, fam, mutations, guidePostDepth);
-                LOG.info("Generated " + (mutations.size() - oldSize) + " mutations for new stats");
+                LOGGER.info("Generated " + (mutations.size() - oldSize) + " mutations for new stats");
             }
         }
     }
 
     private void commitStats(List<Mutation> mutations) throws IOException {
         statsWriter.commitStats(mutations, this);
-        LOG.info("Committed " + mutations.size() + " mutations for stats");
+        LOGGER.info("Committed " + mutations.size() + " mutations for stats");
     }
 
     /**
@@ -387,7 +387,7 @@ public class DefaultStatisticsCollector implements StatisticsCollector {
 
         ImmutableBytesPtr cfKey =
                 new ImmutableBytesPtr(store.getColumnFamilyDescriptor().getName());
-        LOG.info("StatisticsScanner created for table: "
+        LOGGER.info("StatisticsScanner created for table: "
                 + tableName + " CF: " + store.getColumnFamilyName());
         return new StatisticsScanner(this, statsWriter, env, delegate, cfKey);
     }
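
As an aside on the slf4j API adopted above: it supports parameterized messages, so the
string concatenation in log calls such as the guide-post-depth messages can be avoided.
A minimal sketch, using a hypothetical class name that is not part of this commit:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class GuidePostLoggingSketch {
        private static final Logger LOGGER =
                LoggerFactory.getLogger(GuidePostLoggingSketch.class);

        void logDepth(long guidePostDepth) {
            // The {} placeholder defers message formatting until INFO is actually enabled.
            LOGGER.info("Guide post depth determined from SYSTEM.CATALOG: {}", guidePostDepth);
        }
    }
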
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java
index 36eb9bd..5d53c3b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java
@@ -25,8 +25,6 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.Callable;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
@@ -38,12 +36,14 @@ import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.ScannerContext;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * The scanner that does the scanning to collect the stats during major compaction.{@link DefaultStatisticsCollector}
  */
 public class StatisticsScanner implements InternalScanner {
-    private static final Log LOG = LogFactory.getLog(StatisticsScanner.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(StatisticsScanner.class);
     private InternalScanner delegate;
     private StatisticsWriter statsWriter;
     private Region region;
@@ -95,7 +95,7 @@ public class StatisticsScanner implements InternalScanner {
         StatisticsCollectionRunTracker collectionTracker = getStatsCollectionRunTracker(config);
         StatisticsScannerCallable callable = createCallable();
         if (isConnectionClosed()) {
-            LOG.debug("Not updating table statistics because the server is stopping/stopped");
+            LOGGER.debug("Not updating table statistics because the server is stopping/stopped");
             return;
         }
         if (!async) {
@@ -149,27 +149,27 @@ public class StatisticsScanner implements InternalScanner {
                 // Just verify if this if fine
                 ArrayList<Mutation> mutations = new ArrayList<Mutation>();
 
-                if (LOG.isDebugEnabled()) {
-                    LOG.debug("Deleting the stats for the region " + regionInfo.getRegionNameAsString()
+                if (LOGGER.isDebugEnabled()) {
+                    LOGGER.debug("Deleting the stats for the region " + regionInfo.getRegionNameAsString()
                             + " as part of major compaction");
                 }
                 getStatisticsWriter().deleteStatsForRegion(region, tracker, family, mutations);
-                if (LOG.isDebugEnabled()) {
-                    LOG.debug("Adding new stats for the region " + regionInfo.getRegionNameAsString()
+                if (LOGGER.isDebugEnabled()) {
+                    LOGGER.debug("Adding new stats for the region " + regionInfo.getRegionNameAsString()
                             + " as part of major compaction");
                 }
                 getStatisticsWriter().addStats(tracker, family,
                         mutations, tracker.getGuidePostDepth());
-                if (LOG.isDebugEnabled()) {
-                    LOG.debug("Committing new stats for the region " + regionInfo.getRegionNameAsString()
+                if (LOGGER.isDebugEnabled()) {
+                    LOGGER.debug("Committing new stats for the region " + regionInfo.getRegionNameAsString()
                             + " as part of major compaction");
                 }
                 getStatisticsWriter().commitStats(mutations, tracker);
             } catch (IOException e) {
                 if (isConnectionClosed()) {
-                    LOG.debug("Ignoring error updating statistics because region is closing/closed");
+                    LOGGER.debug("Ignoring error updating statistics because region is closing/closed");
                 } else {
-                    LOG.error("Failed to update statistics table!", e);
+                    LOGGER.error("Failed to update statistics table!", e);
                     toThrow = e;
                 }
             } finally {
@@ -179,14 +179,14 @@ public class StatisticsScanner implements InternalScanner {
                     getTracker().close();// close the tracker
                 } catch (IOException e) {
                     if (toThrow == null) toThrow = e;
-                    LOG.error("Error while closing the stats table", e);
+                    LOGGER.error("Error while closing the stats table", e);
                 } finally {
                     // close the delegate scanner
                     try {
                         getDelegate().close();
                     } catch (IOException e) {
                         if (toThrow == null) toThrow = e;
-                        LOG.error("Error while closing the scanner", e);
+                        LOGGER.error("Error while closing the scanner", e);
                     } finally {
                         if (toThrow != null) { throw toThrow; }
                     }
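
One slf4j detail relevant to the isDebugEnabled() guards kept above: parameterized
messages skip string formatting when DEBUG is off, but the argument expressions are still
evaluated, so a guard is only worth keeping when computing an argument is itself costly.
A minimal sketch under that assumption, with a hypothetical helper class that is not part
of this commit:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class RegionStatsLoggingSketch {
        private static final Logger LOGGER =
                LoggerFactory.getLogger(RegionStatsLoggingSketch.class);

        void logDelete(String regionName) {
            // Cheap argument: no explicit guard needed, formatting is skipped when DEBUG is off.
            LOGGER.debug("Deleting the stats for the region {} as part of major compaction",
                    regionName);
        }
    }
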
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/UpdateStatisticsTool.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/UpdateStatisticsTool.java
index 110682d..1b99759 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/UpdateStatisticsTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/UpdateStatisticsTool.java
@@ -71,7 +71,7 @@ import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_IS_NAMESPACE
  */
 public class UpdateStatisticsTool extends Configured implements Tool {
 
-    private static final Logger LOG = LoggerFactory.getLogger(UpdateStatisticsTool.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(UpdateStatisticsTool.class);
 
     private static final Option TABLE_NAME_OPTION = new Option("t", "table", true,
             "Phoenix Table Name");
@@ -126,7 +126,7 @@ public class UpdateStatisticsTool extends Configured implements Tool {
             String physicalTableName =  SchemaUtil.getPhysicalTableName(tableName.getBytes(),
                     namespaceMapping).getNameAsString();
             admin.snapshot(snapshotName, TableName.valueOf(physicalTableName));
-            LOG.info("Successfully created snapshot " + snapshotName + " for " + physicalTableName);
+            LOGGER.info("Successfully created snapshot " + snapshotName + " for " + physicalTableName);
         }
     }
 
@@ -142,7 +142,7 @@ public class UpdateStatisticsTool extends Configured implements Tool {
         try (final Connection conn = ConnectionUtil.getInputConnection(getConf())) {
             Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
             admin.deleteSnapshot(snapshotName);
-            LOG.info("Successfully deleted snapshot " + snapshotName);
+            LOGGER.info("Successfully deleted snapshot " + snapshotName);
         }
     }
 
@@ -221,23 +221,23 @@ public class UpdateStatisticsTool extends Configured implements Tool {
                 CharStream.class, TransactionSystemClient.class, TransactionNotInProgressException.class,
                 ZKClient.class, DiscoveryServiceClient.class, ZKDiscoveryService.class,
                 Cancellable.class, TTransportException.class, SpanReceiver.class, TransactionProcessor.class, Gauge.class, MetricRegistriesImpl.class);
-        LOG.info("UpdateStatisticsTool running for: " + tableName
+        LOGGER.info("UpdateStatisticsTool running for: " + tableName
                 + " on snapshot: " + snapshotName + " with restore dir: " + restoreDir);
     }
 
     private int runJob() {
         try {
             if (isForeground) {
-                LOG.info("Running UpdateStatisticsTool in Foreground. " +
+                LOGGER.info("Running UpdateStatisticsTool in Foreground. " +
                         "Runs full table scans. This may take a long time!");
                 return (job.waitForCompletion(true)) ? 0 : 1;
             } else {
-                LOG.info("Running UpdateStatisticsTool in Background - Submit async and exit");
+                LOGGER.info("Running UpdateStatisticsTool in Background - Submit async and exit");
                 job.submit();
                 return 0;
             }
         } catch (Exception e) {
-            LOG.error("Caught exception " + e + " trying to update statistics.");
+            LOGGER.error("Caught exception " + e + " trying to update statistics.");
             return 1;
         }
     }
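
A note on exception logging with slf4j, relevant to the catch block above: concatenating
the exception into the message records only e.toString(), while passing it as the final
argument also records the stack trace. A minimal sketch, with a hypothetical class and
method that are not part of this commit:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class JobErrorLoggingSketch {
        private static final Logger LOGGER =
                LoggerFactory.getLogger(JobErrorLoggingSketch.class);

        int runJob(Runnable job) {
            try {
                job.run();
                return 0;
            } catch (Exception e) {
                // A Throwable passed as the last argument is logged with its stack trace.
                LOGGER.error("Caught exception trying to update statistics.", e);
                return 1;
            }
        }
    }
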
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/tool/PhoenixCanaryTool.java b/phoenix-core/src/main/java/org/apache/phoenix/tool/PhoenixCanaryTool.java
index 865d210..ed35ec1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/tool/PhoenixCanaryTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/tool/PhoenixCanaryTool.java
@@ -234,7 +234,7 @@ public class PhoenixCanaryTool extends Configured implements Tool {
         }
     }
 
-    private static final Logger LOG = LoggerFactory.getLogger(PhoenixCanaryTool.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixCanaryTool.class);
 
     private static String getCurrentTimestamp() {
         return new SimpleDateFormat("yyyy.MM.dd.HH.mm.ss.ms").format(new Date());
@@ -289,7 +289,7 @@ public class PhoenixCanaryTool extends Configured implements Tool {
         try {
             Namespace cArgs = parseArgs(args);
             if (cArgs == null) {
-                LOG.error("Argument parsing failed.");
+                LOGGER.error("Argument parsing failed.");
                 throw new RuntimeException("Argument parsing failed");
             }
 
@@ -326,7 +326,7 @@ public class PhoenixCanaryTool extends Configured implements Tool {
             connection = getConnectionWithRetry(connectionURL);
 
             if (connection == null) {
-                LOG.error("Failed to get connection after multiple retries; the connection is null");
+                LOGGER.error("Failed to get connection after multiple retries; the connection is null");
             }
 
             SimpleTimeLimiter limiter = new SimpleTimeLimiter();
@@ -338,10 +338,10 @@ public class PhoenixCanaryTool extends Configured implements Tool {
                     sink.clearResults();
 
                     // Execute tests
-                    LOG.info("Starting UpsertTableTest");
+                    LOGGER.info("Starting UpsertTableTest");
                     sink.updateResults(new UpsertTableTest().runTest(connection));
 
-                    LOG.info("Starting ReadTableTest");
+                    LOGGER.info("Starting ReadTableTest");
                     sink.updateResults(new ReadTableTest().runTest(connection));
                     return null;
 
@@ -354,7 +354,7 @@ public class PhoenixCanaryTool extends Configured implements Tool {
             appInfo.setSuccessful(true);
 
         } catch (Exception e) {
-            LOG.error(Throwables.getStackTraceAsString(e));
+            LOGGER.error(Throwables.getStackTraceAsString(e));
             appInfo.setMessage(Throwables.getStackTraceAsString(e));
             appInfo.setSuccessful(false);
 
@@ -372,11 +372,11 @@ public class PhoenixCanaryTool extends Configured implements Tool {
         try{
             connection = getConnectionWithRetry(connectionURL, true);
         } catch (Exception e) {
-            LOG.info("Failed to get connection with namespace enabled", e);
+            LOGGER.info("Failed to get connection with namespace enabled", e);
             try {
                 connection = getConnectionWithRetry(connectionURL, false);
             } catch (Exception ex) {
-                LOG.info("Failed to get connection without namespace enabled", ex);
+                LOGGER.info("Failed to get connection without namespace enabled", ex);
             }
         }
         return connection;
@@ -392,7 +392,7 @@ public class PhoenixCanaryTool extends Configured implements Tool {
 
         RetryCounter retrier = new RetryCounter(MAX_CONNECTION_ATTEMPTS,
                 FIRST_TIME_RETRY_TIMEOUT, TimeUnit.MILLISECONDS);
-        LOG.info("Trying to get the connection with "
+        LOGGER.info("Trying to get the connection with "
                 + retrier.getMaxAttempts() + " attempts with "
                 + "connectionURL :" + connectionURL
                 + "connProps :" + connProps);
@@ -400,11 +400,11 @@ public class PhoenixCanaryTool extends Configured implements Tool {
             try {
                 connection = DriverManager.getConnection(connectionURL, connProps);
             } catch (SQLException e) {
-                LOG.info("Trying to establish connection with "
+                LOGGER.info("Trying to establish connection with "
                         + retrier.getAttemptTimes() + " attempts", e);
             }
             if (connection != null) {
-                LOG.info("Successfully established connection within "
+                LOGGER.info("Successfully established connection within "
                         + retrier.getAttemptTimes() + " attempts");
                 break;
             }
@@ -415,11 +415,11 @@ public class PhoenixCanaryTool extends Configured implements Tool {
 
     public static void main(final String[] args) {
         try {
-            LOG.info("Starting Phoenix Canary Test tool...");
+            LOGGER.info("Starting Phoenix Canary Test tool...");
             ToolRunner.run(new PhoenixCanaryTool(), args);
         } catch (Exception e) {
-            LOG.error("Error in running Phoenix Canary Test tool. " + e);
+            LOGGER.error("Error in running Phoenix Canary Test tool. " + e);
         }
-        LOG.info("Exiting Phoenix Canary Test tool...");
+        LOGGER.info("Exiting Phoenix Canary Test tool...");
     }
 }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/trace/PhoenixMetricsSink.java b/phoenix-core/src/main/java/org/apache/phoenix/trace/PhoenixMetricsSink.java
index 65071f0..8d0aff5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/trace/PhoenixMetricsSink.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/trace/PhoenixMetricsSink.java
@@ -35,8 +35,6 @@ import java.util.List;
 import java.util.Properties;
 
 import org.apache.commons.configuration2.SubsetConfiguration;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.metrics2.AbstractMetric;
@@ -56,6 +54,8 @@ import org.apache.phoenix.schema.TableNotFoundException;
 import org.apache.phoenix.trace.util.Tracing;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.QueryUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;
@@ -78,7 +78,7 @@ import com.google.common.base.Joiner;
  */
 public class PhoenixMetricsSink implements MetricsSink {
 
-    private static final Log LOG = LogFactory.getLog(PhoenixMetricsSink.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixMetricsSink.class);
 
     private static final String VARIABLE_VALUE = "?";
 
@@ -102,14 +102,14 @@ public class PhoenixMetricsSink implements MetricsSink {
     private String table;
     
     public PhoenixMetricsSink() {
-        LOG.info("Writing tracing metrics to phoenix table");
+        LOGGER.info("Writing tracing metrics to phoenix table");
 
     }
 
     @Override
     public void init(SubsetConfiguration config) {
         Metrics.markSinkInitialized();
-        LOG.info("Phoenix tracing writer started");
+        LOGGER.info("Phoenix tracing writer started");
     }
 
     /**
@@ -210,7 +210,7 @@ public class PhoenixMetricsSink implements MetricsSink {
         try {
             this.conn.commit();
         } catch (SQLException e) {
-            LOG.error("Failed to commit changes to table", e);
+            LOGGER.error("Failed to commit changes to table", e);
         }
     }
 
@@ -270,7 +270,7 @@ public class PhoenixMetricsSink implements MetricsSink {
             } else if (tag.name().equals("Context")) {
                 // ignored
             } else {
-                LOG.error("Got an unexpected tag: " + tag);
+                LOGGER.error("Got an unexpected tag: " + tag);
             }
         }
 
@@ -286,9 +286,9 @@ public class PhoenixMetricsSink implements MetricsSink {
         stmt += COMMAS.join(keys);
         stmt += ") VALUES (" + COMMAS.join(values) + ")";
 
-        if (LOG.isTraceEnabled()) {
-            LOG.trace("Logging metrics to phoenix table via: " + stmt);
-            LOG.trace("With tags: " + variableValues);
+        if (LOGGER.isTraceEnabled()) {
+            LOGGER.trace("Logging metrics to phoenix table via: " + stmt);
+            LOGGER.trace("With tags: " + variableValues);
         }
         try {
             PreparedStatement ps = conn.prepareStatement(stmt);
@@ -304,7 +304,7 @@ public class PhoenixMetricsSink implements MetricsSink {
             MutationState newState = plan.execute();
             state.join(newState);
         } catch (SQLException e) {
-            LOG.error("Could not write metric: \n" + record + " to prepared statement:\n" + stmt,
+            LOGGER.error("Could not write metric: \n" + record + " to prepared statement:\n" + stmt,
                     e);
         }
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceReader.java b/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceReader.java
index 68b945c..88cc642 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceReader.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceReader.java
@@ -28,8 +28,6 @@ import java.util.List;
 import java.util.Set;
 import java.util.TreeSet;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.htrace.Span;
 import org.apache.htrace.Trace;
 import org.apache.phoenix.jdbc.PhoenixConnection;
@@ -37,6 +35,8 @@ import org.apache.phoenix.metrics.MetricInfo;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.util.LogUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Joiner;
 import com.google.common.primitives.Longs;
@@ -46,7 +46,7 @@ import com.google.common.primitives.Longs;
  */
 public class TraceReader {
 
-    private static final Log LOG = LogFactory.getLog(TraceReader.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(TraceReader.class);
     private final Joiner comma = Joiner.on(',');
     private String knownColumns;
     {
@@ -146,7 +146,7 @@ public class TraceReader {
                     orphan.parent = spanInfo;
                     spanInfo.children.add(orphan);
                     // / its no longer an orphan
-                    LOG.trace(addCustomAnnotations("Found parent for span: " + span));
+                    LOGGER.trace(addCustomAnnotations("Found parent for span: " + span));
                     orphans.remove(i--);
                 }
             }
@@ -156,7 +156,7 @@ public class TraceReader {
                 parentSpan.children.add(spanInfo);
             } else if (parent != Span.ROOT_SPAN_ID) {
                 // add the span to the orphan pile to check for the remaining spans we see
-                LOG.info(addCustomAnnotations("No parent span found for span: " + span + " (root span id: "
+                LOGGER.info(addCustomAnnotations("No parent span found for span: " + span + " (root span id: "
                         + Span.ROOT_SPAN_ID + ")"));
                 orphans.add(spanInfo);
             }
@@ -213,7 +213,7 @@ public class TraceReader {
                         + MetricInfo.TRACE.columnName + "=" + traceid + " AND "
                         + MetricInfo.PARENT.columnName + "=" + parent + " AND "
                         + MetricInfo.SPAN.columnName + "=" + span;
-        LOG.trace(addCustomAnnotations("Requesting columns with: " + request));
+        LOGGER.trace(addCustomAnnotations("Requesting columns with: " + request));
         ResultSet results = conn.createStatement().executeQuery(request);
         List<String> cols = new ArrayList<String>();
         while (results.next()) {
@@ -222,7 +222,7 @@ public class TraceReader {
             }
         }
         if (cols.size() < count) {
-            LOG.error(addCustomAnnotations("Missing tags! Expected " + count + ", but only got " + cols.size()
+            LOGGER.error(addCustomAnnotations("Missing tags! Expected " + count + ", but only got " + cols.size()
                     + " tags from rquest " + request));
         }
         return cols;
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceSpanReceiver.java b/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceSpanReceiver.java
index 122ae28..a2b84b6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceSpanReceiver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceSpanReceiver.java
@@ -22,13 +22,13 @@ import java.util.concurrent.ArrayBlockingQueue;
 import java.util.concurrent.BlockingQueue;
 
 import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.htrace.Span;
 import org.apache.htrace.SpanReceiver;
 import org.apache.htrace.impl.MilliSpan;
 import org.apache.phoenix.metrics.MetricInfo;
 import org.apache.phoenix.query.QueryServicesOptions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Sink for request traces ({@link SpanReceiver}) that pushes writes to {@link TraceWriter} in a
@@ -64,7 +64,7 @@ import org.apache.phoenix.query.QueryServicesOptions;
  */
 public class TraceSpanReceiver implements SpanReceiver {
 
-    private static final Log LOG = LogFactory.getLog(TraceSpanReceiver.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(TraceSpanReceiver.class);
 
     private static final int CAPACITY = QueryServicesOptions.withDefaults().getTracingTraceBufferSize();
 
@@ -77,11 +77,11 @@ public class TraceSpanReceiver implements SpanReceiver {
     @Override
     public void receiveSpan(Span span) {
         if (span.getTraceId() != 0 && spanQueue.offer(span)) {
-            if (LOG.isTraceEnabled()) {
-                LOG.trace("Span buffered to queue " + span.toJson());
+            if (LOGGER.isTraceEnabled()) {
+                LOGGER.trace("Span buffered to queue " + span.toJson());
             }
-        } else if (span.getTraceId() != 0 && LOG.isDebugEnabled()) {
-                LOG.debug("Span NOT buffered due to overflow in queue " + span.toJson());
+        } else if (span.getTraceId() != 0 && LOGGER.isDebugEnabled()) {
+                LOGGER.debug("Span NOT buffered due to overflow in queue " + span.toJson());
         }
     }
 
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceWriter.java
index e823359..f8dc19e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceWriter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceWriter.java
@@ -38,8 +38,6 @@ import java.util.concurrent.Executors;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.util.Pair;
@@ -56,6 +54,8 @@ import org.apache.phoenix.schema.TableNotFoundException;
 import org.apache.phoenix.trace.util.Tracing;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.QueryUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;
@@ -68,7 +68,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
  * batch commit size.
  */
 public class TraceWriter {
-    private static final Log LOG = LogFactory.getLog(TraceWriter.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(TraceWriter.class);
 
     private static final String VARIABLE_VALUE = "?";
 
@@ -105,9 +105,9 @@ public class TraceWriter {
 
         traceSpanReceiver = getTraceSpanReceiver();
         if (traceSpanReceiver == null) {
-            LOG.warn(
+            LOGGER.warn(
                 "No receiver has been initialized for TraceWriter. Traces will not be written.");
-            LOG.warn("Restart Phoenix to try again.");
+            LOGGER.warn("Restart Phoenix to try again.");
             return;
         }
 
@@ -119,7 +119,7 @@ public class TraceWriter {
             executor.scheduleAtFixedRate(new FlushMetrics(), 0, 10, TimeUnit.SECONDS);
         }
 
-        LOG.info("Writing tracing metrics to phoenix table");
+        LOGGER.info("Writing tracing metrics to phoenix table");
     }
 
     @VisibleForTesting
@@ -142,8 +142,8 @@ public class TraceWriter {
             while (!traceSpanReceiver.isSpanAvailable()) {
                 Span span = traceSpanReceiver.getSpan();
                 if (null == span) break;
-                if (LOG.isTraceEnabled()) {
-                    LOG.trace("Span received: " + span.toJson());
+                if (LOGGER.isTraceEnabled()) {
+                    LOGGER.trace("Span received: " + span.toJson());
                 }
                 addToBatch(span);
                 counter++;
@@ -217,9 +217,9 @@ public class TraceWriter {
             stmt += COMMAS.join(keys);
             stmt += ") VALUES (" + COMMAS.join(values) + ")";
 
-            if (LOG.isTraceEnabled()) {
-                LOG.trace("Logging metrics to phoenix table via: " + stmt);
-                LOG.trace("With tags: " + variableValues);
+            if (LOGGER.isTraceEnabled()) {
+                LOGGER.trace("Logging metrics to phoenix table via: " + stmt);
+                LOGGER.trace("With tags: " + variableValues);
             }
             try {
                 PreparedStatement ps = conn.prepareStatement(stmt);
@@ -237,7 +237,7 @@ public class TraceWriter {
                 MutationState newState = plan.execute();
                 state.join(newState);
             } catch (SQLException e) {
-                LOG.error("Could not write metric: \n" + span + " to prepared statement:\n" + stmt,
+                LOGGER.error("Could not write metric: \n" + span + " to prepared statement:\n" + stmt,
                     e);
             }
         }
@@ -272,14 +272,14 @@ public class TraceWriter {
                 createTable(conn, tableName);
             }
 
-            LOG.info(
+            LOGGER.info(
                 "Created new connection for tracing " + conn.toString() + " Table: " + tableName);
             return conn;
         } catch (Exception e) {
-            LOG.error("Tracing will NOT be pursued. New connection failed for tracing Table: "
+            LOGGER.error("Tracing will NOT be pursued. New connection failed for tracing Table: "
                     + tableName,
                 e);
-            LOG.error("Restart Phoenix to retry.");
+            LOGGER.error("Restart Phoenix to retry.");
             return null;
         }
     }
@@ -324,7 +324,7 @@ public class TraceWriter {
         try {
             conn.commit();
         } catch (SQLException e) {
-            LOG.error(
+            LOGGER.error(
                 "Unable to commit traces on conn: " + conn.toString() + " to table: " + tableName,
                 e);
         }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/trace/util/Tracing.java b/phoenix-core/src/main/java/org/apache/phoenix/trace/util/Tracing.java
index 616ad30..f68d8cd 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/trace/util/Tracing.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/trace/util/Tracing.java
@@ -26,8 +26,6 @@ import java.util.concurrent.Callable;
 
 import javax.annotation.Nullable;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.htrace.HTraceConfiguration;
 import org.apache.phoenix.call.CallRunner;
@@ -46,6 +44,8 @@ import org.apache.htrace.impl.ProbabilitySampler;
 import org.apache.htrace.wrappers.TraceCallable;
 import org.apache.htrace.wrappers.TraceRunnable;
 import org.apache.phoenix.trace.TraceWriter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Function;
 import com.google.common.base.Preconditions;
@@ -56,7 +56,7 @@ import com.sun.istack.NotNull;
  */
 public class Tracing {
 
-    private static final Log LOG = LogFactory.getLog(Tracing.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(Tracing.class);
 
     private static final String SEPARATOR = ".";
     // Constants for tracing across the wire
@@ -275,14 +275,14 @@ public class Tracing {
                 traceWriter.start();
             }
         } catch (RuntimeException e) {
-            LOG.warn("Tracing will outputs will not be written to any metrics sink! No "
+            LOGGER.warn("Tracing will outputs will not be written to any metrics sink! No "
                     + "TraceMetricsSink found on the classpath", e);
         } catch (IllegalAccessError e) {
             // This is an issue when we have a class incompatibility error, such as when running
             // within SquirrelSQL which uses an older incompatible version of commons-collections.
             // Seeing as this only results in disabling tracing, we swallow this exception and just
             // continue on without tracing.
-            LOG.warn("Class incompatibility while initializing metrics, metrics will be disabled", e);
+            LOGGER.warn("Class incompatibility while initializing metrics, metrics will be disabled", e);
         }
         initialized = true;
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
index f249af2..414a519 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
@@ -46,7 +46,7 @@ import com.google.protobuf.InvalidProtocolBufferException;
 
 public class OmidTransactionContext implements PhoenixTransactionContext {
 
-    private static final Logger logger = LoggerFactory.getLogger(OmidTransactionContext.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(OmidTransactionContext.class);
 
     private HBaseTransactionManager tm;
     private HBaseTransaction tx;
@@ -166,8 +166,8 @@ public class OmidTransactionContext implements PhoenixTransactionContext {
 
         try {
             tx = (HBaseTransaction) tm.fence(dataTable.getName().getBytes());
-            if (logger.isInfoEnabled()) {
-                logger.info("Added write fence at ~"
+            if (LOGGER.isInfoEnabled()) {
+                LOGGER.info("Added write fence at ~"
                         + tx.getReadTimestamp());
             }
         } catch (TransactionException e) {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
index 1aea0f3..19590e6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Mutation;
-import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
@@ -65,7 +64,7 @@ import com.google.common.collect.Lists;
 
 
 public class TephraTransactionContext implements PhoenixTransactionContext {
-    private static final Logger logger = LoggerFactory.getLogger(TephraTransactionContext.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(TephraTransactionContext.class);
     private static final TransactionCodec CODEC = new TransactionCodec();
 
     private final List<TransactionAware> txAwares;
@@ -209,8 +208,8 @@ public class TephraTransactionContext implements PhoenixTransactionContext {
                     txServiceClient);
             fenceWait.await(10000, TimeUnit.MILLISECONDS);
 
-            if (logger.isInfoEnabled()) {
-                logger.info("Added write fence at ~"
+            if (LOGGER.isInfoEnabled()) {
+                LOGGER.info("Added write fence at ~"
                         + getCurrentTransaction().getReadPointer());
             }
         } catch (InterruptedException e) {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/CSVCommonsLoader.java b/phoenix-core/src/main/java/org/apache/phoenix/util/CSVCommonsLoader.java
index 23f123e..a5f0177 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/CSVCommonsLoader.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/CSVCommonsLoader.java
@@ -42,7 +42,7 @@ import com.google.common.collect.ImmutableMap;
  */
 public class CSVCommonsLoader {
 
-    private static final Logger LOG = LoggerFactory.getLogger(CSVCommonsLoader.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(CSVCommonsLoader.class);
 
     public static final String DEFAULT_ARRAY_ELEMENT_SEPARATOR = ":";
 
@@ -277,10 +277,10 @@ public class CSVCommonsLoader {
             totalUpserts = upsertCount;
             if (upsertCount % upsertBatchSize == 0) {
                 if (upsertCount % 1000 == 0) {
-                    LOG.info("Processed upsert #{}", upsertCount);
+                    LOGGER.info("Processed upsert #{}", upsertCount);
                 }
                 try {
-                    LOG.info("Committing after {} records", upsertCount);
+                    LOGGER.info("Committing after {} records", upsertCount);
                     conn.commit();
                 } catch (SQLException e) {
                     throw new RuntimeException(e);
@@ -290,7 +290,7 @@ public class CSVCommonsLoader {
 
         @Override
         public void errorOnRecord(CSVRecord csvRecord, Throwable throwable) {
-            LOG.error("Error upserting record " + csvRecord, throwable.getMessage());
+            LOGGER.error("Error upserting record " + csvRecord, throwable.getMessage());
             if (strict) {
                 Throwables.propagate(throwable);
             }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/EquiDepthStreamHistogram.java b/phoenix-core/src/main/java/org/apache/phoenix/util/EquiDepthStreamHistogram.java
index 7649933..d042fac 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/EquiDepthStreamHistogram.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/EquiDepthStreamHistogram.java
@@ -22,10 +22,10 @@ import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
@@ -47,7 +47,7 @@ import com.google.common.base.Preconditions;
  *  comes out to basically O(log(T))
  */
 public class EquiDepthStreamHistogram {
-    private static final Log LOG = LogFactory.getLog(EquiDepthStreamHistogram.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(EquiDepthStreamHistogram.class);
 
     // used in maxSize calculation for each bar
     private static final double MAX_COEF = 1.7;
@@ -175,8 +175,8 @@ public class EquiDepthStreamHistogram {
         } else {
             smallerBar.incrementCount(countToDistribute);
         }
-        if (LOG.isTraceEnabled()) {
-            LOG.trace(String.format("Split orig=%s , newLeft=%s , newRight=%s", origBar, newLeft, newRight));
+        if (LOGGER.isTraceEnabled()) {
+            LOGGER.trace(String.format("Split orig=%s , newLeft=%s , newRight=%s", origBar, newLeft, newRight));
         }
         bars.remove(origBar);
         bars.add(newLeft);
@@ -230,8 +230,8 @@ public class EquiDepthStreamHistogram {
         bars.subList(currMinIdx, currMinIdx + 2).clear(); // remove minBars
         bars.add(newBar);
         Collections.sort(bars);
-        if (LOG.isTraceEnabled()) {
-            LOG.trace(String.format("Merged left=%s , right=%s , newBar=%s", leftBar, rightBar, newBar));
+        if (LOGGER.isTraceEnabled()) {
+            LOGGER.trace(String.format("Merged left=%s , right=%s , newBar=%s", leftBar, rightBar, newBar));
         }
         return true;
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
index a3912cf..98dc1c2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
@@ -91,7 +91,7 @@ import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
 
 public class MetaDataUtil {
-    private static final Logger logger = LoggerFactory.getLogger(MetaDataUtil.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(MetaDataUtil.class);
   
     public static final String VIEW_INDEX_TABLE_PREFIX = "_IDX_";
     public static final String LOCAL_INDEX_TABLE_PREFIX = "_LOCAL_IDX_";
@@ -783,12 +783,12 @@ public class MetaDataUtil {
                     org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.getRegionInfo(controller,
                         admin, loc.getRegion().getRegionName());
                 } catch (RemoteException e) {
-                    logger.debug("Cannot get region " + loc.getRegion().getEncodedName() + " info due to error:" + e);
+                    LOGGER.debug("Cannot get region " + loc.getRegion().getEncodedName() + " info due to error:" + e);
                     return false;
                 }
             }
         } catch (IOException ex) {
-            logger.warn("tableRegionsOnline failed due to:" + ex);
+            LOGGER.warn("tableRegionsOnline failed due to:" + ex);
             return false;
         } finally {
             if (hcon != null) {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixMRJobUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixMRJobUtil.java
index bf0d03b..59fb262 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixMRJobUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixMRJobUtil.java
@@ -31,8 +31,6 @@ import java.util.Map;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
@@ -43,6 +41,8 @@ import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.ZooKeeper;
 import org.apache.zookeeper.data.Stat;
 import org.codehaus.jettison.json.JSONException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.protobuf.InvalidProtocolBufferException;
 
@@ -71,7 +71,7 @@ public class PhoenixMRJobUtil {
     public static final int RM_CONNECT_TIMEOUT_MILLIS = 10 * 1000;
     public static final int RM_READ_TIMEOUT_MILLIS = 10 * 60 * 1000;
 
-    private static final Log LOG = LogFactory.getLog(PhoenixMRJobUtil.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixMRJobUtil.class);
 
     public static final String PHOENIX_MR_SCHEDULER_TYPE_NAME = "phoenix.index.mr.scheduler.type";
 
@@ -101,11 +101,11 @@ public class PhoenixMRJobUtil {
                         byte[] data = zk.getData(path, zkw, new Stat());
                         ActiveRMInfoProto proto = ActiveRMInfoProto.parseFrom(data);
                         proto.getRmId();
-                        LOG.info("Active RmId : " + proto.getRmId());
+                        LOGGER.info("Active RmId : " + proto.getRmId());
 
                         activeRMHost =
                                 config.get(YarnConfiguration.RM_HOSTNAME + "." + proto.getRmId());
-                        LOG.info("activeResourceManagerHostname = " + activeRMHost);
+                        LOGGER.info("activeResourceManagerHostname = " + activeRMHost);
 
                     }
                 }
@@ -140,7 +140,7 @@ public class PhoenixMRJobUtil {
             }
 
             url = urlBuilder.toString();
-            LOG.info("Attempt to get running/submitted jobs information from RM URL = " + url);
+            LOGGER.info("Attempt to get running/submitted jobs information from RM URL = " + url);
 
             URL obj = new URL(url);
             con = (HttpURLConnection) obj.openConnection();
@@ -155,7 +155,7 @@ public class PhoenixMRJobUtil {
             if (con != null) con.disconnect();
         }
 
-        LOG.info("Result of attempt to get running/submitted jobs from RM - URL=" + url
+        LOGGER.info("Result of attempt to get running/submitted jobs from RM - URL=" + url
                 + ",ResponseCode=" + con.getResponseCode() + ",Response=" + response);
 
         return response;
@@ -182,16 +182,16 @@ public class PhoenixMRJobUtil {
 
     public static void shutdown(ExecutorService pool) throws InterruptedException {
         pool.shutdown();
-        LOG.debug("Shutdown called");
+        LOGGER.debug("Shutdown called");
         pool.awaitTermination(200, TimeUnit.MILLISECONDS);
-        LOG.debug("Await termination called to wait for 200 msec");
+        LOGGER.debug("Await termination called to wait for 200 msec");
         if (!pool.isShutdown()) {
             pool.shutdownNow();
-            LOG.debug("Await termination called to wait for 200 msec");
+            LOGGER.debug("Await termination called to wait for 200 msec");
             pool.awaitTermination(100, TimeUnit.MILLISECONDS);
         }
         if (!pool.isShutdown()) {
-            LOG.warn("Pool did not shutdown");
+            LOGGER.warn("Pool did not shutdown");
         }
     }
 
@@ -222,7 +222,7 @@ public class PhoenixMRJobUtil {
         conf.setInt(MRJobConfig.MAP_MEMORY_MB, mapMemoryMB);
         conf.set(MRJobConfig.MAP_JAVA_OPTS, XMX_OPT + ((int) (mapMemoryMB * 0.9)) + "m");
 
-        LOG.info("Queue Name=" + conf.get(MRJobConfig.QUEUE_NAME) + ";" + "Map Meory MB="
+        LOGGER.info("Queue Name=" + conf.get(MRJobConfig.QUEUE_NAME) + ";" + "Map Meory MB="
                 + conf.get(MRJobConfig.MAP_MEMORY_MB) + ";" + "Map Java Opts="
                 + conf.get(MRJobConfig.MAP_JAVA_OPTS));
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java
index f7c313f..b7a625a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java
@@ -31,8 +31,6 @@ import java.util.Properties;
 import javax.annotation.Nullable;
 
 import org.apache.commons.lang3.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HConstants;
@@ -50,6 +48,8 @@ import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.schema.types.PInteger;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Function;
 import com.google.common.base.Joiner;
@@ -59,7 +59,7 @@ import com.google.common.collect.Lists;
 
 public final class QueryUtil {
 
-    private static final Log LOG = LogFactory.getLog(QueryUtil.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(QueryUtil.class);
 
     /**
      *  Column family name index within ResultSet resulting from {@link DatabaseMetaData#getColumns(String, String, String, String)}
@@ -353,7 +353,7 @@ public final class QueryUtil {
             throws SQLException, ClassNotFoundException {
         setServerConnection(props);
         String url = getConnectionUrl(props, null, principal);
-        LOG.info("Creating connection with the jdbc url: " + url);
+        LOGGER.info("Creating connection with the jdbc url: " + url);
         return DriverManager.getConnection(url, props);
     }
 
@@ -365,7 +365,7 @@ public final class QueryUtil {
     private static Connection getConnection(Properties props, Configuration conf)
             throws ClassNotFoundException, SQLException {
         String url = getConnectionUrl(props, conf);
-        LOG.info("Creating connection with the jdbc url: " + url);
+        LOGGER.info("Creating connection with the jdbc url: " + url);
         props = PropertiesUtil.combineProperties(props, conf);
         return DriverManager.getConnection(url, props);
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ReadOnlyProps.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ReadOnlyProps.java
index d6950a2..fe5d045 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ReadOnlyProps.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ReadOnlyProps.java
@@ -43,7 +43,7 @@ import com.google.common.collect.Maps;
  *
  */
 public class ReadOnlyProps implements Iterable<Entry<String, String>> {
-    private static final Logger logger = LoggerFactory.getLogger(ReadOnlyProps.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(ReadOnlyProps.class);
     public static final ReadOnlyProps EMPTY_PROPS = new ReadOnlyProps();
     @Nonnull
     private final Map<String, String> props;
@@ -314,7 +314,7 @@ public class ReadOnlyProps implements Iterable<Entry<String, String>> {
             String value = entry.getValue().toString();
             String oldValue = props.get(key);
             if (!Objects.equal(oldValue, value)) {
-                if (logger.isDebugEnabled()) logger.debug("Creating new ReadOnlyProps due to " + key + " with " + oldValue + "!=" + value);
+                if (LOGGER.isDebugEnabled()) LOGGER.debug("Creating new ReadOnlyProps due to " + key + " with " + oldValue + "!=" + value);
                 return new ReadOnlyProps(this, overrides);
             }
         }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
index 322e461..e308b3d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
@@ -32,8 +32,6 @@ import java.util.function.Function;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HConstants;
@@ -58,11 +56,12 @@ import org.apache.phoenix.hbase.index.write.IndexWriterUtils;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.schema.StaleRegionBoundaryCacheException;
-
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class ServerUtil {
     private static final int COPROCESSOR_SCAN_WORKS = VersionUtil.encodeVersion("0.98.6");
-    private static final Log LOG = LogFactory.getLog(ServerUtil.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(ServerUtil.class);
     
     private static final String FORMAT = "ERROR %d (%s): %s";
     private static final Pattern PATTERN = Pattern.compile("ERROR (\\d+) \\((\\w+)\\): (.*)");
@@ -342,7 +341,7 @@ public class ServerUtil {
                     try {
                         connection.close();
                     } catch (IOException e) {
-                        LOG.warn("Unable to close coprocessor connection", e);
+                        LOGGER.warn("Unable to close coprocessor connection", e);
                     }
                 }
                 connections.clear();
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
index 0c3fd22..98e17a1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
@@ -144,7 +144,7 @@ import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
 
 public class UpgradeUtil {
-    private static final Logger logger = LoggerFactory.getLogger(UpgradeUtil.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(UpgradeUtil.class);
     private static final byte[] SEQ_PREFIX_BYTES = ByteUtil.concat(QueryConstants.SEPARATOR_BYTE_ARRAY, Bytes.toBytes("_SEQ_"));
     public static final byte[] UPGRADE_TO_4_7_COLUMN_NAME = Bytes.toBytes("UPGRADE_TO_4_7");
     /**
@@ -268,17 +268,17 @@ public class UpgradeUtil {
                     }
                 }
                 if (sizeBytes >= batchSizeBytes) {
-                    logger.info("Committing bactch of temp rows");
+                    LOGGER.info("Committing bactch of temp rows");
                     target.batch(mutations, null);
                     mutations.clear();
                     sizeBytes = 0;
                 }
             }
             if (!mutations.isEmpty()) {
-                logger.info("Committing last bactch of temp rows");
+                LOGGER.info("Committing last bactch of temp rows");
                 target.batch(mutations, null);
             }
-            logger.info("Successfully completed copy");
+            LOGGER.info("Successfully completed copy");
         } catch (SQLException e) {
             throw e;
         } catch (Exception e) {
@@ -290,12 +290,12 @@ public class UpgradeUtil {
                 try {
                     if (source != null) source.close();
                 } catch (IOException e) {
-                    logger.warn("Exception during close of source table",e);
+                    LOGGER.warn("Exception during close of source table",e);
                 } finally {
                     try {
                         if (target != null) target.close();
                     } catch (IOException e) {
-                        logger.warn("Exception during close of target table",e);
+                        LOGGER.warn("Exception during close of target table",e);
                     }
                 }
             }
@@ -310,7 +310,7 @@ public class UpgradeUtil {
             if (nSaltBuckets <= 0) {
                 return;
             }
-            logger.warn("Pre-splitting SYSTEM.SEQUENCE table " + nSaltBuckets + "-ways. This may take some time - please do not close window.");
+            LOGGER.warn("Pre-splitting SYSTEM.SEQUENCE table " + nSaltBuckets + "-ways. This may take some time - please do not close window.");
             TableDescriptor desc = admin.getDescriptor(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES));
             createSequenceSnapshot(admin, conn);
             snapshotCreated = true;
@@ -320,7 +320,7 @@ public class UpgradeUtil {
             admin.createTable(desc, splitPoints);
             restoreSequenceSnapshot(admin, conn);
             success = true;
-            logger.warn("Completed pre-splitting SYSTEM.SEQUENCE table");
+            LOGGER.warn("Completed pre-splitting SYSTEM.SEQUENCE table");
         } catch (IOException e) {
             throw new SQLException("Unable to pre-split SYSTEM.SEQUENCE table", e);
         } finally {
@@ -329,14 +329,14 @@ public class UpgradeUtil {
                     try {
                         deleteSequenceSnapshot(admin);
                     } catch (SQLException e) {
-                        logger.warn("Exception while deleting SYSTEM.SEQUENCE snapshot during pre-split", e);
+                        LOGGER.warn("Exception while deleting SYSTEM.SEQUENCE snapshot during pre-split", e);
                     }
                 }
             } finally {
                 try {
                     admin.close();
                 } catch (IOException e) {
-                    logger.warn("Exception while closing admin during pre-split", e);
+                    LOGGER.warn("Exception while closing admin during pre-split", e);
                 }
             }
         }
@@ -459,8 +459,8 @@ public class UpgradeUtil {
                     createIndex.append(")");
                 }
                 createIndex.append(" ASYNC");
-                logger.info("Index creation query is : " + createIndex.toString());
-                logger.info("Dropping the index " + indexTableName
+                LOGGER.info("Index creation query is : " + createIndex.toString());
+                LOGGER.info("Dropping the index " + indexTableName
                     + " to clean up the index details from SYSTEM.CATALOG.");
                 PhoenixConnection localConnection = null;
                 if (tenantId != null) {
@@ -471,9 +471,9 @@ public class UpgradeUtil {
                     (localConnection == null ? globalConnection : localConnection).createStatement().execute(
                         "DROP INDEX IF EXISTS " + indexTableName + " ON "
                                 + SchemaUtil.getTableName(schemaName, dataTableName));
-                    logger.info("Recreating the index " + indexTableName);
+                    LOGGER.info("Recreating the index " + indexTableName);
                     (localConnection == null ? globalConnection : localConnection).createStatement().execute(createIndex.toString());
-                    logger.info("Created the index " + indexTableName);
+                    LOGGER.info("Created the index " + indexTableName);
                 } finally {
                     props.remove(PhoenixRuntime.TENANT_ID_ATTRIB);
                     if (localConnection != null) {
@@ -662,12 +662,12 @@ public class UpgradeUtil {
     }
     @SuppressWarnings("deprecation")
     public static boolean upgradeSequenceTable(PhoenixConnection conn, int nSaltBuckets, PTable oldTable) throws SQLException {
-        logger.info("Upgrading SYSTEM.SEQUENCE table");
+        LOGGER.info("Upgrading SYSTEM.SEQUENCE table");
 
         byte[] seqTableKey = SchemaUtil.getTableKey(null, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_SCHEMA, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_TABLE);
         Table sysTable = conn.getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
         try {
-            logger.info("Setting SALT_BUCKETS property of SYSTEM.SEQUENCE to " + SaltingUtil.MAX_BUCKET_NUM);
+            LOGGER.info("Setting SALT_BUCKETS property of SYSTEM.SEQUENCE to " + SaltingUtil.MAX_BUCKET_NUM);
             Cell saltKV = PhoenixKeyValueUtil.newKeyValue(seqTableKey, 
                     PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
                     PhoenixDatabaseMetaData.SALT_BUCKETS_BYTES,
@@ -703,7 +703,7 @@ public class UpgradeUtil {
                         return true;
                     }
                 }
-                logger.info("SYSTEM.SEQUENCE table has already been upgraded");
+                LOGGER.info("SYSTEM.SEQUENCE table has already been upgraded");
                 return false;
             }
             
@@ -721,7 +721,7 @@ public class UpgradeUtil {
                 Table seqTable = conn.getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES);
                 try {
                     boolean committed = false;
-                    logger.info("Adding salt byte to all SYSTEM.SEQUENCE rows");
+                    LOGGER.info("Adding salt byte to all SYSTEM.SEQUENCE rows");
                     ResultScanner scanner = seqTable.getScanner(scan);
                     try {
                         Result result;
@@ -756,7 +756,7 @@ public class UpgradeUtil {
                                     }
                                 }
                                 if (sizeBytes >= batchSizeBytes) {
-                                    logger.info("Committing bactch of SYSTEM.SEQUENCE rows");
+                                    LOGGER.info("Committing batch of SYSTEM.SEQUENCE rows");
                                     seqTable.batch(mutations, null);
                                     mutations.clear();
                                     sizeBytes = 0;
@@ -765,11 +765,11 @@ public class UpgradeUtil {
                             }
                         }
                         if (!mutations.isEmpty()) {
-                            logger.info("Committing last bactch of SYSTEM.SEQUENCE rows");
+                            LOGGER.info("Committing last batch of SYSTEM.SEQUENCE rows");
                             seqTable.batch(mutations, null);
                         }
                         preSplitSequenceTable(conn, nSaltBuckets);
-                        logger.info("Successfully completed upgrade of SYSTEM.SEQUENCE");
+                        LOGGER.info("Successfully completed upgrade of SYSTEM.SEQUENCE");
                         success = true;
                         return true;
                     } catch (InterruptedException e) {
@@ -792,10 +792,10 @@ public class UpgradeUtil {
                                         sysTable.put(unsaltPut);
                                         success = true;
                                     } finally {
-                                        if (!success) logger.error("SYSTEM.SEQUENCE TABLE LEFT IN CORRUPT STATE");
+                                        if (!success) LOGGER.error("SYSTEM.SEQUENCE TABLE LEFT IN CORRUPT STATE");
                                     }
                                 } else { // We're screwed b/c we've already committed some salted sequences...
-                                    logger.error("SYSTEM.SEQUENCE TABLE LEFT IN CORRUPT STATE");
+                                    LOGGER.error("SYSTEM.SEQUENCE TABLE LEFT IN CORRUPT STATE");
                                 }
                             }
                         }
@@ -806,7 +806,7 @@ public class UpgradeUtil {
                     try {
                         seqTable.close();
                     } catch (IOException e) {
-                        logger.warn("Exception during close",e);
+                        LOGGER.warn("Exception during close",e);
                     }
                 }
             }
@@ -817,7 +817,7 @@ public class UpgradeUtil {
             try {
                 sysTable.close();
             } catch (IOException e) {
-                logger.warn("Exception during close",e);
+                LOGGER.warn("Exception during close",e);
             }
         }
         
@@ -869,7 +869,7 @@ public class UpgradeUtil {
         try {
             // Need to use own connection with max time stamp to be able to read all data from SYSTEM.CATALOG 
             metaConnection = new PhoenixConnection(oldMetaConnection, HConstants.LATEST_TIMESTAMP);
-            logger.info("Upgrading metadata to support adding columns to tables with views");
+            LOGGER.info("Upgrading metadata to support adding columns to tables with views");
             String getBaseTableAndViews = "SELECT "
                     + COLUMN_FAMILY + " AS BASE_PHYSICAL_TABLE, "
                     + TENANT_ID + ", "
@@ -1105,7 +1105,7 @@ public class UpgradeUtil {
         try {
             // Need to use own connection with max time stamp to be able to read all data from SYSTEM.CATALOG 
             metaConnection = new PhoenixConnection(oldMetaConnection, HConstants.LATEST_TIMESTAMP);
-            logger.info("Upgrading metadata to add parent to child links for views");
+            LOGGER.info("Upgrading metadata to add parent to child links for views");
             metaConnection.commit();
             //     physical table 
             //         |  
@@ -1178,7 +1178,7 @@ public class UpgradeUtil {
         try {
             // Need to use own connection with max time stamp to be able to read all data from SYSTEM.CATALOG 
             metaConnection = new PhoenixConnection(oldMetaConnection, HConstants.LATEST_TIMESTAMP);
-            logger.info("Upgrading metadata to add parent to child links for views");
+            LOGGER.info("Upgrading metadata to add parent to child links for views");
             metaConnection.commit();
             String createChildLink = "UPSERT INTO SYSTEM.CHILD_LINK(TENANT_ID, TABLE_SCHEM, TABLE_NAME, COLUMN_NAME, COLUMN_FAMILY, LINK_TYPE) " +
                                         "SELECT TENANT_ID, TABLE_SCHEM, TABLE_NAME, COLUMN_NAME, COLUMN_FAMILY, LINK_TYPE " + 
@@ -1201,7 +1201,7 @@ public class UpgradeUtil {
     	// Need to use own connection with max time stamp to be able to read all data from SYSTEM.CATALOG 
         try (PhoenixConnection queryConn = new PhoenixConnection(oldMetaConnection, HConstants.LATEST_TIMESTAMP);
         		PhoenixConnection upsertConn = new PhoenixConnection(oldMetaConnection, HConstants.LATEST_TIMESTAMP)) {
-            logger.info("Upgrading metadata to add parent links for indexes on views");
+            LOGGER.info("Upgrading metadata to add parent links for indexes on views");
 			String indexQuery = "SELECT TENANT_ID, TABLE_SCHEM, TABLE_NAME, COLUMN_FAMILY FROM SYSTEM.CATALOG WHERE LINK_TYPE = "
 					+ LinkType.INDEX_TABLE.getSerializedValue();
 			String createViewIndexLink = "UPSERT INTO SYSTEM.CATALOG (TENANT_ID, TABLE_SCHEM, TABLE_NAME, COLUMN_FAMILY, LINK_TYPE) VALUES (?,?,?,?,?) ";
@@ -1380,7 +1380,7 @@ public class UpgradeUtil {
                         view = PhoenixRuntime.getTable(newConn, viewName);
                     } catch (TableNotFoundException e) {
                         // Ignore
-                        logger.warn("Error getting PTable for view: " + viewName);
+                        LOGGER.warn("Error getting PTable for view: " + viewName);
                         continue;
                     }
                     syncUpdateCacheFreqForIndexesOfTable(view, stmt);
@@ -1414,7 +1414,7 @@ public class UpgradeUtil {
                 table = PhoenixRuntime.getTable(conn, tableName);
             } catch (TableNotFoundException e) {
                 // Ignore tables not mapped to a Phoenix Table
-                logger.warn("Error getting PTable for HBase table: " + tableName);
+                LOGGER.warn("Error getting PTable for HBase table: " + tableName);
                 continue;
             }
             if (table.getType() == PTableType.INDEX) {
@@ -1727,7 +1727,7 @@ public class UpgradeUtil {
             if (isTable && !bypassUpgrade) {
                 String msg = "Taking snapshot of physical table " + physicalName + " prior to upgrade...";
                 System.out.println(msg);
-                logger.info(msg);
+                LOGGER.info(msg);
                 admin.disableTable(physicalName);
                 admin.snapshot(snapshotName, physicalName);
                 admin.enableTable(physicalName);
@@ -1742,7 +1742,7 @@ public class UpgradeUtil {
             }
             String msg = "Starting upgrade of " + escapedTableName + tenantInfo + "...";
             System.out.println(msg);
-            logger.info(msg);
+            LOGGER.info(msg);
             ResultSet rs;
             if (!bypassUpgrade) {
                 rs = upgradeConn.createStatement().executeQuery("SELECT /*+ NO_INDEX */ count(*) FROM " + escapedTableName);
@@ -1796,9 +1796,9 @@ public class UpgradeUtil {
             success = true;
             msg = "Completed upgrade of " + escapedTableName + tenantInfo;
             System.out.println(msg);
-            logger.info(msg);
+            LOGGER.info(msg);
         } catch (Exception e) {
-            logger.error("Exception during upgrade of " + physicalName + ":", e);
+            LOGGER.error("Exception during upgrade of " + physicalName + ":", e);
         } finally {
             boolean restored = false;
             try {
@@ -1808,25 +1808,25 @@ public class UpgradeUtil {
                     admin.enableTable(physicalName);
                     String msg = "Restored snapshot of " + physicalName + " due to failure of upgrade";
                     System.out.println(msg);
-                    logger.info(msg);
+                    LOGGER.info(msg);
                 }
                 restored = true;
             } catch (Exception e) {
-                logger.warn("Unable to restoring snapshot " + snapshotName + " after failed upgrade", e);
+                LOGGER.warn("Unable to restore snapshot " + snapshotName + " after failed upgrade", e);
             } finally {
                 try {
                     if (restoreSnapshot && restored) {
                         admin.deleteSnapshot(snapshotName);
                     }
                 } catch (Exception e) {
-                    logger.warn("Unable to delete snapshot " + snapshotName + " after upgrade:", e);
+                    LOGGER.warn("Unable to delete snapshot " + snapshotName + " after upgrade:", e);
                 } finally {
                     try {
                         if (admin != null) {
                             admin.close();
                         }
                     } catch (IOException e) {
-                        logger.warn("Unable to close admin after upgrade:", e);
+                        LOGGER.warn("Unable to close admin after upgrade:", e);
                     }
                 }
             }
@@ -2037,7 +2037,7 @@ public class UpgradeUtil {
         }
         if (ts != null) {
             // Update flag to represent table is mapped to namespace
-            logger.info(String.format("Updating meta information of phoenix table '%s' to map to namespace..",
+            LOGGER.info(String.format("Updating meta information of phoenix table '%s' to map to namespace..",
                     phoenixTableName));
             Put put = new Put(tableKey, ts);
             put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.IS_NAMESPACE_MAPPED_BYTES,
@@ -2056,19 +2056,19 @@ public class UpgradeUtil {
             boolean destTableExists=admin.tableExists(dstTable);
             if (!destTableExists) {
                 String snapshotName = QueryConstants.UPGRADE_TABLE_SNAPSHOT_PREFIX + srcTableName;
-                logger.info("Disabling table " + srcTableName + " ..");
+                LOGGER.info("Disabling table " + srcTableName + " ..");
                 admin.disableTable(srcTable);
-                logger.info(String.format("Taking snapshot %s of table %s..", snapshotName, srcTableName));
+                LOGGER.info(String.format("Taking snapshot %s of table %s..", snapshotName, srcTableName));
                 admin.snapshot(snapshotName, srcTable);
-                logger.info(
+                LOGGER.info(
                         String.format("Restoring snapshot %s in destination table %s..", snapshotName, destTableName));
                 admin.cloneSnapshot(snapshotName, dstTable);
-                logger.info(String.format("deleting old table %s..", srcTableName));
+                LOGGER.info(String.format("deleting old table %s..", srcTableName));
                 admin.deleteTable(srcTable);
-                logger.info(String.format("deleting snapshot %s..", snapshotName));
+                LOGGER.info(String.format("deleting snapshot %s..", snapshotName));
                 admin.deleteSnapshot(snapshotName);
             } else {
-                logger.info(String.format("Destination Table %s already exists. No migration needed.", destTableName));
+                LOGGER.info(String.format("Destination Table %s already exists. No migration needed.", destTableName));
             }
         }
     }
@@ -2110,15 +2110,15 @@ public class UpgradeUtil {
 
             if (table.isNamespaceMapped()) { throw new IllegalArgumentException("Table is already upgraded"); }
             if (!schemaName.equals("")) {
-                logger.info(String.format("Creating schema %s..", schemaName));
+                LOGGER.info(String.format("Creating schema %s..", schemaName));
                 conn.createStatement().execute("CREATE SCHEMA IF NOT EXISTS " + schemaName);
             }
             String oldPhysicalName = table.getPhysicalName().getString();
             String newPhysicalTablename = SchemaUtil.normalizeIdentifier(
                     SchemaUtil.getPhysicalTableName(oldPhysicalName, readOnlyProps).getNameAsString());
-            logger.info(String.format("Upgrading %s %s..", table.getType(), fullTableName));
-            logger.info(String.format("oldPhysicalName %s newPhysicalTablename %s..", oldPhysicalName, newPhysicalTablename));
-            logger.info(String.format("teanantId %s..", conn.getTenantId()));
+            LOGGER.info(String.format("Upgrading %s %s..", table.getType(), fullTableName));
+            LOGGER.info(String.format("oldPhysicalName %s newPhysicalTablename %s..", oldPhysicalName, newPhysicalTablename));
+            LOGGER.info(String.format("tenantId %s..", conn.getTenantId()));
 
             TableViewFinderResult childViewsResult = new TableViewFinderResult();
             try (Table childLinkTable =
@@ -2159,12 +2159,12 @@ public class UpgradeUtil {
                     boolean updateLink = true;
                     if (srcTableName.contains(QueryConstants.NAMESPACE_SEPARATOR)) {
                         // Skip already migrated
-                        logger.info(String.format("skipping as it seems index '%s' is already upgraded..",
+                        LOGGER.info(String.format("skipping as it seems index '%s' is already upgraded..",
                                 index.getName()));
                         continue;
                     }
                     if (MetaDataUtil.isLocalIndex(srcTableName)) {
-                        logger.info(String.format("local index '%s' found with physical hbase table name ''..",
+                        LOGGER.info(String.format("Local index '%s' found with physical hbase table name '%s'..",
                                 index.getName(), srcTableName));
                         destTableName = Bytes
                                 .toString(MetaDataUtil.getLocalIndexPhysicalName(newPhysicalTablename.getBytes()));
@@ -2173,18 +2173,18 @@ public class UpgradeUtil {
                                 .execute(String.format("ALTER TABLE %s set " + MetaDataUtil.PARENT_TABLE_KEY + "='%s'",
                                         phoenixTableName, table.getPhysicalName()));
                     } else if (MetaDataUtil.isViewIndex(srcTableName)) {
-                        logger.info(String.format("View index '%s' found with physical hbase table name ''..",
+                        LOGGER.info(String.format("View index '%s' found with physical hbase table name '%s'..",
                                 index.getName(), srcTableName));
                         destTableName = Bytes
                                 .toString(MetaDataUtil.getViewIndexPhysicalName(newPhysicalTablename.getBytes()));
                     } else {
-                        logger.info(String.format("Global index '%s' found with physical hbase table name ''..",
+                        LOGGER.info(String.format("Global index '%s' found with physical hbase table name '%s'..",
                                 index.getName(), srcTableName));
                         destTableName = SchemaUtil
                                 .getPhysicalTableName(index.getPhysicalName().getString(), readOnlyProps)
                                 .getNameAsString();
                     }
-                    logger.info(String.format("Upgrading index %s..", index.getName()));
+                    LOGGER.info(String.format("Upgrading index %s..", index.getName()));
                     if (!(table.getType() == PTableType.VIEW && !MetaDataUtil.isViewIndex(srcTableName)
                             && IndexType.LOCAL != index.getIndexType())) {
                         mapTableToNamespace(admin, metatable, srcTableName, destTableName, readOnlyProps,
@@ -2192,7 +2192,7 @@ public class UpgradeUtil {
                                 conn.getTenantId());
                     }
                     if (updateLink) {
-                        logger.info(String.format("Updating link information for index '%s' ..", index.getName()));
+                        LOGGER.info(String.format("Updating link information for index '%s' ..", index.getName()));
                         updateLink(conn, srcTableName, destTableName,index.getSchemaName(),index.getTableName());
                         conn.commit();
                     }
@@ -2207,14 +2207,14 @@ public class UpgradeUtil {
                 throw new RuntimeException("Error: problem occured during upgrade. Table is not upgraded successfully");
             }
             if (table.getType() == PTableType.VIEW) {
-                logger.info(String.format("Updating link information for view '%s' ..", table.getTableName()));
+                LOGGER.info(String.format("Updating link information for view '%s' ..", table.getTableName()));
                 updateLink(conn, oldPhysicalName, newPhysicalTablename,table.getSchemaName(),table.getTableName());
                 conn.commit();
                 
                 // if the view is a first level child, then we need to create the PARENT_TABLE link
                 // that was overwritten by the PHYSICAL_TABLE link 
                 if (table.getParentName().equals(table.getPhysicalName())) {
-                    logger.info(String.format("Creating PARENT link for view '%s' ..", table.getTableName()));
+                    LOGGER.info(String.format("Creating PARENT link for view '%s' ..", table.getTableName()));
                     // Add row linking view to its parent 
                     PreparedStatement linkStatement = conn.prepareStatement(MetaDataClient.CREATE_VIEW_LINK);
                     linkStatement.setString(1, Bytes.toStringBinary(tenantIdBytes));
@@ -2308,7 +2308,7 @@ public class UpgradeUtil {
                     conn.close();
                 conn = DriverManager.getConnection(connUrl, props).unwrap(PhoenixConnection.class);
             }
-            logger.info(String.format("Upgrading view %s for tenantId %s..", viewName,tenantId));
+            LOGGER.info(String.format("Upgrading view %s for tenantId %s..", viewName,tenantId));
             UpgradeUtil.upgradeTable(conn, viewName);
             prevTenantId = tenantId;
         }
@@ -2371,7 +2371,7 @@ public class UpgradeUtil {
                     currentValueArray, new SQLException[1]);
 
                 if (sqlExceptions[0] != null) {
-                    logger.error("Unable to convert view index sequence because of error. " +
+                    LOGGER.error("Unable to convert view index sequence because of error. " +
                         "It will need to be converted manually, " +
                         " or there's a risk that two view indexes of the same base table " +
                         "will have colliding view index ids.", sqlExceptions[0]);
@@ -2407,7 +2407,7 @@ public class UpgradeUtil {
                         false, EnvironmentEdgeManager.currentTimeMillis());
                 }
             } catch(SequenceAlreadyExistsException sae) {
-                logger.info("Tried to create view index sequence "
+                LOGGER.info("Tried to create view index sequence "
                     + SchemaUtil.getTableName(sae.getSchemaName(), sae.getSequenceName()) +
                     " during upgrade but it already existed. This is probably fine.");
             }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/UpsertExecutor.java b/phoenix-core/src/main/java/org/apache/phoenix/util/UpsertExecutor.java
index d9ce5f2..9ef7356 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/UpsertExecutor.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/UpsertExecutor.java
@@ -61,7 +61,7 @@ public abstract class UpsertExecutor<RECORD, FIELD> implements Closeable {
         void errorOnRecord(RECORD record, Throwable throwable);
     }
 
-    private static final Logger LOG = LoggerFactory.getLogger(UpsertExecutor.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(UpsertExecutor.class);
 
     protected final Connection conn;
     protected final List<ColumnInfo> columnInfos;
@@ -77,7 +77,7 @@ public abstract class UpsertExecutor<RECORD, FIELD> implements Closeable {
         PreparedStatement preparedStatement;
         try {
             String upsertSql = QueryUtil.constructUpsertStatement(tableName, columnInfoList);
-            LOG.info("Upserting SQL data with {}", upsertSql);
+            LOGGER.info("Upserting SQL data with {}", upsertSql);
             preparedStatement = conn.prepareStatement(upsertSql);
         } catch (SQLException e) {
             throw new RuntimeException(e);
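
The UpsertExecutor hunk above also relies on slf4j's parameterized message form ("Upserting SQL data with {}"), which defers message construction until the level check has passed. A minimal, self-contained sketch of that style for reference (the class name and SQL string below are illustrative and not part of this patch):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Illustrative only; shows the parameterized slf4j call used in UpsertExecutor above.
    public class UpsertLoggingSketch {
        private static final Logger LOGGER = LoggerFactory.getLogger(UpsertLoggingSketch.class);

        public static void main(String[] args) {
            String upsertSql = "UPSERT INTO T(K, V) VALUES(?, ?)";
            // The {} placeholder is filled in only if INFO is enabled, so no
            // message string is built when the statement is filtered out.
            LOGGER.info("Upserting SQL data with {}", upsertSql);
        }
    }
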
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ZKBasedMasterElectionUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ZKBasedMasterElectionUtil.java
index 047dcdb..2b51ab1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ZKBasedMasterElectionUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ZKBasedMasterElectionUtil.java
@@ -21,39 +21,39 @@ import java.net.InetAddress;
 import java.net.UnknownHostException;
 import java.util.UUID;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.zookeeper.CreateMode;
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.ZooDefs.Ids;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class ZKBasedMasterElectionUtil {
 
-    private static final Log LOG = LogFactory.getLog(ZKBasedMasterElectionUtil.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(ZKBasedMasterElectionUtil.class);
 
     public static boolean acquireLock(ZKWatcher zooKeeperWatcher, String parentNode,
             String lockName) throws KeeperException, InterruptedException {
         // Create the parent node as Persistent
-        LOG.info("Creating the parent lock node:" + parentNode);
+        LOGGER.info("Creating the parent lock node:" + parentNode);
         ZKUtil.createWithParents(zooKeeperWatcher, parentNode);
 
         // Create the ephemeral node
         String lockNode = parentNode + "/" + lockName;
         String nodeValue = getHostName() + "_" + UUID.randomUUID().toString();
-        LOG.info("Trying to acquire the lock by creating node:" + lockNode + " value:" + nodeValue);
+        LOGGER.info("Trying to acquire the lock by creating node:" + lockNode + " value:" + nodeValue);
         // Create the ephemeral node
         try {
             zooKeeperWatcher.getRecoverableZooKeeper().create(lockNode, Bytes.toBytes(nodeValue),
                 Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
         } catch (KeeperException.NodeExistsException e) {
-            LOG.info("Could not acquire lock. Another process had already acquired the lock on Node "
+            LOGGER.info("Could not acquire lock. Another process had already acquired the lock on Node "
                     + lockName);
             return false;
         }
-        LOG.info("Obtained the lock :" + lockNode);
+        LOGGER.info("Obtained the lock :" + lockNode);
         return true;
     }
 
@@ -62,7 +62,7 @@ public class ZKBasedMasterElectionUtil {
         try {
             host = InetAddress.getLocalHost().getCanonicalHostName();
         } catch (UnknownHostException e) {
-            LOG.error("UnknownHostException while trying to get the Local Host address : ", e);
+            LOGGER.error("UnknownHostException while trying to get the Local Host address : ", e);
         }
         return host;
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java b/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java
index 0b5881f..98eeb12 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java
@@ -54,7 +54,7 @@ import com.google.common.base.Function;
 /** {@link UpsertExecutor} over {@link CSVRecord}s. */
 public class CsvUpsertExecutor extends UpsertExecutor<CSVRecord, String> {
 
-    private static final Logger LOG = LoggerFactory.getLogger(CsvUpsertExecutor.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(CsvUpsertExecutor.class);
 
     protected final String arrayElementSeparator;
 
@@ -95,10 +95,10 @@ public class CsvUpsertExecutor extends UpsertExecutor<CSVRecord, String> {
             preparedStatement.execute();
             upsertListener.upsertDone(++upsertCount);
         } catch (Exception e) {
-            if (LOG.isDebugEnabled()) {
+            if (LOGGER.isDebugEnabled()) {
                 // Even though this is an error we only log it with debug logging because we're notifying the
                 // listener, and it can do its own logging if needed
-                LOG.debug("Error on CSVRecord " + csvRecord, e);
+                LOGGER.debug("Error on CSVRecord " + csvRecord, e);
             }
             upsertListener.errorOnRecord(csvRecord, e);
         }
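
The CsvUpsertExecutor hunk above keeps the isDebugEnabled() guard around the debug call, since the message concatenates the failing record and record.toString() can be costly. A minimal sketch of that guard pattern, assuming slf4j on the classpath (the class and record type are illustrative, not part of this patch); with recent slf4j versions the parameterized alternative, LOGGER.debug("Error on record {}", record, e), defers the same work and still attaches the trailing exception's stack trace:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Illustrative only; mirrors the guarded debug logging in the upsert executors above.
    public class RecordErrorLoggingSketch {
        private static final Logger LOGGER = LoggerFactory.getLogger(RecordErrorLoggingSketch.class);

        static void reportFailure(Object record, Exception cause) {
            // The guard skips building "Error on record " + record when DEBUG is off,
            // which matters when record.toString() is expensive.
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug("Error on record " + record, cause);
            }
        }
    }
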
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/json/JsonUpsertExecutor.java b/phoenix-core/src/main/java/org/apache/phoenix/util/json/JsonUpsertExecutor.java
index 9a6fef0..db5cdbf 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/json/JsonUpsertExecutor.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/json/JsonUpsertExecutor.java
@@ -52,7 +52,7 @@ import com.google.common.base.Function;
 /** {@link UpsertExecutor} over {@link Map} objects, as parsed from JSON. */
 public class JsonUpsertExecutor extends UpsertExecutor<Map<?, ?>, Object> {
 
-    protected static final Logger LOG = LoggerFactory.getLogger(JsonUpsertExecutor.class);
+    protected static final Logger LOGGER = LoggerFactory.getLogger(JsonUpsertExecutor.class);
 
     /** Testing constructor. Do not use in prod. */
     @VisibleForTesting
@@ -106,10 +106,10 @@ public class JsonUpsertExecutor extends UpsertExecutor<Map<?, ?>, Object> {
             preparedStatement.execute();
             upsertListener.upsertDone(++upsertCount);
         } catch (Exception e) {
-            if (LOG.isDebugEnabled()) {
+            if (LOGGER.isDebugEnabled()) {
                 // Even though this is an error we only log it with debug logging because we're notifying the
                 // listener, and it can do its own logging if needed
-                LOG.debug("Error on record " + record + ", fieldIndex " + fieldIndex + ", colName " + colName, e);
+                LOGGER.debug("Error on record " + record + ", fieldIndex " + fieldIndex + ", colName " + colName, e);
             }
             upsertListener.errorOnRecord(record, new Exception("fieldIndex: " + fieldIndex + ", colName " + colName, e));
         }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/regex/RegexUpsertExecutor.java b/phoenix-core/src/main/java/org/apache/phoenix/util/regex/RegexUpsertExecutor.java
index 0388d9c..05d009c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/regex/RegexUpsertExecutor.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/regex/RegexUpsertExecutor.java
@@ -33,7 +33,7 @@ import com.google.common.annotations.VisibleForTesting;
 /** {@link UpsertExecutor} over {@link Map} objects, convert input record into {@link Map} objects by using regex. */
 public class RegexUpsertExecutor extends JsonUpsertExecutor {
 
-    protected static final Logger LOG = LoggerFactory.getLogger(RegexUpsertExecutor.class);
+    protected static final Logger LOGGER = LoggerFactory.getLogger(RegexUpsertExecutor.class);
 
     /** Testing constructor. Do not use in prod. */
     @VisibleForTesting
@@ -69,10 +69,10 @@ public class RegexUpsertExecutor extends JsonUpsertExecutor {
             preparedStatement.execute();
             upsertListener.upsertDone(++upsertCount);
         } catch (Exception e) {
-            if (LOG.isDebugEnabled()) {
+            if (LOGGER.isDebugEnabled()) {
                 // Even though this is an error we only log it with debug logging because we're notifying the
                 // listener, and it can do its own logging if needed
-                LOG.debug("Error on record " + record + ", fieldIndex " + fieldIndex + ", colName " + colName, e);
+                LOGGER.debug("Error on record " + record + ", fieldIndex " + fieldIndex + ", colName " + colName, e);
             }
             upsertListener.errorOnRecord(record, new Exception("fieldIndex: " + fieldIndex + ", colName " + colName, e));
         }
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/IndexTestingUtils.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/IndexTestingUtils.java
index 39e9680..4031e45 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/IndexTestingUtils.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/IndexTestingUtils.java
@@ -23,8 +23,6 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HConstants;
@@ -36,15 +34,15 @@ import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec;
 import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec;
 import org.apache.hadoop.hbase.util.Bytes;
-
-
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Utility class for testing indexing
  */
 public class IndexTestingUtils {
 
-  private static final Log LOG = LogFactory.getLog(IndexTestingUtils.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(IndexTestingUtils.class);
   private static final String MASTER_INFO_PORT_KEY = "hbase.master.info.port";
   private static final String RS_INFO_PORT_KEY = "hbase.regionserver.info.port";
   
@@ -66,7 +64,7 @@ public class IndexTestingUtils {
   @SuppressWarnings("javadoc")
   public static void verifyIndexTableAtTimestamp(Table index1, List<KeyValue> expected,
       long start, long end, byte[] startKey, byte[] endKey) throws IOException {
-    LOG.debug("Scanning " + index1.getName().getNameAsString() + " between times (" + start
+    LOGGER.debug("Scanning " + index1.getName().getNameAsString() + " between times (" + start
         + ", " + end + "] and keys: [" + Bytes.toString(startKey) + ", " + Bytes.toString(endKey)
         + "].");
     Scan s = new Scan(startKey, endKey);
@@ -77,7 +75,7 @@ public class IndexTestingUtils {
     ResultScanner scanner = index1.getScanner(s);
     for (Result r : scanner) {
       received.addAll(r.listCells());
-      LOG.debug("Received: " + r.listCells());
+      LOGGER.debug("Received: " + r.listCells());
     }
     scanner.close();
     assertEquals("Didn't get the expected kvs from the index table!", expected, received);
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/StubAbortable.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/StubAbortable.java
index e9e025c..90d2920 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/StubAbortable.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/StubAbortable.java
@@ -17,20 +17,20 @@
  */
 package org.apache.phoenix.hbase.index;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Abortable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * TEst helper to stub out an {@link Abortable} when needed.
  */
 public class StubAbortable implements Abortable {
-  private static final Log LOG = LogFactory.getLog(StubAbortable.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(StubAbortable.class);
   private boolean abort;
 
   @Override
   public void abort(String reason, Throwable e) {
-    LOG.info("Aborting: " + reason, e);
+    LOGGER.info("Aborting: " + reason, e);
     abort = true;
   }
 
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java
index 151a8af..357206a 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java
@@ -31,8 +31,6 @@ import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.Stoppable;
@@ -59,9 +57,11 @@ import org.junit.Test;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class TestIndexWriter {
-  private static final Log LOG = LogFactory.getLog(TestIndexWriter.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(TestIndexWriter.class);
   @Rule
   public IndexTableName testName = new IndexTableName();
   private final byte[] row = Bytes.toBytes("row");
@@ -93,8 +93,8 @@ public class TestIndexWriter {
    */
   @Test
   public void testSynchronouslyCompletesAllWrites() throws Exception {
-    LOG.info("Starting " + testName.getTableNameString());
-    LOG.info("Current thread is interrupted: " + Thread.interrupted());
+    LOGGER.info("Starting " + testName.getTableNameString());
+    LOGGER.info("Current thread is interrupted: " + Thread.interrupted());
     Abortable abort = new StubAbortable();
     Stoppable stop = Mockito.mock(Stoppable.class);
     RegionCoprocessorEnvironment e =Mockito.mock(RegionCoprocessorEnvironment.class);
@@ -181,13 +181,13 @@ public class TestIndexWriter {
         Mockito.doAnswer(new Answer<Void>() {
             @Override
             public Void answer(InvocationOnMock invocation) throws Throwable {
-                LOG.info("Write started");
+                LOGGER.info("Write started");
                 writeStartedLatch.countDown();
                 // when we interrupt the thread for shutdown, we should see this throw an interrupt too
                 try {
                     waitOnAbortedLatch.await();
                 } catch (InterruptedException e) {
-                    LOG.info("Correctly interrupted while writing!");
+                    LOGGER.info("Correctly interrupted while writing!");
                     throw e;
                 }
                 return null;
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java
index d870916..a0ec76f 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java
@@ -26,8 +26,6 @@ import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.Stoppable;
@@ -52,13 +50,15 @@ import org.junit.Test;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.ArrayListMultimap;
 import com.google.common.collect.Multimap;
 
 public class TestParalleIndexWriter {
 
-  private static final Log LOG = LogFactory.getLog(TestParalleIndexWriter.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(TestParalleIndexWriter.class);
   @Rule
   public IndexTableName test = new IndexTableName();
   private final byte[] row = Bytes.toBytes("row");
@@ -86,8 +86,8 @@ public class TestParalleIndexWriter {
   @SuppressWarnings({ "unchecked", "deprecation" })
   @Test
   public void testSynchronouslyCompletesAllWrites() throws Exception {
-    LOG.info("Starting " + test.getTableNameString());
-    LOG.info("Current thread is interrupted: " + Thread.interrupted());
+    LOGGER.info("Starting " + test.getTableNameString());
+    LOGGER.info("Current thread is interrupted: " + Thread.interrupted());
     Abortable abort = new StubAbortable();
     Stoppable stop = Mockito.mock(Stoppable.class);
     ExecutorService exec = Executors.newFixedThreadPool(1);
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java
index 011547e..6eac6b9 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java
@@ -26,8 +26,6 @@ import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.TableName;
@@ -49,13 +47,15 @@ import org.junit.Test;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.ArrayListMultimap;
 import com.google.common.collect.Multimap;
 
 public class TestParalleWriterIndexCommitter {
 
-  private static final Log LOG = LogFactory.getLog(TestParalleWriterIndexCommitter.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(TestParalleWriterIndexCommitter.class);
   @Rule
   public IndexTableName test = new IndexTableName();
   private final byte[] row = Bytes.toBytes("row");
@@ -83,8 +83,8 @@ public class TestParalleWriterIndexCommitter {
   @SuppressWarnings({ "unchecked"})
   @Test
   public void testSynchronouslyCompletesAllWrites() throws Exception {
-    LOG.info("Starting " + test.getTableNameString());
-    LOG.info("Current thread is interrupted: " + Thread.interrupted());
+    LOGGER.info("Starting " + test.getTableNameString());
+    LOGGER.info("Current thread is interrupted: " + Thread.interrupted());
     RegionCoprocessorEnvironment e =Mockito.mock(RegionCoprocessorEnvironment.class);
     Configuration conf =new Configuration();
     Mockito.when(e.getConfiguration()).thenReturn(conf);
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
index 3f37ed9..2188bfc 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
@@ -31,8 +31,6 @@ import java.util.Set;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
@@ -74,6 +72,8 @@ import org.junit.Assert;
 import org.junit.Ignore;
 import org.junit.Rule;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Multimap;
 
@@ -85,7 +85,7 @@ import com.google.common.collect.Multimap;
 
 public class TestWALRecoveryCaching {
 
-  private static final Log LOG = LogFactory.getLog(TestWALRecoveryCaching.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(TestWALRecoveryCaching.class);
   private static final long ONE_SEC = 1000;
   private static final long ONE_MIN = 60 * ONE_SEC;
   private static final long TIMEOUT = ONE_MIN;
@@ -117,10 +117,10 @@ public class TestWALRecoveryCaching {
                 org.apache.hadoop.hbase.client.RegionInfo info, WALKey logKey,
                 org.apache.hadoop.hbase.wal.WALEdit logEdit) throws IOException {
       try {
-        LOG.debug("Restoring logs for index table");
+        LOGGER.debug("Restoring logs for index table");
         if (allowIndexTableToRecover != null) {
           allowIndexTableToRecover.await();
-          LOG.debug("Completed index table recovery wait latch");
+          LOGGER.debug("Completed index table recovery wait latch");
         }
       } catch (InterruptedException e) {
         Assert.fail("Should not be interrupted while waiting to allow the index to restore WALs.");
@@ -140,9 +140,9 @@ public class TestWALRecoveryCaching {
     @Override
     public void handleFailure(Multimap<HTableInterfaceReference, Mutation> attempted,
         Exception cause) throws IOException {
-      LOG.debug("Found index update failure!");
+      LOGGER.debug("Found index update failure!");
       if (allowIndexTableToRecover != null) {
-        LOG.info("failed index write on WAL recovery - allowing index table to be restored.");
+        LOGGER.info("failed index write on WAL recovery - allowing index table to be restored.");
         allowIndexTableToRecover.countDown();
       }
       super.handleFailure(attempted, cause);
@@ -219,25 +219,25 @@ public class TestWALRecoveryCaching {
       Bytes.toBytes(indexedTableName)));
 
     // log all the current state of the server
-    LOG.info("Current Server/Region paring: ");
+    LOGGER.info("Current Server/Region pairing: ");
     for (RegionServerThread t : util.getMiniHBaseCluster().getRegionServerThreads()) {
       // check all the conditions for the server to be done
       HRegionServer server = t.getRegionServer();
       if (server.isStopping() || server.isStopped() || server.isAborted()) {
-        LOG.info("\t== Offline: " + server.getServerName());
+        LOGGER.info("\t== Offline: " + server.getServerName());
         continue;
       }
       
       List<HRegion> regions = server.getRegions();
-      LOG.info("\t" + server.getServerName() + " regions: " + regions);
+      LOGGER.info("\t" + server.getServerName() + " regions: " + regions);
     }
 
-    LOG.debug("Killing server " + shared);
+    LOGGER.debug("Killing server " + shared);
     util.getMiniHBaseCluster().killRegionServer(shared);
-    LOG.debug("Waiting on server " + shared + "to die");
+    LOGGER.debug("Waiting on server " + shared + " to die");
     util.getMiniHBaseCluster().waitForRegionServerToStop(shared, TIMEOUT);
     // force reassign the regions from the table
-    // LOG.debug("Forcing region reassignment from the killed server: " + shared);
+    // LOGGER.debug("Forcing region reassignment from the killed server: " + shared);
     // for (HRegion region : online) {
     // util.getMiniHBaseCluster().getMaster().assign(region.getRegionName());
     // }
@@ -260,7 +260,7 @@ public class TestWALRecoveryCaching {
     ResultScanner scanner = index.getScanner(s);
     int count = 0;
     for (Result r : scanner) {
-      LOG.info("Got index table result:" + r);
+      LOGGER.info("Got index table result:" + r);
       count++;
     }
     assertEquals("Got an unexpected found of index rows", 1, count);
@@ -318,7 +318,7 @@ public class TestWALRecoveryCaching {
         // find the regionserver that matches the passed server
         List<HRegion> online = getRegionsFromServerForTable(cluster, server, table);
 
-        LOG.info("Shutting down and reassigning regions from " + server);
+        LOGGER.info("Shutting down and reassigning regions from " + server);
         cluster.stopRegionServer(server);
         cluster.waitForRegionServerToStop(server, TIMEOUT);
 
@@ -327,13 +327,13 @@ public class TestWALRecoveryCaching {
           cluster.getMaster().getAssignmentManager().assign(region.getRegionInfo());
         }
 
-        LOG.info("Starting region server:" + server.getHostname());
+        LOGGER.info("Starting region server:" + server.getHostname());
         cluster.startRegionServer(server.getHostname(), server.getPort());
 
         cluster.waitForRegionServerToStart(server.getHostname(), server.getPort(), TIMEOUT);
 
         // start a server to get back to the base number of servers
-        LOG.info("STarting server to replace " + server);
+        LOGGER.info("Starting server to replace " + server);
         cluster.startRegionServer();
         break;
       }
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/metrics/LoggingSink.java b/phoenix-core/src/test/java/org/apache/phoenix/metrics/LoggingSink.java
index 1af01ab..d2bccb7 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/metrics/LoggingSink.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/metrics/LoggingSink.java
@@ -18,12 +18,12 @@
 package org.apache.phoenix.metrics;
 
 import org.apache.commons.configuration2.SubsetConfiguration;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.metrics2.AbstractMetric;
 import org.apache.hadoop.metrics2.MetricsRecord;
 import org.apache.hadoop.metrics2.MetricsSink;
 import org.apache.phoenix.trace.TracingUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Simple sink that just logs the output of all the metrics that start with
@@ -31,7 +31,7 @@ import org.apache.phoenix.trace.TracingUtils;
  */
 public class LoggingSink implements MetricsSink {
 
-    private static final Log LOG = LogFactory.getLog(LoggingSink.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(LoggingSink.class);
 
     @Override
     public void init(SubsetConfiguration config) {
@@ -42,14 +42,14 @@ public class LoggingSink implements MetricsSink {
         // we could wait until flush, but this is a really lightweight process, so we just write
         // them
         // as soon as we get them
-        if (!LOG.isDebugEnabled()) {
+        if (!LOGGER.isDebugEnabled()) {
             return;
         }
-        LOG.debug("Found record:" + record.name());
+        LOGGER.debug("Found record:" + record.name());
         for (AbstractMetric metric : record.metrics()) {
             // just print the metric we care about
             if (metric.name().startsWith(TracingUtils.METRIC_SOURCE_KEY)) {
-                LOG.debug("\t metric:" + metric);
+                LOGGER.debug("\t metric:" + metric);
             }
         }
     }
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
index 96992ee..a6653ee 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
@@ -182,7 +182,7 @@ public abstract class BaseTest {
     public static final String DRIVER_CLASS_NAME_ATTRIB = "phoenix.driver.class.name";
     private static final double ZERO = 1e-9;
     private static final Map<String,String> tableDDLMap;
-    private static final Logger logger = LoggerFactory.getLogger(BaseTest.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(BaseTest.class);
     @ClassRule
     public static TemporaryFolder tmpFolder = new TemporaryFolder();
     private static final int dropTableTimeout = 300; // 5 mins should be long enough.
@@ -455,7 +455,7 @@ public abstract class BaseTest {
             try {
                 assertTrue(destroyDriver(driver));
             } catch (Throwable t) {
-                logger.error("Exception caught when destroying phoenix test driver", t);
+                LOGGER.error("Exception caught when destroying phoenix test driver", t);
             } finally {
                 driver = null;
             }
@@ -485,18 +485,18 @@ public abstract class BaseTest {
                         try {
                             u.shutdownMiniMapReduceCluster();
                         } catch (Throwable t) {
-                            logger.error(
+                            LOGGER.error(
                                 "Exception caught when shutting down mini map reduce cluster", t);
                         } finally {
                             try {
                                 u.shutdownMiniCluster();
                             } catch (Throwable t) {
-                                logger.error("Exception caught when shutting down mini cluster", t);
+                                LOGGER.error("Exception caught when shutting down mini cluster", t);
                             } finally {
                                 try {
                                     ConnectionFactory.shutdown();
                                 } finally {
-                                    logger.info(
+                                    LOGGER.info(
                                         "Time in seconds spent in shutting down mini cluster with "
                                                 + numTables + " tables: "
                                                 + (System.currentTimeMillis() - startTime) / 1000);
@@ -674,7 +674,7 @@ public abstract class BaseTest {
                     DriverManager.deregisterDriver(driver);
                 }
             } catch (Exception e) {
-                logger.warn("Unable to close registered driver: " + driver, e);
+                LOGGER.warn("Unable to close registered driver: " + driver, e);
             }
         }
         return false;
@@ -778,16 +778,17 @@ public abstract class BaseTest {
             int numTables = TABLE_COUNTER.get();
             TABLE_COUNTER.set(0);
             if(isDistributedClusterModeEnabled(config)) {
-                logger.info(
+                LOGGER.info(
                         "Deleting old tables on distributed cluster because number of tables is likely greater than "
                                 + TEARDOWN_THRESHOLD);
                 deletePriorMetaData(HConstants.LATEST_TIMESTAMP, url);
             } else {
-                logger.info(
+                LOGGER.info(
                     "Shutting down mini cluster because number of tables on this mini cluster is likely greater than "
                             + TEARDOWN_THRESHOLD);
                 tearDownMiniClusterAsync(numTables);
             }
+
         }
     }
 
@@ -962,9 +963,9 @@ public abstract class BaseTest {
                 try {
                     conn.createStatement().executeUpdate(ddl);
                 } catch (NewerTableAlreadyExistsException ex) {
-                    logger.info("Newer table " + fullTableName + " or its delete marker exists. Ignore current deletion");
+                    LOGGER.info("Newer table " + fullTableName + " or its delete marker exists. Ignore current deletion");
                 } catch (TableNotFoundException ex) {
-                    logger.info("Table " + fullTableName + " is already deleted.");
+                    LOGGER.info("Table " + fullTableName + " is already deleted.");
                 }
             }
             rs.close();
@@ -1014,7 +1015,7 @@ public abstract class BaseTest {
                 lastTenantId = tenantId;
             }
 
-            logger.info("DROP SEQUENCE STATEMENT: DROP SEQUENCE " + SchemaUtil.getEscapedTableName(rs.getString(2), rs.getString(3)));
+            LOGGER.info("DROP SEQUENCE STATEMENT: DROP SEQUENCE " + SchemaUtil.getEscapedTableName(rs.getString(2), rs.getString(3)));
             conn.createStatement().execute("DROP SEQUENCE " + SchemaUtil.getEscapedTableName(rs.getString(2), rs.getString(3)));
         }
         rs.close();
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/tool/ParameterizedPhoenixCanaryToolIT.java b/phoenix-core/src/test/java/org/apache/phoenix/tool/ParameterizedPhoenixCanaryToolIT.java
index 4e678f3..815cf67 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/tool/ParameterizedPhoenixCanaryToolIT.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/tool/ParameterizedPhoenixCanaryToolIT.java
@@ -19,8 +19,7 @@ package org.apache.phoenix.tool;
 
 import com.google.common.collect.Maps;
 import com.google.gson.Gson;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+
 import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
 import org.apache.phoenix.query.BaseTest;
 import org.apache.phoenix.query.QueryServices;
@@ -31,6 +30,8 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.ByteArrayOutputStream;
 import java.io.File;
@@ -57,7 +58,8 @@ import static org.junit.Assert.assertTrue;
 @Category(NeedsOwnMiniClusterTest.class)
 public class ParameterizedPhoenixCanaryToolIT extends BaseTest {
 
-	private static final Log logger = LogFactory.getLog(ParameterizedPhoenixCanaryToolIT.class);
+	private static final Logger LOGGER =
+			LoggerFactory.getLogger(ParameterizedPhoenixCanaryToolIT.class);
 	private static final String stdOutSink
 			= "org.apache.phoenix.tool.PhoenixCanaryTool$StdOutSink";
 	private static final String fileOutSink
@@ -107,7 +109,7 @@ public class ParameterizedPhoenixCanaryToolIT extends BaseTest {
 			tearDownMiniClusterAsync(1);
 			setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()),
 					new ReadOnlyProps(clientProps.entrySet().iterator()));
-			logger.info("New cluster is spinned up with test parameters " +
+			LOGGER.info("New cluster is spun up with test parameters " +
 					"isPositiveTestType" + this.isPositiveTestType +
 					"isNamespaceEnabled" + this.isNamespaceEnabled +
 					"resultSinkOption" + this.resultSinkOption);
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/CoprocessorHConnectionTableFactoryTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/CoprocessorHConnectionTableFactoryTest.java
index bf25682..f3f189c 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/util/CoprocessorHConnectionTableFactoryTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/util/CoprocessorHConnectionTableFactoryTest.java
@@ -21,8 +21,6 @@ import java.sql.Statement;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicBoolean;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
@@ -32,6 +30,8 @@ import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.phoenix.end2end.BaseUniqueNamesOwnClusterIT;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /*
  * This test is wrt to https://issues.apache.org/jira/browse/PHOENIX-4993.Test checks region
@@ -39,7 +39,8 @@ import org.junit.Test;
  */
 public class CoprocessorHConnectionTableFactoryTest extends BaseUniqueNamesOwnClusterIT {
   private static String ORG_PREFIX = "ORG";
-  private static final Log LOG = LogFactory.getLog(CoprocessorHConnectionTableFactoryTest.class);
+  private static final Logger LOGGER =
+          LoggerFactory.getLogger(CoprocessorHConnectionTableFactoryTest.class);
 
   @BeforeClass
   public static final void doSetup() throws Exception {
@@ -68,7 +69,7 @@ public class CoprocessorHConnectionTableFactoryTest extends BaseUniqueNamesOwnCl
       }
       conn.commit();
     } catch (Exception e) {
-      LOG.error("Client side exception:" + e);
+      LOGGER.error("Client side exception:" + e);
     }
 
   }
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java b/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
index 6ffc34d..90e3158 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
@@ -53,8 +53,6 @@ import java.util.List;
 import java.util.Properties;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.HConstants;
@@ -130,6 +128,8 @@ import org.apache.phoenix.schema.stats.GuidePostsKey;
 import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.transaction.TransactionFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Objects;
 import com.google.common.collect.Lists;
@@ -137,7 +137,7 @@ import com.google.common.collect.Lists;
 
 
 public class TestUtil {
-    private static final Log LOG = LogFactory.getLog(TestUtil.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(TestUtil.class);
     
     private static final Long ZERO = new Long(0);
     public static final String DEFAULT_SCHEMA_NAME = "S";
@@ -832,11 +832,11 @@ public class TestUtil {
                 try (Table htableForRawScan = services.getTable(Bytes.toBytes(tableName))) {
                     ResultScanner scanner = htableForRawScan.getScanner(scan);
                     List<Result> results = Lists.newArrayList(scanner);
-                    LOG.info("Results: " + results);
+                    LOGGER.info("Results: " + results);
                     compactionDone = results.isEmpty();
                     scanner.close();
                 }
-                LOG.info("Compaction done: " + compactionDone);
+                LOGGER.info("Compaction done: " + compactionDone);
                 
                 // need to run compaction after the next txn snapshot has been written so that compaction can remove deleted rows
                 if (!compactionDone && table.isTransactional()) {
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java
index 51d6743..2b55e29 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java
@@ -49,7 +49,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 public class Pherf {
-    private static final Logger logger = LoggerFactory.getLogger(Pherf.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(Pherf.class);
     private static final Options options = new Options();
     private final PhoenixUtil phoenixUtil = PhoenixUtil.create();
 
@@ -142,8 +142,8 @@ public class Pherf {
                         properties.getProperty("pherf.default.monitorFrequency");
         properties.setProperty("pherf.default.monitorFrequency", monitorFrequency);
 
-        logger.debug("Using Monitor: " + monitor);
-        logger.debug("Monitor Frequency Ms:" + monitorFrequency);
+        LOGGER.debug("Using Monitor: " + monitor);
+        LOGGER.debug("Monitor Frequency Ms:" + monitorFrequency);
         preLoadData = command.hasOption("l");
         executeQuerySets = command.hasOption("q");
         zookeeper = command.getOptionValue("z", "localhost");
@@ -184,10 +184,10 @@ public class Pherf {
         }
         PhoenixUtil.setRowCountOverride(rowCountOverride);
         if (!thinDriver) {
-            logger.info("Using thick driver with ZooKeepers '{}'", zookeeper);
+            LOGGER.info("Using thick driver with ZooKeepers '{}'", zookeeper);
             PhoenixUtil.setZookeeper(zookeeper);
         } else {
-            logger.info("Using thin driver with PQS '{}'", queryServerUrl);
+            LOGGER.info("Using thin driver with PQS '{}'", queryServerUrl);
             // Enables the thin-driver and sets the PQS URL
             PhoenixUtil.useThinDriver(queryServerUrl);
         }
@@ -230,7 +230,7 @@ public class Pherf {
             
             // Compare results and exit  
 			if (null != compareResults) {
-				logger.info("\nStarting to compare results and exiting for " + compareResults);
+                LOGGER.info("\nStarting to compare results and exiting for " + compareResults);
 				new GoogleChartGenerator(compareResults, compareType).readAndRender();
 				return;
             }
@@ -239,7 +239,7 @@ public class Pherf {
 
             // Drop tables with PHERF schema and regex comparison
             if (null != dropPherfTablesRegEx) {
-                logger.info(
+                LOGGER.info(
                         "\nDropping existing table with PHERF namename and " + dropPherfTablesRegEx
                                 + " regex expression.");
                 phoenixUtil.deleteTables(dropPherfTablesRegEx);
@@ -253,7 +253,7 @@ public class Pherf {
             }
 
             if (applySchema) {
-                logger.info("\nStarting to apply schema...");
+                LOGGER.info("\nStarting to apply schema...");
                 SchemaReader
                         reader =
                         (schemaFile == null) ?
@@ -264,7 +264,7 @@ public class Pherf {
 
             // Schema and Data Load
             if (preLoadData) {
-                logger.info("\nStarting Data Load...");
+                LOGGER.info("\nStarting Data Load...");
                 Workload workload = new WriteWorkload(parser, generateStatistics);
                 try {
                     workloadExecutor.add(workload);
@@ -277,26 +277,26 @@ public class Pherf {
                     }
                 }
             } else {
-                logger.info(
+                LOGGER.info(
                         "\nSKIPPED: Data Load and schema creation as -l argument not specified");
             }
 
             // Execute multi-threaded query sets
             if (executeQuerySets) {
-                logger.info("\nStarting to apply Execute Queries...");
+                LOGGER.info("\nStarting to apply Execute Queries...");
 
                 workloadExecutor
                         .add(new QueryExecutor(parser, phoenixUtil, workloadExecutor, parser.getDataModels(), queryHint,
                                 isFunctional, writeRuntimeResults));
 
             } else {
-                logger.info(
+                LOGGER.info(
                         "\nSKIPPED: Multithreaded query set execution as -q argument not specified");
             }
 
             // Clean up the monitor explicitly
             if (monitorManager != null) {
-                logger.info("Run completed. Shutting down Monitor.");
+                LOGGER.info("Run completed. Shutting down Monitor.");
                 monitorManager.complete();
             }
 
@@ -305,7 +305,7 @@ public class Pherf {
 
         } finally {
             if (workloadExecutor != null) {
-                logger.info("Run completed. Shutting down thread pool.");
+                LOGGER.info("Run completed. Shutting down thread pool.");
                 workloadExecutor.shutdown();
             }
         }
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/XMLConfigParser.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/XMLConfigParser.java
index 8f2a1d8..87b4403 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/XMLConfigParser.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/XMLConfigParser.java
@@ -42,7 +42,7 @@ import org.slf4j.LoggerFactory;
 
 public class XMLConfigParser {
 
-    private static final Logger logger = LoggerFactory.getLogger(XMLConfigParser.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(XMLConfigParser.class);
     private String filePattern;
     private List<DataModel> dataModels;
     private List<Scenario> scenarios = null;
@@ -96,7 +96,7 @@ public class XMLConfigParser {
                     scenarios.add(scenario);
                 }
             } catch (JAXBException e) {
-                logger.error("Unable to parse scenario file "+path, e);
+                LOGGER.error("Unable to parse scenario file "+path, e);
                 throw e;
             }
         }
@@ -122,7 +122,7 @@ public class XMLConfigParser {
         JAXBContext jaxbContext = JAXBContext.newInstance(DataModel.class);
         Unmarshaller jaxbUnmarshaller = jaxbContext.createUnmarshaller();
         String fName = PherfConstants.RESOURCE_SCENARIO + "/" + file.getFileName().toString();
-        logger.info("Open config file: " + fName);
+        LOGGER.info("Open config file: " + fName);
         XMLStreamReader xmlReader = xif.createXMLStreamReader(
             new StreamSource(XMLConfigParser.class.getResourceAsStream(fName)));
         return (DataModel) jaxbUnmarshaller.unmarshal(xmlReader);
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultManager.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultManager.java
index 929f96a..1cf740e 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultManager.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultManager.java
@@ -31,7 +31,7 @@ import java.util.ArrayList;
 import java.util.List;
 
 public class ResultManager {
-    private static final Logger logger = LoggerFactory.getLogger(ResultManager.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(ResultManager.class);
 
     private final List<ResultHandler> resultHandlers;
     private final ResultUtil util;
@@ -153,7 +153,7 @@ public class ResultManager {
                 handler.flush();
             } catch (Exception e) {
                 e.printStackTrace();
-                logger.warn("Could not flush handler: "
+                LOGGER.warn("Could not flush handler: "
                         + handler.getResultFileName() + " : " + e.getMessage());
             }
         }
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/RulesApplier.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/RulesApplier.java
index 662037e..8f3ec59 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/RulesApplier.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/RulesApplier.java
@@ -39,7 +39,7 @@ import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.atomic.AtomicLong;
 
 public class RulesApplier {
-    private static final Logger logger = LoggerFactory.getLogger(RulesApplier.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(RulesApplier.class);
     private static final AtomicLong COUNTER = new AtomicLong(0);
 
     // Used to bail out of random distribution if it takes too long
@@ -116,7 +116,7 @@ public class RulesApplier {
         List<Scenario> scenarios = parser.getScenarios();
         DataValue value = null;
         if (scenarios.contains(scenario)) {
-            logger.debug("We found a correct Scenario");
+            LOGGER.debug("We found a correct Scenario");
             
             Map<DataTypeMapping, List> overrideRuleMap = this.getCachedScenarioOverrides(scenario);
             
@@ -124,7 +124,7 @@ public class RulesApplier {
 	            List<Column> overrideRuleList = this.getCachedScenarioOverrides(scenario).get(phxMetaColumn.getType());
 	            
 				if (overrideRuleList != null && overrideRuleList.contains(phxMetaColumn)) {
-					logger.debug("We found a correct override column rule");
+					LOGGER.debug("We found a correct override column rule");
 					Column columnRule = getColumnForRuleOverride(overrideRuleList, phxMetaColumn);
 					if (columnRule != null) {
 						return getDataValue(columnRule);
@@ -139,12 +139,12 @@ public class RulesApplier {
             // Make sure Column from Phoenix Metadata matches a rule column
             if (ruleList.contains(phxMetaColumn)) {
                 // Generate some random data based on this rule
-                logger.debug("We found a correct column rule");
+                LOGGER.debug("We found a correct column rule");
                 Column columnRule = getColumnForRule(ruleList, phxMetaColumn);
 
                 value = getDataValue(columnRule);
             } else {
-                logger.warn("Attempted to apply rule to data, but could not find a rule to match type:"
+                LOGGER.warn("Attempted to apply rule to data, but could not find a rule to match type:"
                                 + phxMetaColumn.getType()
                 );
             }
@@ -177,7 +177,7 @@ public class RulesApplier {
         }
 
         if ((prefix.length() >= length) && (length > 0)) {
-            logger.warn("You are attempting to generate data with a prefix (" + prefix + ") "
+            LOGGER.warn("You are attempting to generate data with a prefix (" + prefix + ") "
                     + "That is longer than expected overall field length (" + length + "). "
                     + "This will certainly lead to unexpected data values.");
         }
@@ -352,7 +352,7 @@ public class RulesApplier {
             // While it's possible to get here if you have a bunch of really small distributions,
             // It's just really unlikely. This is just a safety just so we actually pick a value.
             if(count++ == OH_SHIT_LIMIT){
-                logger.info("We generated a value from hitting our OH_SHIT_LIMIT: " + OH_SHIT_LIMIT);
+                LOGGER.info("We generated a value from hitting our OH_SHIT_LIMIT: " + OH_SHIT_LIMIT);
                 generatedDataValue = valueRule;
             }
 
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/schema/SchemaReader.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/schema/SchemaReader.java
index 5ccdaaa..53c4408 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/schema/SchemaReader.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/schema/SchemaReader.java
@@ -33,7 +33,7 @@ import java.sql.Connection;
 import java.util.Collection;
 
 public class SchemaReader {
-    private static final Logger logger = LoggerFactory.getLogger(SchemaReader.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(SchemaReader.class);
     private final PhoenixUtil pUtil;
     private Collection<Path> resourceList;
     private final String searchPattern;
@@ -64,7 +64,7 @@ public class SchemaReader {
         try {
             connection = pUtil.getConnection(null);
             for (Path file : resourceList) {
-                logger.info("\nApplying schema to file: " + file);
+                LOGGER.info("\nApplying schema to file: " + file);
                 pUtil.executeStatement(resourceToString(file), connection);
             }
         } finally {
@@ -88,12 +88,12 @@ public class SchemaReader {
     }
 
     private void read() throws Exception {
-        logger.debug("Trying to match resource pattern: " + searchPattern);
+        LOGGER.debug("Trying to match resource pattern: " + searchPattern);
         System.out.println("Trying to match resource pattern: " + searchPattern);
 
         resourceList = null;
         resourceList = resourceUtil.getResourceList(searchPattern);
-        logger.info("File resourceList Loaded: " + resourceList);
+        LOGGER.info("File resourceList Loaded: " + resourceList);
         System.out.println("File resourceList Loaded: " + resourceList);
         if (resourceList.isEmpty()) {
             throw new FileLoaderException("Could not load Schema Files");
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/PhoenixUtil.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/PhoenixUtil.java
index 72ab3e0..cda9b99 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/PhoenixUtil.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/PhoenixUtil.java
@@ -44,7 +44,7 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_NAME;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SCHEM;
 
 public class PhoenixUtil {
-    private static final Logger logger = LoggerFactory.getLogger(PhoenixUtil.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixUtil.class);
     private static String zookeeper;
     private static int rowCountOverride = 0;
     private boolean testEnabled;
@@ -106,7 +106,7 @@ public class PhoenixUtil {
             Properties props = new Properties();
             if (null != tenantId) {
                 props.setProperty("TenantId", tenantId);
-                logger.debug("\nSetting tenantId to " + tenantId);
+                LOGGER.debug("\nSetting tenantId to " + tenantId);
             }
             String url = "jdbc:phoenix:thin:url=" + queryServerUrl + ";serialization=PROTOBUF";
             return DriverManager.getConnection(url, props);
@@ -118,7 +118,7 @@ public class PhoenixUtil {
             Properties props = new Properties();
             if (null != tenantId) {
                 props.setProperty("TenantId", tenantId);
-                logger.debug("\nSetting tenantId to " + tenantId);
+                LOGGER.debug("\nSetting tenantId to " + tenantId);
             }
             
             if (phoenixProperty != null) {
@@ -223,12 +223,12 @@ public class PhoenixUtil {
                         + "."
                         + resultSet.getString(TABLE_NAME);
                 if (tableName.matches(regexMatch)) {
-                    logger.info("\nDropping " + tableName);
+                    LOGGER.info("\nDropping " + tableName);
                     try {
                         executeStatementThrowException("DROP TABLE "
                                 + tableName + " CASCADE", conn);
                     } catch (org.apache.phoenix.schema.TableNotFoundException tnf) {
-                        logger.error("Table might be already be deleted via cascade. Schema: "
+                        LOGGER.error("Table might be already be deleted via cascade. Schema: "
                                 + tnf.getSchemaName()
                                 + " Table: "
                                 + tnf.getTableName());
@@ -288,7 +288,7 @@ public class PhoenixUtil {
             if (null != query.getDdl()) {
                 Connection conn = null;
                 try {
-                    logger.info("\nExecuting DDL:" + query.getDdl() + " on tenantId:" + query
+                    LOGGER.info("\nExecuting DDL:" + query.getDdl() + " on tenantId:" + query
                             .getTenantId());
                     executeStatement(query.getDdl(),
                             conn = getConnection(query.getTenantId()));
@@ -312,7 +312,7 @@ public class PhoenixUtil {
             Connection conn = null;
             try {
             	for (Ddl ddl : ddls) {
-	                logger.info("\nExecuting DDL:" + ddl + " on tenantId:" +tenantId);
+	                LOGGER.info("\nExecuting DDL:" + ddl + " on tenantId:" +tenantId);
 	                long startTime = System.currentTimeMillis();
 	                executeStatement(ddl.toString(), conn = getConnection(tenantId));
 	                if (ddl.getStatement().toUpperCase().contains(ASYNC_KEYWORD)) {
@@ -362,10 +362,10 @@ public class PhoenixUtil {
      */
     boolean isYarnJobInProgress(String tableName) {
 		try {
-			logger.info("Fetching YARN apps...");
+            LOGGER.info("Fetching YARN apps...");
 			Set<String> response = new PhoenixMRJobSubmitter().getSubmittedYarnApps();
 			for (String str : response) {
-				logger.info("Runnng YARN app: " + str);
+                LOGGER.info("Runnng YARN app: " + str);
 				if (str.toUpperCase().contains(tableName.toUpperCase())) {
 					return true;
 				}
@@ -382,7 +382,7 @@ public class PhoenixUtil {
     }
 
     public static void setZookeeper(String zookeeper) {
-        logger.info("Setting zookeeper: " + zookeeper);
+        LOGGER.info("Setting zookeeper: " + zookeeper);
         useThickDriver(zookeeper);
     }
 
@@ -406,7 +406,7 @@ public class PhoenixUtil {
      * @throws Exception
      */
     public void updatePhoenixStats(String tableName, Scenario scenario) throws Exception {
-        logger.info("Updating stats for " + tableName);
+        LOGGER.info("Updating stats for " + tableName);
         executeStatement("UPDATE STATISTICS " + tableName, scenario);
     }
 
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/ResourceList.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/ResourceList.java
index 0b54641..df5dbf7 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/ResourceList.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/ResourceList.java
@@ -40,7 +40,7 @@ import java.util.zip.ZipFile;
  * list resources available from the classpath @ *
  */
 public class ResourceList {
-    private static final Logger logger = LoggerFactory.getLogger(ResourceList.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(ResourceList.class);
     private final String rootResourceDir;
 
     public ResourceList(String rootResourceDir) {
@@ -84,10 +84,10 @@ public class ResourceList {
 
             String rName = rootResourceDir + resource;
 
-            logger.debug("Trying with the root append.");
+            LOGGER.debug("Trying with the root append.");
             url = ResourceList.class.getResource(rName);
             if (url == null) {
-                logger.debug("Failed! Must be using a jar. Trying without the root append.");
+                LOGGER.debug("Failed! Must be using a jar. Trying without the root append.");
                 url = ResourceList.class.getResource(resource);
 
                 if (url == null) {
@@ -99,7 +99,7 @@ public class ResourceList {
             } else {
                 path = Paths.get(url.toURI());
             }
-            logger.debug("Found the correct resource: " + path.toString());
+            LOGGER.debug("Found the correct resource: " + path.toString());
             paths.add(path);
         }
 
@@ -143,11 +143,11 @@ public class ResourceList {
             final ZipEntry ze = (ZipEntry) e.nextElement();
             final String fileName = ze.getName();
             final boolean accept = pattern.matcher(fileName).matches();
-            logger.trace("fileName:" + fileName);
-            logger.trace("File:" + file.toString());
-            logger.trace("Match:" + accept);
+            LOGGER.trace("fileName:" + fileName);
+            LOGGER.trace("File:" + file.toString());
+            LOGGER.trace("Match:" + accept);
             if (accept) {
-                logger.trace("Adding File from Jar: " + fileName);
+                LOGGER.trace("Adding File from Jar: " + fileName);
                 retVal.add("/" + fileName);
             }
         }
@@ -171,7 +171,7 @@ public class ResourceList {
                 final String fileName = file.getName();
                 final boolean accept = pattern.matcher(file.toString()).matches();
                 if (accept) {
-                    logger.debug("Adding File from directory: " + fileName);
+                    LOGGER.debug("Adding File from directory: " + fileName);
                     retval.add("/" + fileName);
                 }
             }
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultiThreadedRunner.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultiThreadedRunner.java
index 4423bbd..ecc432b 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultiThreadedRunner.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultiThreadedRunner.java
@@ -40,7 +40,7 @@ import org.apache.phoenix.pherf.configuration.XMLConfigParser;
 import org.apache.phoenix.pherf.util.PhoenixUtil;
 
 class MultiThreadedRunner implements Callable<Void> {
-    private static final Logger logger = LoggerFactory.getLogger(MultiThreadedRunner.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(MultiThreadedRunner.class);
     private Query query;
     private ThreadTime threadTime;
     private PhoenixUtil pUtil = PhoenixUtil.create();
@@ -87,7 +87,7 @@ class MultiThreadedRunner implements Callable<Void> {
      */
     @Override
     public Void call() throws Exception {
-        logger.info("\n\nThread Starting " + threadName + " ; " + query.getStatement() + " for "
+        LOGGER.info("\n\nThread Starting " + threadName + " ; " + query.getStatement() + " for "
                 + numberOfExecutions + "times\n\n");
         Long start = System.currentTimeMillis();
         for (long i = numberOfExecutions; (i > 0 && ((System.currentTimeMillis() - start)
@@ -106,7 +106,7 @@ class MultiThreadedRunner implements Callable<Void> {
             resultManager.flush();
         }
 
-        logger.info("\n\nThread exiting." + threadName + "\n\n");
+        LOGGER.info("\n\nThread exiting." + threadName + "\n\n");
         return null;
     }
 
@@ -137,7 +137,7 @@ class MultiThreadedRunner implements Callable<Void> {
             conn.setAutoCommit(true);
             final String statementString = query.getDynamicStatement(ruleApplier, scenario);
             statement = conn.prepareStatement(statementString);
-            logger.info("Executing: " + statementString);
+            LOGGER.info("Executing: " + statementString);
             
             if (scenario.getWriteParams() != null) {
             	Workload writes = new WriteWorkload(PhoenixUtil.create(), parser, scenario, GeneratePhoenixStats.NO);
@@ -165,7 +165,7 @@ class MultiThreadedRunner implements Callable<Void> {
                 conn.commit();
             }
         } catch (Exception e) {
-            logger.error("Exception while executing query", e);
+            LOGGER.error("Exception while executing query", e);
             exception = e.getMessage();
             throw e;
         } finally {
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultithreadedDiffer.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultithreadedDiffer.java
index 6e828bd..71de762 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultithreadedDiffer.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultithreadedDiffer.java
@@ -31,7 +31,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 class MultithreadedDiffer implements Callable<Void> {
-    private static final Logger logger = LoggerFactory.getLogger(MultiThreadedRunner.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(MultithreadedDiffer.class);
     private Thread t;
     private Query query;
     private ThreadTime threadTime;
@@ -82,7 +82,7 @@ class MultithreadedDiffer implements Callable<Void> {
      * Executes verification runs for a minimum of number of execution or execution duration
      */
     public Void call() throws Exception {
-        logger.info("\n\nThread Starting " + t.getName() + " ; " + query.getStatement() + " for "
+        LOGGER.info("\n\nThread Starting " + t.getName() + " ; " + query.getStatement() + " for "
                 + numberOfExecutions + "times\n\n");
         Long start = System.currentTimeMillis();
         for (long i = numberOfExecutions; (i > 0 && ((System.currentTimeMillis() - start)
@@ -93,7 +93,7 @@ class MultithreadedDiffer implements Callable<Void> {
                 e.printStackTrace();
             }
         }
-        logger.info("\n\nThread exiting." + t.getName() + "\n\n");
+            LOGGER.info("\n\nThread exiting." + t.getName() + "\n\n");
         return null;
     }
 }
\ No newline at end of file
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/QueryExecutor.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/QueryExecutor.java
index c4a3517..d894a96 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/QueryExecutor.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/QueryExecutor.java
@@ -36,7 +36,7 @@ import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Future;
 
 public class QueryExecutor implements Workload {
-    private static final Logger logger = LoggerFactory.getLogger(QueryExecutor.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(QueryExecutor.class);
     private List<DataModel> dataModels;
     private String queryHint;
     private final boolean exportCSV;
@@ -113,7 +113,7 @@ public class QueryExecutor implements Workload {
                         }
                     }
                 } catch (Exception e) {
-                    logger.error("Scenario throws exception", e);
+                    LOGGER.error("Scenario throws exception", e);
                     throw e;
                 }
                 return null;
@@ -165,7 +165,7 @@ public class QueryExecutor implements Workload {
                     resultManager.write(dataModelResults, ruleApplier);
                     resultManager.flush();
                 } catch (Exception e) {
-                    logger.error("Scenario throws exception", e);
+                    LOGGER.error("Scenario throws exception", e);
                     throw e;
                 }
                 return null;
@@ -255,7 +255,7 @@ public class QueryExecutor implements Workload {
         queryResult.getThreadTimes().add(threadTime);
         threadTime.setThreadName(name);
         queryResult.setHint(this.queryHint);
-        logger.info("\nExecuting query " + queryResult.getStatement());
+        LOGGER.info("\nExecuting query " + queryResult.getStatement());
         Callable<Void> thread;
         if (workloadExecutor.isPerformance()) {
             thread =
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/QueryVerifier.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/QueryVerifier.java
index 7b2bb12..786f778 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/QueryVerifier.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/QueryVerifier.java
@@ -44,7 +44,7 @@ import difflib.Patch;
 
 public class QueryVerifier {
     private PhoenixUtil pUtil = PhoenixUtil.create();
-    private static final Logger logger = LoggerFactory.getLogger(QueryVerifier.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(QueryVerifier.class);
     private boolean useTemporaryOutput;
     private String directoryLocation;
 
@@ -110,10 +110,10 @@ public class QueryVerifier {
 
         Patch patch = DiffUtils.diff(original, newLines);
         if (patch.getDeltas().isEmpty()) {
-            logger.info("Match: " + query.getId() + " with " + newCSV);
+            LOGGER.info("Match: " + query.getId() + " with " + newCSV);
             return true;
         } else {
-            logger.error("DIFF FAILED: " + query.getId() + " with " + newCSV);
+            LOGGER.error("DIFF FAILED: " + query.getId() + " with " + newCSV);
             return false;
         }
     }
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WorkloadExecutor.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WorkloadExecutor.java
index 4abb574..ff599b8 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WorkloadExecutor.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WorkloadExecutor.java
@@ -31,7 +31,7 @@ import java.util.Properties;
 import java.util.concurrent.*;
 
 public class WorkloadExecutor {
-    private static final Logger logger = LoggerFactory.getLogger(WorkloadExecutor.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(WorkloadExecutor.class);
     private final int poolSize;
     private final boolean isPerformance;
 
@@ -87,7 +87,7 @@ public class WorkloadExecutor {
             future.get();
             jobs.remove(workload);
         } catch (InterruptedException | ExecutionException e) {
-            logger.error("", e);
+            LOGGER.error("", e);
         }
     }
 
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WriteWorkload.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WriteWorkload.java
index cae223c..3df5fe8 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WriteWorkload.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WriteWorkload.java
@@ -52,7 +52,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 public class WriteWorkload implements Workload {
-    private static final Logger logger = LoggerFactory.getLogger(WriteWorkload.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(WriteWorkload.class);
 
     public static final String USE_BATCH_API_PROPERTY = "pherf.default.dataloader.batchApi";
 
@@ -169,7 +169,7 @@ public class WriteWorkload implements Workload {
                     resultUtil.write(dataLoadThreadTime);
 
                 } catch (Exception e) {
-                    logger.error("WriteWorkLoad failed", e);
+                    LOGGER.error("WriteWorkLoad failed", e);
                     throw e;
                 }
                 return null;
@@ -179,7 +179,7 @@ public class WriteWorkload implements Workload {
 
     private synchronized void exec(DataLoadTimeSummary dataLoadTimeSummary,
             DataLoadThreadTime dataLoadThreadTime, Scenario scenario) throws Exception {
-        logger.info("\nLoading " + scenario.getRowCount() + " rows for " + scenario.getTableName());
+        LOGGER.info("\nLoading " + scenario.getRowCount() + " rows for " + scenario.getTableName());
         
         // Execute any pre dataload scenario DDLs
         pUtil.executeScenarioDdl(scenario.getPreScenarioDdls(), scenario.getTenantId(), dataLoadTimeSummary);
@@ -190,11 +190,11 @@ public class WriteWorkload implements Workload {
 
         // Update Phoenix Statistics
         if (this.generateStatistics == GeneratePhoenixStats.YES) {
-        	logger.info("Updating Phoenix table statistics...");
+            LOGGER.info("Updating Phoenix table statistics...");
         	pUtil.updatePhoenixStats(scenario.getTableName(), scenario);
-        	logger.info("Stats update done!");
+            LOGGER.info("Stats update done!");
         } else {
-        	logger.info("Phoenix table stats update not requested.");
+            LOGGER.info("Phoenix table stats update not requested.");
         }
         
         // Execute any post data load scenario DDLs before starting query workload
@@ -214,7 +214,7 @@ public class WriteWorkload implements Workload {
                     pUtil.getColumnsFromPhoenix(scenario.getSchemaName(),
                             scenario.getTableNameWithoutSchemaName(), pUtil.getConnection(scenario.getTenantId()));
             int threadRowCount = rowCalculator.getNext();
-            logger.info(
+            LOGGER.info(
                     "Kick off thread (#" + i + ")for upsert with (" + threadRowCount + ") rows.");
             Future<Info>
                     write =
@@ -239,11 +239,11 @@ public class WriteWorkload implements Workload {
             Info writeInfo = write.get();
             sumRows += writeInfo.getRowCount();
             sumDuration += writeInfo.getDuration();
-            logger.info("Executor (" + this.hashCode() + ") writes complete with row count ("
+            LOGGER.info("Executor (" + this.hashCode() + ") writes complete with row count ("
                     + writeInfo.getRowCount() + ") in Ms (" + writeInfo.getDuration() + ")");
         }
         long testDuration = System.currentTimeMillis() - start;
-        logger.info("Writes completed with total row count (" + sumRows
+        LOGGER.info("Writes completed with total row count (" + sumRows
                 + ") with total elapsed time of (" + testDuration
                 + ") ms and total CPU execution time of (" + sumDuration + ") ms");
         dataLoadTimeSummary
@@ -296,7 +296,7 @@ public class WriteWorkload implements Workload {
                             }
                             connection.commit();
                             duration = System.currentTimeMillis() - last;
-                            logger.info("Writer (" + Thread.currentThread().getName()
+                            LOGGER.info("Writer (" + Thread.currentThread().getName()
                                     + ") committed Batch. Total " + getBatchSize()
                                     + " rows for this thread (" + this.hashCode() + ") in ("
                                     + duration + ") Ms");
@@ -315,7 +315,7 @@ public class WriteWorkload implements Workload {
                         }
                     }
                 } catch (SQLException e) {
-                    logger.error("Scenario " + scenario.getName() + " failed with exception ", e);
+                    LOGGER.error("Scenario " + scenario.getName() + " failed with exception ", e);
                     throw e;
                 } finally {
                     // Need to keep the statement open to send the remaining batch of updates
@@ -342,7 +342,7 @@ public class WriteWorkload implements Workload {
                         try {
                             connection.commit();
                             duration = System.currentTimeMillis() - start;
-                            logger.info("Writer ( " + Thread.currentThread().getName()
+                            LOGGER.info("Writer ( " + Thread.currentThread().getName()
                                     + ") committed Final Batch. Duration (" + duration + ") Ms");
                             connection.close();
                         } catch (SQLException e) {
diff --git a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ConfigurationParserTest.java b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ConfigurationParserTest.java
index 0b6c9cc..343285f 100644
--- a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ConfigurationParserTest.java
+++ b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ConfigurationParserTest.java
@@ -38,7 +38,7 @@ import javax.xml.bind.Marshaller;
 import static org.junit.Assert.*;
 
 public class ConfigurationParserTest extends ResultBaseTest {
-    private static final Logger logger = LoggerFactory.getLogger(ConfigurationParserTest.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(ConfigurationParserTest.class);
 
     @Test
     public void testReadWriteWorkloadReader() throws Exception {
@@ -65,7 +65,7 @@ public class ConfigurationParserTest extends ResultBaseTest {
     public void testConfigReader() {
         try {
 
-            logger.debug("DataModel: " + writeXML());
+            LOGGER.debug("DataModel: " + writeXML());
             List<Scenario> scenarioList = getScenarios();
             List<Column> dataMappingColumns = getDataModel().getDataMappingColumns();
             assertTrue("Could not load the data columns from xml.",
diff --git a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/XMLConfigParserTest.java b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/XMLConfigParserTest.java
index c5746f9..a4285f4 100644
--- a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/XMLConfigParserTest.java
+++ b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/XMLConfigParserTest.java
@@ -33,7 +33,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 public class XMLConfigParserTest {
-    private static final Logger LOG = LoggerFactory.getLogger(XMLConfigParserTest.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(XMLConfigParserTest.class);
   
     @Test
     public void testDTDInScenario() throws Exception {
@@ -45,7 +45,7 @@ public class XMLConfigParserTest {
             fail("The scenario should have failed to parse because it contains a DTD");
         } catch (UnmarshalException e) {
             // If we don't parse the DTD, the variable 'name' won't be defined in the XML
-            LOG.warn("Caught expected exception", e);
+            LOGGER.warn("Caught expected exception", e);
             Throwable cause = e.getLinkedException();
             assertTrue("Cause was a " + cause.getClass(), cause instanceof XMLStreamException);
         }
diff --git a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/result/impl/XMLResultHandlerTest.java b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/result/impl/XMLResultHandlerTest.java
index 98c492f..83a28e0 100644
--- a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/result/impl/XMLResultHandlerTest.java
+++ b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/result/impl/XMLResultHandlerTest.java
@@ -32,7 +32,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 public class XMLResultHandlerTest {
-    private static final Logger LOG = LoggerFactory.getLogger(XMLResultHandlerTest.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(XMLResultHandlerTest.class);
 
     @Test
     public void testDTDInResults() throws Exception {
@@ -45,7 +45,7 @@ public class XMLResultHandlerTest {
           fail("Expected to see an exception parsing the results with a DTD");
         } catch (UnmarshalException e) {
           // If we don't parse the DTD, the variable 'name' won't be defined in the XML
-          LOG.debug("Caught expected exception", e);
+          LOGGER.debug("Caught expected exception", e);
           Throwable cause = e.getLinkedException();
           assertTrue("Cause was a " + cause.getClass(), cause instanceof XMLStreamException);
         }
diff --git a/phoenix-tracing-webapp/src/main/java/org/apache/phoenix/tracingwebapp/http/Main.java b/phoenix-tracing-webapp/src/main/java/org/apache/phoenix/tracingwebapp/http/Main.java
index 249f8e6..f420fed 100755
--- a/phoenix-tracing-webapp/src/main/java/org/apache/phoenix/tracingwebapp/http/Main.java
+++ b/phoenix-tracing-webapp/src/main/java/org/apache/phoenix/tracingwebapp/http/Main.java
@@ -19,13 +19,14 @@ package org.apache.phoenix.tracingwebapp.http;
 import java.net.URL;
 import java.security.ProtectionDomain;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.log4j.BasicConfigurator;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import org.eclipse.jetty.server.Server;
 import org.eclipse.jetty.webapp.WebAppContext;
 
@@ -34,7 +35,7 @@ import org.eclipse.jetty.webapp.WebAppContext;
  */
 public final class Main extends Configured implements Tool {
 
-    protected static final Log LOG = LogFactory.getLog(Main.class);
+    protected static final Logger LOGGER = LoggerFactory.getLogger(Main.class);
     public static final String PHONIX_DBSERVER_PORT_KEY =
         "phoenix.dbserver.port";
     public static final int DEFAULT_DBSERVER_PORT = 2181;