Posted to commits@phoenix.apache.org by sk...@apache.org on 2019/07/24 21:18:08 UTC

[phoenix] branch 4.14-HBase-1.3 updated: PHOENIX-5228 use slf4j for logging in phoenix project (addendum)

This is an automated email from the ASF dual-hosted git repository.

skadam pushed a commit to branch 4.14-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.14-HBase-1.3 by this push:
     new 8ab2050  PHOENIX-5228 use slf4j for logging in phoenix project (addendum)
8ab2050 is described below

commit 8ab2050815b40f480ec4899b95a89d8a3ef9c371
Author: Xinyi <xy...@salesforce.com>
AuthorDate: Sat Jun 15 18:41:22 2019 -0700

    PHOENIX-5228 use slf4j for logging in phoenix project (addendum)
    
    Signed-off-by: Chinmay Kulkarni <ch...@apache.org>
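
The files below all converge on the same SLF4J idiom: a static final Logger named LOGGER obtained from LoggerFactory, concatenation-built debug messages wrapped in isDebugEnabled() guards, and throwables passed as the final argument so stack traces are preserved. A minimal sketch of that pattern follows (ExampleComponent and processKey are illustrative names, not part of the patch):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class ExampleComponent {
        // SLF4J logger named after the class; upper-case constant name as used in this patch
        private static final Logger LOGGER = LoggerFactory.getLogger(ExampleComponent.class);

        void processKey(String key) {
            // Guard concatenation-style debug messages so the string is only built when needed
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug("Processing key " + key);
            }
            try {
                // ... do the actual work here ...
            } catch (RuntimeException e) {
                // Pass the throwable as the last argument so SLF4J logs the stack trace
                LOGGER.error("Processing failed for key " + key, e);
            }
        }
    }
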
---
 .../wal/WALRecoveryRegionPostOpenIT.java           |   4 +-
 ...WALReplayWithIndexWritesAndCompressedWALIT.java |   3 +-
 .../org/apache/phoenix/end2end/BaseQueryIT.java    |   6 +-
 .../end2end/ConnectionQueryServicesTestImpl.java   |   5 +-
 .../end2end/PartialScannerResultsDisabledIT.java   |   5 +-
 .../end2end/TableSnapshotReadsMapReduceIT.java     |   2 +
 .../index/IndexRebuildIncrementDisableCountIT.java |   3 +-
 .../index/InvalidIndexStateClientSideIT.java       |   3 +-
 .../phoenix/end2end/index/MutableIndexIT.java      |   1 -
 .../execute/UpsertSelectOverlappingBatchesIT.java  |  19 ++--
 .../index/FailForUnsupportedHBaseVersionsIT.java   |   3 +-
 .../hbase/ipc/PhoenixRpcSchedulerFactory.java      |   3 +-
 .../IndexHalfStoreFileReaderGenerator.java         |   3 +-
 .../wal/BinaryCompatibleBaseDecoder.java           |   1 +
 .../java/org/apache/phoenix/cache/GlobalCache.java |  14 ++-
 .../apache/phoenix/cache/ServerCacheClient.java    |  24 +++--
 .../org/apache/phoenix/cache/TenantCacheImpl.java  |   2 +-
 .../apache/phoenix/cache/aggcache/SpillFile.java   |  12 +--
 .../cache/aggcache/SpillableGroupByCache.java      |  22 ++--
 .../java/org/apache/phoenix/call/CallRunner.java   |   1 +
 .../org/apache/phoenix/compile/FromCompiler.java   |  17 ++-
 .../GroupedAggregateRegionObserver.java            |  36 ++++---
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  |  66 ++++++------
 .../coprocessor/MetaDataRegionObserver.java        | 118 ++++++++++++---------
 .../coprocessor/PhoenixAccessController.java       |   6 +-
 .../UngroupedAggregateRegionObserver.java          |  42 ++++----
 .../org/apache/phoenix/execute/AggregatePlan.java  |   4 +-
 .../org/apache/phoenix/execute/BaseQueryPlan.java  |   6 +-
 .../org/apache/phoenix/execute/MutationState.java  |  28 ++---
 .../java/org/apache/phoenix/execute/ScanPlan.java  |   4 +-
 .../apache/phoenix/expression/LikeExpression.java  |  20 ++--
 .../aggregator/FirstLastValueServerAggregator.java |   6 +-
 .../aggregator/SizeTrackingServerAggregators.java  |   4 +-
 .../expression/function/CollationKeyFunction.java  |   6 +-
 .../phoenix/filter/RowKeyComparisonFilter.java     |   6 +-
 .../org/apache/phoenix/hbase/index/Indexer.java    |  18 ++--
 .../index/parallel/QuickFailingTaskRunner.java     |   1 +
 .../hbase/index/util/IndexManagementUtil.java      |   3 +-
 .../hbase/index/write/RecoveryIndexWriter.java     |   3 +-
 .../TrackingParallelWriterIndexCommitter.java      |  10 +-
 .../phoenix/index/PhoenixIndexFailurePolicy.java   |  20 ++--
 .../phoenix/iterate/BaseResultIterators.java       |  10 +-
 .../phoenix/iterate/ChunkedResultIterator.java     |   8 +-
 .../apache/phoenix/iterate/ParallelIterators.java  |   6 +-
 .../phoenix/iterate/RoundRobinResultIterator.java  |   6 +-
 .../phoenix/iterate/TableResultIterator.java       |   4 +-
 .../org/apache/phoenix/jdbc/PhoenixDriver.java     |  16 +--
 .../apache/phoenix/jdbc/PhoenixEmbeddedDriver.java |  17 +--
 .../org/apache/phoenix/jdbc/PhoenixStatement.java  |  26 +++--
 .../java/org/apache/phoenix/log/QueryLogger.java   |   2 +-
 .../apache/phoenix/log/QueryLoggerDisruptor.java   |   5 +-
 .../mapreduce/FormatToBytesWritableMapper.java     |   3 +-
 .../phoenix/mapreduce/MultiHfileOutputFormat.java  |   5 +-
 .../apache/phoenix/mapreduce/OrphanViewTool.java   |   3 +-
 .../phoenix/mapreduce/PhoenixRecordReader.java     |   6 +-
 .../PhoenixServerBuildIndexInputFormat.java        |   3 +-
 .../phoenix/mapreduce/RegexToKeyValueMapper.java   |   5 +-
 .../apache/phoenix/mapreduce/index/IndexTool.java  |   9 +-
 .../index/PhoenixIndexPartialBuildMapper.java      |   3 +-
 .../index/PhoenixServerBuildIndexMapper.java       |   4 -
 .../index/automation/PhoenixMRJobSubmitter.java    |   3 +-
 .../mapreduce/util/PhoenixConfigurationUtil.java   |   8 +-
 .../apache/phoenix/memory/GlobalMemoryManager.java |   4 +-
 .../phoenix/query/ConnectionQueryServicesImpl.java | 109 ++++++++++---------
 .../org/apache/phoenix/schema/MetaDataClient.java  |  28 ++---
 .../schema/stats/DefaultStatisticsCollector.java   |  21 ++--
 .../phoenix/schema/stats/StatisticsScanner.java    |  12 ++-
 .../java/org/apache/phoenix/trace/TraceReader.java |   4 +-
 .../transaction/OmidTransactionContext.java        |   1 -
 .../transaction/TephraTransactionContext.java      |   6 +-
 .../phoenix/util/EquiDepthStreamHistogram.java     |   3 +-
 .../java/org/apache/phoenix/util/MetaDataUtil.java |   8 +-
 .../org/apache/phoenix/util/ReadOnlyProps.java     |   4 +-
 .../java/org/apache/phoenix/util/UpgradeUtil.java  | 115 ++++++++++----------
 .../phoenix/util/json/JsonUpsertExecutor.java      |   3 +-
 .../phoenix/hbase/index/write/TestIndexWriter.java |   4 +-
 .../java/org/apache/phoenix/query/BaseTest.java    |  20 ++--
 .../tool/ParameterizedPhoenixCanaryToolIT.java     |   7 +-
 .../main/java/org/apache/phoenix/pherf/Pherf.java  |  28 ++---
 .../pherf/configuration/XMLConfigParser.java       |   6 +-
 .../apache/phoenix/pherf/result/ResultManager.java |   4 +-
 .../apache/phoenix/pherf/rules/RulesApplier.java   |  14 +--
 .../apache/phoenix/pherf/schema/SchemaReader.java  |   8 +-
 .../org/apache/phoenix/pherf/util/PhoenixUtil.java |  22 ++--
 .../apache/phoenix/pherf/util/ResourceList.java    |  18 ++--
 .../pherf/workload/MultiThreadedRunner.java        |  10 +-
 .../pherf/workload/MultithreadedDiffer.java        |   1 -
 .../phoenix/pherf/workload/QueryExecutor.java      |   8 +-
 .../phoenix/pherf/workload/QueryVerifier.java      |   6 +-
 .../phoenix/pherf/workload/WorkloadExecutor.java   |   4 +-
 .../phoenix/pherf/workload/WriteWorkload.java      |  24 ++---
 .../phoenix/pherf/ConfigurationParserTest.java     |   4 +-
 .../apache/phoenix/tracingwebapp/http/Main.java    |   1 +
 93 files changed, 662 insertions(+), 557 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALRecoveryRegionPostOpenIT.java b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALRecoveryRegionPostOpenIT.java
index 5d7d438..c2984c7 100644
--- a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALRecoveryRegionPostOpenIT.java
+++ b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALRecoveryRegionPostOpenIT.java
@@ -68,11 +68,11 @@ import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.ReadOnlyProps;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Maps;
 import com.google.common.collect.Multimap;
diff --git a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
index 10e5b80..fa248a5 100644
--- a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
+++ b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
@@ -86,7 +86,8 @@ import org.slf4j.LoggerFactory;
 @Category(NeedsOwnMiniClusterTest.class)
 public class WALReplayWithIndexWritesAndCompressedWALIT {
 
-  public static final Logger LOGGER = LoggerFactory.getLogger(WALReplayWithIndexWritesAndCompressedWALIT.class);
+  public static final Logger LOGGER =
+          LoggerFactory.getLogger(WALReplayWithIndexWritesAndCompressedWALIT.class);
   @Rule
   public TableName table = new TableName();
   private String INDEX_TABLE_NAME = table.getTableNameString() + "_INDEX";
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseQueryIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseQueryIT.java
index ed3669c..17b496b 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseQueryIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseQueryIT.java
@@ -84,7 +84,7 @@ public abstract class BaseQueryIT extends ParallelStatsDisabledIT {
     protected String tableName;
     protected String indexName;
 
-    private static final Logger logger = LoggerFactory.getLogger(BaseQueryIT.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(BaseQueryIT.class);
 
     public BaseQueryIT(String idxDdl, boolean columnEncoded, boolean keepDeletedCells) throws Exception {
         StringBuilder optionBuilder = new StringBuilder();
@@ -102,7 +102,7 @@ public abstract class BaseQueryIT extends ParallelStatsDisabledIT {
                         date = new Date(System.currentTimeMillis()), null, getUrl(),
                         tableDDLOptions);
         } catch (Exception e) {
-            logger.error("Exception when creating aTable ", e);
+            LOGGER.error("Exception when creating aTable ", e);
             throw e;
         }
         this.indexName = generateUniqueName();
@@ -114,7 +114,7 @@ public abstract class BaseQueryIT extends ParallelStatsDisabledIT {
             try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
                 conn.createStatement().execute(this.indexDDL);
             } catch (Exception e) {
-                logger.error("Exception while creating index: " + indexDDL, e);
+                LOGGER.error("Exception while creating index: " + indexDDL, e);
                 throw e;
             }
         }
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConnectionQueryServicesTestImpl.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConnectionQueryServicesTestImpl.java
index 3db93b0..877a7ad 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConnectionQueryServicesTestImpl.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConnectionQueryServicesTestImpl.java
@@ -49,7 +49,8 @@ import com.google.common.collect.Sets;
  * @since 0.1
  */
 public class ConnectionQueryServicesTestImpl extends ConnectionQueryServicesImpl {
-    private static final Logger logger = LoggerFactory.getLogger(ConnectionQueryServicesTestImpl.class);
+    private static final Logger LOGGER =
+            LoggerFactory.getLogger(ConnectionQueryServicesTestImpl.class);
     protected int NUM_SLAVES_BASE = 1; // number of slaves for the cluster
     // Track open connections to free them on close as unit tests don't always do this.
     private Set<PhoenixConnection> connections = Sets.newHashSet();
@@ -84,7 +85,7 @@ public class ConnectionQueryServicesTestImpl extends ConnectionQueryServicesImpl
                         try {
                             service.close();
                         } catch (IOException e) {
-                            logger.warn(e.getMessage(), e);
+                            LOGGER.warn(e.getMessage(), e);
                         }
                     }
                 }
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PartialScannerResultsDisabledIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PartialScannerResultsDisabledIT.java
index 59471dd..6de1c35 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PartialScannerResultsDisabledIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PartialScannerResultsDisabledIT.java
@@ -78,7 +78,8 @@ public class PartialScannerResultsDisabledIT extends ParallelStatsDisabledIT {
     private String schemaName;
     private String dataTableFullName;
     private static String indexTableFullName;
-    private static final Logger logger = LoggerFactory.getLogger(PartialScannerResultsDisabledIT.class);
+    private static final Logger LOGGER =
+            LoggerFactory.getLogger(PartialScannerResultsDisabledIT.class);
     private static Random random = new Random(1);
     // background writer threads
     private static Random sourceOfRandomness = new Random(0);
@@ -99,7 +100,7 @@ public class PartialScannerResultsDisabledIT extends ParallelStatsDisabledIT {
             // TODO: it's likely that less data could be written if whatever
             // config parameters decide this are lowered.
             writeSingleBatch(conn, 100, 20, dataTableFullName);
-            logger.info("Running scrutiny");
+            LOGGER.info("Running scrutiny");
             // Scutunize index to see if partial results are silently returned
             // In that case we'll get a false positive on the scrutiny run.
             long rowCount = IndexScrutiny.scrutinizeIndex(conn, dataTableFullName, indexTableFullName);
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TableSnapshotReadsMapReduceIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TableSnapshotReadsMapReduceIT.java
index cae91a3..f2fc39f 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TableSnapshotReadsMapReduceIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TableSnapshotReadsMapReduceIT.java
@@ -55,6 +55,7 @@ import org.junit.Test;
 import com.google.common.collect.Maps;
 
 public class TableSnapshotReadsMapReduceIT extends BaseUniqueNamesOwnClusterIT {
+
   private final static String SNAPSHOT_NAME = "FOO";
   private static final String FIELD1 = "FIELD1";
   private static final String FIELD2 = "FIELD2";
@@ -210,6 +211,7 @@ public class TableSnapshotReadsMapReduceIT extends BaseUniqueNamesOwnClusterIT {
   }
 
     public void deleteSnapshot(String tableName) throws Exception {
+
         try (Connection conn = DriverManager.getConnection(getUrl());
                 HBaseAdmin admin =
                         conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();) {
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexRebuildIncrementDisableCountIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexRebuildIncrementDisableCountIT.java
index cf48f5f..bdeb735 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexRebuildIncrementDisableCountIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexRebuildIncrementDisableCountIT.java
@@ -56,7 +56,8 @@ import org.slf4j.LoggerFactory;
 import com.google.common.collect.Maps;
 
 public class IndexRebuildIncrementDisableCountIT extends BaseUniqueNamesOwnClusterIT {
-    private static final Logger LOGGER = LoggerFactory.getLogger(IndexRebuildIncrementDisableCountIT.class);
+    private static final Logger LOGGER =
+            LoggerFactory.getLogger(IndexRebuildIncrementDisableCountIT.class);
     private static long pendingDisableCount = 0;
     private static String ORG_PREFIX = "ORG";
     private static Result pendingDisableCountResult = null;
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/InvalidIndexStateClientSideIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/InvalidIndexStateClientSideIT.java
index 6d06505..351a1c6 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/InvalidIndexStateClientSideIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/InvalidIndexStateClientSideIT.java
@@ -54,7 +54,8 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 public class InvalidIndexStateClientSideIT extends ParallelStatsDisabledIT {
-    private static final Logger LOGGER = LoggerFactory.getLogger(InvalidIndexStateClientSideIT.class);
+    private static final Logger LOGGER =
+            LoggerFactory.getLogger(InvalidIndexStateClientSideIT.class);
 
     @Test
     public void testCachedConnections() throws Throwable {
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
index d72238d..9a9fa91 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
@@ -78,7 +78,6 @@ import com.google.common.primitives.Doubles;
 
 @RunWith(Parameterized.class)
 public class MutableIndexIT extends ParallelStatsDisabledIT {
-    
     protected final boolean localIndex;
     private final String tableDDLOptions;
 	
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/execute/UpsertSelectOverlappingBatchesIT.java b/phoenix-core/src/it/java/org/apache/phoenix/execute/UpsertSelectOverlappingBatchesIT.java
index dc9de81..3c81879 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/execute/UpsertSelectOverlappingBatchesIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/execute/UpsertSelectOverlappingBatchesIT.java
@@ -66,7 +66,8 @@ import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 
 public class UpsertSelectOverlappingBatchesIT extends BaseUniqueNamesOwnClusterIT {
-    private static final Logger logger = LoggerFactory.getLogger(UpsertSelectOverlappingBatchesIT.class);
+    private static final Logger LOGGER =
+            LoggerFactory.getLogger(UpsertSelectOverlappingBatchesIT.class);
     private Properties props;
     private static volatile String dataTable;
     private String index;
@@ -129,11 +130,11 @@ public class UpsertSelectOverlappingBatchesIT extends BaseUniqueNamesOwnClusterI
                 }
                 catch (Exception e) {
                     if (ExceptionUtils.indexOfThrowable(e, InterruptedException.class) != -1) {
-                        logger.info("Interrupted, exiting", e);
+                        LOGGER.info("Interrupted, exiting", e);
                         Thread.currentThread().interrupt();
                         return;
                     }
-                    logger.error("Hit exception while writing", e);
+                    LOGGER.error("Hit exception while writing", e);
                 }
             }
         }};
@@ -214,17 +215,17 @@ public class UpsertSelectOverlappingBatchesIT extends BaseUniqueNamesOwnClusterI
                     try {
                         List<HRegionInfo> regions = admin.getTableRegions(dataTN);
                         if (regions.size() > 1) {
-                            logger.info("Found region was split");
+                            LOGGER.info("Found region was split");
                             return true;
                         }
                         if (regions.size() == 0) {
                             // This happens when region in transition or closed
-                            logger.info("No region returned");
+                            LOGGER.info("No region returned");
                             return false;
                         }
                         ;
                         HRegionInfo hRegion = regions.get(0);
-                        logger.info("Attempting to split region");
+                        LOGGER.info("Attempting to split region");
                         admin.splitRegion(hRegion.getRegionName(), Bytes.toBytes(2));
                         return false;
                     } catch (NotServingRegionException nsre) {
@@ -263,7 +264,7 @@ public class UpsertSelectOverlappingBatchesIT extends BaseUniqueNamesOwnClusterI
             final HBaseAdmin admin = utility.getHBaseAdmin();
             final HRegionInfo dataRegion =
                     admin.getTableRegions(TableName.valueOf(dataTable)).get(0);
-            logger.info("Closing data table region");
+            LOGGER.info("Closing data table region");
             admin.closeRegion(dataRs.getServerName(), dataRegion);
             // make sure the region is offline
             utility.waitFor(60000L, 1000, new Waiter.Predicate<Exception>() {
@@ -273,11 +274,11 @@ public class UpsertSelectOverlappingBatchesIT extends BaseUniqueNamesOwnClusterI
                             admin.getOnlineRegions(dataRs.getServerName());
                     for (HRegionInfo onlineRegion : onlineRegions) {
                         if (onlineRegion.equals(dataRegion)) {
-                            logger.info("Data region still online");
+                            LOGGER.info("Data region still online");
                             return false;
                         }
                     }
-                    logger.info("Region is no longer online");
+                    LOGGER.info("Region is no longer online");
                     return true;
                 }
             });
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/FailForUnsupportedHBaseVersionsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/FailForUnsupportedHBaseVersionsIT.java
index b920bf4..aaf533e 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/FailForUnsupportedHBaseVersionsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/FailForUnsupportedHBaseVersionsIT.java
@@ -46,7 +46,8 @@ import org.slf4j.LoggerFactory;
  */
 @Category(NeedsOwnMiniClusterTest.class)
 public class FailForUnsupportedHBaseVersionsIT {
-    private static final Logger LOGGER = LoggerFactory.getLogger(FailForUnsupportedHBaseVersionsIT.class);
+    private static final Logger LOGGER =
+            LoggerFactory.getLogger(FailForUnsupportedHBaseVersionsIT.class);
 
     /**
      * We don't support WAL Compression for HBase &lt; 0.94.9, so we shouldn't even allow the server
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcSchedulerFactory.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcSchedulerFactory.java
index 6d82c7a..fe5db1f 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcSchedulerFactory.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcSchedulerFactory.java
@@ -37,7 +37,8 @@ import com.google.common.base.Preconditions;
  */
 public class PhoenixRpcSchedulerFactory implements RpcSchedulerFactory {
 
-    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixRpcSchedulerFactory.class);
+    private static final Logger LOGGER =
+            LoggerFactory.getLogger(PhoenixRpcSchedulerFactory.class);
 
     private static final String VERSION_TOO_OLD_FOR_INDEX_RPC =
             "Running an older version of HBase (less than 0.98.4), Phoenix index RPC handling cannot be enabled.";
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
index 78932f1..0764ea8 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
@@ -68,7 +68,8 @@ import com.google.common.collect.Lists;
 public class IndexHalfStoreFileReaderGenerator extends BaseRegionObserver {
 
     private static final String LOCAL_INDEX_AUTOMATIC_REPAIR = "local.index.automatic.repair";
-    public static final Logger LOGGER = LoggerFactory.getLogger(IndexHalfStoreFileReaderGenerator.class);
+    public static final Logger LOGGER =
+            LoggerFactory.getLogger(IndexHalfStoreFileReaderGenerator.class);
 
     @Override
     public Reader preStoreFileReaderOpen(ObserverContext<RegionCoprocessorEnvironment> ctx,
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/BinaryCompatibleBaseDecoder.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/BinaryCompatibleBaseDecoder.java
index 799357d..e30370f 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/BinaryCompatibleBaseDecoder.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/BinaryCompatibleBaseDecoder.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.codec.Codec;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 /**
  * This class is a copy paste version of org.apache.hadoop.hbase.codec.BaseDecoder class. 
  * This class is meant to be used in {@link IndexedWALEditCodec} when runtime version of
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/cache/GlobalCache.java b/phoenix-core/src/main/java/org/apache/phoenix/cache/GlobalCache.java
index ae77174..365bd26 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/cache/GlobalCache.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/cache/GlobalCache.java
@@ -54,7 +54,7 @@ import com.google.common.cache.Weigher;
  * @since 0.1
  */
 public class GlobalCache extends TenantCacheImpl {
-    private static final Logger logger = LoggerFactory.getLogger(GlobalCache.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(GlobalCache.class);
     private static volatile GlobalCache INSTANCE; 
     
     private final Configuration config;
@@ -65,16 +65,20 @@ public class GlobalCache extends TenantCacheImpl {
     
     public long clearTenantCache() {
         long unfreedBytes = getMemoryManager().getMaxMemory() - getMemoryManager().getAvailableMemory();
-        if (unfreedBytes != 0 && logger.isDebugEnabled()) {
-            logger.debug("Found " + (getMemoryManager().getMaxMemory() - getMemoryManager().getAvailableMemory()) + " bytes not freed from global cache");
+        if (unfreedBytes != 0 && LOGGER.isDebugEnabled()) {
+            LOGGER.debug("Found " + (getMemoryManager().getMaxMemory() -
+                    getMemoryManager().getAvailableMemory()) +
+                    " bytes not freed from global cache");
         }
         removeAllServerCache();
         for (Map.Entry<ImmutableBytesWritable, TenantCache> entry : perTenantCacheMap.entrySet()) {
             TenantCache cache = entry.getValue();
             long unfreedTenantBytes = cache.getMemoryManager().getMaxMemory() - cache.getMemoryManager().getAvailableMemory();
-            if (unfreedTenantBytes != 0 && logger.isDebugEnabled()) {
+            if (unfreedTenantBytes != 0 && LOGGER.isDebugEnabled()) {
                 ImmutableBytesWritable cacheId = entry.getKey();
-                logger.debug("Found " + unfreedTenantBytes + " bytes not freed for tenant " + Bytes.toStringBinary(cacheId.get(), cacheId.getOffset(), cacheId.getLength()));
+                LOGGER.debug("Found " + unfreedTenantBytes + " bytes not freed for tenant " +
+                        Bytes.toStringBinary(cacheId.get(), cacheId.getOffset(),
+                                cacheId.getLength()));
             }
             unfreedBytes += unfreedTenantBytes;
             cache.removeAllServerCache();
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java b/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
index bc67674..07702e8 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
@@ -255,7 +255,8 @@ public class ServerCacheClient {
                     // Call RPC once per server
                     servers.add(entry);
                     if (LOGGER.isDebugEnabled()) {
-                        LOGGER.debug(addCustomAnnotations("Adding cache entry to be sent for " + entry, connection));
+                        LOGGER.debug(addCustomAnnotations(
+                                "Adding cache entry to be sent for " + entry, connection));
                     }
                     final byte[] key = getKeyInRegion(entry.getRegionInfo().getStartKey());
                     final HTableInterface htable = services.getTable(cacheUsingTable.getPhysicalName().getBytes());
@@ -284,12 +285,13 @@ public class ServerCacheClient {
                     }));
                 } else {
                     if (LOGGER.isDebugEnabled()) {
-                        LOGGER.debug(addCustomAnnotations("NOT adding cache entry to be sent for " + entry +
-                                " since one already exists for that entry", connection));
+                        LOGGER.debug(addCustomAnnotations(
+                                "NOT adding cache entry to be sent for " + entry +
+                                        " since one already exists for that entry", connection));
                     }
                 }
             }
-            
+
             hashCacheSpec = new ServerCache(cacheId,servers,cachePtr, services, storeCacheOnClient);
             // Execute in parallel
             int timeoutMs = services.getProps().getInt(QueryServices.THREAD_TIMEOUT_MS_ATTRIB, QueryServicesOptions.DEFAULT_THREAD_TIMEOUT_MS);
@@ -326,7 +328,8 @@ public class ServerCacheClient {
             }
         }
         if (LOGGER.isDebugEnabled()) {
-            LOGGER.debug(addCustomAnnotations("Cache " + cacheId + " successfully added to servers.", connection));
+            LOGGER.debug(addCustomAnnotations("Cache " + cacheId +
+                    " successfully added to servers.", connection));
         }
         return hashCacheSpec;
     }
@@ -354,7 +357,8 @@ public class ServerCacheClient {
              * to.
              */
             if (LOGGER.isDebugEnabled()) {
-                LOGGER.debug(addCustomAnnotations("Removing Cache " + cacheId + " from servers.", connection));
+                LOGGER.debug(addCustomAnnotations("Removing Cache " +
+                        cacheId + " from servers.", connection));
             }
             for (HRegionLocation entry : locations) {
              // Call once per server
@@ -397,13 +401,15 @@ public class ServerCacheClient {
                         remainingOnServers.remove(entry);
                     } catch (Throwable t) {
                         lastThrowable = t;
-                        LOGGER.error(addCustomAnnotations("Error trying to remove hash cache for " + entry, connection),
-                                t);
+                        LOGGER.error(addCustomAnnotations(
+                                "Error trying to remove hash cache for " + entry,
+                                connection), t);
                     }
                 }
             }
             if (!remainingOnServers.isEmpty()) {
-                LOGGER.warn(addCustomAnnotations("Unable to remove hash cache for " + remainingOnServers, connection),
+                LOGGER.warn(addCustomAnnotations("Unable to remove hash cache for "
+                                + remainingOnServers, connection),
                         lastThrowable);
             }
         } finally {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/cache/TenantCacheImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/cache/TenantCacheImpl.java
index 1dc59bc..ae66ffe 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/cache/TenantCacheImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/cache/TenantCacheImpl.java
@@ -97,7 +97,7 @@ public class TenantCacheImpl implements TenantCache {
         }
         return serverCaches;
     }
-    
+
     @Override
     public Closeable getServerCache(ImmutableBytesPtr cacheId) {
         getServerCaches().cleanUp();
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillFile.java b/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillFile.java
index 51aef98..dbad335 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillFile.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillFile.java
@@ -42,7 +42,7 @@ import java.util.UUID;
  */
 public class SpillFile implements Closeable {
 
-    private static final Logger logger = LoggerFactory.getLogger(SpillFile.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(SpillFile.class);
     // Default size for a single spillFile 2GB
     private static final int SPILL_FILE_SIZE = Integer.MAX_VALUE;
     // Page size for a spill file 4K
@@ -72,13 +72,13 @@ public class SpillFile implements Closeable {
 			Closeables.closeQuietly(rndFile);
 			
 			if (file != null) {
-                if (logger.isDebugEnabled()) {
-                    logger.debug("Deleting tempFile: " + file.getAbsolutePath());
+                if (LOGGER.isDebugEnabled()) {
+                    LOGGER.debug("Deleting tempFile: " + file.getAbsolutePath());
                 }
                 try {
                     file.delete();
                 } catch (SecurityException e) {
-                    logger.warn("IOException thrown while closing Closeable." + e);
+                    LOGGER.warn("IOException thrown while closing Closeable." + e);
             	}
             }
 		}
@@ -108,8 +108,8 @@ public class SpillFile implements Closeable {
         // Create temp file in temp dir or custom dir if provided
         File tempFile = File.createTempFile(UUID.randomUUID().toString(),
           null, spillFilesDirectory);
-        if (logger.isDebugEnabled()) {
-            logger.debug("Creating new SpillFile: " + tempFile.getAbsolutePath());
+        if (LOGGER.isDebugEnabled()) {
+            LOGGER.debug("Creating new SpillFile: " + tempFile.getAbsolutePath());
         }
         RandomAccessFile file = new RandomAccessFile(tempFile, "rw");
         file.setLength(SPILL_FILE_SIZE);
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java b/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java
index dc0ae21..821dc6b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java
@@ -93,7 +93,7 @@ import org.slf4j.LoggerFactory;
 
 public class SpillableGroupByCache implements GroupByCache {
 
-    private static final Logger logger = LoggerFactory.getLogger(SpillableGroupByCache.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(SpillableGroupByCache.class);
 
     // Min size of 1st level main memory cache in bytes --> lower bound
     private static final int SPGBY_CACHE_MIN_SIZE = 4096; // 4K
@@ -148,13 +148,14 @@ public class SpillableGroupByCache implements GroupByCache {
         try {
             this.chunk = tenantCache.getMemoryManager().allocate(estSize);
         } catch (InsufficientMemoryException ime) {
-            logger.error("Requested Map size exceeds memory limit, please decrease max size via config paramter: "
+            LOGGER.error("Requested Map size exceeds memory limit, " +
+                    "please decrease max size via config paramter: "
                     + GROUPBY_MAX_CACHE_SIZE_ATTRIB);
             throw ime;
         }
 
-        if (logger.isDebugEnabled()) {
-            logger.debug("Instantiating LRU groupby cache of element size: " + maxCacheSize);
+        if (LOGGER.isDebugEnabled()) {
+            LOGGER.debug("Instantiating LRU groupby cache of element size: " + maxCacheSize);
         }
 
         // LRU cache implemented as LinkedHashMap with access order
@@ -240,8 +241,8 @@ public class SpillableGroupByCache implements GroupByCache {
             if (rowAggregators == null) {
                 // No, key never spilled before, create a new tuple
                 rowAggregators = aggregators.newAggregators(env.getConfiguration());
-                if (logger.isDebugEnabled()) {
-                    logger.debug("Adding new aggregate bucket for row key "
+                if (LOGGER.isDebugEnabled()) {
+                    LOGGER.debug("Adding new aggregate bucket for row key "
                             + Bytes.toStringBinary(key.get(), key.getOffset(), key.getLength()));
                 }
             }
@@ -359,10 +360,11 @@ public class SpillableGroupByCache implements GroupByCache {
                 ImmutableBytesWritable key = ce.getKey();
                 Aggregator[] aggs = ce.getValue();
                 byte[] value = aggregators.toBytes(aggs);
-                if (logger.isDebugEnabled()) {
-                    logger.debug("Adding new distinct group: "
-                            + Bytes.toStringBinary(key.get(), key.getOffset(), key.getLength()) + " with aggregators "
-                            + aggs.toString() + " value = " + Bytes.toStringBinary(value));
+                if (LOGGER.isDebugEnabled()) {
+                    LOGGER.debug("Adding new distinct group: "
+                            + Bytes.toStringBinary(key.get(), key.getOffset(), key.getLength()) +
+                            " with aggregators " + aggs.toString() + " value = " +
+                            Bytes.toStringBinary(value));
                 }
                 results.add(KeyValueUtil.newKeyValue(key.get(), key.getOffset(), key.getLength(), SINGLE_COLUMN_FAMILY,
                         SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length));
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/call/CallRunner.java b/phoenix-core/src/main/java/org/apache/phoenix/call/CallRunner.java
index f3b568b..face677 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/call/CallRunner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/call/CallRunner.java
@@ -21,6 +21,7 @@ import java.util.concurrent.Callable;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 /**
  * Helper class to run a Call with a set of {@link CallWrapper}
  */
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
index dab0ef1..bb73838 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
@@ -104,7 +104,7 @@ import com.google.common.collect.Lists;
  * @since 0.1
  */
 public class FromCompiler {
-    private static final Logger logger = LoggerFactory.getLogger(FromCompiler.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(FromCompiler.class);
 
     public static final ColumnResolver EMPTY_TABLE_RESOLVER = new ColumnResolver() {
 
@@ -606,8 +606,13 @@ public class FromCompiler {
                 timeStamp += tsAddition;
             }
             TableRef tableRef = new TableRef(tableNode.getAlias(), theTable, timeStamp, !dynamicColumns.isEmpty());
-            if (logger.isDebugEnabled() && timeStamp != QueryConstants.UNSET_TIMESTAMP) {
-                logger.debug(LogUtil.addCustomAnnotations("Re-resolved stale table " + fullTableName + " with seqNum " + tableRef.getTable().getSequenceNumber() + " at timestamp " + tableRef.getTable().getTimeStamp() + " with " + tableRef.getTable().getColumns().size() + " columns: " + tableRef.getTable().getColumns(), connection));
+            if (LOGGER.isDebugEnabled() && timeStamp != QueryConstants.UNSET_TIMESTAMP) {
+                LOGGER.debug(LogUtil.addCustomAnnotations(
+                        "Re-resolved stale table " + fullTableName + " with seqNum "
+                                + tableRef.getTable().getSequenceNumber() + " at timestamp "
+                                + tableRef.getTable().getTimeStamp() + " with "
+                                + tableRef.getTable().getColumns().size() + " columns: "
+                                + tableRef.getTable().getColumns(), connection));
             }
             return tableRef;
         }
@@ -655,8 +660,10 @@ public class FromCompiler {
                 timeStamp += tsAddition;
             }
             
-            if (logger.isDebugEnabled() && timeStamp != QueryConstants.UNSET_TIMESTAMP) {
-                logger.debug(LogUtil.addCustomAnnotations("Re-resolved stale function " + functionNames.toString() + "at timestamp " + timeStamp, connection));
+            if (LOGGER.isDebugEnabled() && timeStamp != QueryConstants.UNSET_TIMESTAMP) {
+                LOGGER.debug(LogUtil.addCustomAnnotations(
+                        "Re-resolved stale function " + functionNames.toString() +
+                                "at timestamp " + timeStamp, connection));
             }
             return functionsFound;
         }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
index aefe916..3ea04a9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
@@ -88,7 +88,7 @@ import com.google.common.collect.Maps;
  * @since 0.1
  */
 public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
-    private static final Logger logger = LoggerFactory
+    private static final Logger LOGGER = LoggerFactory
             .getLogger(GroupedAggregateRegionObserver.class);
     public static final int MIN_DISTINCT_VALUES = 100;
 
@@ -278,8 +278,8 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
                 // If Aggregators not found for this distinct
                 // value, clone our original one (we need one
                 // per distinct value)
-                if (logger.isDebugEnabled()) {
-                    logger.debug(LogUtil.addCustomAnnotations("Adding new aggregate bucket for row key "
+                if (LOGGER.isDebugEnabled()) {
+                    LOGGER.debug(LogUtil.addCustomAnnotations("Adding new aggregate bucket for row key "
                             + Bytes.toStringBinary(key.get(), key.getOffset(),
                                 key.getLength()), customAnnotations));
                 }
@@ -313,8 +313,8 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
                 // Generate byte array of Aggregators and set as value of row
                 byte[] value = aggregators.toBytes(rowAggregators);
 
-                if (logger.isDebugEnabled()) {
-                    logger.debug(LogUtil.addCustomAnnotations("Adding new distinct group: "
+                if (LOGGER.isDebugEnabled()) {
+                    LOGGER.debug(LogUtil.addCustomAnnotations("Adding new distinct group: "
                             + Bytes.toStringBinary(key.get(), key.getOffset(), key.getLength())
                             + " with aggregators " + Arrays.asList(rowAggregators).toString()
                             + " value = " + Bytes.toStringBinary(value), customAnnotations));
@@ -382,9 +382,11 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
     private RegionScanner scanUnordered(ObserverContext<RegionCoprocessorEnvironment> c, Scan scan,
             final RegionScanner scanner, final List<Expression> expressions,
             final ServerAggregators aggregators, long limit) throws IOException {
-        if (logger.isDebugEnabled()) {
-            logger.debug(LogUtil.addCustomAnnotations("Grouped aggregation over unordered rows with scan " + scan
-                    + ", group by " + expressions + ", aggregators " + aggregators, ScanUtil.getCustomAnnotations(scan)));
+        if (LOGGER.isDebugEnabled()) {
+            LOGGER.debug(LogUtil.addCustomAnnotations(
+                    "Grouped aggregation over unordered rows with scan " + scan
+                    + ", group by " + expressions + ", aggregators " + aggregators,
+                    ScanUtil.getCustomAnnotations(scan)));
         }
         RegionCoprocessorEnvironment env = c.getEnvironment();
         Configuration conf = env.getConfiguration();
@@ -410,8 +412,10 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
         try {
             boolean hasMore;
             Tuple result = useQualifierAsIndex ? new PositionBasedMultiKeyValueTuple() : new MultiKeyValueTuple();
-            if (logger.isDebugEnabled()) {
-                logger.debug(LogUtil.addCustomAnnotations("Spillable groupby enabled: " + spillableEnabled, ScanUtil.getCustomAnnotations(scan)));
+            if (LOGGER.isDebugEnabled()) {
+                LOGGER.debug(LogUtil.addCustomAnnotations(
+                        "Spillable groupby enabled: " + spillableEnabled,
+                        ScanUtil.getCustomAnnotations(scan)));
             }
             Region region = c.getEnvironment().getRegion();
             boolean acquiredLock = false;
@@ -466,9 +470,11 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
             final Scan scan, final RegionScanner scanner, final List<Expression> expressions,
             final ServerAggregators aggregators, final long limit) throws IOException {
 
-        if (logger.isDebugEnabled()) {
-            logger.debug(LogUtil.addCustomAnnotations("Grouped aggregation over ordered rows with scan " + scan + ", group by "
-                    + expressions + ", aggregators " + aggregators, ScanUtil.getCustomAnnotations(scan)));
+        if (LOGGER.isDebugEnabled()) {
+            LOGGER.debug(LogUtil.addCustomAnnotations(
+                    "Grouped aggregation over ordered rows with scan " + scan + ", group by "
+                    + expressions + ", aggregators " + aggregators,
+                    ScanUtil.getCustomAnnotations(scan)));
         }
         final Pair<Integer, Integer> minMaxQualifiers = EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan);
         final boolean useQualifierAsIndex = EncodedColumnsUtil.useQualifierAsIndex(minMaxQualifiers);
@@ -508,8 +514,8 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
                                 aggBoundary = currentKey != null && currentKey.compareTo(key) != 0;
                                 if (!aggBoundary) {
                                     aggregators.aggregate(rowAggregators, result);
-                                    if (logger.isDebugEnabled()) {
-                                        logger.debug(LogUtil.addCustomAnnotations(
+                                    if (LOGGER.isDebugEnabled()) {
+                                        LOGGER.debug(LogUtil.addCustomAnnotations(
                                             "Row passed filters: " + kvs
                                             + ", aggregated values: "
                                             + Arrays.asList(rowAggregators),
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index f810dd7..3adaf33 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -272,7 +272,7 @@ import com.google.protobuf.Service;
  */
 @SuppressWarnings("deprecation")
 public class MetaDataEndpointImpl extends MetaDataProtocol implements CoprocessorService, Coprocessor {
-    private static final Logger logger = LoggerFactory.getLogger(MetaDataEndpointImpl.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(MetaDataEndpointImpl.class);
 
     // Column to track tables that have been upgraded based on PHOENIX-2067
     public static final String ROW_KEY_ORDER_OPTIMIZABLE = "ROW_KEY_ORDER_OPTIMIZABLE";
@@ -517,7 +517,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
         this.isTablesMappingEnabled = SchemaUtil.isNamespaceMappingEnabled(PTableType.TABLE,
                 new ReadOnlyProps(config.iterator()));
 
-        logger.info("Starting Tracing-Metrics Systems");
+        LOGGER.info("Starting Tracing-Metrics Systems");
         // Start the phoenix trace collection
         Tracing.addTraceMetricsSource();
         Metrics.ensureConfigured();
@@ -593,7 +593,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
             done.run(builder.build());
             return;
         } catch (Throwable t) {
-            logger.error("getTable failed", t);
+            LOGGER.error("getTable failed", t);
             ProtobufUtil.setControllerException(controller,
                 ServerUtil.createIOException(SchemaUtil.getTableName(schemaName, tableName), t));
         }
@@ -617,8 +617,8 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
             }
             if (oldTable == null || tableTimeStamp < newTable.getTimeStamp()
                     || (blockWriteRebuildIndex && newTable.getIndexDisableTimestamp() > 0)) {
-                if (logger.isDebugEnabled()) {
-                    logger.debug("Caching table "
+                if (LOGGER.isDebugEnabled()) {
+                    LOGGER.debug("Caching table "
                             + Bytes.toStringBinary(cacheKey.get(), cacheKey.getOffset(),
                                 cacheKey.getLength()) + " at seqNum "
                             + newTable.getSequenceNumber() + " with newer timestamp "
@@ -1823,7 +1823,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                 releaseRowLocks(region,locks);
             }
         } catch (Throwable t) {
-            logger.error("createTable failed", t);
+            LOGGER.error("createTable failed", t);
             ProtobufUtil.setControllerException(controller,
                     ServerUtil.createIOException(SchemaUtil.getTableName(schemaName, tableName), t));
         }
@@ -1848,11 +1848,11 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                 view = loadTable(env, tableKey, cacheKey, clientTimeStamp, clientTimeStamp, clientVersion);
             }
             catch (Throwable t) {
-                logger.error("Loading tenant view failed", t);
+                LOGGER.error("Loading tenant view failed", t);
             }
 
             if (view == null) {
-                logger.warn("Found orphan tenant view row in SYSTEM.CATALOG with tenantId:"
+                LOGGER.warn("Found orphan tenant view row in SYSTEM.CATALOG with tenantId:"
                         + Bytes.toString(tenantId) + ", schema:"
                         + Bytes.toString(viewSchema) + ", table:"
                         + Bytes.toString(viewTable));
@@ -2094,7 +2094,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                 releaseRowLocks(region,locks);
             }
         } catch (Throwable t) {
-          logger.error("dropTable failed", t);
+            LOGGER.error("dropTable failed", t);
             ProtobufUtil.setControllerException(controller,
                 ServerUtil.createIOException(SchemaUtil.getTableName(schemaName, tableName), t));
         }
@@ -2293,12 +2293,13 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                 invalidateList.add(cacheKey);
                 Cache<ImmutableBytesPtr,PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
                 PTable table = (PTable)metaDataCache.getIfPresent(cacheKey);
-                if (logger.isDebugEnabled()) {
+                if (LOGGER.isDebugEnabled()) {
+
                     if (table == null) {
-                        logger.debug("Table " + Bytes.toStringBinary(key)
+                        LOGGER.debug("Table " + Bytes.toStringBinary(key)
                                 + " not found in cache. Will build through scan");
                     } else {
-                        logger.debug("Table " + Bytes.toStringBinary(key)
+                        LOGGER.debug("Table " + Bytes.toStringBinary(key)
                                 + " found in cache with timestamp " + table.getTimeStamp()
                                 + " seqNum " + table.getSequenceNumber());
                     }
@@ -2311,7 +2312,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                     // found
                     table = buildDeletedTable(key, cacheKey, region, clientTimeStamp);
                     if (table != null) {
-                        logger.info("Found newer table deleted as of " + table.getTimeStamp() + " versus client timestamp of " + clientTimeStamp);
+                        LOGGER.info("Found newer table deleted as of " + table.getTimeStamp() + " versus client timestamp of " + clientTimeStamp);
                         return new MetaDataMutationResult(MutationCode.NEWER_TABLE_FOUND,
                                 EnvironmentEdgeManager.currentTimeMillis(), null);
                     }
@@ -2319,7 +2320,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                             EnvironmentEdgeManager.currentTimeMillis(), null);
                 }
                 if (table.getTimeStamp() >= clientTimeStamp) {
-                    logger.info("Found newer table as of " + table.getTimeStamp() + " versus client timestamp of "
+                    LOGGER.info("Found newer table as of " + table.getTimeStamp() + " versus client timestamp of "
                             + clientTimeStamp);
                     return new MetaDataMutationResult(MutationCode.NEWER_TABLE_FOUND,
                             EnvironmentEdgeManager.currentTimeMillis(), table);
@@ -2328,15 +2329,15 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                 long expectedSeqNum = MetaDataUtil.getSequenceNumber(tableMetadata) - 1; // lookup TABLE_SEQ_NUM in
                                                                                          // tableMetaData
 
-                if (logger.isDebugEnabled()) {
-                    logger.debug("For table " + Bytes.toStringBinary(key) + " expecting seqNum "
+                if (LOGGER.isDebugEnabled()) {
+                    LOGGER.debug("For table " + Bytes.toStringBinary(key) + " expecting seqNum "
                             + expectedSeqNum + " and found seqNum " + table.getSequenceNumber()
                             + " with " + table.getColumns().size() + " columns: "
                             + table.getColumns());
                 }
                 if (expectedSeqNum != table.getSequenceNumber()) {
-                    if (logger.isDebugEnabled()) {
-                        logger.debug("For table " + Bytes.toStringBinary(key)
+                    if (LOGGER.isDebugEnabled()) {
+                        LOGGER.debug("For table " + Bytes.toStringBinary(key)
                                 + " returning CONCURRENT_TABLE_MUTATION due to unexpected seqNum");
                     }
                     return new MetaDataMutationResult(MutationCode.CONCURRENT_TABLE_MUTATION,
@@ -2585,10 +2586,10 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                 view = doGetTable(viewKey, clientTimeStamp, viewRowLock, clientVersion);
             }
             catch (Throwable t) {
-                logger.warn("Loading tenant view failed", t);
+                LOGGER.warn("Loading tenant view failed", t);
             }
             if (view == null) {
-                logger.warn("Found orphan tenant view row in SYSTEM.CATALOG with tenantId:"
+                LOGGER.warn("Found orphan tenant view row in SYSTEM.CATALOG with tenantId:"
                     + Bytes.toString(tenantId) + ", schema:"
                     + Bytes.toString(schema) + ", table:"
                     + Bytes.toString(table));
@@ -3306,7 +3307,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                                     // return if we were not able to add the column successfully
                                     if (mutationResult!=null)
                                         return mutationResult;
-                                } 
+                                }
                             }
                         }
                     } else if (type == PTableType.VIEW
@@ -3399,7 +3400,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                 done.run(MetaDataMutationResult.toProto(result));
             }
         } catch (Throwable e) {
-            logger.error("Add column failed: ", e);
+            LOGGER.error("Add column failed: ", e);
             ProtobufUtil.setControllerException(controller,
                 ServerUtil.createIOException("Error when adding column: ", e));
         }
@@ -3640,7 +3641,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                 done.run(MetaDataMutationResult.toProto(result));
             }
         } catch (Throwable e) {
-            logger.error("Drop column failed: ", e);
+            LOGGER.error("Drop column failed: ", e);
             ProtobufUtil.setControllerException(controller,
                 ServerUtil.createIOException("Error when dropping column: ", e));
         }
@@ -3718,7 +3719,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
         Configuration config = env.getConfiguration();
         if (isTablesMappingEnabled
                 && MetaDataProtocol.MIN_NAMESPACE_MAPPED_PHOENIX_VERSION > request.getClientVersion()) {
-            logger.error("Old client is not compatible when" + " system tables are upgraded to map to namespace");
+            LOGGER.error("Old client is not compatible when" + " system tables are upgraded to map to namespace");
             ProtobufUtil.setControllerException(controller,
                     ServerUtil.createIOException(
                             SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES,
@@ -3738,7 +3739,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
             systemCatalog = loadTable(env, tableKey, cacheKey, MIN_SYSTEM_TABLE_TIMESTAMP,
               HConstants.LATEST_TIMESTAMP, request.getClientVersion());
         } catch (Throwable t) {
-            logger.error("loading system catalog table inside getVersion failed", t);
+            LOGGER.error("loading system catalog table inside getVersion failed", t);
             ProtobufUtil.setControllerException(controller,
               ServerUtil.createIOException(
                 SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES,
@@ -4011,7 +4012,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                 rowLock.release();
             }
         } catch (Throwable t) {
-          logger.error("updateIndexState failed", t);
+            LOGGER.error("updateIndexState failed", t);
             ProtobufUtil.setControllerException(controller,
                 ServerUtil.createIOException(SchemaUtil.getTableName(schemaName, tableName), t));
         }
@@ -4024,7 +4025,6 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
 
     private static MetaDataMutationResult checkTableKeyInRegion(byte[] key, Region region) {
         return checkKeyInRegion(key, region, MutationCode.TABLE_NOT_IN_REGION);
-
     }
 
     private static MetaDataMutationResult checkFunctionKeyInRegion(byte[] key, Region region) {
@@ -4125,7 +4125,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                     GlobalCache.getInstance(this.env).getMetaDataCache();
             metaDataCache.invalidate(cacheKey);
         } catch (Throwable t) {
-            logger.error("clearTableFromCache failed", t);
+            LOGGER.error("clearTableFromCache failed", t);
             ProtobufUtil.setControllerException(controller,
                 ServerUtil.createIOException(SchemaUtil.getTableName(schemaName, tableName), t));
         }
@@ -4222,7 +4222,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
             done.run(builder.build());
             return;
         } catch (Throwable t) {
-            logger.error("getFunctions failed", t);
+            LOGGER.error("getFunctions failed", t);
             ProtobufUtil.setControllerException(controller,
                 ServerUtil.createIOException(functionNames.toString(), t));
         }
@@ -4296,7 +4296,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                 releaseRowLocks(region,locks);
             }
         } catch (Throwable t) {
-          logger.error("createFunction failed", t);
+            LOGGER.error("createFunction failed", t);
             ProtobufUtil.setControllerException(controller,
                 ServerUtil.createIOException(Bytes.toString(functionName), t));
         }         
@@ -4348,7 +4348,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                 releaseRowLocks(region,locks);
             }
         } catch (Throwable t) {
-          logger.error("dropFunction failed", t);
+            LOGGER.error("dropFunction failed", t);
             ProtobufUtil.setControllerException(controller,
                 ServerUtil.createIOException(Bytes.toString(functionName), t));
         }         
@@ -4463,7 +4463,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                 releaseRowLocks(region,locks);
             }
         } catch (Throwable t) {
-            logger.error("Creating the schema" + schemaName + "failed", t);
+            LOGGER.error("Creating the schema" + schemaName + "failed", t);
             ProtobufUtil.setControllerException(controller, ServerUtil.createIOException(schemaName, t));
         }
     }
@@ -4507,7 +4507,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                 releaseRowLocks(region,locks);
             }
         } catch (Throwable t) {
-            logger.error("drop schema failed:", t);
+            LOGGER.error("drop schema failed:", t);
             ProtobufUtil.setControllerException(controller, ServerUtil.createIOException(schemaName, t));
         }
     }
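
As a reference point for the logger substitutions in MetaDataEndpointImpl above, here is a
minimal, self-contained sketch of the slf4j parameterized-logging idiom the new LOGGER fields
make available. The class name and message below are hypothetical and are not part of this commit.

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class Slf4jPatternSketch {
        private static final Logger LOGGER = LoggerFactory.getLogger(Slf4jPatternSketch.class);

        public void reportSchemaFailure(String schemaName, Throwable cause) {
            // {} placeholders defer message construction until the level is enabled, so
            // string concatenation and explicit level guards are only needed when building
            // an argument is itself expensive. With slf4j 1.6+, a trailing Throwable is
            // logged with its full stack trace.
            LOGGER.error("Creating the schema {} failed", schemaName, cause);
        }
    }
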
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
index 468b7bf..e8f4d0a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
@@ -34,8 +34,6 @@ import java.util.concurrent.TimeUnit;
 
 import javax.annotation.concurrent.GuardedBy;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
@@ -53,8 +51,6 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Pair;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
 import org.apache.phoenix.cache.GlobalCache;
 import org.apache.phoenix.compile.MutationPlan;
 import org.apache.phoenix.compile.PostDDLCompiler;
@@ -86,6 +82,8 @@ import org.apache.phoenix.util.ScanUtil;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.ServerUtil;
 import org.apache.phoenix.util.UpgradeUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.ImmutableList;
@@ -99,7 +97,7 @@ import com.google.common.collect.Maps;
  */
 @SuppressWarnings("deprecation")
 public class MetaDataRegionObserver extends BaseRegionObserver {
-    public static final Log LOG = LogFactory.getLog(MetaDataRegionObserver.class);
+    public static final Logger LOGGER = LoggerFactory.getLogger(MetaDataRegionObserver.class);
     public static final String REBUILD_INDEX_APPEND_TO_URL_STRING = "REBUILDINDEX";
     // PHOENIX-5094 To differentiate the increment in PENDING_DISABLE_COUNT made by client or index
     // rebuilder, we are using large value for index rebuilder
@@ -175,16 +173,16 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
                         @Override
                         public Void run() throws Exception {
                             if (UpgradeUtil.truncateStats(mTable, sTable)) {
-                                LOG.info("Stats are successfully truncated for upgrade 4.7!!");
+                                LOGGER.info("Stats are successfully truncated for upgrade 4.7!!");
                             }
                             return null;
                         }
                     });
 
                 } catch (Exception exception) {
-                    LOG.warn("Exception while truncate stats..,"
-                            + " please check and delete stats manually inorder to get proper result with old client!!");
-                    LOG.warn(exception.getStackTrace());
+                    LOGGER.warn("Exception while truncate stats.., please check and delete stats " +
+                            "manually inorder to get proper result with old client!!");
+                    LOGGER.warn(exception.getStackTrace().toString());
                 } finally {
                     try {
                         if (metaTable != null) {
@@ -202,14 +200,9 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
         t.start();
 
         if (!enableRebuildIndex) {
-            LOG.info("Failure Index Rebuild is skipped by configuration.");
+            LOGGER.info("Failure Index Rebuild is skipped by configuration.");
             return;
         }
-        // turn off verbose deprecation logging
-        Logger deprecationLogger = Logger.getLogger("org.apache.hadoop.conf.Configuration.deprecation");
-        if (deprecationLogger != null) {
-            deprecationLogger.setLevel(Level.WARN);
-        }
         // Ensure we only run one of the index rebuilder tasks
         if (ServerUtil.isKeyInRegion(SYSTEM_CATALOG_KEY, e.getEnvironment().getRegion())) {
             try {
@@ -219,7 +212,7 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
                 BuildIndexScheduleTask task = new BuildIndexScheduleTask(e.getEnvironment());
                 executor.scheduleWithFixedDelay(task, initialRebuildTaskDelay, rebuildIndexTimeInterval, TimeUnit.MILLISECONDS);
             } catch (ClassNotFoundException ex) {
-                LOG.error("BuildIndexScheduleTask cannot start!", ex);
+                LOGGER.error("BuildIndexScheduleTask cannot start!", ex);
             }
         }
     }
@@ -266,8 +259,9 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
                     IndexUtil.incrementCounterForIndex(conn, indexName, -PENDING_DISABLE_INACTIVE_STATE_COUNT);
                     indexesIncremented.add(index);
                 }catch(Exception e) {
-                    LOG.warn("Decrement  of -" + PENDING_DISABLE_INACTIVE_STATE_COUNT +" for index :" +
-                            index.getName().getString() + "of table: " + dataPTable.getName().getString(), e);
+                    LOGGER.warn("Decrement  of -" + PENDING_DISABLE_INACTIVE_STATE_COUNT
+                            + " for index :" + index.getName().getString() + "of table: "
+                            + dataPTable.getName().getString(), e);
                 }
             }
             return indexesIncremented;
@@ -305,7 +299,7 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
                     results.clear();
                     hasMore = scanner.next(results);
                     if (results.isEmpty()) {
-                        LOG.debug("Found no indexes with non zero INDEX_DISABLE_TIMESTAMP");
+                        LOGGER.debug("Found no indexes with non zero INDEX_DISABLE_TIMESTAMP");
                         break;
                     }
 
@@ -315,7 +309,7 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
                     Cell indexStateCell = r.getColumnLatestCell(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.INDEX_STATE_BYTES);
 
                     if (disabledTimeStamp == null || disabledTimeStamp.length == 0) {
-                        LOG.debug("Null or empty INDEX_DISABLE_TIMESTAMP");
+                        LOGGER.debug("Null or empty INDEX_DISABLE_TIMESTAMP");
                         continue;
                     }
 
@@ -326,7 +320,7 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
                         PhoenixDatabaseMetaData.DATA_TABLE_NAME_BYTES);
                     if ((dataTable == null || dataTable.length == 0) || indexStateCell == null) {
                         // data table name can't be empty
-                        LOG.debug("Null or data table name or index state");
+                        LOGGER.debug("Null or data table name or index state");
                         continue;
                     }
 
@@ -338,14 +332,15 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
 
                     // validity check
                     if (indexTable == null || indexTable.length == 0) {
-                        LOG.debug("We find IndexTable empty during rebuild scan:" + scan
+                        LOGGER.debug("We find IndexTable empty during rebuild scan:" + scan
                                 + "so, Index rebuild has been skipped for row=" + r);
                         continue;
                     }
                     
                     String dataTableFullName = SchemaUtil.getTableName(schemaName, dataTable);
                     if (onlyTheseTables != null && !onlyTheseTables.contains(dataTableFullName)) {
-                        LOG.debug("Could not find " + dataTableFullName + " in " + onlyTheseTables);
+                        LOGGER.debug("Could not find " + dataTableFullName +
+                                " in " + onlyTheseTables);
                         continue;
                     }
 
@@ -359,7 +354,8 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
                     PTable indexPTable = PhoenixRuntime.getTableNoCache(conn, indexTableFullName);
                     // Sanity check in case index was removed from table
                     if (!dataPTable.getIndexes().contains(indexPTable)) {
-                        LOG.debug(dataTableFullName + " does not contain " + indexPTable.getName().getString());
+                        LOGGER.debug(dataTableFullName + " does not contain " +
+                                indexPTable.getName().getString());
                         continue;
                     }
                     
@@ -381,8 +377,8 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
                     // an index write fails.
                     if ((indexState == PIndexState.DISABLE || indexState == PIndexState.PENDING_ACTIVE)
                             && !MetaDataUtil.tableRegionsOnline(this.env.getConfiguration(), indexPTable)) {
-                        LOG.debug("Index rebuild has been skipped because not all regions of index table="
-                                + indexPTable.getName() + " are online.");
+                        LOGGER.debug("Index rebuild has been skipped because not all regions of" +
+                                " index table=" + indexPTable.getName() + " are online.");
                         continue;
                     }
 
@@ -395,12 +391,13 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
                          */
                         try {
                             IndexUtil.updateIndexState(conn, indexTableFullName, PIndexState.DISABLE, 0l);
-                            LOG.error("Unable to rebuild index " + indexTableFullName
-                                    + ". Won't attempt again since index disable timestamp is older than current time by "
-                                    + indexDisableTimestampThreshold
-                                    + " milliseconds. Manual intervention needed to re-build the index");
+                            LOGGER.error("Unable to rebuild index " + indexTableFullName
+                                    + ". Won't attempt again since index disable timestamp is" +
+                                    " older than current time by " + indexDisableTimestampThreshold
+                                    + " milliseconds. Manual intervention needed to re-build" +
+                                    " the index");
                         } catch (Throwable ex) {
-                            LOG.error(
+                            LOGGER.error(
                                 "Unable to mark index " + indexTableFullName + " as disabled.", ex);
                         }
                         continue; // don't attempt another rebuild irrespective of whether
@@ -419,7 +416,8 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
                         IndexUtil.updateIndexState(conn, indexTableFullName, PIndexState.ACTIVE, null);
                         continue; // Must wait until clients start to do index maintenance again
                     } else if (indexState != PIndexState.INACTIVE && indexState != PIndexState.ACTIVE) {
-                        LOG.warn("Unexpected index state of " + indexTableFullName + "=" + indexState + ". Skipping partial rebuild attempt.");
+                        LOGGER.warn("Unexpected index state of " + indexTableFullName + "="
+                                + indexState + ". Skipping partial rebuild attempt.");
                         continue;
                     }
                     long currentTime = EnvironmentEdgeManager.currentTimeMillis();
@@ -428,7 +426,9 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
                                     QueryServicesOptions.DEFAULT_INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_FORWARD_TIME);
                     // Wait until no failures have occurred in at least forwardOverlapDurationMs
                     if (indexStateCell.getTimestamp() + forwardOverlapDurationMs > currentTime) {
-                        LOG.debug("Still must wait " + (indexStateCell.getTimestamp() + forwardOverlapDurationMs - currentTime) + " before starting rebuild for " + indexTableFullName);
+                        LOGGER.debug("Still must wait " + (indexStateCell.getTimestamp() +
+                                forwardOverlapDurationMs - currentTime) +
+                                " before starting rebuild for " + indexTableFullName);
                         continue; // Haven't waited long enough yet
                     }
                     Long upperBoundOfRebuild = indexStateCell.getTimestamp() + forwardOverlapDurationMs;
@@ -439,8 +439,9 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
                         indexesToPartiallyRebuild = Lists.newArrayListWithExpectedSize(dataPTable.getIndexes().size());
                         dataTableToIndexesMap.put(dataPTable, indexesToPartiallyRebuild);
                     }
-                    LOG.debug("We have found " + indexPTable.getIndexState() + " Index:" + indexPTable.getName()
-                            + " on data table:" + dataPTable.getName() + " which failed to be updated at "
+                    LOGGER.debug("We have found " + indexPTable.getIndexState() + " Index:" +
+                            indexPTable.getName() + " on data table:" + dataPTable.getName() +
+                            " which failed to be updated at "
                             + indexPTable.getIndexDisableTimestamp());
                     indexesToPartiallyRebuild.add(new Pair<PTable,Long>(indexPTable,upperBoundOfRebuild));
                 } while (hasMore);
@@ -475,7 +476,10 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
 								long disabledTimeStampVal = index.getIndexDisableTimestamp();
 								if (disabledTimeStampVal != 0) {
                                     if (signOfDisableTimeStamp != 0 && signOfDisableTimeStamp != Long.signum(disabledTimeStampVal)) {
-                                        LOG.warn("Found unexpected mix of signs with INDEX_DISABLE_TIMESTAMP for " + dataPTable.getName().getString() + " with " + indexesToPartiallyRebuild);
+                                        LOGGER.warn("Found unexpected mix of signs with " +
+                                                "INDEX_DISABLE_TIMESTAMP for " +
+                                                dataPTable.getName().getString() + " with " +
+                                                indexesToPartiallyRebuild);
                                     }
 								    signOfDisableTimeStamp = Long.signum(disabledTimeStampVal);
 	                                disabledTimeStampVal = Math.abs(disabledTimeStampVal);
@@ -492,14 +496,15 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
 							}
 							// No indexes are disabled, so skip this table
 							if (earliestDisableTimestamp == Long.MAX_VALUE) {
-		                        LOG.debug("No indexes are disabled so continuing");
+		                        LOGGER.debug("No indexes are disabled so continuing");
 								continue;
 							}
 							long scanBeginTime = Math.max(0, earliestDisableTimestamp - backwardOverlapDurationMs);
                             long scanEndTime = Math.min(latestUpperBoundTimestamp,
                                     getTimestampForBatch(scanBeginTime,batchExecutedPerTableMap.get(dataPTable.getName())));
-							LOG.info("Starting to build " + dataPTable + " indexes " + indexesToPartiallyRebuild
-									+ " from timestamp=" + scanBeginTime + " until " + scanEndTime);
+							LOGGER.info("Starting to build " + dataPTable + " indexes "
+                                    + indexesToPartiallyRebuild + " from timestamp=" +
+                                    scanBeginTime + " until " + scanEndTime);
 							
 							TableRef tableRef = new TableRef(null, dataPTable, HConstants.LATEST_TIMESTAMP, false);
 							// TODO Need to set high timeout
@@ -518,18 +523,21 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
 							byte[] attribValue = ByteUtil.copyKeyBytesIfNecessary(indexMetaDataPtr);
 							dataTableScan.setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD, attribValue);
 							ScanUtil.setClientVersion(dataTableScan, MetaDataProtocol.PHOENIX_VERSION);
-                            LOG.info("Starting to partially build indexes:" + indexesToPartiallyRebuild
-                                    + " on data table:" + dataPTable.getName() + " with the earliest disable timestamp:"
+                            LOGGER.info("Starting to partially build indexes:" +
+                                    indexesToPartiallyRebuild + " on data table:" +
+                                    dataPTable.getName() + " with the earliest disable timestamp:"
                                     + earliestDisableTimestamp + " till "
-                                    + (scanEndTime == HConstants.LATEST_TIMESTAMP ? "LATEST_TIMESTAMP" : scanEndTime));
+                                    + (scanEndTime == HConstants.LATEST_TIMESTAMP ?
+                                    "LATEST_TIMESTAMP" : scanEndTime));
 							MutationState mutationState = plan.execute();
 							long rowCount = mutationState.getUpdateCount();
 							decrementIndexesPendingDisableCount(conn, dataPTable, indexesToPartiallyRebuild);
 							if (scanEndTime == latestUpperBoundTimestamp) {
-                                LOG.info("Rebuild completed for all inactive/disabled indexes in data table:"
-                                        + dataPTable.getName());
+                                LOGGER.info("Rebuild completed for all inactive/disabled indexes" +
+                                        " in data table:" + dataPTable.getName());
                             }
-                            LOG.info(" no. of datatable rows read in rebuilding process is " + rowCount);
+                            LOGGER.info(" no. of datatable rows read in rebuilding process is "
+                                    + rowCount);
 							for (PTable indexPTable : indexesToPartiallyRebuild) {
 								String indexTableFullName = SchemaUtil.getTableName(
 										indexPTable.getSchemaName().getString(),
@@ -539,7 +547,8 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
 								        IndexUtil.updateIndexState(conn, indexTableFullName, PIndexState.ACTIVE, 0L,
 								            latestUpperBoundTimestamp);
 								        batchExecutedPerTableMap.remove(dataPTable.getName());
-								        LOG.info("Making Index:" + indexPTable.getTableName() + " active after rebuilding");
+								        LOGGER.info("Making Index:" + indexPTable.getTableName() +
+                                                " active after rebuilding");
 								    } else {
 								        // Increment timestamp so that client sees updated disable timestamp
 								        IndexUtil.updateIndexState(conn, indexTableFullName, indexPTable.getIndexState(),
@@ -549,34 +558,37 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
 								            noOfBatches = 0l;
 								        }
 								        batchExecutedPerTableMap.put(dataPTable.getName(), ++noOfBatches);
-								        LOG.info(
-								            "During Round-robin build: Successfully updated index disabled timestamp  for "
+								        LOGGER.info(
+								            "During Round-robin build: Successfully updated " +
+                                                    "index disabled timestamp  for "
 								                + indexTableFullName + " to " + scanEndTime);
 								    }
 								} catch (SQLException e) {
-								    LOG.error("Unable to rebuild " + dataPTable + " index " + indexTableFullName, e);
+								    LOGGER.error("Unable to rebuild " + dataPTable + " index " +
+                                            indexTableFullName, e);
 								}
 							}
 						} catch (Exception e) {
-							LOG.error("Unable to rebuild " + dataPTable + " indexes " + indexesToPartiallyRebuild, e);
+						    LOGGER.error("Unable to rebuild " + dataPTable + " indexes " +
+                                    indexesToPartiallyRebuild, e);
 						}
 					}
 				}
 			} catch (Throwable t) {
-				LOG.warn("ScheduledBuildIndexTask failed!", t);
+                LOGGER.warn("ScheduledBuildIndexTask failed!", t);
 			} finally {
 				if (scanner != null) {
 					try {
 						scanner.close();
 					} catch (IOException ignored) {
-						LOG.debug("ScheduledBuildIndexTask can't close scanner.", ignored);
+					    LOGGER.debug("ScheduledBuildIndexTask can't close scanner.", ignored);
 					}
 				}
 				if (conn != null) {
 					try {
 						conn.close();
 					} catch (SQLException ignored) {
-						LOG.debug("ScheduledBuildIndexTask can't close connection", ignored);
+                        LOGGER.debug("ScheduledBuildIndexTask can't close connection", ignored);
 					}
 				}
 			}
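
Related to the truncate-stats warning handled in MetaDataRegionObserver above, this is a
hypothetical, self-contained sketch of logging a caught exception through slf4j; the class
and message are illustrative only.

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class ExceptionLoggingSketch {
        private static final Logger LOGGER = LoggerFactory.getLogger(ExceptionLoggingSketch.class);

        public void runAndWarn(Runnable work) {
            try {
                work.run();
            } catch (RuntimeException e) {
                // Passing the exception as the final argument lets slf4j print the full
                // stack trace; e.getStackTrace().toString() would only print the array's
                // default Object identity string.
                LOGGER.warn("Background task failed; manual cleanup may be required", e);
            }
        }
    }
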
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
index 05539f3..56e2a11 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
@@ -122,7 +122,8 @@ public class PhoenixAccessController extends BaseMetaDataEndpointObserver {
         this.accessCheckEnabled = conf.getBoolean(QueryServices.PHOENIX_ACLS_ENABLED,
                 QueryServicesOptions.DEFAULT_PHOENIX_ACLS_ENABLED);
         if (!this.accessCheckEnabled) {
-            LOGGER.warn("PhoenixAccessController has been loaded with authorization checks disabled.");
+            LOGGER.warn(
+                    "PhoenixAccessController has been loaded with authorization checks disabled.");
         }
         if (env instanceof PhoenixMetaDataControllerEnvironment) {
             this.env = (PhoenixMetaDataControllerEnvironment)env;
@@ -600,7 +601,8 @@ public class PhoenixAccessController extends BaseMetaDataEndpointObserver {
               }
             }
         } else if (LOGGER.isDebugEnabled()) {
-            LOGGER.debug("No permissions found for table=" + table + " or namespace=" + table.getNamespaceAsString());
+            LOGGER.debug("No permissions found for table=" +
+                    table + " or namespace=" + table.getNamespaceAsString());
         }
         return false;
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index 161f9ec..c5fc266 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -201,7 +201,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
     private int scansReferenceCount = 0;
     @GuardedBy("lock")
     private boolean isRegionClosingOrSplitting = false;
-    private static final Logger logger = LoggerFactory.getLogger(UngroupedAggregateRegionObserver.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(UngroupedAggregateRegionObserver.class);
     private KeyValueBuilder kvBuilder;
     private Configuration upsertSelectConfig;
     private Configuration compactionConfig;
@@ -274,7 +274,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
           }
       }
       // TODO: should we use the one that is all or none?
-      logger.debug("Committing batch of " + mutations.size() + " mutations for " + region.getRegionInfo().getTable().getNameAsString());
+      LOGGER.debug("Committing batch of " + mutations.size() + " mutations for " + region.getRegionInfo().getTable().getNameAsString());
       region.batchMutate(mutations.toArray(mutationArray), HConstants.NO_NONCE, HConstants.NO_NONCE);
     }
 
@@ -300,7 +300,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
           return;
       }
 
-        logger.debug("Committing batch of " + mutations.size() + " mutations for " + table);
+        LOGGER.debug("Committing batch of " + mutations.size() + " mutations for " + table);
         try {
             table.batch(mutations);
         } catch (InterruptedException e) {
@@ -424,7 +424,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
         byte[] descRowKeyTableBytes = scan.getAttribute(UPGRADE_DESC_ROW_KEY);
         boolean isDescRowKeyOrderUpgrade = descRowKeyTableBytes != null;
         if (isDescRowKeyOrderUpgrade) {
-            logger.debug("Upgrading row key for " + region.getRegionInfo().getTable().getNameAsString());
+            LOGGER.debug("Upgrading row key for " + region.getRegionInfo().getTable().getNameAsString());
             projectedTable = deserializeTable(descRowKeyTableBytes);
             try {
                 writeToTable = PTableImpl.makePTable(projectedTable, true);
@@ -534,8 +534,8 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
             rowAggregators = aggregators.getAggregators();
             Pair<Integer, Integer> minMaxQualifiers = EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan);
             Tuple result = useQualifierAsIndex ? new PositionBasedMultiKeyValueTuple() : new MultiKeyValueTuple();
-            if (logger.isDebugEnabled()) {
-                logger.debug(LogUtil.addCustomAnnotations("Starting ungrouped coprocessor scan " + scan + " "+region.getRegionInfo(), ScanUtil.getCustomAnnotations(scan)));
+            if (LOGGER.isDebugEnabled()) {
+                LOGGER.debug(LogUtil.addCustomAnnotations("Starting ungrouped coprocessor scan " + scan + " "+region.getRegionInfo(), ScanUtil.getCustomAnnotations(scan)));
             }
             boolean useIndexProto = true;
             byte[] indexMaintainersPtr = scan.getAttribute(PhoenixIndexCodec.INDEX_PROTO_MD);
@@ -809,7 +809,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
                 synchronized (lock) {
                     scansReferenceCount--;
                     if (scansReferenceCount < 0) {
-                        logger.warn(
+                        LOGGER.warn(
                             "Scan reference count went below zero. Something isn't correct. Resetting it back to zero");
                         scansReferenceCount = 0;
                     }
@@ -821,7 +821,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
                     try {
                         targetHTable.close();
                     } catch (IOException e) {
-                        logger.error("Closing table: " + targetHTable + " failed: ", e);
+                        LOGGER.error("Closing table: " + targetHTable + " failed: ", e);
                     }
                 }
             } finally {
@@ -832,8 +832,8 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
                 }
             }
         }
-        if (logger.isDebugEnabled()) {
-            logger.debug(LogUtil.addCustomAnnotations("Finished scanning " + rowCount + " rows for ungrouped coprocessor scan " + scan, ScanUtil.getCustomAnnotations(scan)));
+        if (LOGGER.isDebugEnabled()) {
+            LOGGER.debug(LogUtil.addCustomAnnotations("Finished scanning " + rowCount + " rows for ungrouped coprocessor scan " + scan, ScanUtil.getCustomAnnotations(scan)));
         }
 
         final boolean hadAny = hasAny;
@@ -1015,8 +1015,8 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
                     } catch (Exception e) {
                         // If we can't reach the stats table, don't interrupt the normal
                         // compaction operation, just log a warning.
-                        if (logger.isWarnEnabled()) {
-                            logger.warn("Unable to collect stats for " + table, e);
+                        if (LOGGER.isWarnEnabled()) {
+                            LOGGER.warn("Unable to collect stats for " + table, e);
                         }
                     }
                     return internalScanner;
@@ -1112,7 +1112,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
                 }
             }
         } catch (IOException e) {
-            logger.error("IOException during rebuilding: " + Throwables.getStackTraceAsString(e));
+            LOGGER.error("IOException during rebuilding: " + Throwables.getStackTraceAsString(e));
             throw e;
         } finally {
             region.closeRegionOperation();
@@ -1154,7 +1154,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
     private RegionScanner collectStats(final RegionScanner innerScanner, StatisticsCollector stats,
             final Region region, final Scan scan, Configuration config) throws IOException {
         if (stats instanceof  NoOpStatisticsCollector) {
-            logger.info("UPDATE STATISTICS didn't run because stats is not enabled");
+            LOGGER.info("UPDATE STATISTICS didn't run because stats is not enabled");
 
             return new BaseRegionScanner(innerScanner) {
                 @Override
@@ -1205,7 +1205,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
             }
         } else {
             rowCount = CONCURRENT_UPDATE_STATS_ROW_COUNT;
-            logger.info("UPDATE STATISTICS didn't run because another UPDATE STATISTICS command was already running on the region "
+            LOGGER.info("UPDATE STATISTICS didn't run because another UPDATE STATISTICS command was already running on the region "
                     + region.getRegionInfo().getRegionNameAsString());
         }
         byte[] rowCountBytes = PLong.INSTANCE.toBytes(Long.valueOf(rowCount));
@@ -1302,18 +1302,18 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
                 }
                 return compactionRunning ? COMPACTION_UPDATE_STATS_ROW_COUNT : rowCount;
             } catch (IOException e) {
-                logger.error("IOException in update stats: " + Throwables.getStackTraceAsString(e));
+                LOGGER.error("IOException in update stats: " + Throwables.getStackTraceAsString(e));
                 throw e;
             } finally {
                 try {
                     if (noErrors && !compactionRunning) {
                         statsCollector.updateStatistic(region, scan);
-                        logger.info("UPDATE STATISTICS finished successfully for scanner: "
+                        LOGGER.info("UPDATE STATISTICS finished successfully for scanner: "
                                 + innerScanner + ". Number of rows scanned: " + rowCount
                                 + ". Time: " + (System.currentTimeMillis() - startTime));
                     }
                     if (compactionRunning) {
-                        logger.info("UPDATE STATISTICS stopped in between because major compaction was running for region "
+                        LOGGER.info("UPDATE STATISTICS stopped in between because major compaction was running for region "
                                 + region.getRegionInfo().getRegionNameAsString());
                     }
                 } finally {
@@ -1461,7 +1461,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
                         // FIXME need to handle views and indexes on views as well
                         for (PTable index : indexes) {
                             if (index.getIndexDisableTimestamp() != 0) {
-                                logger.info(
+                                LOGGER.info(
                                     "Modifying major compaction scanner to retain deleted cells for a table with disabled index: "
                                             + fullTableName);
                                 Scan scan = new Scan();
@@ -1481,10 +1481,10 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
                         }
                     } catch (Exception e) {
                         if (e instanceof TableNotFoundException) {
-                            logger.debug("Ignoring HBase table that is not a Phoenix table: " + fullTableName);
+                            LOGGER.debug("Ignoring HBase table that is not a Phoenix table: " + fullTableName);
                             // non-Phoenix HBase tables won't be found, do nothing
                         } else {
-                            logger.error("Unable to modify compaction scanner to retain deleted cells for a table with disabled Index; "
+                            LOGGER.error("Unable to modify compaction scanner to retain deleted cells for a table with disabled Index; "
                                     + fullTableName,
                                     e);
                         }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java
index 0c8e8dc..183878e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java
@@ -89,7 +89,7 @@ public class AggregatePlan extends BaseQueryPlan {
     private final Expression having;
     private List<KeyRange> splits;
     private List<List<Scan>> scans;
-    private static final Logger logger = LoggerFactory.getLogger(AggregatePlan.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(AggregatePlan.class);
     private boolean isSerial;
 
     public AggregatePlan(StatementContext context, FilterableStatement statement, TableRef table,
@@ -110,7 +110,7 @@ public class AggregatePlan extends BaseQueryPlan {
         boolean hasSerialHint = statement.getHint().hasHint(HintNode.Hint.SERIAL);
         boolean canBeExecutedSerially = ScanUtil.canQueryBeExecutedSerially(table.getTable(), orderBy, context); 
         if (hasSerialHint && !canBeExecutedSerially) {
-            logger.warn("This query cannot be executed serially. Ignoring the hint");
+            LOGGER.warn("This query cannot be executed serially. Ignoring the hint");
         }
         this.isSerial = hasSerialHint && canBeExecutedSerially;
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
index 2180c12..dcb642e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
@@ -358,12 +358,14 @@ public abstract class BaseQueryPlan implements QueryPlan {
         }
         
         if (LOGGER.isDebugEnabled()) {
-        	LOGGER.debug(LogUtil.addCustomAnnotations("Scan ready for iteration: " + scan, connection));
+            LOGGER.debug(LogUtil.addCustomAnnotations(
+                    "Scan ready for iteration: " + scan, connection));
         }
         
         ResultIterator iterator =  newIterator(scanGrouper, scan, caches);
         if (LOGGER.isDebugEnabled()) {
-        	LOGGER.debug(LogUtil.addCustomAnnotations("Iterator ready: " + iterator, connection));
+            LOGGER.debug(LogUtil.addCustomAnnotations(
+                    "Iterator ready: " + iterator, connection));
         }
 
         // wrap the iterator so we start/end tracing as we expect
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
index e062b07..83b3acc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
@@ -122,7 +122,7 @@ import com.google.common.collect.Sets;
  * Tracks the uncommitted state
  */
 public class MutationState implements SQLCloseable {
-    private static final Logger logger = LoggerFactory.getLogger(MutationState.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(MutationState.class);
     private static final int[] EMPTY_STATEMENT_INDEX_ARRAY = new int[0];
     private static final int MAX_COMMIT_RETRIES = 3;
 
@@ -970,7 +970,7 @@ public class MutationState implements SQLCloseable {
                 sendMutations(verifiedOrDeletedIndexMutations.entrySet().iterator(), span, indexMetaDataPtr);
             } catch (SQLException ex) {
                 // TODO: add a metric here
-                logger.warn(
+                LOGGER.warn(
                         "Ignoring exception that happened during setting index verified value to verified=TRUE "
                                 + verifiedOrDeletedIndexMutations.toString(),
                         ex);
@@ -1099,8 +1099,8 @@ public class MutationState implements SQLCloseable {
                         itrListMutation.remove();
 
                         batchCount++;
-                        if (logger.isDebugEnabled())
-                            logger.debug("Sent batch of " + mutationBatch.size() + " for "
+                        if (LOGGER.isDebugEnabled())
+                            LOGGER.debug("Sent batch of " + mutationBatch.size() + " for "
                                     + Bytes.toString(htableName));
                     }
                     child.stop();
@@ -1133,7 +1133,7 @@ public class MutationState implements SQLCloseable {
                             // If it fails again, we don't retry.
                             String msg = "Swallowing exception and retrying after clearing meta cache on connection. "
                                     + inferredE;
-                            logger.warn(LogUtil.addCustomAnnotations(msg, connection));
+                            LOGGER.warn(LogUtil.addCustomAnnotations(msg, connection));
                             connection.getQueryServices().clearTableRegionCache(htableName);
 
                             // add a new child span as this one failed
@@ -1373,8 +1373,8 @@ public class MutationState implements SQLCloseable {
                             finishSuccessful = true;
                         }
                     } catch (SQLException e) {
-                        if (logger.isInfoEnabled())
-                            logger.info(e.getClass().getName() + " at timestamp " + getInitialWritePointer()
+                        if (LOGGER.isInfoEnabled())
+                            LOGGER.info(e.getClass().getName() + " at timestamp " + getInitialWritePointer()
                                     + " with retry count of " + retryCount);
                         retryCommit = (e.getErrorCode() == SQLExceptionCode.TRANSACTION_CONFLICT_EXCEPTION
                             .getErrorCode() && retryCount < MAX_COMMIT_RETRIES);
@@ -1388,9 +1388,9 @@ public class MutationState implements SQLCloseable {
                         if (!finishSuccessful) {
                             try {
                                 phoenixTransactionContext.abort();
-                                if (logger.isInfoEnabled()) logger.info("Abort successful");
+                                if (LOGGER.isInfoEnabled()) LOGGER.info("Abort successful");
                             } catch (SQLException e) {
-                                if (logger.isInfoEnabled()) logger.info("Abort failed with " + e);
+                                if (LOGGER.isInfoEnabled()) LOGGER.info("Abort failed with " + e);
                                 if (sqlE == null) {
                                     sqlE = e;
                                 } else {
@@ -1444,7 +1444,7 @@ public class MutationState implements SQLCloseable {
      * @throws SQLException
      */
     private boolean shouldResubmitTransaction(Set<TableRef> txTableRefs) throws SQLException {
-        if (logger.isInfoEnabled()) logger.info("Checking for index updates as of " + getInitialWritePointer());
+        if (LOGGER.isInfoEnabled()) LOGGER.info("Checking for index updates as of " + getInitialWritePointer());
         MetaDataClient client = new MetaDataClient(connection);
         PMetaData cache = connection.getMetaDataCache();
         boolean addedAnyIndexes = false;
@@ -1471,13 +1471,13 @@ public class MutationState implements SQLCloseable {
                 // that an index was dropped and recreated with the same name but different
                 // indexed/covered columns.
                 addedAnyIndexes = (!oldIndexes.equals(updatedDataTable.getIndexes()));
-                if (logger.isInfoEnabled())
-                    logger.info((addedAnyIndexes ? "Updates " : "No updates ") + "as of " + timestamp + " to "
+                if (LOGGER.isInfoEnabled())
+                    LOGGER.info((addedAnyIndexes ? "Updates " : "No updates ") + "as of " + timestamp + " to "
                             + updatedDataTable.getName().getString() + " with indexes " + updatedDataTable.getIndexes());
             }
         }
-        if (logger.isInfoEnabled())
-            logger.info((addedAnyIndexes ? "Updates " : "No updates ") + "to indexes as of " + getInitialWritePointer()
+        if (LOGGER.isInfoEnabled())
+            LOGGER.info((addedAnyIndexes ? "Updates " : "No updates ") + "to indexes as of " + getInitialWritePointer()
                     + " over " + (allImmutableTables ? " all immutable tables" : " some mutable tables"));
         // If all tables are immutable, we know the conflict we got was due to our DDL/DML fence.
         // If any indexes were added, then the conflict might be due to DDL/DML fence.
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/ScanPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/ScanPlan.java
index cdb2da5..6d846a2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/ScanPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/ScanPlan.java
@@ -86,7 +86,7 @@ import org.slf4j.LoggerFactory;
  * @since 0.1
  */
 public class ScanPlan extends BaseQueryPlan {
-    private static final Logger logger = LoggerFactory.getLogger(ScanPlan.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(ScanPlan.class);
     private List<KeyRange> splits;
     private List<List<Scan>> scans;
     private boolean allowPageFilter;
@@ -136,7 +136,7 @@ public class ScanPlan extends BaseQueryPlan {
             boolean canBeExecutedSerially = ScanUtil.canQueryBeExecutedSerially(table, orderBy, context); 
             if (!canBeExecutedSerially) { 
                 if (hasSerialHint) {
-                    logger.warn("This query cannot be executed serially. Ignoring the hint");
+                    LOGGER.warn("This query cannot be executed serially. Ignoring the hint");
                 }
                 return false;
             }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/LikeExpression.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/LikeExpression.java
index ec4aa3a..4836894 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/LikeExpression.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/LikeExpression.java
@@ -51,7 +51,7 @@ import com.google.common.collect.Lists;
  * @since 0.1
  */
 public abstract class LikeExpression extends BaseCompoundExpression {
-    private static final Logger logger = LoggerFactory.getLogger(LikeExpression.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(LikeExpression.class);
 
     private static final String ZERO_OR_MORE = "\\E.*\\Q";
     private static final String ANY_ONE = "\\E.\\Q";
@@ -267,15 +267,15 @@ public abstract class LikeExpression extends BaseCompoundExpression {
         AbstractBasePattern pattern = this.pattern;
         if (pattern == null) { // TODO: don't allow? this is going to be slooowwww
             if (!getPatternExpression().evaluate(tuple, ptr)) {
-                if (logger.isTraceEnabled()) {
-                    logger.trace("LIKE is FALSE: pattern is null");
+                if (LOGGER.isTraceEnabled()) {
+                    LOGGER.trace("LIKE is FALSE: pattern is null");
                 }
                 return false;
             }
             String value = (String) PVarchar.INSTANCE.toObject(ptr, getPatternExpression().getSortOrder());
             pattern = compilePattern(value);
-            if (logger.isTraceEnabled()) {
-                logger.trace("LIKE pattern is expression: " + pattern.pattern());
+            if (LOGGER.isTraceEnabled()) {
+                LOGGER.trace("LIKE pattern is expression: " + pattern.pattern());
             }
         }
 
@@ -283,21 +283,21 @@ public abstract class LikeExpression extends BaseCompoundExpression {
         SortOrder strSortOrder = strExpression.getSortOrder();
         PVarchar strDataType = PVarchar.INSTANCE;
         if (!strExpression.evaluate(tuple, ptr)) {
-            if (logger.isTraceEnabled()) {
-                logger.trace("LIKE is FALSE: child expression is null");
+            if (LOGGER.isTraceEnabled()) {
+                LOGGER.trace("LIKE is FALSE: child expression is null");
             }
             return false;
         }
 
         String value = null;
-        if (logger.isTraceEnabled()) {
+        if (LOGGER.isTraceEnabled()) {
             value = (String) strDataType.toObject(ptr, strSortOrder);
         }
         strDataType.coerceBytes(ptr, strDataType, strSortOrder, SortOrder.ASC);
         pattern.matches(ptr);
-        if (logger.isTraceEnabled()) {
+        if (LOGGER.isTraceEnabled()) {
             boolean matched = ((Boolean) PBoolean.INSTANCE.toObject(ptr)).booleanValue();
-            logger.trace("LIKE(value='" + value + "'pattern='" + pattern.pattern() + "' is " + matched);
+            LOGGER.trace("LIKE(value='" + value + "'pattern='" + pattern.pattern() + "' is " + matched);
         }
         return true;
     }
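
As a companion to the isTraceEnabled() guards kept in LikeExpression above, here is a minimal
hypothetical sketch of when a level guard is still worth keeping under slf4j; the names below
are invented for illustration.

    import java.nio.charset.StandardCharsets;

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class GuardedTraceSketch {
        private static final Logger LOGGER = LoggerFactory.getLogger(GuardedTraceSketch.class);

        public void traceDecodedValue(byte[] raw) {
            // Placeholders avoid building the message when TRACE is off, but they cannot
            // avoid computing the arguments; guard when the argument itself is costly.
            if (LOGGER.isTraceEnabled()) {
                String decoded = new String(raw, StandardCharsets.UTF_8);
                LOGGER.trace("Decoded value: {}", decoded);
            }
        }
    }
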
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/aggregator/FirstLastValueServerAggregator.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/aggregator/FirstLastValueServerAggregator.java
index b41c6c6..f647c45 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/aggregator/FirstLastValueServerAggregator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/aggregator/FirstLastValueServerAggregator.java
@@ -42,7 +42,7 @@ import org.slf4j.LoggerFactory;
  */
 public class FirstLastValueServerAggregator extends BaseAggregator {
 
-    private static final Logger logger = LoggerFactory.getLogger(FirstLastValueServerAggregator.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(FirstLastValueServerAggregator.class);
     protected List<Expression> children;
     protected BinaryComparator topOrder = new BinaryComparator(ByteUtil.EMPTY_BYTE_ARRAY);
     protected byte[] topValue;
@@ -88,7 +88,7 @@ public class FirstLastValueServerAggregator extends BaseAggregator {
                 try {
                     addFlag = true;
                 } catch (Exception e) {
-                    logger.error(e.getMessage());
+                    LOGGER.error(e.getMessage(), e);
                 }
             } else {
                 if (isAscending) {
@@ -180,7 +180,7 @@ public class FirstLastValueServerAggregator extends BaseAggregator {
         try {
             ptr.set(payload.getPayload());
         } catch (IOException ex) {
-            logger.error(ex.getMessage());
+            LOGGER.error(ex.getMessage(), ex);
             return false;
         }
         return true;
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/aggregator/SizeTrackingServerAggregators.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/aggregator/SizeTrackingServerAggregators.java
index 983968b..e057173 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/aggregator/SizeTrackingServerAggregators.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/aggregator/SizeTrackingServerAggregators.java
@@ -25,7 +25,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 public class SizeTrackingServerAggregators extends ServerAggregators {
-    private static final Logger logger = LoggerFactory.getLogger(SizeTrackingServerAggregators.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(SizeTrackingServerAggregators.class);
 
     private final MemoryChunk chunk;
     private final int sizeIncrease;
@@ -50,7 +50,7 @@ public class SizeTrackingServerAggregators extends ServerAggregators {
             expressions[i].reset();
         }
         while(dsize > chunk.getSize()) {
-            logger.info("Request: {}, resizing {} by 1024*1024", dsize, chunk.getSize());
+            LOGGER.info("Request: {}, resizing {} by 1024*1024", dsize, chunk.getSize());
             chunk.resize(chunk.getSize() + sizeIncrease);
         }
         memoryUsed = dsize;
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/CollationKeyFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/CollationKeyFunction.java
index 6644a7e..7dfb1d4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/CollationKeyFunction.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/CollationKeyFunction.java
@@ -125,7 +125,8 @@ public class CollationKeyFunction extends ScalarFunction {
 		byte[] collationKeyByteArray = collator.getCollationKey(inputString).toByteArray();
 
 		if (LOGGER.isTraceEnabled()) {
-			LOGGER.trace("CollationKey bytes: " + VarBinaryFormatter.INSTANCE.format(collationKeyByteArray));
+			LOGGER.trace("CollationKey bytes: " +
+					VarBinaryFormatter.INSTANCE.format(collationKeyByteArray));
 		}
 
 		ptr.set(collationKeyByteArray);
@@ -167,7 +168,8 @@ public class CollationKeyFunction extends ScalarFunction {
 		}
 
 		if (LOGGER.isTraceEnabled()) {
-			LOGGER.trace(String.format("Collator: [strength: %d, decomposition: %d], Special-Upper-Case: %s",
+			LOGGER.trace(String.format(
+					"Collator: [strength: %d, decomposition: %d], Special-Upper-Case: %s",
 					collator.getStrength(), collator.getDecomposition(),
 					BooleanUtils.isTrue(useSpecialUpperCaseCollator)));
 		}
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/filter/RowKeyComparisonFilter.java b/phoenix-core/src/main/java/org/apache/phoenix/filter/RowKeyComparisonFilter.java
index 2eb69bd..df81957 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/filter/RowKeyComparisonFilter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/filter/RowKeyComparisonFilter.java
@@ -39,7 +39,7 @@ import org.slf4j.LoggerFactory;
  *
  */
 public class RowKeyComparisonFilter extends BooleanExpressionFilter {
-    private static final Logger logger = LoggerFactory.getLogger(RowKeyComparisonFilter.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(RowKeyComparisonFilter.class);
 
     private boolean evaluate = true;
     private boolean keepRow = false;
@@ -70,8 +70,8 @@ public class RowKeyComparisonFilter extends BooleanExpressionFilter {
         if (evaluate) {
             inputTuple.setKey(v.getRowArray(), v.getRowOffset(), v.getRowLength());
             this.keepRow = Boolean.TRUE.equals(evaluate(inputTuple));
-            if (logger.isTraceEnabled()) {
-                logger.trace("RowKeyComparisonFilter: " + (this.keepRow ? "KEEP" : "FILTER")
+            if (LOGGER.isTraceEnabled()) {
+                LOGGER.trace("RowKeyComparisonFilter: " + (this.keepRow ? "KEEP" : "FILTER")
                         + " row " + inputTuple);
             }
             evaluate = false;
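
Where the message is built by string concatenation, as in the trace call above, the isTraceEnabled()/isDebugEnabled() guard keeps that work off the hot path when the level is disabled. A sketch of the guard pattern under an illustrative class name, not from this patch:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class GuardedTraceExample {
        private static final Logger LOGGER = LoggerFactory.getLogger(GuardedTraceExample.class);

        void logDecision(boolean keepRow, Object inputTuple) {
            // Guarding avoids building the concatenated string when TRACE is off.
            if (LOGGER.isTraceEnabled()) {
                LOGGER.trace("RowKeyComparisonFilter: " + (keepRow ? "KEEP" : "FILTER")
                        + " row " + inputTuple);
            }
        }
    }
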
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
index 05baff4..ff8b555 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
@@ -325,7 +325,8 @@ public class Indexer extends BaseRegionObserver {
           long duration = EnvironmentEdgeManager.currentTimeMillis() - start;
           if (duration >= slowIndexPrepareThreshold) {
               if (LOGGER.isDebugEnabled()) {
-                  LOGGER.debug(getCallTooSlowMessage("preIncrementAfterRowLock", duration, slowPreIncrementThreshold));
+                  LOGGER.debug(getCallTooSlowMessage("preIncrementAfterRowLock",
+                          duration, slowPreIncrementThreshold));
               }
               metricSource.incrementSlowDuplicateKeyCheckCalls();
           }
@@ -350,7 +351,8 @@ public class Indexer extends BaseRegionObserver {
           long duration = EnvironmentEdgeManager.currentTimeMillis() - start;
           if (duration >= slowIndexPrepareThreshold) {
               if (LOGGER.isDebugEnabled()) {
-                  LOGGER.debug(getCallTooSlowMessage("preBatchMutate", duration, slowIndexPrepareThreshold));
+                  LOGGER.debug(getCallTooSlowMessage("preBatchMutate",
+                          duration, slowIndexPrepareThreshold));
               }
               metricSource.incrementNumSlowIndexPrepareCalls();
           }
@@ -504,7 +506,8 @@ public class Indexer extends BaseRegionObserver {
           long duration = EnvironmentEdgeManager.currentTimeMillis() - start;
           if (duration >= slowIndexPrepareThreshold) {
               if (LOGGER.isDebugEnabled()) {
-                  LOGGER.debug(getCallTooSlowMessage("indexPrepare", duration, slowIndexPrepareThreshold));
+                  LOGGER.debug(getCallTooSlowMessage(
+                          "indexPrepare", duration, slowIndexPrepareThreshold));
               }
               metricSource.incrementNumSlowIndexPrepareCalls();
           }
@@ -576,7 +579,8 @@ public class Indexer extends BaseRegionObserver {
            long duration = EnvironmentEdgeManager.currentTimeMillis() - start;
            if (duration >= slowIndexWriteThreshold) {
                if (LOGGER.isDebugEnabled()) {
-                   LOGGER.debug(getCallTooSlowMessage("postBatchMutateIndispensably", duration, slowIndexWriteThreshold));
+                   LOGGER.debug(getCallTooSlowMessage("postBatchMutateIndispensably",
+                           duration, slowIndexWriteThreshold));
                }
                metricSource.incrementNumSlowIndexWriteCalls();
            }
@@ -616,7 +620,8 @@ public class Indexer extends BaseRegionObserver {
           long duration = EnvironmentEdgeManager.currentTimeMillis() - start;
           if (duration >= slowIndexWriteThreshold) {
               if (LOGGER.isDebugEnabled()) {
-                  LOGGER.debug(getCallTooSlowMessage("indexWrite", duration, slowIndexWriteThreshold));
+                  LOGGER.debug(getCallTooSlowMessage("indexWrite",
+                          duration, slowIndexWriteThreshold));
               }
               metricSource.incrementNumSlowIndexWriteCalls();
           }
@@ -723,7 +728,8 @@ public class Indexer extends BaseRegionObserver {
           long duration = EnvironmentEdgeManager.currentTimeMillis() - start;
           if (duration >= slowPreWALRestoreThreshold) {
               if (LOGGER.isDebugEnabled()) {
-                  LOGGER.debug(getCallTooSlowMessage("preWALRestore", duration, slowPreWALRestoreThreshold));
+                  LOGGER.debug(getCallTooSlowMessage("preWALRestore",
+                          duration, slowPreWALRestoreThreshold));
               }
               metricSource.incrementNumSlowPreWALRestoreCalls();
           }
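
The Indexer hunks above all follow the same shape: time a coprocessor hook, and when it exceeds a configured threshold, emit a debug-guarded "too slow" message and bump a metric. A condensed sketch of that shape using plain System.currentTimeMillis; the class, field, and message here are illustrative rather than the Indexer's own helpers:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class SlowCallLoggingExample {
        private static final Logger LOGGER = LoggerFactory.getLogger(SlowCallLoggingExample.class);
        private final long slowCallThresholdMillis = 2000; // assumed configuration value

        void timedCall(Runnable work) {
            long start = System.currentTimeMillis();
            try {
                work.run();
            } finally {
                long duration = System.currentTimeMillis() - start;
                if (duration >= slowCallThresholdMillis) {
                    if (LOGGER.isDebugEnabled()) {
                        LOGGER.debug("call took " + duration + " ms, over threshold of "
                                + slowCallThresholdMillis + " ms");
                    }
                    // a real implementation would also increment a slow-call metric here
                }
            }
        }
    }
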
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/QuickFailingTaskRunner.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/QuickFailingTaskRunner.java
index 2df183e..720ad98 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/QuickFailingTaskRunner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/parallel/QuickFailingTaskRunner.java
@@ -22,6 +22,7 @@ import java.util.concurrent.ExecutorService;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
 
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
index 389af36..84ccdb1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
@@ -194,7 +194,8 @@ public class IndexManagementUtil {
             LOGGER.info("Rethrowing " + e);
             throw e1;
         } catch (Throwable e1) {
-            LOGGER.info("Rethrowing " + e1 + " as a " + IndexBuildingFailureException.class.getSimpleName());
+            LOGGER.info("Rethrowing " + e1 + " as a " +
+                    IndexBuildingFailureException.class.getSimpleName());
             throw new IndexBuildingFailureException("Failed to build index for unexpected reason!", e1);
         }
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java
index db7e6a0..363e780 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java
@@ -101,7 +101,8 @@ public class RecoveryIndexWriter extends IndexWriter {
             ImmutableBytesPtr ptr = new ImmutableBytesPtr(tableName);
             HTableInterfaceReference table = tables.get(ptr);
             if (nonExistingTablesList.contains(table)) {
-                LOGGER.debug("Edits found for non existing table: " + table.getTableName() + " so skipping it!!");
+                LOGGER.debug("Edits found for non existing table: " +
+                        table.getTableName() + " so skipping it!!");
                 continue;
             }
             if (table == null) {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
index 934e116..88de4d9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
@@ -68,7 +68,8 @@ import com.google.common.collect.Multimap;
  * client.
  */
 public class TrackingParallelWriterIndexCommitter implements IndexCommitter {
-    private static final Logger LOGGER = LoggerFactory.getLogger(TrackingParallelWriterIndexCommitter.class);
+    private static final Logger LOGGER =
+            LoggerFactory.getLogger(TrackingParallelWriterIndexCommitter.class);
 
     public static final String NUM_CONCURRENT_INDEX_WRITER_THREADS_CONF_KEY = "index.writer.threads.max";
     private static final int DEFAULT_CONCURRENT_INDEX_WRITER_THREADS = 10;
@@ -171,14 +172,15 @@ public class TrackingParallelWriterIndexCommitter implements IndexCommitter {
                             } catch (IOException ignord) {
                                 // when it's failed we fall back to the standard & slow way
                                 if (LOGGER.isTraceEnabled()) {
-                                    LOGGER.trace("indexRegion.batchMutate failed and fall back to HTable.batch(). Got error="
-                                            + ignord);
+                                    LOGGER.trace("indexRegion.batchMutate failed and fall " +
+                                            "back to HTable.batch(). Got error=" + ignord);
                                 }
                             }
                         }
 
                         if (LOGGER.isTraceEnabled()) {
-                            LOGGER.trace("Writing index update:" + mutations + " to table: " + tableReference);
+                            LOGGER.trace("Writing index update:" + mutations + " to table: "
+                                    + tableReference);
                         }
                         // if the client can retry index writes, then we don't need to retry here
                         HTableFactory factory;
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
index a5fe10a..11cb0a0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
@@ -282,25 +282,29 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
                         MetaDataMutationResult result = IndexUtil.updateIndexState(indexTableName, minTimeStamp,
                                 systemTable, newState);
                         if (result.getMutationCode() == MutationCode.TABLE_NOT_FOUND) {
-                            LOGGER.info("Index " + indexTableName + " has been dropped. Ignore uncommitted mutations");
+                            LOGGER.info("Index " + indexTableName +
+                                    " has been dropped. Ignore uncommitted mutations");
                             continue;
                         }
                         if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) {
                             if (leaveIndexActive) {
-                                LOGGER.warn("Attempt to update INDEX_DISABLE_TIMESTAMP " + " failed with code = "
+                                LOGGER.warn("Attempt to update INDEX_DISABLE_TIMESTAMP"
+                                        + " failed with code = "
                                         + result.getMutationCode());
                                 // If we're not disabling the index, then we don't want to throw as throwing
                                 // will lead to the RS being shutdown.
                                 if (blockDataTableWritesOnFailure) { throw new DoNotRetryIOException(
                                         "Attempt to update INDEX_DISABLE_TIMESTAMP failed."); }
                             } else {
-                                LOGGER.warn("Attempt to disable index " + indexTableName + " failed with code = "
-                                        + result.getMutationCode() + ". Will use default failure policy instead.");
+                                LOGGER.warn("Attempt to disable index " + indexTableName +
+                                        " failed with code = " + result.getMutationCode() +
+                                        ". Will use default failure policy instead.");
                                 throw new DoNotRetryIOException("Attempt to disable " + indexTableName + " failed.");
                             }
                         }
-                        LOGGER.info("Successfully update INDEX_DISABLE_TIMESTAMP for " + indexTableName
-                                + " due to an exception while writing updates. indexState=" + newState,
+                        LOGGER.info("Successfully updated INDEX_DISABLE_TIMESTAMP for " +
+                                        indexTableName + " due to an exception while" +
+                                        " writing updates. indexState=" + newState,
                             cause);
                     } catch (Throwable t) {
                         if (t instanceof Exception) {
@@ -351,8 +355,8 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
                                         mutation.getRow().length - offset));
                 String indexTableName = localIndexNames.get(new ImmutableBytesWritable(viewId));
                 if (indexTableName == null) {
-                    LOGGER.error("Unable to find local index on " + ref.getTableName() + " with viewID of "
-                            + Bytes.toStringBinary(viewId));
+                    LOGGER.error("Unable to find local index on " + ref.getTableName() +
+                            " with viewID of " + Bytes.toStringBinary(viewId));
                 } else {
                     indexTableNames.add(indexTableName);
                 }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
index d2c2641..454f40d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
@@ -141,7 +141,7 @@ import com.google.common.collect.Lists;
  * @since 0.1
  */
 public abstract class BaseResultIterators extends ExplainTable implements ResultIterators {
-	public static final Logger logger = LoggerFactory.getLogger(BaseResultIterators.class);
+	public static final Logger LOGGER = LoggerFactory.getLogger(BaseResultIterators.class);
     private static final int ESTIMATED_GUIDEPOSTS_PER_REGION = 20;
     private static final int MIN_SEEK_TO_COLUMN_VERSION = VersionUtil.encodeVersion("0", "98", "12");
     private final List<List<Scan>> scans;
@@ -1220,8 +1220,8 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
      */
     @Override
     public List<PeekingResultIterator> getIterators() throws SQLException {
-        if (logger.isDebugEnabled()) {
-            logger.debug(LogUtil.addCustomAnnotations("Getting iterators for " + this,
+        if (LOGGER.isDebugEnabled()) {
+            LOGGER.debug(LogUtil.addCustomAnnotations("Getting iterators for " + this,
                     ScanUtil.getCustomAnnotations(scan)));
         }
         boolean isReverse = ScanUtil.isReversed(scan);
@@ -1307,7 +1307,7 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
                             Scan oldScan = scanPair.getFirst();
                             byte[] startKey = oldScan.getAttribute(SCAN_ACTUAL_START_ROW);
                             if(e2 instanceof HashJoinCacheNotFoundException){
-                                logger.debug(
+                                LOGGER.debug(
                                         "Retrying when Hash Join cache is not found on the server ,by sending the cache again");
                                 if(retryCount<=0){
                                     throw e2;
@@ -1444,7 +1444,7 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
                     Thread.currentThread().interrupt();
                     throw new RuntimeException(e);
                 } catch (ExecutionException e) {
-                    logger.info("Failed to execute task during cancel", e);
+                    LOGGER.info("Failed to execute task during cancel", e);
                     continue;
                 }
             }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java
index 1aab2d5..2fb7b72 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java
@@ -54,7 +54,7 @@ import com.google.common.base.Preconditions;
  */
 @Deprecated
 public class ChunkedResultIterator implements PeekingResultIterator {
-    private static final Logger logger = LoggerFactory.getLogger(ChunkedResultIterator.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(ChunkedResultIterator.class);
 
     private final ParallelIteratorFactory delegateIteratorFactory;
     private ImmutableBytesWritable lastKey = new ImmutableBytesWritable();
@@ -89,7 +89,7 @@ public class ChunkedResultIterator implements PeekingResultIterator {
 
         @Override
         public PeekingResultIterator newIterator(StatementContext context, ResultIterator scanner, Scan scan, String tableName, QueryPlan plan) throws SQLException {
-            if (logger.isDebugEnabled()) logger.debug(LogUtil.addCustomAnnotations("ChunkedResultIteratorFactory.newIterator over " + tableRef.getTable().getPhysicalName().getString() + " with " + scan, ScanUtil.getCustomAnnotations(scan)));
+            if (LOGGER.isDebugEnabled()) LOGGER.debug(LogUtil.addCustomAnnotations("ChunkedResultIteratorFactory.newIterator over " + tableRef.getTable().getPhysicalName().getString() + " with " + scan, ScanUtil.getCustomAnnotations(scan)));
             return new ChunkedResultIterator(delegateFactory, mutationState, context, tableRef, scan, 
                     mutationState.getConnection().getQueryServices().getProps().getLong(
                                 QueryServices.SCAN_RESULT_CHUNK_SIZE,
@@ -110,7 +110,7 @@ public class ChunkedResultIterator implements PeekingResultIterator {
         // Instantiate single chunk iterator and the delegate iterator in constructor
         // to get parallel scans kicked off in separate threads. If we delay this,
         // we'll get serialized behavior (see PHOENIX-
-        if (logger.isDebugEnabled()) logger.debug(LogUtil.addCustomAnnotations("Get first chunked result iterator over " + tableRef.getTable().getPhysicalName().getString() + " with " + scan, ScanUtil.getCustomAnnotations(scan)));
+        if (LOGGER.isDebugEnabled()) LOGGER.debug(LogUtil.addCustomAnnotations("Get first chunked result iterator over " + tableRef.getTable().getPhysicalName().getString() + " with " + scan, ScanUtil.getCustomAnnotations(scan)));
         ResultIterator singleChunkResultIterator = new SingleChunkResultIterator(scanner, chunkSize);
         String tableName = tableRef.getTable().getPhysicalName().getString();
         resultIterator = delegateIteratorFactory.newIterator(context, singleChunkResultIterator, scan, tableName, plan);
@@ -149,7 +149,7 @@ public class ChunkedResultIterator implements PeekingResultIterator {
             } else {
                 scan.setStartRow(ByteUtil.copyKeyBytesIfNecessary(lastKey));
             }
-            if (logger.isDebugEnabled()) logger.debug(LogUtil.addCustomAnnotations("Get next chunked result iterator over " + tableRef.getTable().getPhysicalName().getString() + " with " + scan, ScanUtil.getCustomAnnotations(scan)));
+            if (LOGGER.isDebugEnabled()) LOGGER.debug(LogUtil.addCustomAnnotations("Get next chunked result iterator over " + tableRef.getTable().getPhysicalName().getString() + " with " + scan, ScanUtil.getCustomAnnotations(scan)));
             String tableName = tableRef.getTable().getPhysicalName().getString();
             ReadMetricQueue readMetrics = context.getReadMetricsQueue();
             ScanMetricsHolder scanMetricsHolder = ScanMetricsHolder.getInstance(readMetrics, tableName, scan,
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java
index 262ae44..3d5c96b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java
@@ -52,7 +52,7 @@ import com.google.common.collect.Lists;
  * @since 0.1
  */
 public class ParallelIterators extends BaseResultIterators {
-	private static final Logger logger = LoggerFactory.getLogger(ParallelIterators.class);
+	private static final Logger LOGGER = LoggerFactory.getLogger(ParallelIterators.class);
 	private static final String NAME = "PARALLEL";
     private final ParallelIteratorFactory iteratorFactory;
     private final boolean initFirstScanOnly;
@@ -122,8 +122,8 @@ public class ParallelIterators extends BaseResultIterators {
                 @Override
                 public PeekingResultIterator call() throws Exception {
                     long startTime = System.currentTimeMillis();
-                    if (logger.isDebugEnabled()) {
-                        logger.debug(LogUtil.addCustomAnnotations("Id: " + scanId + ", Time: " + (System.currentTimeMillis() - startTime) + "ms, Scan: " + scan, ScanUtil.getCustomAnnotations(scan)));
+                    if (LOGGER.isDebugEnabled()) {
+                        LOGGER.debug(LogUtil.addCustomAnnotations("Id: " + scanId + ", Time: " + (System.currentTimeMillis() - startTime) + "ms, Scan: " + scan, ScanUtil.getCustomAnnotations(scan)));
                     }
                     PeekingResultIterator iterator = iteratorFactory.newIterator(context, tableResultItr, scan, physicalTableName, ParallelIterators.this.plan);
                     if (initFirstScanOnly) {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/RoundRobinResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/RoundRobinResultIterator.java
index 5624f5f..bc77c98 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/RoundRobinResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/RoundRobinResultIterator.java
@@ -49,7 +49,7 @@ import com.google.common.base.Throwables;
  */
 public class RoundRobinResultIterator implements ResultIterator {
 
-    private static final Logger logger = LoggerFactory.getLogger(RoundRobinResultIterator.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(RoundRobinResultIterator.class);
 
     private final int threshold;
 
@@ -223,8 +223,8 @@ public class RoundRobinResultIterator implements ResultIterator {
             final ConnectionQueryServices services = context.getConnection().getQueryServices();
             ExecutorService executor = services.getExecutor();
             numParallelFetches++;
-            if (logger.isDebugEnabled()) {
-                logger.debug("Performing parallel fetch for " + openIterators.size() + " iterators. ");
+            if (LOGGER.isDebugEnabled()) {
+                LOGGER.debug("Performing parallel fetch for " + openIterators.size() + " iterators. ");
             }
             for (final RoundRobinIterator itr : openIterators) {
                 Future<Tuple> future = executor.submit(new Callable<Tuple>() {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableResultIterator.java
index 8a15074..66eff3b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableResultIterator.java
@@ -81,7 +81,7 @@ public class TableResultIterator implements ResultIterator {
     private final long renewLeaseThreshold;
     private final QueryPlan plan;
     private final ParallelScanGrouper scanGrouper;
-    private static final Logger logger = LoggerFactory.getLogger(TableResultIterator.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(TableResultIterator.class);
     private Tuple lastTuple = null;
     private ImmutableBytesWritable ptr = new ImmutableBytesWritable();
     @GuardedBy("renewLeaseLock")
@@ -190,7 +190,7 @@ public class TableResultIterator implements ResultIterator {
                             }
                         }
                         plan.getContext().getConnection().getQueryServices().clearTableRegionCache(htable.getTableName());
-                        logger.debug(
+                        LOGGER.debug(
                                 "Retrying when Hash Join cache is not found on the server ,by sending the cache again");
                         if (retry <= 0) {
                             throw e1;
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
index 67ac9c9..8ac5375 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
@@ -61,7 +61,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
  * @since 0.1
  */
 public final class PhoenixDriver extends PhoenixEmbeddedDriver {
-    private static final Logger logger = LoggerFactory.getLogger(PhoenixDriver.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixDriver.class);
     public static final PhoenixDriver INSTANCE;
     private static volatile String driverShutdownMsg;
     static {
@@ -100,11 +100,11 @@ public final class PhoenixDriver extends PhoenixEmbeddedDriver {
                             // policy). We don't care about any exceptions, we're going down anyways.
                             future.get(millisBeforeShutdown, TimeUnit.MILLISECONDS);
                         } catch (ExecutionException e) {
-                            logger.warn("Failed to close instance", e);
+                            LOGGER.warn("Failed to close instance", e);
                         } catch (InterruptedException e) {
-                            logger.warn("Interrupted waiting to close instance", e);
+                            LOGGER.warn("Interrupted waiting to close instance", e);
                         } catch (TimeoutException e) {
-                            logger.warn("Timed out waiting to close instance", e);
+                            LOGGER.warn("Timed out waiting to close instance", e);
                         } finally {
                             // We're going down, but try to clean up.
                             svc.shutdownNow();
@@ -116,7 +116,7 @@ public final class PhoenixDriver extends PhoenixEmbeddedDriver {
                 // Don't want to register it if we're already in the process of going down.
                 DriverManager.registerDriver(INSTANCE);
             } catch (IllegalStateException e) {
-                logger.warn("Failed to register PhoenixDriver shutdown hook as the JVM is already shutting down");
+                LOGGER.warn("Failed to register PhoenixDriver shutdown hook as the JVM is already shutting down");
 
                 // Close the instance now because we don't have the shutdown hook
                 closeInstance(INSTANCE);
@@ -132,7 +132,7 @@ public final class PhoenixDriver extends PhoenixEmbeddedDriver {
         try {
             instance.close();
         } catch (SQLException e) {
-            logger.warn("Unable to close PhoenixDriver on shutdown", e);
+            LOGGER.warn("Unable to close PhoenixDriver on shutdown", e);
         } finally {
             driverShutdownMsg = "Phoenix driver closed because server is shutting down";
         }
@@ -156,14 +156,14 @@ public final class PhoenixDriver extends PhoenixEmbeddedDriver {
                 @Override
                 public void onRemoval(RemovalNotification<ConnectionInfo, ConnectionQueryServices> notification) {
                     String connInfoIdentifier = notification.getKey().toString();
-                    logger.debug("Expiring " + connInfoIdentifier + " because of "
+                    LOGGER.debug("Expiring " + connInfoIdentifier + " because of "
                         + notification.getCause().name());
 
                     try {
                         notification.getValue().close();
                     }
                     catch (SQLException se) {
-                        logger.error("Error while closing expired cache connection " + connInfoIdentifier, se);
+                        LOGGER.error("Error while closing expired cache connection " + connInfoIdentifier, se);
                     }
                 }
             };
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java
index 01d1072..91f04d7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java
@@ -67,7 +67,6 @@ public abstract class PhoenixEmbeddedDriver implements Driver, SQLCloseable {
     /**
      * The protocol for Phoenix Network Client 
      */ 
-    private static final org.slf4j.Logger LOGGER = LoggerFactory.getLogger(PhoenixEmbeddedDriver.class);
     private final static String DNC_JDBC_PROTOCOL_SUFFIX = "//";
     private final static String DRIVER_NAME = "PhoenixEmbeddedDriver";
     private static final String TERMINATOR = "" + PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR;
@@ -195,7 +194,8 @@ public abstract class PhoenixEmbeddedDriver implements Driver, SQLCloseable {
      * @since 0.1.1
      */
     public static class ConnectionInfo {
-        private static final org.slf4j.Logger logger = LoggerFactory.getLogger(ConnectionInfo.class);
+        private static final org.slf4j.Logger LOGGER =
+                LoggerFactory.getLogger(ConnectionInfo.class);
         private static final Object KERBEROS_LOGIN_LOCK = new Object();
         private static final char WINDOWS_SEPARATOR_CHAR = '\\';
         private static final String REALM_EQUIVALENCY_WARNING_MSG = "Provided principal does not contan a realm and the default realm cannot be determined. Ignoring realm equivalency check.";
@@ -376,23 +376,25 @@ public abstract class PhoenixEmbeddedDriver implements Driver, SQLCloseable {
                                 currentUser = UserGroupInformation.getCurrentUser();
                                 if (!currentUser.hasKerberosCredentials() || !isSameName(currentUser.getUserName(), principal)) {
                                     final Configuration config = getConfiguration(props, info, principal, keytab);
-                                    logger.info("Trying to connect to a secure cluster as {} with keytab {}", config.get(QueryServices.HBASE_CLIENT_PRINCIPAL),
+                                    LOGGER.info("Trying to connect to a secure cluster as {} " +
+                                                    "with keytab {}", config.get(
+                                                            QueryServices.HBASE_CLIENT_PRINCIPAL),
                                             config.get(QueryServices.HBASE_CLIENT_KEYTAB));
                                     UserGroupInformation.setConfiguration(config);
                                     User.login(config, QueryServices.HBASE_CLIENT_KEYTAB, QueryServices.HBASE_CLIENT_PRINCIPAL, null);
-                                    logger.info("Successful login to secure cluster");
+                                    LOGGER.info("Successful login to secure cluster");
                                 }
                             }
                         } else {
                             // The user already has Kerberos creds, so there isn't anything to change in the ConnectionInfo.
-                            logger.debug("Already logged in as {}", currentUser);
+                            LOGGER.debug("Already logged in as {}", currentUser);
                         }
                     } catch (IOException e) {
                         throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ESTABLISH_CONNECTION)
                             .setRootCause(e).build().buildException();
                     }
                 } else {
-                    logger.debug("Principal and keytab not provided, not attempting Kerberos login");
+                    LOGGER.debug("Principal and keytab not provided, not attempting Kerberos login");
                 }
             } // else, no connection, no need to login
             // Will use the current User from UGI
@@ -631,7 +633,8 @@ public abstract class PhoenixEmbeddedDriver implements Driver, SQLCloseable {
                 throw getMalFormedUrlException(url);
             }
             String znodeParent = config.get(HConstants.ZOOKEEPER_ZNODE_PARENT);
-            LOGGER.debug("Getting default jdbc connection url " + quorum + ":" + port + ":" + znodeParent);
+            LOGGER.debug("Getting default jdbc connection url "
+                    + quorum + ":" + port + ":" + znodeParent);
             return new ConnectionInfo(quorum, port, znodeParent);
         }
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
index 015f04c..46ddfd0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
@@ -213,7 +213,7 @@ import com.google.common.math.IntMath;
  */
 public class PhoenixStatement implements Statement, SQLCloseable {
 	
-    private static final Logger logger = LoggerFactory.getLogger(PhoenixStatement.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixStatement.class);
     
     public enum Operation {
         QUERY("queried", false),
@@ -310,9 +310,10 @@ public class PhoenixStatement implements Statement, SQLCloseable {
                          // this will create its own trace internally, so we don't wrap this
                          // whole thing in tracing
                         ResultIterator resultIterator = plan.iterator();
-                        if (logger.isDebugEnabled()) {
+                        if (LOGGER.isDebugEnabled()) {
                             String explainPlan = QueryUtil.getExplainPlan(resultIterator);
-                            logger.debug(LogUtil.addCustomAnnotations("Explain plan: " + explainPlan, connection));
+                            LOGGER.debug(LogUtil.addCustomAnnotations(
+                                    "Explain plan: " + explainPlan, connection));
                         }
                         StatementContext context = plan.getContext();
                         context.setQueryLogger(queryLogger);
@@ -337,8 +338,9 @@ public class PhoenixStatement implements Statement, SQLCloseable {
                     //Force update cache and retry if meta not found error occurs
                     catch (MetaDataEntityNotFoundException e) {
                         if(doRetryOnMetaNotFoundError && e.getTableName()!=null){
-                            if(logger.isDebugEnabled())
-                                logger.debug("Reloading table "+ e.getTableName()+" data from server");
+                            if(LOGGER.isDebugEnabled())
+                                LOGGER.debug("Reloading table "
+                                        + e.getTableName()+" data from server");
                             if(new MetaDataClient(connection).updateCache(connection.getTenantId(),
                                 e.getSchemaName(), e.getTableName(), true).wasUpdated()){
                                 //TODO we can log retry count and error for debugging in LOG table
@@ -423,8 +425,9 @@ public class PhoenixStatement implements Statement, SQLCloseable {
                             //Force update cache and retry if meta not found error occurs
                             catch (MetaDataEntityNotFoundException e) {
                                 if(doRetryOnMetaNotFoundError && e.getTableName()!=null){
-                                    if(logger.isDebugEnabled())
-                                        logger.debug("Reloading table "+ e.getTableName()+" data from server");
+                                    if(LOGGER.isDebugEnabled())
+                                        LOGGER.debug("Reloading table "+ e.getTableName()
+                                                +" data from server");
                                     if(new MetaDataClient(connection).updateCache(connection.getTenantId(),
                                         e.getSchemaName(), e.getTableName(), true).wasUpdated()){
                                         return executeMutation(stmt, false);
@@ -1753,8 +1756,8 @@ public class PhoenixStatement implements Statement, SQLCloseable {
     }
 
     public MutationPlan compileMutation(String sql) throws SQLException {
-        if (logger.isDebugEnabled()) {
-            logger.debug(LogUtil.addCustomAnnotations("Execute update: " + sql, connection));
+        if (LOGGER.isDebugEnabled()) {
+            LOGGER.debug(LogUtil.addCustomAnnotations("Execute update: " + sql, connection));
         }
         CompilableStatement stmt = parseStatement(sql);
         return compileMutation(stmt, sql);
@@ -1782,8 +1785,9 @@ public class PhoenixStatement implements Statement, SQLCloseable {
     
     @Override
     public ResultSet executeQuery(String sql) throws SQLException {
-        if (logger.isDebugEnabled()) {
-            logger.debug(LogUtil.addCustomAnnotations("Execute query: " + sql, connection));
+        if (LOGGER.isDebugEnabled()) {
+            LOGGER.debug(LogUtil.addCustomAnnotations(
+                    "Execute query: " + sql, connection));
         }
         
         CompilableStatement stmt = parseStatement(sql);
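
The executeQuery/executeMutation hunks above keep the existing behaviour of reloading table metadata and retrying once when a MetaDataEntityNotFoundException indicates a stale client cache. A generic sketch of that retry-once shape; the exception type and refresh hook below are placeholders, not Phoenix APIs:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class RetryOnStaleMetadataExample {
        private static final Logger LOGGER = LoggerFactory.getLogger(RetryOnStaleMetadataExample.class);

        interface Action<T> { T run() throws StaleMetadataException; }

        static class StaleMetadataException extends Exception {
            final String tableName;
            StaleMetadataException(String tableName) { this.tableName = tableName; }
        }

        <T> T execute(Action<T> action, Runnable refreshCache) throws StaleMetadataException {
            try {
                return action.run();
            } catch (StaleMetadataException e) {
                if (LOGGER.isDebugEnabled()) {
                    LOGGER.debug("Reloading table " + e.tableName + " data from server");
                }
                refreshCache.run();  // refresh the client-side metadata cache
                return action.run(); // retry exactly once after refreshing
            }
        }
    }
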
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/log/QueryLogger.java b/phoenix-core/src/main/java/org/apache/phoenix/log/QueryLogger.java
index 7433f6a..27d4ba4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/log/QueryLogger.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/log/QueryLogger.java
@@ -41,7 +41,7 @@ public class QueryLogger {
     private LogLevel logLevel;
     private Builder<QueryLogInfo, Object> queryLogBuilder = ImmutableMap.builder();
     private boolean isSynced;
-    private static final Logger LOGGER = LoggerFactory.getLogger(QueryLoggerDisruptor.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(QueryLogger.class);
     
     private QueryLogger(PhoenixConnection connection) {
         this.queryId = UUID.randomUUID().toString();
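
The QueryLogger change above is a genuine fix rather than a rename: the field was created with QueryLoggerDisruptor.class, so its output was attributed to the wrong logger name. A tiny sketch of why the class passed to getLogger matters; the class here is illustrative:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class LoggerNamingExample {
        // Named after this class, so log lines and per-logger level configuration
        // (e.g. log4j.logger.LoggerNamingExample=DEBUG) point at the right place.
        private static final Logger LOGGER = LoggerFactory.getLogger(LoggerNamingExample.class);

        void emit() {
            // The logger's name reflects this class, not some unrelated one.
            LOGGER.info("logger name is {}", LOGGER.getName());
        }
    }
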
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/log/QueryLoggerDisruptor.java b/phoenix-core/src/main/java/org/apache/phoenix/log/QueryLoggerDisruptor.java
index c4f227a..1d3ebc9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/log/QueryLoggerDisruptor.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/log/QueryLoggerDisruptor.java
@@ -76,8 +76,9 @@ public class QueryLoggerDisruptor implements Closeable{
 
         final QueryLogDetailsEventHandler[] handlers = { new QueryLogDetailsEventHandler(configuration) };
         disruptor.handleEventsWith(handlers);
-        LOGGER.info("Starting  QueryLoggerDisruptor for with ringbufferSize=" + disruptor.getRingBuffer().getBufferSize()
-                + ", waitStrategy=" + waitStrategy.getClass().getSimpleName() + ", " + "exceptionHandler="
+        LOGGER.info("Starting QueryLoggerDisruptor with ringbufferSize=" +
+                disruptor.getRingBuffer().getBufferSize() + ", waitStrategy=" +
+                waitStrategy.getClass().getSimpleName() + ", exceptionHandler="
                 + errorHandler + "...");
         disruptor.start();
         
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapper.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapper.java
index cc23c43..d476e4d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapper.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapper.java
@@ -77,7 +77,8 @@ import com.google.common.collect.Lists;
 public abstract class FormatToBytesWritableMapper<RECORD> extends Mapper<LongWritable, Text, TableRowkeyPair,
         ImmutableBytesWritable> {
 
-    protected static final Logger LOGGER = LoggerFactory.getLogger(FormatToBytesWritableMapper.class);
+    protected static final Logger LOGGER =
+            LoggerFactory.getLogger(FormatToBytesWritableMapper.class);
 
     protected static final String COUNTER_GROUP_NAME = "Phoenix MapReduce Import";
 
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
index a8de1d1..784d58e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
@@ -97,7 +97,7 @@ public class MultiHfileOutputFormat extends FileOutputFormat<TableRowkeyPair, Ce
 
     public static final String DATABLOCK_ENCODING_OVERRIDE_CONF_KEY =
         "hbase.mapreduce.hfileoutputformat.datablock.encoding";
-    
+
     /* Delimiter property used to separate table name and column family */
     private static final String AT_DELIMITER = "@";
     
@@ -692,7 +692,8 @@ public class MultiHfileOutputFormat extends FileOutputFormat<TableRowkeyPair, Ce
            }
        }
     
-       LOGGER.info("Configuring " + tablesStartKeys.size() + " reduce partitions to match current region count");
+       LOGGER.info("Configuring " + tablesStartKeys.size() +
+               " reduce partitions to match current region count");
        job.setNumReduceTasks(tablesStartKeys.size());
 
        configurePartitioner(job, tablesStartKeys);
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/OrphanViewTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/OrphanViewTool.java
index 0eb1fce..bb65730 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/OrphanViewTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/OrphanViewTool.java
@@ -881,7 +881,8 @@ public class OrphanViewTool extends Configured implements Tool {
             }
             return 0;
         } catch (Exception ex) {
-            LOGGER.error("Orphan View Tool : An exception occurred " + ExceptionUtils.getMessage(ex) + " at:\n" +
+            LOGGER.error("Orphan View Tool : An exception occurred "
+                    + ExceptionUtils.getMessage(ex) + " at:\n" +
                     ExceptionUtils.getStackTrace(ex));
             return -1;
         } finally {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordReader.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordReader.java
index 6db721d..318ccfd 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordReader.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordReader.java
@@ -158,7 +158,8 @@ public class PhoenixRecordReader<T extends DBWritable> extends RecordReader<Null
 
             this.resultSet = new PhoenixResultSet(this.resultIterator, queryPlan.getProjector().cloneIfNecessary(), queryPlan.getContext());
         } catch (SQLException e) {
-            LOGGER.error(String.format(" Error [%s] initializing PhoenixRecordReader. ",e.getMessage()));
+            LOGGER.error(String.format(" Error [%s] initializing PhoenixRecordReader. ",
+                    e.getMessage()));
             Throwables.propagate(e);
         }
    }
@@ -179,7 +180,8 @@ public class PhoenixRecordReader<T extends DBWritable> extends RecordReader<Null
             value.readFields(resultSet);
             return true;
         } catch (SQLException e) {
-            LOGGER.error(String.format(" Error [%s] occurred while iterating over the resultset. ",e.getMessage()));
+            LOGGER.error(String.format(" Error [%s] occurred while iterating over the resultset. ",
+                    e.getMessage()));
             throw new RuntimeException(e);
         }
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixServerBuildIndexInputFormat.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixServerBuildIndexInputFormat.java
index 7bd30e6..1abcef4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixServerBuildIndexInputFormat.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixServerBuildIndexInputFormat.java
@@ -50,7 +50,8 @@ import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.getInde
 public class PhoenixServerBuildIndexInputFormat<T extends DBWritable> extends PhoenixInputFormat {
     QueryPlan queryPlan = null;
 
-    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixServerBuildIndexInputFormat.class);
+    private static final Logger LOGGER =
+            LoggerFactory.getLogger(PhoenixServerBuildIndexInputFormat.class);
 
     /**
      * instantiated by framework
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/RegexToKeyValueMapper.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/RegexToKeyValueMapper.java
index ce8d550..04de360 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/RegexToKeyValueMapper.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/RegexToKeyValueMapper.java
@@ -110,8 +110,9 @@ public class RegexToKeyValueMapper extends FormatToBytesWritableMapper<Map<?, ?>
 			Map<String, Object> data = new HashMap<>();
 			Matcher m = inputPattern.matcher(input);
 			if (m.groupCount() != columnInfoList.size()) {
-				LOGGER.debug(String.format("based on the regex and input, input fileds %s " +
-                        "size doesn't match the table columns %s size", m.groupCount(), columnInfoList.size()));
+				LOGGER.debug(String.format("based on the regex and input, input fields %s size " +
+                        "doesn't match the table columns %s size", m.groupCount(),
+                        columnInfoList.size()));
 				return data;
 			}
 			
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
index fe193b5..72451af 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
@@ -640,9 +640,9 @@ public class IndexTool extends Configured implements Tool {
                     int autosplitNumRegions = nOpt == null ? DEFAULT_AUTOSPLIT_NUM_REGIONS : Integer.parseInt(nOpt);
                     String rateOpt = cmdLine.getOptionValue(SPLIT_INDEX_OPTION.getOpt());
                     double samplingRate = rateOpt == null ? DEFAULT_SPLIT_SAMPLING_RATE : Double.parseDouble(rateOpt);
-                    LOGGER.info(String.format(
-                            "Will split index %s , autosplit=%s , autoSplitNumRegions=%s , samplingRate=%s",
-                            indexTable, autosplit, autosplitNumRegions, samplingRate));
+                    LOGGER.info(String.format("Will split index %s , autosplit=%s ," +
+                            " autoSplitNumRegions=%s , samplingRate=%s", indexTable,
+                            autosplit, autosplitNumRegions, samplingRate));
 
                     splitIndexTable(connection.unwrap(PhoenixConnection.class), autosplit, autosplitNumRegions, samplingRate, configuration);
                 }
@@ -664,7 +664,8 @@ public class IndexTool extends Configured implements Tool {
                 job.submit();
                 return 0;
             }
-            LOGGER.info("Running Index Build in Foreground. Waits for the build to complete. This may take a long time!.");
+            LOGGER.info("Running Index Build in Foreground. Waiting for the build to complete." +
+                    " This may take a long time!");
             boolean result = job.waitForCompletion(true);
             
             if (result) {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexPartialBuildMapper.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexPartialBuildMapper.java
index b168032..6148b6c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexPartialBuildMapper.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexPartialBuildMapper.java
@@ -59,7 +59,8 @@ import com.google.common.collect.Lists;
  */
 public class PhoenixIndexPartialBuildMapper extends TableMapper<ImmutableBytesWritable, IntWritable> {
 
-    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixIndexPartialBuildMapper.class);
+    private static final Logger LOGGER =
+            LoggerFactory.getLogger(PhoenixIndexPartialBuildMapper.class);
 
     private PhoenixConnection connection;
 
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixServerBuildIndexMapper.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixServerBuildIndexMapper.java
index 0544d02..35173bc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixServerBuildIndexMapper.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixServerBuildIndexMapper.java
@@ -43,8 +43,6 @@ import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.util.ColumnInfo;
 import org.apache.phoenix.util.PhoenixRuntime;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * Mapper that does not do much as regions servers actually build the index from the data table regions directly
@@ -52,8 +50,6 @@ import org.slf4j.LoggerFactory;
 public class PhoenixServerBuildIndexMapper extends
         Mapper<NullWritable, PhoenixIndexDBWritable, ImmutableBytesWritable, IntWritable> {
 
-    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixServerBuildIndexMapper.class);
-
     @Override
     protected void setup(final Context context) throws IOException, InterruptedException {
         super.setup(context);
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/automation/PhoenixMRJobSubmitter.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/automation/PhoenixMRJobSubmitter.java
index 954ee23..c0fdcbb 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/automation/PhoenixMRJobSubmitter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/automation/PhoenixMRJobSubmitter.java
@@ -237,7 +237,8 @@ public class PhoenixMRJobSubmitter {
 
         if (!ZKBasedMasterElectionUtil.acquireLock(zookeeperWatcher, PHOENIX_LOCKS_PARENT,
             AUTO_INDEX_BUILD_LOCK_NAME)) {
-            LOGGER.info("Some other node is already running Automated Index Build. Skipping execution!");
+            LOGGER.info("Some other node is already running Automated Index Build." +
+                    " Skipping execution!");
             return -1;
         }
         // 1) Query Phoenix SYSTEM.CATALOG table to get a list of all candidate indexes to be built
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java
index c756aae..2bcdf12 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java
@@ -377,9 +377,9 @@ public final class PhoenixConfigurationUtil {
             final Configuration configuration) {
     	List<String> selectColumnList = PhoenixConfigurationUtil.getSelectColumnNames(configuration);
         if(!selectColumnList.isEmpty()) {
-            LOGGER.info(String.format("UseSelectColumns=%s, selectColumnList.size()=%s, selectColumnList=%s "
-                    ,!selectColumnList.isEmpty(), selectColumnList.size(), Joiner.on(",").join(selectColumnList)
-                    ));
+            LOGGER.info(String.format("UseSelectColumns=%s, selectColumnList.size()=%s, " +
+                            "selectColumnList=%s ",!selectColumnList.isEmpty(),
+                    selectColumnList.size(), Joiner.on(",").join(selectColumnList)));
         }
         return selectColumnList;
     }
@@ -689,7 +689,7 @@ public final class PhoenixConfigurationUtil {
 					if (tenantId != null) {
 						tenantId = null;
 					} else {
-						BaseResultIterators.logger.warn(
+						BaseResultIterators.LOGGER.warn(
 								"Unable to find parent table \"" + parentTableName + "\" of table \""
 										+ table.getName().getString() + "\" to determine USE_STATS_FOR_PARALLELIZATION",
 								e);
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/memory/GlobalMemoryManager.java b/phoenix-core/src/main/java/org/apache/phoenix/memory/GlobalMemoryManager.java
index fe0d6d7..0095021 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/memory/GlobalMemoryManager.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/memory/GlobalMemoryManager.java
@@ -30,7 +30,7 @@ import org.slf4j.LoggerFactory;
  * @since 0.1
  */
 public class GlobalMemoryManager implements MemoryManager {
-    private static final Logger logger = LoggerFactory.getLogger(GlobalMemoryManager.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(GlobalMemoryManager.class);
 
     private final Object sync = new Object();
     private final long maxMemoryBytes;
@@ -149,7 +149,7 @@ public class GlobalMemoryManager implements MemoryManager {
         protected void finalize() throws Throwable {
             try {
                 if (size > 0) {
-                    logger.warn("Orphaned chunk of " + size + " bytes found during finalize");
+                    LOGGER.warn("Orphaned chunk of " + size + " bytes found during finalize");
                     //logger.warn("Orphaned chunk of " + size + " bytes found during finalize allocated here:\n" + stack);
                 }
                 freeMemory();
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 1ab402e..003d4d2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -267,7 +267,8 @@ import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
 
 public class ConnectionQueryServicesImpl extends DelegateQueryServices implements ConnectionQueryServices {
-    private static final Logger logger = LoggerFactory.getLogger(ConnectionQueryServicesImpl.class);
+    private static final Logger LOGGER =
+            LoggerFactory.getLogger(ConnectionQueryServicesImpl.class);
     private static final int INITIAL_CHILD_SERVICES_CAPACITY = 100;
     private static final int DEFAULT_OUT_OF_ORDER_MUTATIONS_WAIT_TIME_MS = 1000;
     private static final int TTL_FOR_MUTEX = 15 * 60; // 15min 
@@ -415,7 +416,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
             try {
                 this.queryDisruptor = new QueryLoggerDisruptor(this.config);
             } catch (SQLException e) {
-                logger.warn("Unable to initiate qeuery logging service !!");
+                LOGGER.warn("Unable to initiate qeuery logging service !!");
                 e.printStackTrace();
             }
         }
@@ -426,7 +427,8 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
         try {
             this.connection = HBaseFactoryProvider.getHConnectionFactory().createConnection(this.config);
             GLOBAL_HCONNECTIONS_COUNTER.increment();
-            logger.info("HConnection established. Stacktrace for informational purposes: " + connection + " " +  LogUtil.getCallerStackTrace());
+            LOGGER.info("HConnection established. Stacktrace for informational purposes: "
+                    + connection + " " +  LogUtil.getCallerStackTrace());
         } catch (IOException e) {
             throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ESTABLISH_CONNECTION)
             .setRootCause(e).build().buildException();
@@ -661,7 +663,9 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                             mutator.mutate(metaData);
                             break;
                         } else if (table.getSequenceNumber() >= tableSeqNum) {
-                            logger.warn("Attempt to cache older version of " + tableName + ": current= " + table.getSequenceNumber() + ", new=" + tableSeqNum);
+                            LOGGER.warn("Attempt to cache older version of " + tableName +
+                                    ": current= " + table.getSequenceNumber() +
+                                    ", new=" + tableSeqNum);
                             break;
                         }
                     } catch (TableNotFoundException e) {
@@ -670,7 +674,9 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                     // We waited long enough - just remove the table from the cache
                     // and the next time it's used it'll be pulled over from the server.
                     if (waitTime <= 0) {
-                        logger.warn("Unable to update meta data repo within " + (DEFAULT_OUT_OF_ORDER_MUTATIONS_WAIT_TIME_MS/1000) + " seconds for " + tableName);
+                        LOGGER.warn("Unable to update meta data repo within " +
+                                (DEFAULT_OUT_OF_ORDER_MUTATIONS_WAIT_TIME_MS/1000) +
+                                " seconds for " + tableName);
                         // There will never be a parentTableName here, as that would only
                         // be non null for an index an we never add/remove columns from an index.
                         metaData.removeTable(tenantId, tableName, null, HConstants.LATEST_TIMESTAMP);
@@ -1038,8 +1044,8 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                     + watch.elapsedMillis() + " ms "
                     + (numTries > 1 ? ("after trying " + numTries + (numTries > 1 ? "times." : "time.")) : ""));
         } else {
-            if (logger.isDebugEnabled()) {
-                logger.debug("Operation "
+            if (LOGGER.isDebugEnabled()) {
+                LOGGER.debug("Operation "
                         + op.getOperationName()
                         + " completed within "
                         + watch.elapsedMillis()
@@ -1099,7 +1105,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
         try (HBaseAdmin admin = getAdmin()) {
             final String quorum = ZKConfig.getZKQuorumServersString(config);
             final String znode = this.getProps().get(HConstants.ZOOKEEPER_ZNODE_PARENT);
-            logger.debug("Found quorum: " + quorum + ":" + znode);
+            LOGGER.debug("Found quorum: " + quorum + ":" + znode);
 
             if (isMetaTable) {
                 if(SchemaUtil.isNamespaceMappingEnabled(PTableType.SYSTEM, this.getProps())) {
@@ -1356,7 +1362,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                 try {
                     ht.close();
                 } catch (IOException e) {
-                    logger.warn("Could not close HTable", e);
+                    LOGGER.warn("Could not close HTable", e);
                 }
             }
         }
@@ -2416,9 +2422,10 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
             stmt.executeUpdate();
             metaConnection.commit();
         } catch (NewerTableAlreadyExistsException e) {
-            logger.warn("Table already modified at this timestamp, so assuming column already nullable: " + columnName);
+            LOGGER.warn("Table already modified at this timestamp," +
+                    " so assuming column already nullable: " + columnName);
         } catch (SQLException e) {
-            logger.warn("Add column failed due to:" + e);
+            LOGGER.warn("Add column failed due to:" + e);
             sqlE = e;
         } finally {
             try {
@@ -2448,9 +2455,10 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
         try {
             metaConnection.createStatement().executeUpdate("ALTER TABLE " + tableName + " ADD " + (addIfNotExists ? " IF NOT EXISTS " : "") + columns );
         } catch (NewerTableAlreadyExistsException e) {
-            logger.warn("Table already modified at this timestamp, so assuming add of these columns already done: " + columns);
+            LOGGER.warn("Table already modified at this timestamp," +
+                    " so assuming add of these columns already done: " + columns);
         } catch (SQLException e) {
-            logger.warn("Add column failed due to:" + e);
+            LOGGER.warn("Add column failed due to:" + e);
             sqlE = e;
         } finally {
             try {
@@ -2554,7 +2562,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                         boolean success = false;
                         try {
                             GLOBAL_QUERY_SERVICES_COUNTER.increment();
-                            logger.info("An instance of ConnectionQueryServices was created.");
+                            LOGGER.info("An instance of ConnectionQueryServices was created.");
                             openConnection();
                             hConnectionEstablished = true;
                             boolean isDoNotUpgradePropSet = UpgradeUtil.isNoUpgradeSet(props);
@@ -2594,7 +2602,8 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                                     }
                                     if (foundAccessDeniedException) {
                                         // Pass
-                                        logger.warn("Could not check for Phoenix SYSTEM tables, assuming they exist and are properly configured");
+                                        LOGGER.warn("Could not check for Phoenix SYSTEM tables," +
+                                                " assuming they exist and are properly configured");
                                         checkClientServerCompatibility(SchemaUtil.getPhysicalName(SYSTEM_CATALOG_NAME_BYTES, getProps()).getName());
                                         success = true;
                                     } else if (!Iterables.isEmpty(Iterables.filter(Throwables.getCausalChain(e), NamespaceNotFoundException.class))) {
@@ -2627,7 +2636,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                                     upgradeSystemTables(url, props);
                                 } else {
                                     // We expect the user to manually run the "EXECUTE UPGRADE" command first.
-                                    logger.error("Upgrade is required. Must run 'EXECUTE UPGRADE' "
+                                    LOGGER.error("Upgrade is required. Must run 'EXECUTE UPGRADE' "
                                             + "before any other command");
                                 }
                             }
@@ -2681,7 +2690,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
         try {
             if(admin.tableExists(PhoenixDatabaseMetaData.SYSTEM_MUTEX_NAME) || admin.tableExists(TableName.valueOf(
                     PhoenixDatabaseMetaData.SYSTEM_SCHEMA_NAME,PhoenixDatabaseMetaData.SYSTEM_MUTEX_TABLE_NAME))) {
-                logger.debug("System mutex table already appears to exist, not creating it");
+                LOGGER.debug("System mutex table already appears to exist, not creating it");
                 return;
             }
             final TableName mutexTableName = SchemaUtil.getPhysicalTableName(
@@ -2703,6 +2712,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
             if(!Iterables.isEmpty(Iterables.filter(Throwables.getCausalChain(e), AccessDeniedException.class)) ||
                     !Iterables.isEmpty(Iterables.filter(Throwables.getCausalChain(e), org.apache.hadoop.hbase.TableNotFoundException.class))) {
                 // Ignore
+
             } else {
                 throw e;
             }
@@ -2743,7 +2753,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
         try {
             createSysMutexTableIfNotExists(hbaseAdmin);
         } catch (IOException exception) {
-            logger.error("Failed to created SYSMUTEX table. Upgrade or migration is not possible without it. Please retry.");
+            LOGGER.error("Failed to created SYSMUTEX table. Upgrade or migration is not possible without it. Please retry.");
             throw exception;
         }
     }
@@ -2853,7 +2863,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                  * column BASE_COLUMN_COUNT is already part of the meta-data schema as the
                  * signal that the server side upgrade has finished or is in progress.
                  */
-                logger.debug("No need to run 4.5 upgrade");
+                LOGGER.debug("No need to run 4.5 upgrade");
             }
             Properties p = PropertiesUtil.deepCopy(metaConnection.getClientInfo());
             p.remove(PhoenixRuntime.CURRENT_SCN_ATTRIB);
@@ -2865,18 +2875,21 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                 List<String> tablesNeedingUpgrade = UpgradeUtil
                   .getPhysicalTablesWithDescRowKey(conn);
                 if (!tablesNeedingUpgrade.isEmpty()) {
-                    logger.warn("The following tables require upgrade due to a bug causing the row key to be incorrect for descending columns and ascending BINARY columns (PHOENIX-2067 and PHOENIX-2120):\n"
+                    LOGGER.warn("The following tables require upgrade due to a bug " +
+                            "causing the row key to be incorrect for descending columns " +
+                            "and ascending BINARY columns (PHOENIX-2067 and PHOENIX-2120):\n"
                       + Joiner.on(' ').join(tablesNeedingUpgrade)
                       + "\nTo upgrade issue the \"bin/psql.py -u\" command.");
                 }
                 List<String> unsupportedTables = UpgradeUtil
                   .getPhysicalTablesWithDescVarbinaryRowKey(conn);
                 if (!unsupportedTables.isEmpty()) {
-                    logger.warn("The following tables use an unsupported VARBINARY DESC construct and need to be changed:\n"
+                    LOGGER.warn("The following tables use an unsupported " +
+                            "VARBINARY DESC construct and need to be changed:\n"
                       + Joiner.on(' ').join(unsupportedTables));
                 }
             } catch (Exception ex) {
-                logger.error(
+                LOGGER.error(
                   "Unable to determine tables requiring upgrade due to PHOENIX-2067",
                   ex);
             } finally {
@@ -3067,7 +3080,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                 if (SchemaUtil.isNamespaceMappingEnabled(PTableType.SYSTEM, ConnectionQueryServicesImpl.this.getProps())) {
                     // Try acquiring a lock in SYSMUTEX table before migrating the tables since it involves disabling the table.
                     if (acquiredMutexLock = acquireUpgradeMutex(MetaDataProtocol.MIN_SYSTEM_TABLE_MIGRATION_TIMESTAMP, mutexRowKey)) {
-                        logger.debug("Acquired lock in SYSMUTEX table for migrating SYSTEM tables to SYSTEM namespace "
+                        LOGGER.debug("Acquired lock in SYSMUTEX table for migrating SYSTEM tables to SYSTEM namespace "
                           + "and/or upgrading " + sysCatalogTableName);
                     }
                     // We will not reach here if we fail to acquire the lock, since it throws UpgradeInProgressException
@@ -3075,7 +3088,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                     // If SYSTEM tables exist, they are migrated to HBase SYSTEM namespace
                     // If they don't exist or they're already migrated, this method will return immediately
                     ensureSystemTablesMigratedToSystemNamespace();
-                    logger.debug("Migrated SYSTEM tables to SYSTEM namespace");
+                    LOGGER.debug("Migrated SYSTEM tables to SYSTEM namespace");
                     metaConnection = upgradeSystemCatalogIfRequired(metaConnection, e.getSystemCatalogTimeStamp());
                 }
             } catch (TableAlreadyExistsException e) {
@@ -3086,11 +3099,11 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                     // it means some old client is either migrating SYSTEM tables or trying to upgrade the schema of
                     // SYSCAT table and hence it should not be interrupted
                     if (acquiredMutexLock = acquireUpgradeMutex(currentServerSideTableTimeStamp, mutexRowKey)) {
-                        logger.debug("Acquired lock in SYSMUTEX table for upgrading " + sysCatalogTableName);
+                        LOGGER.debug("Acquired lock in SYSMUTEX table for upgrading " + sysCatalogTableName);
                         snapshotName = getSysCatalogSnapshotName(currentServerSideTableTimeStamp);
                         createSnapshot(snapshotName, sysCatalogTableName);
                         snapshotCreated = true;
-                        logger.debug("Created snapshot for SYSCAT");
+                        LOGGER.debug("Created snapshot for SYSCAT");
                     }
                     // We will not reach here if we fail to acquire the lock, since it throws UpgradeInProgressException
                 }
@@ -3227,7 +3240,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                         try {
                             releaseUpgradeMutex(mutexRowKey);
                         } catch (IOException e) {
-                            logger.warn("Release of upgrade mutex failed ", e);
+                            LOGGER.warn("Release of upgrade mutex failed ", e);
                         }
                     }
                 }
@@ -3321,7 +3334,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
         try {
             admin = getAdmin();
             admin.snapshot(snapshotName, tableName);
-            logger.info("Successfully created snapshot " + snapshotName + " for "
+            LOGGER.info("Successfully created snapshot " + snapshotName + " for "
                     + tableName);
         } catch (Exception e) {
             sqlE = new SQLException(e);
@@ -3353,14 +3366,14 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
             SQLException sqlE = null;
             HBaseAdmin admin = null;
             try {
-                logger.warn("Starting restore of " + tableName + " using snapshot "
+                LOGGER.warn("Starting restore of " + tableName + " using snapshot "
                         + snapshotName + " because upgrade failed");
                 admin = getAdmin();
                 admin.disableTable(tableName);
                 tableDisabled = true;
                 admin.restoreSnapshot(snapshotName);
                 snapshotRestored = true;
-                logger.warn("Successfully restored " + tableName + " using snapshot "
+                LOGGER.warn("Successfully restored " + tableName + " using snapshot "
                         + snapshotName);
             } catch (Exception e) {
                 sqlE = new SQLException(e);
@@ -3369,10 +3382,10 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                     try {
                         admin.enableTable(tableName);
                         if (snapshotRestored) {
-                            logger.warn("Successfully restored and enabled " + tableName + " using snapshot "
+                            LOGGER.warn("Successfully restored and enabled " + tableName + " using snapshot "
                                     + snapshotName);
                         } else {
-                            logger.warn("Successfully enabled " + tableName + " after restoring using snapshot "
+                            LOGGER.warn("Successfully enabled " + tableName + " after restoring using snapshot "
                                     + snapshotName + " failed. ");
                         }
                     } catch (Exception e1) {
@@ -3382,7 +3395,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                         } else {
                             sqlE.setNextException(enableTableEx);
                         }
-                        logger.error("Failure in enabling "
+                        LOGGER.error("Failure in enabling "
                                 + tableName
                                 + (snapshotRestored ? " after successfully restoring using snapshot"
                                         + snapshotName
@@ -3420,11 +3433,11 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
             if (tableNames.size() == 0) { return; }
             // Try to move any remaining tables matching "SYSTEM\..*" into "SYSTEM:"
             if (tableNames.size() > 5) {
-                logger.warn("Expected 5 system tables but found " + tableNames.size() + ":" + tableNames);
+                LOGGER.warn("Expected 5 system tables but found " + tableNames.size() + ":" + tableNames);
             }
 
             // Handle the upgrade of SYSMUTEX table separately since it doesn't have any entries in SYSCAT
-            logger.info("Migrating SYSTEM.MUTEX table to SYSTEM namespace.");
+            LOGGER.info("Migrating SYSTEM.MUTEX table to SYSTEM namespace.");
             String sysMutexSrcTableName = PhoenixDatabaseMetaData.SYSTEM_MUTEX_NAME;
             String sysMutexDestTableName = SchemaUtil.getPhysicalName(sysMutexSrcTableName.getBytes(), this.getProps()).getNameAsString();
             UpgradeUtil.mapTableToNamespace(admin, sysMutexSrcTableName, sysMutexDestTableName, PTableType.SYSTEM);
@@ -3435,7 +3448,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
             metatable = getTable(mappedSystemTable);
             if (tableNames.contains(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME)) {
                 if (!admin.tableExists(mappedSystemTable)) {
-                    logger.info("Migrating SYSTEM.CATALOG table to SYSTEM namespace.");
+                    LOGGER.info("Migrating SYSTEM.CATALOG table to SYSTEM namespace.");
                     // Actual migration of SYSCAT table
                     UpgradeUtil.mapTableToNamespace(admin, metatable,
                             PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME, this.getProps(), null, PTableType.SYSTEM,
@@ -3448,7 +3461,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                 tableNames.remove(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME);
             }
             for (TableName table : tableNames) {
-                logger.info(String.format("Migrating %s table to SYSTEM namespace.", table.getNameAsString()));
+                LOGGER.info(String.format("Migrating %s table to SYSTEM namespace.", table.getNameAsString()));
                 UpgradeUtil.mapTableToNamespace(admin, metatable, table.getNameAsString(), this.getProps(), null, PTableType.SYSTEM,
                         null);
                 ConnectionQueryServicesImpl.this.removeTable(null, table.getNameAsString(), null,
@@ -3528,7 +3541,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
             put.addColumn(family, qualifier, newValue);
             released = sysMutexTable.checkAndPut(mutexRowKey, family, qualifier, expectedValue, put);
         } catch (Exception e) {
-            logger.warn("Release of upgrade mutex failed", e);
+            LOGGER.warn("Release of upgrade mutex failed", e);
         }
         return released;
     }
@@ -3581,7 +3594,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                             " B.COLUMN_FAMILY IS NOT NULL AND\n" +
                             " A.IMMUTABLE_ROWS = TRUE");
         } catch (SQLException e) {
-            logger.warn("exception during upgrading stats table:" + e);
+            LOGGER.warn("exception during upgrading stats table:" + e);
             sqlE = e;
         } finally {
             try {
@@ -3622,7 +3635,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                     "UPSERT INTO SYSTEM.CATALOG(TENANT_ID, TABLE_SCHEM, TABLE_NAME, COLUMN_NAME, COLUMN_FAMILY, DISABLE_WAL)\n" +
                             "VALUES (NULL, '" + QueryConstants.SYSTEM_SCHEMA_NAME + "','" + PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE + "', NULL, NULL, FALSE)");
         } catch (SQLException e) {
-            logger.warn("exception during upgrading stats table:" + e);
+            LOGGER.warn("exception during upgrading stats table:" + e);
             sqlE = e;
         } finally {
             try {
@@ -3657,7 +3670,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                     + "' AND " + PhoenixDatabaseMetaData.TABLE_SCHEM + "='"
                     + PhoenixDatabaseMetaData.SYSTEM_SCHEMA_NAME + "'");
         } catch (SQLException e) {
-            logger.warn("exception during upgrading stats table:" + e);
+            LOGGER.warn("exception during upgrading stats table:" + e);
             sqlE = e;
         } finally {
             try {
@@ -4480,7 +4493,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                                     // add it back at the tail
                                     scannerQueue.offer(new WeakReference<TableResultIterator>(
                                             scanningItr));
-                                    logger.info("Lease renewed for scanner: " + scanningItr);
+                                    LOGGER.info("Lease renewed for scanner: " + scanningItr);
                                     break;
                                 // Scanner not initialized probably because next() hasn't been called on it yet. Enqueue it back to attempt lease renewal later.
                                 case UNINITIALIZED:
@@ -4502,7 +4515,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                             numScanners--;
                         }
                         if (renewed > 0) {
-                            logger.info("Renewed leases for " + renewed + " scanner/s in "
+                            LOGGER.info("Renewed leases for " + renewed + " scanner/s in "
                                     + (System.currentTimeMillis() - start) + " ms ");
                         }
                         connectionsQueue.offer(connRef);
@@ -4510,7 +4523,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                     numConnections--;
                 }
             } catch (InternalRenewLeaseTaskException e) {
-                logger.error("Exception thrown when renewing lease. Draining the queue of scanners ", e);
+                LOGGER.error("Exception thrown when renewing lease. Draining the queue of scanners ", e);
                 // clear up the queue since the task is about to be unscheduled.
                 connectionsQueue.clear();
                 // throw an exception since we want the task execution to be suppressed because we just encountered an
@@ -4518,13 +4531,13 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                 throw new RuntimeException(e);
             } catch (InterruptedException e) {
                 Thread.currentThread().interrupt(); // restore the interrupt status
-                logger.error("Thread interrupted when renewing lease.", e);
+                LOGGER.error("Thread interrupted when renewing lease.", e);
             } catch (Exception e) {
-                logger.error("Exception thrown when renewing lease ", e);
+                LOGGER.error("Exception thrown when renewing lease ", e);
                 // don't drain the queue and swallow the exception in this case since we don't want the task
                 // execution to be suppressed because renewing lease of a scanner failed.
             } catch (Throwable e) {
-                logger.error("Exception thrown when renewing lease. Draining the queue of scanners ", e);
+                LOGGER.error("Exception thrown when renewing lease. Draining the queue of scanners ", e);
                 connectionsQueue.clear(); // clear up the queue since the task is about to be unscheduled.
                 throw new RuntimeException(e);
             }
@@ -4647,7 +4660,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
         try (HBaseAdmin admin = getAdmin()) {
             final String quorum = ZKConfig.getZKQuorumServersString(config);
             final String znode = this.props.get(HConstants.ZOOKEEPER_ZNODE_PARENT);
-            logger.debug("Found quorum: " + quorum + ":" + znode);
+            LOGGER.debug("Found quorum: " + quorum + ":" + znode);
             boolean nameSpaceExists = true;
             try {
                 admin.getNamespaceDescriptor(schemaName);
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index 3dead42..6d39c25 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -256,7 +256,7 @@ import com.google.common.collect.Sets;
 import com.google.common.primitives.Ints;
 
 public class MetaDataClient {
-    private static final Logger logger = LoggerFactory.getLogger(MetaDataClient.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(MetaDataClient.class);
 
     private static final ParseNodeFactory FACTORY = new ParseNodeFactory();
     private static final String SET_ASYNC_CREATED_DATE =
@@ -1685,7 +1685,7 @@ public class MetaDataClient {
             return new MutationState(0, 0, connection);
         }
 
-        if (logger.isInfoEnabled()) logger.info("Created index " + table.getName().getString() + " at " + table.getTimeStamp());
+        if (LOGGER.isInfoEnabled()) LOGGER.info("Created index " + table.getName().getString() + " at " + table.getTimeStamp());
         boolean asyncIndexBuildEnabled = connection.getQueryServices().getProps().getBoolean(
                 QueryServices.INDEX_ASYNC_BUILD_ENABLED,
                 QueryServicesOptions.DEFAULT_INDEX_ASYNC_BUILD_ENABLED);
@@ -3070,8 +3070,8 @@ public class MetaDataClient {
             break;
         case CONCURRENT_TABLE_MUTATION:
             addTableToCache(result);
-            if (logger.isDebugEnabled()) {
-                logger.debug(LogUtil.addCustomAnnotations("CONCURRENT_TABLE_MUTATION for table " + SchemaUtil.getTableName(schemaName, tableName), connection));
+            if (LOGGER.isDebugEnabled()) {
+                LOGGER.debug(LogUtil.addCustomAnnotations("CONCURRENT_TABLE_MUTATION for table " + SchemaUtil.getTableName(schemaName, tableName), connection));
             }
             throw new ConcurrentTableMutationException(schemaName, tableName);
         case NEWER_TABLE_FOUND:
@@ -3289,8 +3289,8 @@ public class MetaDataClient {
                 int nNewColumns = numCols;
                 List<Mutation> tableMetaData = Lists.newArrayListWithExpectedSize((1 + nNewColumns) * (nIndexes + 1));
                 List<Mutation> columnMetaData = Lists.newArrayListWithExpectedSize(nNewColumns * (nIndexes + 1));
-                if (logger.isDebugEnabled()) {
-                    logger.debug(LogUtil.addCustomAnnotations("Resolved table to " + table.getName().getString() + " with seqNum " + table.getSequenceNumber() + " at timestamp " + table.getTimeStamp() + " with " + table.getColumns().size() + " columns: " + table.getColumns(), connection));
+                if (LOGGER.isDebugEnabled()) {
+                    LOGGER.debug(LogUtil.addCustomAnnotations("Resolved table to " + table.getName().getString() + " with seqNum " + table.getSequenceNumber() + " at timestamp " + table.getTimeStamp() + " with " + table.getColumns().size() + " columns: " + table.getColumns(), connection));
                 }
 
                 int position = table.getColumns().size();
@@ -3586,8 +3586,8 @@ public class MetaDataClient {
                     if (retried) {
                         throw e;
                     }
-                    if (logger.isDebugEnabled()) {
-                        logger.debug(LogUtil.addCustomAnnotations("Caught ConcurrentTableMutationException for table " + SchemaUtil.getTableName(schemaName, tableName) + ". Will try again...", connection));
+                    if (LOGGER.isDebugEnabled()) {
+                        LOGGER.debug(LogUtil.addCustomAnnotations("Caught ConcurrentTableMutationException for table " + SchemaUtil.getTableName(schemaName, tableName) + ". Will try again...", connection));
                     }
                     retried = true;
                 }
@@ -4176,7 +4176,7 @@ public class MetaDataClient {
      */
     public MutationState changePermissions(ChangePermsStatement changePermsStatement) throws SQLException {
 
-        logger.info(changePermsStatement.toString());
+        LOGGER.info(changePermsStatement.toString());
 
         try(HBaseAdmin admin = connection.getQueryServices().getAdmin()) {
             ClusterConnection clusterConnection = (ClusterConnection) admin.getConnection();
@@ -4247,7 +4247,7 @@ public class MetaDataClient {
                 inconsistentTables.add(indexTable);
                 continue;
             }
-            logger.info("Updating permissions for Index Table: " +
+            LOGGER.info("Updating permissions for Index Table: " +
                     indexTable.getName() + " Base Table: " + inputTable.getName());
             tableName = SchemaUtil.getPhysicalTableName(indexTable.getPhysicalName().getBytes(), indexTable.isNamespaceMapped());
             changePermsOnTable(clusterConnection, changePermsStatement, tableName);
@@ -4255,7 +4255,7 @@ public class MetaDataClient {
 
         if(schemaInconsistency) {
             for(PTable table : inconsistentTables) {
-                logger.error("Fail to propagate permissions to Index Table: " + table.getName());
+                LOGGER.error("Fail to propagate permissions to Index Table: " + table.getName());
             }
             throw new TablesNotInSyncException(inputTable.getTableName().getString(),
                     inconsistentTables.get(0).getTableName().getString(), "Namespace properties");
@@ -4266,13 +4266,13 @@ public class MetaDataClient {
         tableName = org.apache.hadoop.hbase.TableName.valueOf(viewIndexTableBytes);
         boolean viewIndexTableExists = admin.tableExists(tableName);
         if(viewIndexTableExists) {
-            logger.info("Updating permissions for View Index Table: " +
+            LOGGER.info("Updating permissions for View Index Table: " +
                     Bytes.toString(viewIndexTableBytes) + " Base Table: " + inputTable.getName());
             changePermsOnTable(clusterConnection, changePermsStatement, tableName);
         } else {
             if(inputTable.isMultiTenant()) {
-                logger.error("View Index Table not found for MultiTenant Table: " + inputTable.getName());
-                logger.error("Fail to propagate permissions to view Index Table: " + tableName.getNameAsString());
+                LOGGER.error("View Index Table not found for MultiTenant Table: " + inputTable.getName());
+                LOGGER.error("Fail to propagate permissions to view Index Table: " + tableName.getNameAsString());
                 throw new TablesNotInSyncException(inputTable.getTableName().getString(),
                         Bytes.toString(viewIndexTableBytes), " View Index table should exist for MultiTenant tables");
             }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
index 3919a7c..a8a42a9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
@@ -67,7 +67,6 @@ import com.google.common.collect.Maps;
  * A default implementation of the Statistics tracker that helps to collect stats like min key, max key and guideposts.
  */
 class DefaultStatisticsCollector implements StatisticsCollector {
-    private static final Logger logger = LoggerFactory.getLogger(DefaultStatisticsCollector.class);
     private final Map<ImmutableBytesPtr, Pair<Long, GuidePostsInfoBuilder>> guidePostsInfoWriterMap = Maps.newHashMap();
     private StatisticsWriter statsWriter;
     private final Pair<Long, GuidePostsInfoBuilder> cachedGuidePosts;
@@ -222,12 +221,12 @@ class DefaultStatisticsCollector implements StatisticsCollector {
         try {
             ArrayList<Mutation> mutations = new ArrayList<Mutation>();
             writeStatistics(region, true, mutations, EnvironmentEdgeManager.currentTimeMillis(), scan);
-            if (logger.isDebugEnabled()) {
-                logger.debug("Committing new stats for the region " + region.getRegionInfo());
+            if (LOGGER.isDebugEnabled()) {
+                LOGGER.debug("Committing new stats for the region " + region.getRegionInfo());
             }
             commitStats(mutations);
         } catch (IOException e) {
-            logger.error("Unable to commit new stats", e);
+            LOGGER.error("Unable to commit new stats", e);
         }
     }
 
@@ -259,13 +258,13 @@ class DefaultStatisticsCollector implements StatisticsCollector {
             }
             for (ImmutableBytesPtr fam : fams) {
                 if (delete) {
-                    if (logger.isDebugEnabled()) {
-                        logger.debug("Deleting the stats for the region " + region.getRegionInfo());
+                    if (LOGGER.isDebugEnabled()) {
+                        LOGGER.debug("Deleting the stats for the region " + region.getRegionInfo());
                     }
                     statsWriter.deleteStatsForRegion(region, this, fam, mutations);
                 }
-                if (logger.isDebugEnabled()) {
-                    logger.debug("Adding new stats for the region " + region.getRegionInfo());
+                if (LOGGER.isDebugEnabled()) {
+                    LOGGER.debug("Adding new stats for the region " + region.getRegionInfo());
                 }
                 // If we've disabled stats, don't write any, just delete them
                 if (this.guidePostDepth > 0) {
@@ -273,7 +272,7 @@ class DefaultStatisticsCollector implements StatisticsCollector {
                 }
             }
         } catch (IOException e) {
-            logger.error("Failed to update statistics table!", e);
+            LOGGER.error("Failed to update statistics table!", e);
             throw e;
         }
     }
@@ -348,8 +347,8 @@ class DefaultStatisticsCollector implements StatisticsCollector {
     public InternalScanner createCompactionScanner(RegionCoprocessorEnvironment env, Store store,
             InternalScanner s) throws IOException {
         // See if this is for Major compaction
-        if (logger.isDebugEnabled()) {
-            logger.debug("Compaction scanner created for stats");
+        if (LOGGER.isDebugEnabled()) {
+            LOGGER.debug("Compaction scanner created for stats");
         }
         ImmutableBytesPtr cfKey = new ImmutableBytesPtr(store.getFamily().getName());
         // Potentially perform a cross region server get in order to use the correct guide posts
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java
index f0b7998..d9239ec 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java
@@ -150,23 +150,27 @@ public class StatisticsScanner implements InternalScanner {
                 ArrayList<Mutation> mutations = new ArrayList<Mutation>();
 
                 if (LOGGER.isDebugEnabled()) {
-                    LOGGER.debug("Deleting the stats for the region " + regionInfo.getRegionNameAsString()
+                    LOGGER.debug("Deleting the stats for the region "
+                            + regionInfo.getRegionNameAsString()
                             + " as part of major compaction");
                 }
                 getStatisticsWriter().deleteStatsForRegion(region, tracker, family, mutations);
                 if (LOGGER.isDebugEnabled()) {
-                    LOGGER.debug("Adding new stats for the region " + regionInfo.getRegionNameAsString()
+                    LOGGER.debug("Adding new stats for the region " +
+                            regionInfo.getRegionNameAsString()
                             + " as part of major compaction");
                 }
                 getStatisticsWriter().addStats(tracker, family, mutations);
                 if (LOGGER.isDebugEnabled()) {
-                    LOGGER.debug("Committing new stats for the region " + regionInfo.getRegionNameAsString()
+                    LOGGER.debug("Committing new stats for the region " +
+                            regionInfo.getRegionNameAsString()
                             + " as part of major compaction");
                 }
                 getStatisticsWriter().commitStats(mutations, tracker);
             } catch (IOException e) {
                 if (getRegionServerServices().isStopping() || getRegionServerServices().isStopped()) {
-                    LOGGER.debug("Ignoring error updating statistics because region is closing/closed");
+                    LOGGER.debug(
+                            "Ignoring error updating statistics because region is closing/closed");
                 } else {
                     LOGGER.error("Failed to update statistics table!", e);
                     toThrow = e;
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceReader.java b/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceReader.java
index 88cc642..5d8d844 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceReader.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceReader.java
@@ -222,8 +222,8 @@ public class TraceReader {
             }
         }
         if (cols.size() < count) {
-            LOGGER.error(addCustomAnnotations("Missing tags! Expected " + count + ", but only got " + cols.size()
-                    + " tags from rquest " + request));
+            LOGGER.error(addCustomAnnotations("Missing tags! Expected " + count +
+                    ", but only got " + cols.size() + " tags from rquest " + request));
         }
         return cols;
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
index d235d4b..a04fed0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
@@ -53,7 +53,6 @@ public class OmidTransactionContext implements PhoenixTransactionContext {
     @Override
     public void commitDDLFence(PTable dataTable) throws SQLException {
         // TODO Auto-generated method stub
-
     }
 
     @Override
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
index 5b3c9b8..90d96d6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
@@ -63,7 +63,7 @@ import org.slf4j.LoggerFactory;
 import com.google.common.collect.Lists;
 
 public class TephraTransactionContext implements PhoenixTransactionContext {
-    private static final Logger logger = LoggerFactory.getLogger(TephraTransactionContext.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(TephraTransactionContext.class);
     private static final TransactionCodec CODEC = new TransactionCodec();
 
     private final List<TransactionAware> txAwares;
@@ -207,8 +207,8 @@ public class TephraTransactionContext implements PhoenixTransactionContext {
                     txServiceClient);
             fenceWait.await(10000, TimeUnit.MILLISECONDS);
 
-            if (logger.isInfoEnabled()) {
-                logger.info("Added write fence at ~"
+            if (LOGGER.isInfoEnabled()) {
+                LOGGER.info("Added write fence at ~"
                         + getCurrentTransaction().getReadPointer());
             }
         } catch (InterruptedException e) {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/EquiDepthStreamHistogram.java b/phoenix-core/src/main/java/org/apache/phoenix/util/EquiDepthStreamHistogram.java
index d042fac..9cadab9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/EquiDepthStreamHistogram.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/EquiDepthStreamHistogram.java
@@ -176,7 +176,8 @@ public class EquiDepthStreamHistogram {
             smallerBar.incrementCount(countToDistribute);
         }
         if (LOGGER.isTraceEnabled()) {
-            LOGGER.trace(String.format("Split orig=%s , newLeft=%s , newRight=%s", origBar, newLeft, newRight));
+            LOGGER.trace(String.format("Split orig=%s , newLeft=%s , newRight=%s",
+                    origBar, newLeft, newRight));
         }
         bars.remove(origBar);
         bars.add(newLeft);
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
index 1dda818..0c1f365 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
@@ -78,7 +78,7 @@ import com.google.protobuf.ServiceException;
 
 
 public class MetaDataUtil {
-    private static final Logger logger = LoggerFactory.getLogger(MetaDataUtil.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(MetaDataUtil.class);
   
     public static final String VIEW_INDEX_TABLE_PREFIX = "_IDX_";
     public static final String LOCAL_INDEX_TABLE_PREFIX = "_LOCAL_IDX_";
@@ -550,15 +550,15 @@ public class MetaDataUtil {
                     admin.getRegionInfo(null, request);
                 } catch (ServiceException e) {
                     IOException ie = ProtobufUtil.getRemoteException(e);
-                    logger.debug("Region " + loc.getRegionInfo().getEncodedName() + " isn't online due to:" + ie);
+                    LOGGER.debug("Region " + loc.getRegionInfo().getEncodedName() + " isn't online due to:" + ie);
                     return false;
                 } catch (RemoteException e) {
-                    logger.debug("Cannot get region " + loc.getRegionInfo().getEncodedName() + " info due to error:" + e);
+                    LOGGER.debug("Cannot get region " + loc.getRegionInfo().getEncodedName() + " info due to error:" + e);
                     return false;
                 }
             }
         } catch (IOException ex) {
-            logger.warn("tableRegionsOnline failed due to:" + ex);
+            LOGGER.warn("tableRegionsOnline failed due to:" + ex);
             return false;
         } finally {
             if (hcon != null) {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ReadOnlyProps.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ReadOnlyProps.java
index d6950a2..fe5d045 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ReadOnlyProps.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ReadOnlyProps.java
@@ -43,7 +43,7 @@ import com.google.common.collect.Maps;
  *
  */
 public class ReadOnlyProps implements Iterable<Entry<String, String>> {
-    private static final Logger logger = LoggerFactory.getLogger(ReadOnlyProps.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(ReadOnlyProps.class);
     public static final ReadOnlyProps EMPTY_PROPS = new ReadOnlyProps();
     @Nonnull
     private final Map<String, String> props;
@@ -314,7 +314,7 @@ public class ReadOnlyProps implements Iterable<Entry<String, String>> {
             String value = entry.getValue().toString();
             String oldValue = props.get(key);
             if (!Objects.equal(oldValue, value)) {
-                if (logger.isDebugEnabled()) logger.debug("Creating new ReadOnlyProps due to " + key + " with " + oldValue + "!=" + value);
+                if (LOGGER.isDebugEnabled()) LOGGER.debug("Creating new ReadOnlyProps due to " + key + " with " + oldValue + "!=" + value);
                 return new ReadOnlyProps(this, overrides);
             }
         }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
index b127408..9f52cf0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
@@ -129,7 +129,7 @@ import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
 
 public class UpgradeUtil {
-    private static final Logger logger = LoggerFactory.getLogger(UpgradeUtil.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(UpgradeUtil.class);
     private static final byte[] SEQ_PREFIX_BYTES = ByteUtil.concat(QueryConstants.SEPARATOR_BYTE_ARRAY, Bytes.toBytes("_SEQ_"));
     public static final byte[] UPGRADE_TO_4_7_COLUMN_NAME = Bytes.toBytes("UPGRADE_TO_4_7");
     /**
@@ -250,17 +250,17 @@ public class UpgradeUtil {
                     }
                 }
                 if (sizeBytes >= batchSizeBytes) {
-                    logger.info("Committing bactch of temp rows");
+                    LOGGER.info("Committing bactch of temp rows");
                     target.batch(mutations);
                     mutations.clear();
                     sizeBytes = 0;
                 }
             }
             if (!mutations.isEmpty()) {
-                logger.info("Committing last bactch of temp rows");
+                LOGGER.info("Committing last bactch of temp rows");
                 target.batch(mutations);
             }
-            logger.info("Successfully completed copy");
+            LOGGER.info("Successfully completed copy");
         } catch (SQLException e) {
             throw e;
         } catch (Exception e) {
@@ -272,12 +272,12 @@ public class UpgradeUtil {
                 try {
                     if (source != null) source.close();
                 } catch (IOException e) {
-                    logger.warn("Exception during close of source table",e);
+                    LOGGER.warn("Exception during close of source table",e);
                 } finally {
                     try {
                         if (target != null) target.close();
                     } catch (IOException e) {
-                        logger.warn("Exception during close of target table",e);
+                        LOGGER.warn("Exception during close of target table",e);
                     }
                 }
             }
@@ -292,7 +292,7 @@ public class UpgradeUtil {
             if (nSaltBuckets <= 0) {
                 return;
             }
-            logger.warn("Pre-splitting SYSTEM.SEQUENCE table " + nSaltBuckets + "-ways. This may take some time - please do not close window.");
+            LOGGER.warn("Pre-splitting SYSTEM.SEQUENCE table " + nSaltBuckets + "-ways. This may take some time - please do not close window.");
             HTableDescriptor desc = admin.getTableDescriptor(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES);
             createSequenceSnapshot(admin, conn);
             snapshotCreated = true;
@@ -302,7 +302,7 @@ public class UpgradeUtil {
             admin.createTable(desc, splitPoints);
             restoreSequenceSnapshot(admin, conn);
             success = true;
-            logger.warn("Completed pre-splitting SYSTEM.SEQUENCE table");
+            LOGGER.warn("Completed pre-splitting SYSTEM.SEQUENCE table");
         } catch (IOException e) {
             throw new SQLException("Unable to pre-split SYSTEM.SEQUENCE table", e);
         } finally {
@@ -311,14 +311,14 @@ public class UpgradeUtil {
                     try {
                         deleteSequenceSnapshot(admin);
                     } catch (SQLException e) {
-                        logger.warn("Exception while deleting SYSTEM.SEQUENCE snapshot during pre-split", e);
+                        LOGGER.warn("Exception while deleting SYSTEM.SEQUENCE snapshot during pre-split", e);
                     }
                 }
             } finally {
                 try {
                     admin.close();
                 } catch (IOException e) {
-                    logger.warn("Exception while closing admin during pre-split", e);
+                    LOGGER.warn("Exception while closing admin during pre-split", e);
                 }
             }
         }
@@ -440,8 +440,8 @@ public class UpgradeUtil {
                     createIndex.append(")");
                 }
                 createIndex.append(" ASYNC");
-                logger.info("Index creation query is : " + createIndex.toString());
-                logger.info("Dropping the index " + indexTableName
+                LOGGER.info("Index creation query is : " + createIndex.toString());
+                LOGGER.info("Dropping the index " + indexTableName
                     + " to clean up the index details from SYSTEM.CATALOG.");
                 PhoenixConnection localConnection = null;
                 if (tenantId != null) {
@@ -452,9 +452,9 @@ public class UpgradeUtil {
                     (localConnection == null ? globalConnection : localConnection).createStatement().execute(
                         "DROP INDEX IF EXISTS " + indexTableName + " ON "
                                 + SchemaUtil.getTableName(schemaName, dataTableName));
-                    logger.info("Recreating the index " + indexTableName);
+                    LOGGER.info("Recreating the index " + indexTableName);
                     (localConnection == null ? globalConnection : localConnection).createStatement().execute(createIndex.toString());
-                    logger.info("Created the index " + indexTableName);
+                    LOGGER.info("Created the index " + indexTableName);
                 } finally {
                     props.remove(PhoenixRuntime.TENANT_ID_ATTRIB);
                     if (localConnection != null) {
@@ -643,12 +643,12 @@ public class UpgradeUtil {
     }
     @SuppressWarnings("deprecation")
     public static boolean upgradeSequenceTable(PhoenixConnection conn, int nSaltBuckets, PTable oldTable) throws SQLException {
-        logger.info("Upgrading SYSTEM.SEQUENCE table");
+        LOGGER.info("Upgrading SYSTEM.SEQUENCE table");
 
         byte[] seqTableKey = SchemaUtil.getTableKey(null, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_SCHEMA, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_TABLE);
         HTableInterface sysTable = conn.getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
         try {
-            logger.info("Setting SALT_BUCKETS property of SYSTEM.SEQUENCE to " + SaltingUtil.MAX_BUCKET_NUM);
+            LOGGER.info("Setting SALT_BUCKETS property of SYSTEM.SEQUENCE to " + SaltingUtil.MAX_BUCKET_NUM);
             KeyValue saltKV = KeyValueUtil.newKeyValue(seqTableKey, 
                     PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
                     PhoenixDatabaseMetaData.SALT_BUCKETS_BYTES,
@@ -684,7 +684,7 @@ public class UpgradeUtil {
                         return true;
                     }
                 }
-                logger.info("SYSTEM.SEQUENCE table has already been upgraded");
+                LOGGER.info("SYSTEM.SEQUENCE table has already been upgraded");
                 return false;
             }
             
@@ -702,7 +702,7 @@ public class UpgradeUtil {
                 HTableInterface seqTable = conn.getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES);
                 try {
                     boolean committed = false;
-                    logger.info("Adding salt byte to all SYSTEM.SEQUENCE rows");
+                    LOGGER.info("Adding salt byte to all SYSTEM.SEQUENCE rows");
                     ResultScanner scanner = seqTable.getScanner(scan);
                     try {
                         Result result;
@@ -737,7 +737,7 @@ public class UpgradeUtil {
                                     }
                                 }
                                 if (sizeBytes >= batchSizeBytes) {
-                                    logger.info("Committing bactch of SYSTEM.SEQUENCE rows");
+                                    LOGGER.info("Committing bactch of SYSTEM.SEQUENCE rows");
                                     seqTable.batch(mutations);
                                     mutations.clear();
                                     sizeBytes = 0;
@@ -746,11 +746,11 @@ public class UpgradeUtil {
                             }
                         }
                         if (!mutations.isEmpty()) {
-                            logger.info("Committing last bactch of SYSTEM.SEQUENCE rows");
+                            LOGGER.info("Committing last bactch of SYSTEM.SEQUENCE rows");
                             seqTable.batch(mutations);
                         }
                         preSplitSequenceTable(conn, nSaltBuckets);
-                        logger.info("Successfully completed upgrade of SYSTEM.SEQUENCE");
+                        LOGGER.info("Successfully completed upgrade of SYSTEM.SEQUENCE");
                         success = true;
                         return true;
                     } catch (InterruptedException e) {
@@ -773,10 +773,10 @@ public class UpgradeUtil {
                                         sysTable.put(unsaltPut);
                                         success = true;
                                     } finally {
-                                        if (!success) logger.error("SYSTEM.SEQUENCE TABLE LEFT IN CORRUPT STATE");
+                                        if (!success) LOGGER.error("SYSTEM.SEQUENCE TABLE LEFT IN CORRUPT STATE");
                                     }
                                 } else { // We're screwed b/c we've already committed some salted sequences...
-                                    logger.error("SYSTEM.SEQUENCE TABLE LEFT IN CORRUPT STATE");
+                                    LOGGER.error("SYSTEM.SEQUENCE TABLE LEFT IN CORRUPT STATE");
                                 }
                             }
                         }
@@ -787,7 +787,7 @@ public class UpgradeUtil {
                     try {
                         seqTable.close();
                     } catch (IOException e) {
-                        logger.warn("Exception during close",e);
+                        LOGGER.warn("Exception during close",e);
                     }
                 }
             }
@@ -798,7 +798,7 @@ public class UpgradeUtil {
             try {
                 sysTable.close();
             } catch (IOException e) {
-                logger.warn("Exception during close",e);
+                LOGGER.warn("Exception during close",e);
             }
         }
         
@@ -850,7 +850,7 @@ public class UpgradeUtil {
         try {
             // Need to use own connection with max time stamp to be able to read all data from SYSTEM.CATALOG 
             metaConnection = new PhoenixConnection(oldMetaConnection, HConstants.LATEST_TIMESTAMP);
-            logger.info("Upgrading metadata to support adding columns to tables with views");
+            LOGGER.info("Upgrading metadata to support adding columns to tables with views");
             String getBaseTableAndViews = "SELECT "
                     + COLUMN_FAMILY + " AS BASE_PHYSICAL_TABLE, "
                     + TENANT_ID + ", "
@@ -1086,7 +1086,7 @@ public class UpgradeUtil {
         try {
             // Need to use own connection with max time stamp to be able to read all data from SYSTEM.CATALOG 
             metaConnection = new PhoenixConnection(oldMetaConnection, HConstants.LATEST_TIMESTAMP);
-            logger.info("Upgrading metadata to add parent to child links for views");
+            LOGGER.info("Upgrading metadata to add parent to child links for views");
             metaConnection.commit();
             //     physical table 
             //         |  
@@ -1148,7 +1148,7 @@ public class UpgradeUtil {
             }
         }
     }
-    
+
     private static void upsertBaseColumnCountInHeaderRow(PhoenixConnection metaConnection,
             String tenantId, String schemaName, String viewOrTableName, int baseColumnCount)
             throws SQLException {
@@ -1442,7 +1442,7 @@ public class UpgradeUtil {
             if (isTable && !bypassUpgrade) {
                 String msg = "Taking snapshot of physical table " + physicalName + " prior to upgrade...";
                 System.out.println(msg);
-                logger.info(msg);
+                LOGGER.info(msg);
                 admin.disableTable(physicalName);
                 admin.snapshot(snapshotName, physicalName);
                 admin.enableTable(physicalName);
@@ -1457,7 +1457,7 @@ public class UpgradeUtil {
             }
             String msg = "Starting upgrade of " + escapedTableName + tenantInfo + "...";
             System.out.println(msg);
-            logger.info(msg);
+            LOGGER.info(msg);
             ResultSet rs;
             if (!bypassUpgrade) {
                 rs = upgradeConn.createStatement().executeQuery("SELECT /*+ NO_INDEX */ count(*) FROM " + escapedTableName);
@@ -1511,9 +1511,9 @@ public class UpgradeUtil {
             success = true;
             msg = "Completed upgrade of " + escapedTableName + tenantInfo;
             System.out.println(msg);
-            logger.info(msg);
+            LOGGER.info(msg);
         } catch (Exception e) {
-            logger.error("Exception during upgrade of " + physicalName + ":", e);
+            LOGGER.error("Exception during upgrade of " + physicalName + ":", e);
         } finally {
             boolean restored = false;
             try {
@@ -1523,25 +1523,25 @@ public class UpgradeUtil {
                     admin.enableTable(physicalName);
                     String msg = "Restored snapshot of " + physicalName + " due to failure of upgrade";
                     System.out.println(msg);
-                    logger.info(msg);
+                    LOGGER.info(msg);
                 }
                 restored = true;
             } catch (Exception e) {
-                logger.warn("Unable to restoring snapshot " + snapshotName + " after failed upgrade", e);
+                LOGGER.warn("Unable to restore snapshot " + snapshotName + " after failed upgrade", e);
             } finally {
                 try {
                     if (restoreSnapshot && restored) {
                         admin.deleteSnapshot(snapshotName);
                     }
                 } catch (Exception e) {
-                    logger.warn("Unable to delete snapshot " + snapshotName + " after upgrade:", e);
+                    LOGGER.warn("Unable to delete snapshot " + snapshotName + " after upgrade:", e);
                 } finally {
                     try {
                         if (admin != null) {
                             admin.close();
                         }
                     } catch (IOException e) {
-                        logger.warn("Unable to close admin after upgrade:", e);
+                        LOGGER.warn("Unable to close admin after upgrade:", e);
                     }
                 }
             }
@@ -1753,7 +1753,7 @@ public class UpgradeUtil {
         }
         if (ts != null) {
             // Update flag to represent table is mapped to namespace
-            logger.info(String.format("Updating meta information of phoenix table '%s' to map to namespace..",
+            LOGGER.info(String.format("Updating meta information of phoenix table '%s' to map to namespace..",
                     phoenixTableName));
             Put put = new Put(tableKey, ts);
             put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.IS_NAMESPACE_MAPPED_BYTES,
@@ -1770,19 +1770,19 @@ public class UpgradeUtil {
             boolean destTableExists=admin.tableExists(destTableName);
             if (!destTableExists) {
                 String snapshotName = QueryConstants.UPGRADE_TABLE_SNAPSHOT_PREFIX + srcTableName;
-                logger.info("Disabling table " + srcTableName + " ..");
+                LOGGER.info("Disabling table " + srcTableName + " ..");
                 admin.disableTable(srcTableName);
-                logger.info(String.format("Taking snapshot %s of table %s..", snapshotName, srcTableName));
+                LOGGER.info(String.format("Taking snapshot %s of table %s..", snapshotName, srcTableName));
                 admin.snapshot(snapshotName, srcTableName);
-                logger.info(
+                LOGGER.info(
                         String.format("Restoring snapshot %s in destination table %s..", snapshotName, destTableName));
                 admin.cloneSnapshot(Bytes.toBytes(snapshotName), Bytes.toBytes(destTableName));
-                logger.info(String.format("deleting old table %s..", srcTableName));
+                LOGGER.info(String.format("deleting old table %s..", srcTableName));
                 admin.deleteTable(srcTableName);
-                logger.info(String.format("deleting snapshot %s..", snapshotName));
+                LOGGER.info(String.format("deleting snapshot %s..", snapshotName));
                 admin.deleteSnapshot(snapshotName);
             } else {
-                logger.info(String.format("Destination Table %s already exists. No migration needed.", destTableName));
+                LOGGER.info(String.format("Destination Table %s already exists. No migration needed.", destTableName));
             }
         }
     }
@@ -1823,15 +1823,15 @@ public class UpgradeUtil {
 
             if (table.isNamespaceMapped()) { throw new IllegalArgumentException("Table is already upgraded"); }
             if (!schemaName.equals("")) {
-                logger.info(String.format("Creating schema %s..", schemaName));
+                LOGGER.info(String.format("Creating schema %s..", schemaName));
                 conn.createStatement().execute("CREATE SCHEMA IF NOT EXISTS " + schemaName);
             }
             String oldPhysicalName = table.getPhysicalName().getString();
             String newPhysicalTablename = SchemaUtil.normalizeIdentifier(
                     SchemaUtil.getPhysicalTableName(oldPhysicalName, readOnlyProps).getNameAsString());
-            logger.info(String.format("Upgrading %s %s..", table.getType(), tableName));
-            logger.info(String.format("oldPhysicalName %s newPhysicalTablename %s..", oldPhysicalName, newPhysicalTablename));
-            logger.info(String.format("teanantId %s..", conn.getTenantId()));
+            LOGGER.info(String.format("Upgrading %s %s..", table.getType(), tableName));
+            LOGGER.info(String.format("oldPhysicalName %s newPhysicalTablename %s..", oldPhysicalName, newPhysicalTablename));
+            LOGGER.info(String.format("tenantId %s..", conn.getTenantId()));
             // Upgrade the data or main table
             mapTableToNamespace(admin, metatable, tableName, newPhysicalTablename, readOnlyProps,
                     PhoenixRuntime.getCurrentScn(readOnlyProps), tableName, table.getType(),conn.getTenantId());
@@ -1855,12 +1855,14 @@ public class UpgradeUtil {
                     boolean updateLink = true;
                     if (srcTableName.contains(QueryConstants.NAMESPACE_SEPARATOR)) {
                         // Skip already migrated
-                        logger.info(String.format("skipping as it seems index '%s' is already upgraded..",
+                        LOGGER.info(String.format(
+                                "skipping as it seems index '%s' is already upgraded..",
                                 index.getName()));
                         continue;
                     }
                     if (MetaDataUtil.isLocalIndex(srcTableName)) {
-                        logger.info(String.format("local index '%s' found with physical hbase table name ''..",
+                        LOGGER.info(String.format(
+                                "local index '%s' found with physical hbase table name '%s'..",
                                 index.getName(), srcTableName));
                         destTableName = Bytes
                                 .toString(MetaDataUtil.getLocalIndexPhysicalName(newPhysicalTablename.getBytes()));
@@ -1869,18 +1871,20 @@ public class UpgradeUtil {
                                 .execute(String.format("ALTER TABLE %s set " + MetaDataUtil.PARENT_TABLE_KEY + "='%s'",
                                         phoenixTableName, table.getPhysicalName()));
                     } else if (MetaDataUtil.isViewIndex(srcTableName)) {
-                        logger.info(String.format("View index '%s' found with physical hbase table name ''..",
+                        LOGGER.info(String.format(
+                                "View index '%s' found with physical hbase table name '%s'..",
                                 index.getName(), srcTableName));
                         destTableName = Bytes
                                 .toString(MetaDataUtil.getViewIndexPhysicalName(newPhysicalTablename.getBytes()));
                     } else {
-                        logger.info(String.format("Global index '%s' found with physical hbase table name ''..",
+                        LOGGER.info(String.format(
+                                "Global index '%s' found with physical hbase table name '%s'..",
                                 index.getName(), srcTableName));
                         destTableName = SchemaUtil
                                 .getPhysicalTableName(index.getPhysicalName().getString(), readOnlyProps)
                                 .getNameAsString();
                     }
-                    logger.info(String.format("Upgrading index %s..", index.getName()));
+                    LOGGER.info(String.format("Upgrading index %s..", index.getName()));
                     if (!(table.getType() == PTableType.VIEW && !MetaDataUtil.isViewIndex(srcTableName)
                             && IndexType.LOCAL != index.getIndexType())) {
                         mapTableToNamespace(admin, metatable, srcTableName, destTableName, readOnlyProps,
@@ -1888,7 +1892,7 @@ public class UpgradeUtil {
                                 conn.getTenantId());
                     }
                     if (updateLink) {
-                        logger.info(String.format("Updating link information for index '%s' ..", index.getName()));
+                        LOGGER.info(String.format("Updating link information for index '%s' ..", index.getName()));
                         updateLink(conn, srcTableName, destTableName,index.getSchemaName(),index.getTableName());
                         conn.commit();
                     }
@@ -1904,10 +1908,9 @@ public class UpgradeUtil {
                 throw new RuntimeException("Error: problem occurred during upgrade. Table is not upgraded successfully");
             }
             if (table.getType() == PTableType.VIEW) {
-                logger.info(String.format("Updating link information for view '%s' ..", table.getTableName()));
+                LOGGER.info(String.format("Updating link information for view '%s' ..", table.getTableName()));
                 updateLink(conn, oldPhysicalName, newPhysicalTablename,table.getSchemaName(),table.getTableName());
                 conn.commit();
-
                 conn.getQueryServices().clearTableFromCache(
                     conn.getTenantId() == null ? ByteUtil.EMPTY_BYTE_ARRAY : conn.getTenantId().getBytes(),
                     table.getSchemaName().getBytes(), table.getTableName().getBytes(),
@@ -1991,7 +1994,7 @@ public class UpgradeUtil {
                 conn = DriverManager.getConnection(conn.getURL(), props).unwrap(PhoenixConnection.class);
             }
             String viewName=SchemaUtil.getTableName(rs.getString(2), rs.getString(3));
-            logger.info(String.format("Upgrading view %s for tenantId %s..", viewName,tenantId));
+            LOGGER.info(String.format("Upgrading view %s for tenantId %s..", viewName,tenantId));
             UpgradeUtil.upgradeTable(conn, viewName);
             prevTenantId = tenantId;
         }
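
    For reference, a minimal sketch (not from this commit; the class and method names are hypothetical) of the slf4j idiom these changes converge on: a static final LOGGER plus parameterized {} messages, which defer string construction until the level is actually enabled and accept a trailing Throwable for stack traces.

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class UpgradeLoggingSketch {
        private static final Logger LOGGER = LoggerFactory.getLogger(UpgradeLoggingSketch.class);

        void reportUpgrade(String tableName, int saltBuckets) {
            // {} placeholders are filled only if INFO is enabled, so no wasted concatenation.
            LOGGER.info("Upgrading table {} with {} salt buckets", tableName, saltBuckets);
            try {
                // ... upgrade work ...
            } catch (RuntimeException e) {
                // A trailing Throwable argument is logged with its stack trace.
                LOGGER.error("Upgrade of table {} failed", tableName, e);
                throw e;
            }
        }
    }

    Where a call is already guarded by LOGGER.isDebugEnabled(), as in JsonUpsertExecutor below, concatenation is harmless; elsewhere the parameterized form is the cheaper default.
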
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/json/JsonUpsertExecutor.java b/phoenix-core/src/main/java/org/apache/phoenix/util/json/JsonUpsertExecutor.java
index 87e3997..e038e06 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/json/JsonUpsertExecutor.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/json/JsonUpsertExecutor.java
@@ -109,7 +109,8 @@ public class JsonUpsertExecutor extends UpsertExecutor<Map<?, ?>, Object> {
             if (LOGGER.isDebugEnabled()) {
                 // Even though this is an error we only log it with debug logging because we're notifying the
                 // listener, and it can do its own logging if needed
-                LOGGER.debug("Error on record " + record + ", fieldIndex " + fieldIndex + ", colName " + colName, e);
+                LOGGER.debug("Error on record " + record + ", fieldIndex " + fieldIndex +
+                        ", colName " + colName, e);
             }
             upsertListener.errorOnRecord(record, new Exception("fieldIndex: " + fieldIndex + ", colName " + colName, e));
         }
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java
index 2581e9e..1c528a9 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java
@@ -51,14 +51,14 @@ import org.apache.phoenix.hbase.index.exception.SingleIndexWriteFailureException
 import org.apache.phoenix.hbase.index.table.HTableInterfaceReference;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.util.ScanUtil;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import org.junit.Rule;
 import org.junit.Test;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class TestIndexWriter {
   private static final Logger LOGGER = LoggerFactory.getLogger(TestIndexWriter.class);
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
index 8c626df..33f23f6 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
@@ -182,7 +182,7 @@ public abstract class BaseTest {
     public static final String DRIVER_CLASS_NAME_ATTRIB = "phoenix.driver.class.name";
     
     private static final Map<String,String> tableDDLMap;
-    private static final Logger logger = LoggerFactory.getLogger(BaseTest.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(BaseTest.class);
     @ClassRule
     public static TemporaryFolder tmpFolder = new TemporaryFolder();
     private static final int dropTableTimeout = 300; // 5 mins should be long enough.
@@ -455,7 +455,7 @@ public abstract class BaseTest {
             try {
                 assertTrue(destroyDriver(driver));
             } catch (Throwable t) {
-                logger.error("Exception caught when destroying phoenix test driver", t);
+                LOGGER.error("Exception caught when destroying phoenix test driver", t);
             } finally {
                 driver = null;
             }
@@ -485,18 +485,18 @@ public abstract class BaseTest {
                         try {
                             u.shutdownMiniMapReduceCluster();
                         } catch (Throwable t) {
-                            logger.error(
+                            LOGGER.error(
                                 "Exception caught when shutting down mini map reduce cluster", t);
                         } finally {
                             try {
                                 u.shutdownMiniCluster();
                             } catch (Throwable t) {
-                                logger.error("Exception caught when shutting down mini cluster", t);
+                                LOGGER.error("Exception caught when shutting down mini cluster", t);
                             } finally {
                                 try {
                                     ConnectionFactory.shutdown();
                                 } finally {
-                                    logger.info(
+                                    LOGGER.info(
                                         "Time in seconds spent in shutting down mini cluster with "
                                                 + numTables + " tables: "
                                                 + (System.currentTimeMillis() - startTime) / 1000);
@@ -669,7 +669,7 @@ public abstract class BaseTest {
                     DriverManager.deregisterDriver(driver);
                 }
             } catch (Exception e) {
-                logger.warn("Unable to close registered driver: " + driver, e);
+                LOGGER.warn("Unable to close registered driver: " + driver, e);
             }
         }
         return false;
@@ -757,7 +757,7 @@ public abstract class BaseTest {
         if (TABLE_COUNTER.get() > TEARDOWN_THRESHOLD) {
             int numTables = TABLE_COUNTER.get();
             TABLE_COUNTER.set(0);
-            logger.info(
+            LOGGER.info(
                 "Shutting down mini cluster because number of tables on this mini cluster is likely greater than "
                         + TEARDOWN_THRESHOLD);
             tearDownMiniClusterAsync(numTables);
@@ -929,9 +929,9 @@ public abstract class BaseTest {
                 try {
                     conn.createStatement().executeUpdate(ddl);
                 } catch (NewerTableAlreadyExistsException ex) {
-                    logger.info("Newer table " + fullTableName + " or its delete marker exists. Ignore current deletion");
+                    LOGGER.info("Newer table " + fullTableName + " or its delete marker exists. Ignore current deletion");
                 } catch (TableNotFoundException ex) {
-                    logger.info("Table " + fullTableName + " is already deleted.");
+                    LOGGER.info("Table " + fullTableName + " is already deleted.");
                 }
             }
             rs.close();
@@ -981,7 +981,7 @@ public abstract class BaseTest {
                 lastTenantId = tenantId;
             }
 
-            logger.info("DROP SEQUENCE STATEMENT: DROP SEQUENCE " + SchemaUtil.getEscapedTableName(rs.getString(2), rs.getString(3)));
+            LOGGER.info("DROP SEQUENCE STATEMENT: DROP SEQUENCE " + SchemaUtil.getEscapedTableName(rs.getString(2), rs.getString(3)));
             conn.createStatement().execute("DROP SEQUENCE " + SchemaUtil.getEscapedTableName(rs.getString(2), rs.getString(3)));
         }
         rs.close();
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/tool/ParameterizedPhoenixCanaryToolIT.java b/phoenix-core/src/test/java/org/apache/phoenix/tool/ParameterizedPhoenixCanaryToolIT.java
index 456b038..a4efdeb 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/tool/ParameterizedPhoenixCanaryToolIT.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/tool/ParameterizedPhoenixCanaryToolIT.java
@@ -30,6 +30,8 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.ByteArrayOutputStream;
 import java.io.File;
@@ -47,8 +49,6 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import static org.apache.phoenix.tool.PhoenixCanaryTool.propFileName;
 import static org.junit.Assert.assertFalse;
@@ -58,7 +58,8 @@ import static org.junit.Assert.assertTrue;
 @Category(NeedsOwnMiniClusterTest.class)
 public class ParameterizedPhoenixCanaryToolIT extends BaseTest {
 
-	private static final Logger LOGGER = LoggerFactory.getLogger(ParameterizedPhoenixCanaryToolIT.class);
+	private static final Logger LOGGER =
+			LoggerFactory.getLogger(ParameterizedPhoenixCanaryToolIT.class);
 	private static final String stdOutSink
 			= "org.apache.phoenix.tool.PhoenixCanaryTool$StdOutSink";
 	private static final String fileOutSink
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java
index 51d6743..2b55e29 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java
@@ -49,7 +49,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 public class Pherf {
-    private static final Logger logger = LoggerFactory.getLogger(Pherf.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(Pherf.class);
     private static final Options options = new Options();
     private final PhoenixUtil phoenixUtil = PhoenixUtil.create();
 
@@ -142,8 +142,8 @@ public class Pherf {
                         properties.getProperty("pherf.default.monitorFrequency");
         properties.setProperty("pherf.default.monitorFrequency", monitorFrequency);
 
-        logger.debug("Using Monitor: " + monitor);
-        logger.debug("Monitor Frequency Ms:" + monitorFrequency);
+        LOGGER.debug("Using Monitor: " + monitor);
+        LOGGER.debug("Monitor Frequency Ms:" + monitorFrequency);
         preLoadData = command.hasOption("l");
         executeQuerySets = command.hasOption("q");
         zookeeper = command.getOptionValue("z", "localhost");
@@ -184,10 +184,10 @@ public class Pherf {
         }
         PhoenixUtil.setRowCountOverride(rowCountOverride);
         if (!thinDriver) {
-            logger.info("Using thick driver with ZooKeepers '{}'", zookeeper);
+            LOGGER.info("Using thick driver with ZooKeepers '{}'", zookeeper);
             PhoenixUtil.setZookeeper(zookeeper);
         } else {
-            logger.info("Using thin driver with PQS '{}'", queryServerUrl);
+            LOGGER.info("Using thin driver with PQS '{}'", queryServerUrl);
             // Enables the thin-driver and sets the PQS URL
             PhoenixUtil.useThinDriver(queryServerUrl);
         }
@@ -230,7 +230,7 @@ public class Pherf {
             
             // Compare results and exit  
 			if (null != compareResults) {
-				logger.info("\nStarting to compare results and exiting for " + compareResults);
+                LOGGER.info("\nStarting to compare results and exiting for " + compareResults);
 				new GoogleChartGenerator(compareResults, compareType).readAndRender();
 				return;
             }
@@ -239,7 +239,7 @@ public class Pherf {
 
             // Drop tables with PHERF schema and regex comparison
             if (null != dropPherfTablesRegEx) {
-                logger.info(
+                LOGGER.info(
                         "\nDropping existing table with PHERF namename and " + dropPherfTablesRegEx
                                 + " regex expression.");
                 phoenixUtil.deleteTables(dropPherfTablesRegEx);
@@ -253,7 +253,7 @@ public class Pherf {
             }
 
             if (applySchema) {
-                logger.info("\nStarting to apply schema...");
+                LOGGER.info("\nStarting to apply schema...");
                 SchemaReader
                         reader =
                         (schemaFile == null) ?
@@ -264,7 +264,7 @@ public class Pherf {
 
             // Schema and Data Load
             if (preLoadData) {
-                logger.info("\nStarting Data Load...");
+                LOGGER.info("\nStarting Data Load...");
                 Workload workload = new WriteWorkload(parser, generateStatistics);
                 try {
                     workloadExecutor.add(workload);
@@ -277,26 +277,26 @@ public class Pherf {
                     }
                 }
             } else {
-                logger.info(
+                LOGGER.info(
                         "\nSKIPPED: Data Load and schema creation as -l argument not specified");
             }
 
             // Execute multi-threaded query sets
             if (executeQuerySets) {
-                logger.info("\nStarting to apply Execute Queries...");
+                LOGGER.info("\nStarting to apply Execute Queries...");
 
                 workloadExecutor
                         .add(new QueryExecutor(parser, phoenixUtil, workloadExecutor, parser.getDataModels(), queryHint,
                                 isFunctional, writeRuntimeResults));
 
             } else {
-                logger.info(
+                LOGGER.info(
                         "\nSKIPPED: Multithreaded query set execution as -q argument not specified");
             }
 
             // Clean up the monitor explicitly
             if (monitorManager != null) {
-                logger.info("Run completed. Shutting down Monitor.");
+                LOGGER.info("Run completed. Shutting down Monitor.");
                 monitorManager.complete();
             }
 
@@ -305,7 +305,7 @@ public class Pherf {
 
         } finally {
             if (workloadExecutor != null) {
-                logger.info("Run completed. Shutting down thread pool.");
+                LOGGER.info("Run completed. Shutting down thread pool.");
                 workloadExecutor.shutdown();
             }
         }
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/XMLConfigParser.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/XMLConfigParser.java
index 8f2a1d8..87b4403 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/XMLConfigParser.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/XMLConfigParser.java
@@ -42,7 +42,7 @@ import org.slf4j.LoggerFactory;
 
 public class XMLConfigParser {
 
-    private static final Logger logger = LoggerFactory.getLogger(XMLConfigParser.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(XMLConfigParser.class);
     private String filePattern;
     private List<DataModel> dataModels;
     private List<Scenario> scenarios = null;
@@ -96,7 +96,7 @@ public class XMLConfigParser {
                     scenarios.add(scenario);
                 }
             } catch (JAXBException e) {
-                logger.error("Unable to parse scenario file "+path, e);
+                LOGGER.error("Unable to parse scenario file "+path, e);
                 throw e;
             }
         }
@@ -122,7 +122,7 @@ public class XMLConfigParser {
         JAXBContext jaxbContext = JAXBContext.newInstance(DataModel.class);
         Unmarshaller jaxbUnmarshaller = jaxbContext.createUnmarshaller();
         String fName = PherfConstants.RESOURCE_SCENARIO + "/" + file.getFileName().toString();
-        logger.info("Open config file: " + fName);
+        LOGGER.info("Open config file: " + fName);
         XMLStreamReader xmlReader = xif.createXMLStreamReader(
             new StreamSource(XMLConfigParser.class.getResourceAsStream(fName)));
         return (DataModel) jaxbUnmarshaller.unmarshal(xmlReader);
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultManager.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultManager.java
index 929f96a..1cf740e 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultManager.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultManager.java
@@ -31,7 +31,7 @@ import java.util.ArrayList;
 import java.util.List;
 
 public class ResultManager {
-    private static final Logger logger = LoggerFactory.getLogger(ResultManager.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(ResultManager.class);
 
     private final List<ResultHandler> resultHandlers;
     private final ResultUtil util;
@@ -153,7 +153,7 @@ public class ResultManager {
                 handler.flush();
             } catch (Exception e) {
                 e.printStackTrace();
-                logger.warn("Could not flush handler: "
+                LOGGER.warn("Could not flush handler: "
                         + handler.getResultFileName() + " : " + e.getMessage());
             }
         }
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/RulesApplier.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/RulesApplier.java
index 6d1e727..2597d0c 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/RulesApplier.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/RulesApplier.java
@@ -39,7 +39,7 @@ import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.atomic.AtomicLong;
 
 public class RulesApplier {
-    private static final Logger logger = LoggerFactory.getLogger(RulesApplier.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(RulesApplier.class);
     private static final AtomicLong COUNTER = new AtomicLong(0);
 
     // Used to bail out of random distribution if it takes too long
@@ -116,7 +116,7 @@ public class RulesApplier {
         List<Scenario> scenarios = parser.getScenarios();
         DataValue value = null;
         if (scenarios.contains(scenario)) {
-            logger.debug("We found a correct Scenario");
+            LOGGER.debug("We found a correct Scenario");
             
             Map<DataTypeMapping, List> overrideRuleMap = this.getCachedScenarioOverrides(scenario);
             
@@ -124,7 +124,7 @@ public class RulesApplier {
 	            List<Column> overrideRuleList = this.getCachedScenarioOverrides(scenario).get(phxMetaColumn.getType());
 	            
 				if (overrideRuleList != null && overrideRuleList.contains(phxMetaColumn)) {
-					logger.debug("We found a correct override column rule");
+                    LOGGER.debug("We found a correct override column rule");
 					Column columnRule = getColumnForRuleOverride(overrideRuleList, phxMetaColumn);
 					if (columnRule != null) {
 						return getDataValue(columnRule);
@@ -139,12 +139,12 @@ public class RulesApplier {
             // Make sure Column from Phoenix Metadata matches a rule column
             if (ruleList.contains(phxMetaColumn)) {
                 // Generate some random data based on this rule
-                logger.debug("We found a correct column rule");
+                LOGGER.debug("We found a correct column rule");
                 Column columnRule = getColumnForRule(ruleList, phxMetaColumn);
 
                 value = getDataValue(columnRule);
             } else {
-                logger.warn("Attempted to apply rule to data, but could not find a rule to match type:"
+                LOGGER.warn("Attempted to apply rule to data, but could not find a rule to match type:"
                                 + phxMetaColumn.getType()
                 );
             }
@@ -177,7 +177,7 @@ public class RulesApplier {
         }
 
         if ((prefix.length() >= length) && (length > 0)) {
-            logger.warn("You are attempting to generate data with a prefix (" + prefix + ") "
+            LOGGER.warn("You are attempting to generate data with a prefix (" + prefix + ") "
                     + "That is longer than expected overall field length (" + length + "). "
                     + "This will certainly lead to unexpected data values.");
         }
@@ -352,7 +352,7 @@ public class RulesApplier {
             // While it's possible to get here if you have a bunch of really small distributions,
             // It's just really unlikely. This is just a safety just so we actually pick a value.
             if(count++ == OH_SHIT_LIMIT){
-                logger.info("We generated a value from hitting our OH_SHIT_LIMIT: " + OH_SHIT_LIMIT);
+                LOGGER.info("We generated a value from hitting our OH_SHIT_LIMIT: " + OH_SHIT_LIMIT);
                 generatedDataValue = valueRule;
             }
 
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/schema/SchemaReader.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/schema/SchemaReader.java
index 5ccdaaa..53c4408 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/schema/SchemaReader.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/schema/SchemaReader.java
@@ -33,7 +33,7 @@ import java.sql.Connection;
 import java.util.Collection;
 
 public class SchemaReader {
-    private static final Logger logger = LoggerFactory.getLogger(SchemaReader.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(SchemaReader.class);
     private final PhoenixUtil pUtil;
     private Collection<Path> resourceList;
     private final String searchPattern;
@@ -64,7 +64,7 @@ public class SchemaReader {
         try {
             connection = pUtil.getConnection(null);
             for (Path file : resourceList) {
-                logger.info("\nApplying schema to file: " + file);
+                LOGGER.info("\nApplying schema to file: " + file);
                 pUtil.executeStatement(resourceToString(file), connection);
             }
         } finally {
@@ -88,12 +88,12 @@ public class SchemaReader {
     }
 
     private void read() throws Exception {
-        logger.debug("Trying to match resource pattern: " + searchPattern);
+        LOGGER.debug("Trying to match resource pattern: " + searchPattern);
         System.out.println("Trying to match resource pattern: " + searchPattern);
 
         resourceList = null;
         resourceList = resourceUtil.getResourceList(searchPattern);
-        logger.info("File resourceList Loaded: " + resourceList);
+        LOGGER.info("File resourceList Loaded: " + resourceList);
         System.out.println("File resourceList Loaded: " + resourceList);
         if (resourceList.isEmpty()) {
             throw new FileLoaderException("Could not load Schema Files");
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/PhoenixUtil.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/PhoenixUtil.java
index 72ab3e0..43ba8ba 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/PhoenixUtil.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/PhoenixUtil.java
@@ -44,7 +44,7 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_NAME;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SCHEM;
 
 public class PhoenixUtil {
-    private static final Logger logger = LoggerFactory.getLogger(PhoenixUtil.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixUtil.class);
     private static String zookeeper;
     private static int rowCountOverride = 0;
     private boolean testEnabled;
@@ -106,7 +106,7 @@ public class PhoenixUtil {
             Properties props = new Properties();
             if (null != tenantId) {
                 props.setProperty("TenantId", tenantId);
-                logger.debug("\nSetting tenantId to " + tenantId);
+                LOGGER.debug("\nSetting tenantId to " + tenantId);
             }
             String url = "jdbc:phoenix:thin:url=" + queryServerUrl + ";serialization=PROTOBUF";
             return DriverManager.getConnection(url, props);
@@ -118,7 +118,7 @@ public class PhoenixUtil {
             Properties props = new Properties();
             if (null != tenantId) {
                 props.setProperty("TenantId", tenantId);
-                logger.debug("\nSetting tenantId to " + tenantId);
+                LOGGER.debug("\nSetting tenantId to " + tenantId);
             }
             
             if (phoenixProperty != null) {
@@ -223,12 +223,12 @@ public class PhoenixUtil {
                         + "."
                         + resultSet.getString(TABLE_NAME);
                 if (tableName.matches(regexMatch)) {
-                    logger.info("\nDropping " + tableName);
+                    LOGGER.info("\nDropping " + tableName);
                     try {
                         executeStatementThrowException("DROP TABLE "
                                 + tableName + " CASCADE", conn);
                     } catch (org.apache.phoenix.schema.TableNotFoundException tnf) {
-                        logger.error("Table might be already be deleted via cascade. Schema: "
+                        LOGGER.error("Table might already be deleted via cascade. Schema: "
                                 + tnf.getSchemaName()
                                 + " Table: "
                                 + tnf.getTableName());
@@ -288,7 +288,7 @@ public class PhoenixUtil {
             if (null != query.getDdl()) {
                 Connection conn = null;
                 try {
-                    logger.info("\nExecuting DDL:" + query.getDdl() + " on tenantId:" + query
+                    LOGGER.info("\nExecuting DDL:" + query.getDdl() + " on tenantId:" + query
                             .getTenantId());
                     executeStatement(query.getDdl(),
                             conn = getConnection(query.getTenantId()));
@@ -312,7 +312,7 @@ public class PhoenixUtil {
             Connection conn = null;
             try {
             	for (Ddl ddl : ddls) {
-	                logger.info("\nExecuting DDL:" + ddl + " on tenantId:" +tenantId);
+                    LOGGER.info("\nExecuting DDL:" + ddl + " on tenantId:" +tenantId);
 	                long startTime = System.currentTimeMillis();
 	                executeStatement(ddl.toString(), conn = getConnection(tenantId));
 	                if (ddl.getStatement().toUpperCase().contains(ASYNC_KEYWORD)) {
@@ -362,10 +362,10 @@ public class PhoenixUtil {
      */
     boolean isYarnJobInProgress(String tableName) {
 		try {
-			logger.info("Fetching YARN apps...");
+            LOGGER.info("Fetching YARN apps...");
 			Set<String> response = new PhoenixMRJobSubmitter().getSubmittedYarnApps();
 			for (String str : response) {
-				logger.info("Runnng YARN app: " + str);
+                LOGGER.info("Running YARN app: " + str);
 				if (str.toUpperCase().contains(tableName.toUpperCase())) {
 					return true;
 				}
@@ -382,7 +382,7 @@ public class PhoenixUtil {
     }
 
     public static void setZookeeper(String zookeeper) {
-        logger.info("Setting zookeeper: " + zookeeper);
+        LOGGER.info("Setting zookeeper: " + zookeeper);
         useThickDriver(zookeeper);
     }
 
@@ -406,7 +406,7 @@ public class PhoenixUtil {
      * @throws Exception
      */
     public void updatePhoenixStats(String tableName, Scenario scenario) throws Exception {
-        logger.info("Updating stats for " + tableName);
+        LOGGER.info("Updating stats for " + tableName);
         executeStatement("UPDATE STATISTICS " + tableName, scenario);
     }
 
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/ResourceList.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/ResourceList.java
index 0b54641..df5dbf7 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/ResourceList.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/ResourceList.java
@@ -40,7 +40,7 @@ import java.util.zip.ZipFile;
  * list resources available from the classpath @ *
  */
 public class ResourceList {
-    private static final Logger logger = LoggerFactory.getLogger(ResourceList.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(ResourceList.class);
     private final String rootResourceDir;
 
     public ResourceList(String rootResourceDir) {
@@ -84,10 +84,10 @@ public class ResourceList {
 
             String rName = rootResourceDir + resource;
 
-            logger.debug("Trying with the root append.");
+            LOGGER.debug("Trying with the root append.");
             url = ResourceList.class.getResource(rName);
             if (url == null) {
-                logger.debug("Failed! Must be using a jar. Trying without the root append.");
+                LOGGER.debug("Failed! Must be using a jar. Trying without the root append.");
                 url = ResourceList.class.getResource(resource);
 
                 if (url == null) {
@@ -99,7 +99,7 @@ public class ResourceList {
             } else {
                 path = Paths.get(url.toURI());
             }
-            logger.debug("Found the correct resource: " + path.toString());
+            LOGGER.debug("Found the correct resource: " + path.toString());
             paths.add(path);
         }
 
@@ -143,11 +143,11 @@ public class ResourceList {
             final ZipEntry ze = (ZipEntry) e.nextElement();
             final String fileName = ze.getName();
             final boolean accept = pattern.matcher(fileName).matches();
-            logger.trace("fileName:" + fileName);
-            logger.trace("File:" + file.toString());
-            logger.trace("Match:" + accept);
+            LOGGER.trace("fileName:" + fileName);
+            LOGGER.trace("File:" + file.toString());
+            LOGGER.trace("Match:" + accept);
             if (accept) {
-                logger.trace("Adding File from Jar: " + fileName);
+                LOGGER.trace("Adding File from Jar: " + fileName);
                 retVal.add("/" + fileName);
             }
         }
@@ -171,7 +171,7 @@ public class ResourceList {
                 final String fileName = file.getName();
                 final boolean accept = pattern.matcher(file.toString()).matches();
                 if (accept) {
-                    logger.debug("Adding File from directory: " + fileName);
+                    LOGGER.debug("Adding File from directory: " + fileName);
                     retval.add("/" + fileName);
                 }
             }
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultiThreadedRunner.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultiThreadedRunner.java
index 4423bbd..ecc432b 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultiThreadedRunner.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultiThreadedRunner.java
@@ -40,7 +40,7 @@ import org.apache.phoenix.pherf.configuration.XMLConfigParser;
 import org.apache.phoenix.pherf.util.PhoenixUtil;
 
 class MultiThreadedRunner implements Callable<Void> {
-    private static final Logger logger = LoggerFactory.getLogger(MultiThreadedRunner.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(MultiThreadedRunner.class);
     private Query query;
     private ThreadTime threadTime;
     private PhoenixUtil pUtil = PhoenixUtil.create();
@@ -87,7 +87,7 @@ class MultiThreadedRunner implements Callable<Void> {
      */
     @Override
     public Void call() throws Exception {
-        logger.info("\n\nThread Starting " + threadName + " ; " + query.getStatement() + " for "
+        LOGGER.info("\n\nThread Starting " + threadName + " ; " + query.getStatement() + " for "
                 + numberOfExecutions + "times\n\n");
         Long start = System.currentTimeMillis();
         for (long i = numberOfExecutions; (i > 0 && ((System.currentTimeMillis() - start)
@@ -106,7 +106,7 @@ class MultiThreadedRunner implements Callable<Void> {
             resultManager.flush();
         }
 
-        logger.info("\n\nThread exiting." + threadName + "\n\n");
+        LOGGER.info("\n\nThread exiting." + threadName + "\n\n");
         return null;
     }
 
@@ -137,7 +137,7 @@ class MultiThreadedRunner implements Callable<Void> {
             conn.setAutoCommit(true);
             final String statementString = query.getDynamicStatement(ruleApplier, scenario);
             statement = conn.prepareStatement(statementString);
-            logger.info("Executing: " + statementString);
+            LOGGER.info("Executing: " + statementString);
             
             if (scenario.getWriteParams() != null) {
             	Workload writes = new WriteWorkload(PhoenixUtil.create(), parser, scenario, GeneratePhoenixStats.NO);
@@ -165,7 +165,7 @@ class MultiThreadedRunner implements Callable<Void> {
                 conn.commit();
             }
         } catch (Exception e) {
-            logger.error("Exception while executing query", e);
+            LOGGER.error("Exception while executing query", e);
             exception = e.getMessage();
             throw e;
         } finally {
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultithreadedDiffer.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultithreadedDiffer.java
index ef2e167..26429a5 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultithreadedDiffer.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultithreadedDiffer.java
@@ -32,7 +32,6 @@ import org.slf4j.LoggerFactory;
 
 class MultithreadedDiffer implements Callable<Void> {
     private static final Logger LOGGER = LoggerFactory.getLogger(MultithreadedDiffer.class);
-
     private Thread t;
     private Query query;
     private ThreadTime threadTime;
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/QueryExecutor.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/QueryExecutor.java
index c4a3517..d894a96 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/QueryExecutor.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/QueryExecutor.java
@@ -36,7 +36,7 @@ import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Future;
 
 public class QueryExecutor implements Workload {
-    private static final Logger logger = LoggerFactory.getLogger(QueryExecutor.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(QueryExecutor.class);
     private List<DataModel> dataModels;
     private String queryHint;
     private final boolean exportCSV;
@@ -113,7 +113,7 @@ public class QueryExecutor implements Workload {
                         }
                     }
                 } catch (Exception e) {
-                    logger.error("Scenario throws exception", e);
+                    LOGGER.error("Scenario throws exception", e);
                     throw e;
                 }
                 return null;
@@ -165,7 +165,7 @@ public class QueryExecutor implements Workload {
                     resultManager.write(dataModelResults, ruleApplier);
                     resultManager.flush();
                 } catch (Exception e) {
-                    logger.error("Scenario throws exception", e);
+                    LOGGER.error("Scenario throws exception", e);
                     throw e;
                 }
                 return null;
@@ -255,7 +255,7 @@ public class QueryExecutor implements Workload {
         queryResult.getThreadTimes().add(threadTime);
         threadTime.setThreadName(name);
         queryResult.setHint(this.queryHint);
-        logger.info("\nExecuting query " + queryResult.getStatement());
+        LOGGER.info("\nExecuting query " + queryResult.getStatement());
         Callable<Void> thread;
         if (workloadExecutor.isPerformance()) {
             thread =
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/QueryVerifier.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/QueryVerifier.java
index 7b2bb12..786f778 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/QueryVerifier.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/QueryVerifier.java
@@ -44,7 +44,7 @@ import difflib.Patch;
 
 public class QueryVerifier {
     private PhoenixUtil pUtil = PhoenixUtil.create();
-    private static final Logger logger = LoggerFactory.getLogger(QueryVerifier.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(QueryVerifier.class);
     private boolean useTemporaryOutput;
     private String directoryLocation;
 
@@ -110,10 +110,10 @@ public class QueryVerifier {
 
         Patch patch = DiffUtils.diff(original, newLines);
         if (patch.getDeltas().isEmpty()) {
-            logger.info("Match: " + query.getId() + " with " + newCSV);
+            LOGGER.info("Match: " + query.getId() + " with " + newCSV);
             return true;
         } else {
-            logger.error("DIFF FAILED: " + query.getId() + " with " + newCSV);
+            LOGGER.error("DIFF FAILED: " + query.getId() + " with " + newCSV);
             return false;
         }
     }
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WorkloadExecutor.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WorkloadExecutor.java
index 4abb574..ff599b8 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WorkloadExecutor.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WorkloadExecutor.java
@@ -31,7 +31,7 @@ import java.util.Properties;
 import java.util.concurrent.*;
 
 public class WorkloadExecutor {
-    private static final Logger logger = LoggerFactory.getLogger(WorkloadExecutor.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(WorkloadExecutor.class);
     private final int poolSize;
     private final boolean isPerformance;
 
@@ -87,7 +87,7 @@ public class WorkloadExecutor {
             future.get();
             jobs.remove(workload);
         } catch (InterruptedException | ExecutionException e) {
-            logger.error("", e);
+            LOGGER.error("", e);
         }
     }
 
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WriteWorkload.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WriteWorkload.java
index cae223c..3df5fe8 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WriteWorkload.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WriteWorkload.java
@@ -52,7 +52,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 public class WriteWorkload implements Workload {
-    private static final Logger logger = LoggerFactory.getLogger(WriteWorkload.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(WriteWorkload.class);
 
     public static final String USE_BATCH_API_PROPERTY = "pherf.default.dataloader.batchApi";
 
@@ -169,7 +169,7 @@ public class WriteWorkload implements Workload {
                     resultUtil.write(dataLoadThreadTime);
 
                 } catch (Exception e) {
-                    logger.error("WriteWorkLoad failed", e);
+                    LOGGER.error("WriteWorkLoad failed", e);
                     throw e;
                 }
                 return null;
@@ -179,7 +179,7 @@ public class WriteWorkload implements Workload {
 
     private synchronized void exec(DataLoadTimeSummary dataLoadTimeSummary,
             DataLoadThreadTime dataLoadThreadTime, Scenario scenario) throws Exception {
-        logger.info("\nLoading " + scenario.getRowCount() + " rows for " + scenario.getTableName());
+        LOGGER.info("\nLoading " + scenario.getRowCount() + " rows for " + scenario.getTableName());
         
         // Execute any pre dataload scenario DDLs
         pUtil.executeScenarioDdl(scenario.getPreScenarioDdls(), scenario.getTenantId(), dataLoadTimeSummary);
@@ -190,11 +190,11 @@ public class WriteWorkload implements Workload {
 
         // Update Phoenix Statistics
         if (this.generateStatistics == GeneratePhoenixStats.YES) {
-        	logger.info("Updating Phoenix table statistics...");
+            LOGGER.info("Updating Phoenix table statistics...");
         	pUtil.updatePhoenixStats(scenario.getTableName(), scenario);
-        	logger.info("Stats update done!");
+            LOGGER.info("Stats update done!");
         } else {
-        	logger.info("Phoenix table stats update not requested.");
+            LOGGER.info("Phoenix table stats update not requested.");
         }
         
         // Execute any post data load scenario DDLs before starting query workload
@@ -214,7 +214,7 @@ public class WriteWorkload implements Workload {
                     pUtil.getColumnsFromPhoenix(scenario.getSchemaName(),
                             scenario.getTableNameWithoutSchemaName(), pUtil.getConnection(scenario.getTenantId()));
             int threadRowCount = rowCalculator.getNext();
-            logger.info(
+            LOGGER.info(
                     "Kick off thread (#" + i + ")for upsert with (" + threadRowCount + ") rows.");
             Future<Info>
                     write =
@@ -239,11 +239,11 @@ public class WriteWorkload implements Workload {
             Info writeInfo = write.get();
             sumRows += writeInfo.getRowCount();
             sumDuration += writeInfo.getDuration();
-            logger.info("Executor (" + this.hashCode() + ") writes complete with row count ("
+            LOGGER.info("Executor (" + this.hashCode() + ") writes complete with row count ("
                     + writeInfo.getRowCount() + ") in Ms (" + writeInfo.getDuration() + ")");
         }
         long testDuration = System.currentTimeMillis() - start;
-        logger.info("Writes completed with total row count (" + sumRows
+        LOGGER.info("Writes completed with total row count (" + sumRows
                 + ") with total elapsed time of (" + testDuration
                 + ") ms and total CPU execution time of (" + sumDuration + ") ms");
         dataLoadTimeSummary
@@ -296,7 +296,7 @@ public class WriteWorkload implements Workload {
                             }
                             connection.commit();
                             duration = System.currentTimeMillis() - last;
-                            logger.info("Writer (" + Thread.currentThread().getName()
+                            LOGGER.info("Writer (" + Thread.currentThread().getName()
                                     + ") committed Batch. Total " + getBatchSize()
                                     + " rows for this thread (" + this.hashCode() + ") in ("
                                     + duration + ") Ms");
@@ -315,7 +315,7 @@ public class WriteWorkload implements Workload {
                         }
                     }
                 } catch (SQLException e) {
-                    logger.error("Scenario " + scenario.getName() + " failed with exception ", e);
+                    LOGGER.error("Scenario " + scenario.getName() + " failed with exception ", e);
                     throw e;
                 } finally {
                     // Need to keep the statement open to send the remaining batch of updates
@@ -342,7 +342,7 @@ public class WriteWorkload implements Workload {
                         try {
                             connection.commit();
                             duration = System.currentTimeMillis() - start;
-                            logger.info("Writer ( " + Thread.currentThread().getName()
+                            LOGGER.info("Writer ( " + Thread.currentThread().getName()
                                     + ") committed Final Batch. Duration (" + duration + ") Ms");
                             connection.close();
                         } catch (SQLException e) {
diff --git a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ConfigurationParserTest.java b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ConfigurationParserTest.java
index 5afde69..a2b5b63 100644
--- a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ConfigurationParserTest.java
+++ b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ConfigurationParserTest.java
@@ -38,7 +38,7 @@ import javax.xml.bind.Marshaller;
 import static org.junit.Assert.*;
 
 public class ConfigurationParserTest extends ResultBaseTest {
-    private static final Logger logger = LoggerFactory.getLogger(ConfigurationParserTest.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(ConfigurationParserTest.class);
 
     @Test
     public void testReadWriteWorkloadReader() throws Exception {
@@ -65,7 +65,7 @@ public class ConfigurationParserTest extends ResultBaseTest {
     public void testConfigReader() {
         try {
 
-            logger.debug("DataModel: " + writeXML());
+            LOGGER.debug("DataModel: " + writeXML());
             List<Scenario> scenarioList = getScenarios();
             List<Column> dataMappingColumns = getDataModel().getDataMappingColumns();
             assertTrue("Could not load the data columns from xml.",
diff --git a/phoenix-tracing-webapp/src/main/java/org/apache/phoenix/tracingwebapp/http/Main.java b/phoenix-tracing-webapp/src/main/java/org/apache/phoenix/tracingwebapp/http/Main.java
index d9de663..91c857d 100755
--- a/phoenix-tracing-webapp/src/main/java/org/apache/phoenix/tracingwebapp/http/Main.java
+++ b/phoenix-tracing-webapp/src/main/java/org/apache/phoenix/tracingwebapp/http/Main.java
@@ -28,6 +28,7 @@ import org.eclipse.jetty.server.Server;
 import org.eclipse.jetty.webapp.WebAppContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 /**
  * tracing web app runner
  */