Posted to commits@phoenix.apache.org by ch...@apache.org on 2019/07/11 20:48:12 UTC

[phoenix] branch master updated: PHOENIX-5228 use slf4j for logging in phoenix project (addendum)

This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
     new 02304e6  PHOENIX-5228 use slf4j for logging in phoenix project (addendum)
02304e6 is described below

commit 02304e6390bcba908af21da2dd124f188b9fc1e4
Author: Xinyi <xy...@salesforce.com>
AuthorDate: Sun Jun 16 16:34:11 2019 -0700

    PHOENIX-5228 use slf4j for logging in phoenix project (addendum)
    
    Signed-off-by: Chinmay Kulkarni <ch...@apache.org>
---
 .../phoenix/end2end/index/MutableIndexIT.java      |   4 +-
 .../hbase/ipc/PhoenixRpcSchedulerFactory.java      |   8 +-
 .../java/org/apache/phoenix/cache/GlobalCache.java |  11 +--
 .../apache/phoenix/cache/ServerCacheClient.java    |  19 ++--
 .../org/apache/phoenix/cache/TenantCacheImpl.java  |   6 +-
 .../cache/aggcache/SpillableGroupByCache.java      |   8 +-
 .../org/apache/phoenix/compile/FromCompiler.java   |  11 ++-
 .../GroupedAggregateRegionObserver.java            |  16 ++--
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  |   6 +-
 .../coprocessor/MetaDataRegionObserver.java        | 100 +++++++++++----------
 .../coprocessor/PhoenixAccessController.java       |   6 +-
 .../phoenix/coprocessor/TaskRegionObserver.java    |  24 ++---
 .../coprocessor/tasks/DropChildViewsTask.java      |   3 +-
 .../coprocessor/tasks/IndexRebuildTask.java        |   3 +-
 .../org/apache/phoenix/execute/BaseQueryPlan.java  |   6 +-
 .../org/apache/phoenix/execute/HashJoinPlan.java   |   6 +-
 .../expression/function/CollationKeyFunction.java  |   6 +-
 .../org/apache/phoenix/hbase/index/Indexer.java    |  18 ++--
 .../hbase/index/util/IndexManagementUtil.java      |   3 +-
 .../index/write/ParallelWriterIndexCommitter.java  |   1 -
 .../hbase/index/write/RecoveryIndexWriter.java     |   3 +-
 .../phoenix/index/PhoenixIndexFailurePolicy.java   |  19 ++--
 .../apache/phoenix/jdbc/PhoenixEmbeddedDriver.java |   5 +-
 .../org/apache/phoenix/jdbc/PhoenixStatement.java  |  12 ++-
 .../apache/phoenix/log/QueryLoggerDisruptor.java   |   5 +-
 .../apache/phoenix/mapreduce/OrphanViewTool.java   |   3 +-
 .../phoenix/mapreduce/PhoenixRecordReader.java     |   9 +-
 .../apache/phoenix/mapreduce/index/IndexTool.java  |   7 +-
 .../index/PhoenixIndexImportDirectReducer.java     |   3 +-
 .../index/PhoenixIndexPartialBuildMapper.java      |   3 +-
 .../index/PhoenixServerBuildIndexMapper.java       |   4 -
 .../index/automation/PhoenixMRJobSubmitter.java    |   3 +-
 .../monitoring/GlobalMetricRegistriesAdapter.java  |   6 +-
 .../phoenix/query/ConnectionQueryServicesImpl.java |  33 ++++---
 .../schema/stats/DefaultStatisticsCollector.java   |   3 +-
 .../phoenix/schema/stats/StatisticsScanner.java    |  12 ++-
 .../java/org/apache/phoenix/trace/TraceReader.java |   4 +-
 .../phoenix/util/EquiDepthStreamHistogram.java     |   3 +-
 38 files changed, 236 insertions(+), 166 deletions(-)
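
The diffstat above is almost entirely a mechanical swap of commons-logging (and, in
a few places, log4j) loggers for slf4j ones. A minimal sketch of the before/after
pattern, assuming a hypothetical class ExampleService that is not part of Phoenix:

    // Before: commons-logging, as removed throughout this commit.
    //   import org.apache.commons.logging.Log;
    //   import org.apache.commons.logging.LogFactory;
    //   private static final Log LOG = LogFactory.getLog(ExampleService.class);

    // After: slf4j, as introduced throughout this commit.
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class ExampleService {
        private static final Logger LOGGER =
                LoggerFactory.getLogger(ExampleService.class);

        public void doWork() {
            LOGGER.info("Doing work");
        }
    }

slf4j is only a facade; output format and destinations stay with whatever backend
binding (e.g. slf4j-log4j12) is on the classpath.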

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
index 43526a2..2f7b1c9 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
@@ -50,12 +50,14 @@ import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameters;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.primitives.Doubles;
 
 @RunWith(Parameterized.class)
 public class MutableIndexIT extends ParallelStatsDisabledIT {
-    
+    private static final Logger LOGGER = LoggerFactory.getLogger(MutableIndexIT.class);
     protected final boolean localIndex;
     private final String tableDDLOptions;
     
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcSchedulerFactory.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcSchedulerFactory.java
index fbec7b8..0d15b63 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcSchedulerFactory.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcSchedulerFactory.java
@@ -26,8 +26,6 @@ import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.slf4j.Marker;
-import org.slf4j.MarkerFactory;
 
 import com.google.common.base.Preconditions;
 
@@ -37,8 +35,8 @@ import com.google.common.base.Preconditions;
  */
 public class PhoenixRpcSchedulerFactory implements RpcSchedulerFactory {
 
-    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixRpcSchedulerFactory.class);
-    private static final Marker fatal = MarkerFactory.getMarker("FATAL");
+    private static final Logger LOGGER =
+            LoggerFactory.getLogger(PhoenixRpcSchedulerFactory.class);
     private static final String VERSION_TOO_OLD_FOR_INDEX_RPC =
             "Running an older version of HBase (less than 0.98.4), Phoenix index RPC handling cannot be enabled.";
 
@@ -50,7 +48,7 @@ public class PhoenixRpcSchedulerFactory implements RpcSchedulerFactory {
             // happens in <=0.98.4 where the scheduler factory is not visible
             delegate = new SimpleRpcSchedulerFactory().create(conf, priorityFunction, abortable);
         } catch (IllegalAccessError e) {
-            LOGGER.error(fatal, VERSION_TOO_OLD_FOR_INDEX_RPC);
+            LOGGER.error(VERSION_TOO_OLD_FOR_INDEX_RPC);
             throw e;
         }
 
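The hunk above also drops the slf4j Marker that tagged the version-check failure as
FATAL. slf4j itself has no FATAL level; a Marker only annotates the event for
backends that understand it. A minimal sketch of both forms, using a hypothetical
MarkerSketch class:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;
    import org.slf4j.Marker;
    import org.slf4j.MarkerFactory;

    public class MarkerSketch {
        private static final Logger LOGGER = LoggerFactory.getLogger(MarkerSketch.class);
        private static final Marker FATAL = MarkerFactory.getMarker("FATAL");

        public void fail(String message) {
            // Marker form, as removed above: tags the event so a backend can
            // route or render it specially.
            LOGGER.error(FATAL, message);
            // Plain form, as kept above: an ordinary ERROR-level event.
            LOGGER.error(message);
        }
    }
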
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/cache/GlobalCache.java b/phoenix-core/src/main/java/org/apache/phoenix/cache/GlobalCache.java
index a6512ea..a18ec6d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/cache/GlobalCache.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/cache/GlobalCache.java
@@ -66,8 +66,9 @@ public class GlobalCache extends TenantCacheImpl {
     public long clearTenantCache() {
         long unfreedBytes = getMemoryManager().getMaxMemory() - getMemoryManager().getAvailableMemory();
         if (unfreedBytes != 0 && LOGGER.isDebugEnabled()) {
-            LOGGER.debug("Found " + (getMemoryManager().getMaxMemory() - getMemoryManager()
-                    .getAvailableMemory()) + " bytes not freed from global cache");
+            LOGGER.debug("Found " + (getMemoryManager().getMaxMemory() -
+                    getMemoryManager().getAvailableMemory()) +
+                    " bytes not freed from global cache");
         }
         removeAllServerCache();
         for (Map.Entry<ImmutableBytesWritable, TenantCache> entry : perTenantCacheMap.entrySet()) {
@@ -75,9 +76,9 @@ public class GlobalCache extends TenantCacheImpl {
             long unfreedTenantBytes = cache.getMemoryManager().getMaxMemory() - cache.getMemoryManager().getAvailableMemory();
             if (unfreedTenantBytes != 0 && LOGGER.isDebugEnabled()) {
                 ImmutableBytesWritable cacheId = entry.getKey();
-                LOGGER.debug("Found " + unfreedTenantBytes + " bytes not freed for tenant "
-                        + Bytes.toStringBinary(cacheId.get(), cacheId.getOffset(),
-                        cacheId.getLength()));
+                LOGGER.debug("Found " + unfreedTenantBytes + " bytes not freed for tenant " +
+                        Bytes.toStringBinary(cacheId.get(), cacheId.getOffset(),
+                                cacheId.getLength()));
             }
             unfreedBytes += unfreedTenantBytes;
             cache.removeAllServerCache();
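
The guarded-concatenation style kept in the hunks above (isDebugEnabled() around a
string-building debug call) predates slf4j's parameterized messages. A minimal
sketch of the two equivalent forms, with a hypothetical report() method:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class ParameterizedLoggingSketch {
        private static final Logger LOGGER =
                LoggerFactory.getLogger(ParameterizedLoggingSketch.class);

        public void report(long unfreedBytes, String tenant) {
            // Guarded concatenation, as in the hunks above:
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug("Found " + unfreedBytes + " bytes not freed for tenant " + tenant);
            }
            // Parameterized form: the message is formatted only if DEBUG is
            // enabled, so no guard is needed for cheap arguments:
            LOGGER.debug("Found {} bytes not freed for tenant {}", unfreedBytes, tenant);
        }
    }

The guard still pays off when computing an argument is itself expensive, as with the
getMemoryManager() arithmetic above, which is presumably why the commit keeps it.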
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java b/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
index 8c12311..2be81af 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
@@ -282,8 +282,8 @@ public class ServerCacheClient {
                     // Call RPC once per server
                     servers.add(entry);
                     if (LOGGER.isDebugEnabled()) {
-                        LOGGER.debug(addCustomAnnotations("Adding cache entry " +
-                            "to be sent for " + entry, connection));
+                        LOGGER.debug(addCustomAnnotations(
+                                "Adding cache entry to be sent for " + entry, connection));
                     }
                     final byte[] key = getKeyInRegion(entry.getRegionInfo().getStartKey());
                     final Table htable = services.getTable(cacheUsingTable.getPhysicalName().getBytes());
@@ -312,12 +312,13 @@ public class ServerCacheClient {
                     }));
                 } else {
                     if (LOGGER.isDebugEnabled()) {
-                        LOGGER.debug(addCustomAnnotations("NOT adding cache entry to be sent for "
-                                + entry + " since one already exists for that entry", connection));
+                        LOGGER.debug(addCustomAnnotations(
+                                "NOT adding cache entry to be sent for " + entry +
+                                        " since one already exists for that entry", connection));
                     }
                 }
             }
-            
+
             hashCacheSpec = new ServerCache(cacheId,servers,cachePtr, services, storeCacheOnClient);
             // Execute in parallel
             int timeoutMs = services.getProps().getInt(QueryServices.THREAD_TIMEOUT_MS_ATTRIB, QueryServicesOptions.DEFAULT_THREAD_TIMEOUT_MS);
@@ -427,13 +428,15 @@ public class ServerCacheClient {
                         remainingOnServers.remove(entry);
                     } catch (Throwable t) {
                         lastThrowable = t;
-                        LOGGER.error(addCustomAnnotations("Error trying to remove hash cache for " + entry, connection),
-                                t);
+                        LOGGER.error(addCustomAnnotations(
+                                "Error trying to remove hash cache for " + entry,
+                                connection), t);
                     }
                 }
             }
             if (!remainingOnServers.isEmpty()) {
-                LOGGER.warn(addCustomAnnotations("Unable to remove hash cache for " + remainingOnServers, connection),
+                LOGGER.warn(addCustomAnnotations("Unable to remove hash cache for "
+                                + remainingOnServers, connection),
                         lastThrowable);
             }
         } finally {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/cache/TenantCacheImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/cache/TenantCacheImpl.java
index 8038e9e..22a7050 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/cache/TenantCacheImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/cache/TenantCacheImpl.java
@@ -208,7 +208,8 @@ public class TenantCacheImpl implements TenantCache {
             ImmutableBytesPtr cacheId = entry.getCacheId();
             getPersistentServerCaches().invalidate(cacheId);
             available = this.getMemoryManager().getAvailableMemory();
-            LOGGER.debug("Evicted cache ID " + Bytes.toLong(cacheId.get()) + ", we now have " + available + " bytes available");
+            LOGGER.debug("Evicted cache ID " + Bytes.toLong(cacheId.get()) + ", we now have "
+                    + available + " bytes available");
         }
     }
 
@@ -273,7 +274,8 @@ public class TenantCacheImpl implements TenantCache {
         }
         entry.decrementLiveQueryCount();
         if (!entry.isLive()) {
-            LOGGER.debug("Cache ID " + Bytes.toLong(cacheId.get()) + " is no longer live, invalidate it");
+            LOGGER.debug("Cache ID " + Bytes.toLong(cacheId.get())
+                    + " is no longer live, invalidate it");
             getServerCaches().invalidate(cacheId);
         }
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java b/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java
index bb9e63b..9f75d31 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java
@@ -148,7 +148,8 @@ public class SpillableGroupByCache implements GroupByCache {
         try {
             this.chunk = tenantCache.getMemoryManager().allocate(estSize);
         } catch (InsufficientMemoryException ime) {
-            LOGGER.error("Requested Map size exceeds memory limit, please decrease max size via config paramter: "
+            LOGGER.error("Requested Map size exceeds memory limit, " +
+                    "please decrease max size via config parameter: "
                     + GROUPBY_MAX_CACHE_SIZE_ATTRIB);
             throw ime;
         }
@@ -361,8 +362,9 @@ public class SpillableGroupByCache implements GroupByCache {
                 byte[] value = aggregators.toBytes(aggs);
                 if (LOGGER.isDebugEnabled()) {
                     LOGGER.debug("Adding new distinct group: "
-                            + Bytes.toStringBinary(key.get(), key.getOffset(), key.getLength()) + " with aggregators "
-                            + aggs.toString() + " value = " + Bytes.toStringBinary(value));
+                            + Bytes.toStringBinary(key.get(), key.getOffset(), key.getLength()) +
+                            " with aggregators " + aggs.toString() + " value = " +
+                            Bytes.toStringBinary(value));
                 }
                 results.add(PhoenixKeyValueUtil.newKeyValue(key.get(), key.getOffset(), key.getLength(), SINGLE_COLUMN_FAMILY,
                         SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length));
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
index 943c0d4..ce0c3a1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
@@ -647,7 +647,12 @@ public class FromCompiler {
             }
             TableRef tableRef = new TableRef(tableNode.getAlias(), theTable, timeStamp, !dynamicColumns.isEmpty());
             if (LOGGER.isDebugEnabled() && timeStamp != QueryConstants.UNSET_TIMESTAMP) {
-                LOGGER.debug(LogUtil.addCustomAnnotations("Re-resolved stale table " + fullTableName + " with seqNum " + tableRef.getTable().getSequenceNumber() + " at timestamp " + tableRef.getTable().getTimeStamp() + " with " + tableRef.getTable().getColumns().size() + " columns: " + tableRef.getTable().getColumns(), connection));
+                LOGGER.debug(LogUtil.addCustomAnnotations(
+                        "Re-resolved stale table " + fullTableName + " with seqNum "
+                                + tableRef.getTable().getSequenceNumber() + " at timestamp "
+                                + tableRef.getTable().getTimeStamp() + " with "
+                                + tableRef.getTable().getColumns().size() + " columns: "
+                                + tableRef.getTable().getColumns(), connection));
             }
             return tableRef;
         }
@@ -696,7 +701,9 @@ public class FromCompiler {
             }
             
             if (LOGGER.isDebugEnabled() && timeStamp != QueryConstants.UNSET_TIMESTAMP) {
-                LOGGER.debug(LogUtil.addCustomAnnotations("Re-resolved stale function " + functionNames.toString() + "at timestamp " + timeStamp, connection));
+                LOGGER.debug(LogUtil.addCustomAnnotations(
+                        "Re-resolved stale function " + functionNames.toString() +
+                                " at timestamp " + timeStamp, connection));
             }
             return functionsFound;
         }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
index 78a1fc0..f521803 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
@@ -391,8 +391,10 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver im
             final RegionScanner scanner, final List<Expression> expressions,
             final ServerAggregators aggregators, long limit) throws IOException {
         if (LOGGER.isDebugEnabled()) {
-            LOGGER.debug(LogUtil.addCustomAnnotations("Grouped aggregation over unordered rows with scan " + scan
-                    + ", group by " + expressions + ", aggregators " + aggregators, ScanUtil.getCustomAnnotations(scan)));
+            LOGGER.debug(LogUtil.addCustomAnnotations(
+                    "Grouped aggregation over unordered rows with scan " + scan
+                    + ", group by " + expressions + ", aggregators " + aggregators,
+                    ScanUtil.getCustomAnnotations(scan)));
         }
         RegionCoprocessorEnvironment env = c.getEnvironment();
         Configuration conf = env.getConfiguration();
@@ -419,7 +421,9 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver im
             boolean hasMore;
             Tuple result = useQualifierAsIndex ? new PositionBasedMultiKeyValueTuple() : new MultiKeyValueTuple();
             if (LOGGER.isDebugEnabled()) {
-                LOGGER.debug(LogUtil.addCustomAnnotations("Spillable groupby enabled: " + spillableEnabled, ScanUtil.getCustomAnnotations(scan)));
+                LOGGER.debug(LogUtil.addCustomAnnotations(
+                        "Spillable groupby enabled: " + spillableEnabled,
+                        ScanUtil.getCustomAnnotations(scan)));
             }
             Region region = c.getEnvironment().getRegion();
             boolean acquiredLock = false;
@@ -475,8 +479,10 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver im
             final ServerAggregators aggregators, final long limit) throws IOException {
 
         if (LOGGER.isDebugEnabled()) {
-            LOGGER.debug(LogUtil.addCustomAnnotations("Grouped aggregation over ordered rows with scan " + scan + ", group by "
-                    + expressions + ", aggregators " + aggregators, ScanUtil.getCustomAnnotations(scan)));
+            LOGGER.debug(LogUtil.addCustomAnnotations(
+                    "Grouped aggregation over ordered rows with scan " + scan + ", group by "
+                    + expressions + ", aggregators " + aggregators,
+                    ScanUtil.getCustomAnnotations(scan)));
         }
         final Pair<Integer, Integer> minMaxQualifiers = EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan);
         final boolean useQualifierAsIndex = EncodedColumnsUtil.useQualifierAsIndex(minMaxQualifiers);
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 7dca023..089951b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -4361,7 +4361,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr
                 rowLock.release();
             }
         } catch (Throwable t) {
-          LOGGER.error("updateIndexState failed", t);
+            LOGGER.error("updateIndexState failed", t);
             ProtobufUtil.setControllerException(controller,
                 ServerUtil.createIOException(SchemaUtil.getTableName(schemaName, tableName), t));
         }
@@ -4600,7 +4600,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr
                 ServerUtil.releaseRowLocks(locks);
             }
         } catch (Throwable t) {
-          LOGGER.error("createFunction failed", t);
+            LOGGER.error("createFunction failed", t);
             ProtobufUtil.setControllerException(controller,
                 ServerUtil.createIOException(Bytes.toString(functionName), t));
         }
@@ -4652,7 +4652,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr
                 ServerUtil.releaseRowLocks(locks);
             }
         } catch (Throwable t) {
-          LOGGER.error("dropFunction failed", t);
+            LOGGER.error("dropFunction failed", t);
             ProtobufUtil.setControllerException(controller,
                 ServerUtil.createIOException(Bytes.toString(functionName), t));
         }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
index 2347c5e..2fc0876 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
@@ -35,8 +35,6 @@ import java.util.concurrent.TimeUnit;
 
 import javax.annotation.concurrent.GuardedBy;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
@@ -55,8 +53,6 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Pair;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
 import org.apache.phoenix.cache.GlobalCache;
 import org.apache.phoenix.compile.MutationPlan;
 import org.apache.phoenix.compile.PostDDLCompiler;
@@ -88,6 +84,8 @@ import org.apache.phoenix.util.ScanUtil;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.ServerUtil;
 import org.apache.phoenix.util.UpgradeUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.ImmutableList;
@@ -101,7 +99,7 @@ import com.google.common.collect.Maps;
  */
 @SuppressWarnings("deprecation")
 public class MetaDataRegionObserver implements RegionObserver,RegionCoprocessor {
-    public static final Log LOG = LogFactory.getLog(MetaDataRegionObserver.class);
+    public static final Logger LOGGER = LoggerFactory.getLogger(MetaDataRegionObserver.class);
     public static final String REBUILD_INDEX_APPEND_TO_URL_STRING = "REBUILDINDEX";
     // PHOENIX-5094 To differentiate the increment in PENDING_DISABLE_COUNT made by client or index
     // rebuilder, we are using large value for index rebuilder
@@ -182,16 +180,16 @@ public class MetaDataRegionObserver implements RegionObserver,RegionCoprocessor
                         @Override
                         public Void run() throws Exception {
                             if (UpgradeUtil.truncateStats(mTable, sTable)) {
-                                LOG.info("Stats are successfully truncated for upgrade 4.7!!");
+                                LOGGER.info("Stats are successfully truncated for upgrade 4.7!!");
                             }
                             return null;
                         }
                     });
 
                 } catch (Exception exception) {
-                    LOG.warn("Exception while truncate stats..,"
+                    LOGGER.warn("Exception while truncating stats,"
                             + " please check and delete stats manually inorder to get proper result with old client!!");
-                    LOG.warn(exception.getStackTrace());
+                    LOGGER.warn(exception.getStackTrace().toString());
                 } finally {
                     try {
                         if (metaTable != null) {
@@ -209,14 +207,9 @@ public class MetaDataRegionObserver implements RegionObserver,RegionCoprocessor
         t.start();
 
         if (!enableRebuildIndex) {
-            LOG.info("Failure Index Rebuild is skipped by configuration.");
+            LOGGER.info("Failure Index Rebuild is skipped by configuration.");
             return;
         }
-        // turn off verbose deprecation logging
-        Logger deprecationLogger = Logger.getLogger("org.apache.hadoop.conf.Configuration.deprecation");
-        if (deprecationLogger != null) {
-            deprecationLogger.setLevel(Level.WARN);
-        }
         // Ensure we only run one of the index rebuilder tasks
         if (ServerUtil.isKeyInRegion(SYSTEM_CATALOG_KEY, e.getEnvironment().getRegion())) {
             try {
@@ -226,7 +219,7 @@ public class MetaDataRegionObserver implements RegionObserver,RegionCoprocessor
                 BuildIndexScheduleTask task = new BuildIndexScheduleTask(e.getEnvironment());
                 executor.scheduleWithFixedDelay(task, initialRebuildTaskDelay, rebuildIndexTimeInterval, TimeUnit.MILLISECONDS);
             } catch (ClassNotFoundException ex) {
-                LOG.error("BuildIndexScheduleTask cannot start!", ex);
+                LOGGER.error("BuildIndexScheduleTask cannot start!", ex);
             }
         }
     }
@@ -273,7 +266,9 @@ public class MetaDataRegionObserver implements RegionObserver,RegionCoprocessor
                     IndexUtil.incrementCounterForIndex(conn, indexName, -PENDING_DISABLE_INACTIVE_STATE_COUNT);
                     indexesIncremented.add(index);
                 }catch(Exception e) {
-                    LOG.warn("Decrement  of -" + PENDING_DISABLE_INACTIVE_STATE_COUNT +" for index :" + index.getName().getString() + "of table: " + dataPTable.getName().getString(), e);
+                    LOGGER.warn("Decrement of -" + PENDING_DISABLE_INACTIVE_STATE_COUNT
+                            + " for index: " + index.getName().getString() + " of table: "
+                            + dataPTable.getName().getString(), e);
                 }
             }
             return indexesIncremented;
@@ -312,7 +307,7 @@ public class MetaDataRegionObserver implements RegionObserver,RegionCoprocessor
                     results.clear();
                     hasMore = scanner.next(results);
                     if (results.isEmpty()) {
-                        LOG.debug("Found no indexes with non zero INDEX_DISABLE_TIMESTAMP");
+                        LOGGER.debug("Found no indexes with non zero INDEX_DISABLE_TIMESTAMP");
                         break;
                     }
 
@@ -322,7 +317,7 @@ public class MetaDataRegionObserver implements RegionObserver,RegionCoprocessor
                     Cell indexStateCell = r.getColumnLatestCell(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.INDEX_STATE_BYTES);
 
                     if (disabledTimeStamp == null || disabledTimeStamp.length == 0) {
-                        LOG.debug("Null or empty INDEX_DISABLE_TIMESTAMP");
+                        LOGGER.debug("Null or empty INDEX_DISABLE_TIMESTAMP");
                         continue;
                     }
 
@@ -333,7 +328,7 @@ public class MetaDataRegionObserver implements RegionObserver,RegionCoprocessor
                         PhoenixDatabaseMetaData.DATA_TABLE_NAME_BYTES);
                     if ((dataTable == null || dataTable.length == 0) || indexStateCell == null) {
                         // data table name can't be empty
-                        LOG.debug("Null or data table name or index state");
+                        LOGGER.debug("Null or data table name or index state");
                         continue;
                     }
 
@@ -345,14 +340,15 @@ public class MetaDataRegionObserver implements RegionObserver,RegionCoprocessor
 
                     // validity check
                     if (indexTable == null || indexTable.length == 0) {
-                        LOG.debug("We find IndexTable empty during rebuild scan:" + scan
+                        LOGGER.debug("We find IndexTable empty during rebuild scan: " + scan
                                 + "so, Index rebuild has been skipped for row=" + r);
                         continue;
                     }
                     
                     String dataTableFullName = SchemaUtil.getTableName(schemaName, dataTable);
                     if (onlyTheseTables != null && !onlyTheseTables.contains(dataTableFullName)) {
-                        LOG.debug("Could not find " + dataTableFullName + " in " + onlyTheseTables);
+                        LOGGER.debug("Could not find " + dataTableFullName +
+                                " in " + onlyTheseTables);
                         continue;
                     }
 
@@ -366,7 +362,8 @@ public class MetaDataRegionObserver implements RegionObserver,RegionCoprocessor
                     PTable indexPTable = PhoenixRuntime.getTableNoCache(conn, indexTableFullName);
                     // Sanity check in case index was removed from table
                     if (!dataPTable.getIndexes().contains(indexPTable)) {
-                        LOG.debug(dataTableFullName + " does not contain " + indexPTable.getName().getString());
+                        LOGGER.debug(dataTableFullName + " does not contain " +
+                                indexPTable.getName().getString());
                         continue;
                     }
                     
@@ -388,8 +385,8 @@ public class MetaDataRegionObserver implements RegionObserver,RegionCoprocessor
                     // an index write fails.
                     if ((indexState == PIndexState.DISABLE || indexState == PIndexState.PENDING_ACTIVE)
                             && !MetaDataUtil.tableRegionsOnline(this.env.getConfiguration(), indexPTable)) {
-                        LOG.debug("Index rebuild has been skipped because not all regions of index table="
-                                + indexPTable.getName() + " are online.");
+                        LOGGER.debug("Index rebuild has been skipped because not all regions of" +
+                                " index table=" + indexPTable.getName() + " are online.");
                         continue;
                     }
 
@@ -402,12 +399,13 @@ public class MetaDataRegionObserver implements RegionObserver,RegionCoprocessor
                          */
                         try {
                             IndexUtil.updateIndexState(conn, indexTableFullName, PIndexState.DISABLE, 0l);
-                            LOG.error("Unable to rebuild index " + indexTableFullName
-                                    + ". Won't attempt again since index disable timestamp is older than current time by "
-                                    + indexDisableTimestampThreshold
-                                    + " milliseconds. Manual intervention needed to re-build the index");
+                            LOGGER.error("Unable to rebuild index " + indexTableFullName
+                                    + ". Won't attempt again since index disable timestamp is" +
+                                    " older than current time by " + indexDisableTimestampThreshold
+                                    + " milliseconds. Manual intervention needed to re-build" +
+                                    " the index");
                         } catch (Throwable ex) {
-                            LOG.error(
+                            LOGGER.error(
                                 "Unable to mark index " + indexTableFullName + " as disabled.", ex);
                         }
                         continue; // don't attempt another rebuild irrespective of whether
@@ -426,7 +424,8 @@ public class MetaDataRegionObserver implements RegionObserver,RegionCoprocessor
                         IndexUtil.updateIndexState(conn, indexTableFullName, PIndexState.ACTIVE, null);
                         continue; // Must wait until clients start to do index maintenance again
                     } else if (indexState != PIndexState.INACTIVE && indexState != PIndexState.ACTIVE) {
-                        LOG.warn("Unexpected index state of " + indexTableFullName + "=" + indexState + ". Skipping partial rebuild attempt.");
+                        LOGGER.warn("Unexpected index state of " + indexTableFullName + "="
+                                + indexState + ". Skipping partial rebuild attempt.");
                         continue;
                     }
                     long currentTime = EnvironmentEdgeManager.currentTimeMillis();
@@ -435,7 +434,9 @@ public class MetaDataRegionObserver implements RegionObserver,RegionCoprocessor
                                     QueryServicesOptions.DEFAULT_INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_FORWARD_TIME);
                     // Wait until no failures have occurred in at least forwardOverlapDurationMs
                     if (indexStateCell.getTimestamp() + forwardOverlapDurationMs > currentTime) {
-                        LOG.debug("Still must wait " + (indexStateCell.getTimestamp() + forwardOverlapDurationMs - currentTime) + " before starting rebuild for " + indexTableFullName);
+                        LOGGER.debug("Still must wait " + (indexStateCell.getTimestamp() +
+                                forwardOverlapDurationMs - currentTime) +
+                                " before starting rebuild for " + indexTableFullName);
                         continue; // Haven't waited long enough yet
                     }
                     Long upperBoundOfRebuild = indexStateCell.getTimestamp() + forwardOverlapDurationMs;
@@ -446,8 +447,9 @@ public class MetaDataRegionObserver implements RegionObserver,RegionCoprocessor
                         indexesToPartiallyRebuild = Lists.newArrayListWithExpectedSize(dataPTable.getIndexes().size());
                         dataTableToIndexesMap.put(dataPTable, indexesToPartiallyRebuild);
                     }
-                    LOG.debug("We have found " + indexPTable.getIndexState() + " Index:" + indexPTable.getName()
-                            + " on data table:" + dataPTable.getName() + " which failed to be updated at "
+                    LOGGER.debug("We have found " + indexPTable.getIndexState() + " Index:" +
+                            indexPTable.getName() + " on data table:" + dataPTable.getName() +
+                            " which failed to be updated at "
                             + indexPTable.getIndexDisableTimestamp());
                     indexesToPartiallyRebuild.add(new Pair<PTable,Long>(indexPTable,upperBoundOfRebuild));
                 } while (hasMore);
@@ -482,7 +484,10 @@ public class MetaDataRegionObserver implements RegionObserver,RegionCoprocessor
 								long disabledTimeStampVal = index.getIndexDisableTimestamp();
 								if (disabledTimeStampVal != 0) {
                                     if (signOfDisableTimeStamp != 0 && signOfDisableTimeStamp != Long.signum(disabledTimeStampVal)) {
-                                        LOG.warn("Found unexpected mix of signs with INDEX_DISABLE_TIMESTAMP for " + dataPTable.getName().getString() + " with " + indexesToPartiallyRebuild);
+                                        LOGGER.warn("Found unexpected mix of signs with " +
+                                                "INDEX_DISABLE_TIMESTAMP for " +
+                                                dataPTable.getName().getString() + " with " +
+                                                indexesToPartiallyRebuild);
                                     }
 								    signOfDisableTimeStamp = Long.signum(disabledTimeStampVal);
 	                                disabledTimeStampVal = Math.abs(disabledTimeStampVal);
@@ -499,14 +504,15 @@ public class MetaDataRegionObserver implements RegionObserver,RegionCoprocessor
 							}
 							// No indexes are disabled, so skip this table
 							if (earliestDisableTimestamp == Long.MAX_VALUE) {
-                                LOG.debug("No indexes are disabled so continuing");
+                                LOGGER.debug("No indexes are disabled so continuing");
 								continue;
 							}
 							long scanBeginTime = Math.max(0, earliestDisableTimestamp - backwardOverlapDurationMs);
                             long scanEndTime = Math.min(latestUpperBoundTimestamp,
                                     getTimestampForBatch(scanBeginTime,batchExecutedPerTableMap.get(dataPTable.getName())));
-                            LOG.info("Starting to build " + dataPTable + " indexes " + indexesToPartiallyRebuild
-									+ " from timestamp=" + scanBeginTime + " until " + scanEndTime);
+                            LOGGER.info("Starting to build " + dataPTable + " indexes "
+                                    + indexesToPartiallyRebuild + " from timestamp=" +
+                                    scanBeginTime + " until " + scanEndTime);
 							
 							TableRef tableRef = new TableRef(null, dataPTable, HConstants.LATEST_TIMESTAMP, false);
 							// TODO Need to set high timeout
@@ -525,7 +531,7 @@ public class MetaDataRegionObserver implements RegionObserver,RegionCoprocessor
 							byte[] attribValue = ByteUtil.copyKeyBytesIfNecessary(indexMetaDataPtr);
 							dataTableScan.setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD, attribValue);
 							ScanUtil.setClientVersion(dataTableScan, MetaDataProtocol.PHOENIX_VERSION);
-                            LOG.info("Starting to partially build indexes:" + indexesToPartiallyRebuild
+                            LOGGER.info("Starting to partially build indexes:" + indexesToPartiallyRebuild
                                     + " on data table:" + dataPTable.getName() + " with the earliest disable timestamp:"
                                     + earliestDisableTimestamp + " till "
                                     + (scanEndTime == HConstants.LATEST_TIMESTAMP ? "LATEST_TIMESTAMP" : scanEndTime));
@@ -533,10 +539,10 @@ public class MetaDataRegionObserver implements RegionObserver,RegionCoprocessor
 							long rowCount = mutationState.getUpdateCount();
 							decrementIndexesPendingDisableCount(conn, dataPTable, indexesToPartiallyRebuild);
                             if (scanEndTime == latestUpperBoundTimestamp) {
-                                LOG.info("Rebuild completed for all inactive/disabled indexes in data table:"
+                                LOGGER.info("Rebuild completed for all inactive/disabled indexes in data table:"
                                         + dataPTable.getName());
                             }
-                            LOG.info(" no. of datatable rows read in rebuilding process is " + rowCount);
+                            LOGGER.info(" no. of datatable rows read in rebuilding process is " + rowCount);
 							for (PTable indexPTable : indexesToPartiallyRebuild) {
 								String indexTableFullName = SchemaUtil.getTableName(
 										indexPTable.getSchemaName().getString(),
@@ -546,7 +552,7 @@ public class MetaDataRegionObserver implements RegionObserver,RegionCoprocessor
 								        IndexUtil.updateIndexState(conn, indexTableFullName, PIndexState.ACTIVE, 0L,
 								            latestUpperBoundTimestamp);
 								        batchExecutedPerTableMap.remove(dataPTable.getName());
-                                        LOG.info("Making Index:" + indexPTable.getTableName() + " active after rebuilding");
+                                        LOGGER.info("Making Index:" + indexPTable.getTableName() + " active after rebuilding");
 								    } else {
 								        // Increment timestamp so that client sees updated disable timestamp
 								        IndexUtil.updateIndexState(conn, indexTableFullName, indexPTable.getIndexState(),
@@ -556,34 +562,34 @@ public class MetaDataRegionObserver implements RegionObserver,RegionCoprocessor
 								            noOfBatches = 0l;
 								        }
 								        batchExecutedPerTableMap.put(dataPTable.getName(), ++noOfBatches);
-                                        LOG.info(
+                                        LOGGER.info(
 								            "During Round-robin build: Successfully updated index disabled timestamp  for "
 								                + indexTableFullName + " to " + scanEndTime);
 								    }
 								} catch (SQLException e) {
-                                    LOG.error("Unable to rebuild " + dataPTable + " index " + indexTableFullName, e);
+                                    LOGGER.error("Unable to rebuild " + dataPTable + " index " + indexTableFullName, e);
 								}
 							}
 						} catch (Exception e) {
-                            LOG.error("Unable to rebuild " + dataPTable + " indexes " + indexesToPartiallyRebuild, e);
+                            LOGGER.error("Unable to rebuild " + dataPTable + " indexes " + indexesToPartiallyRebuild, e);
 						}
 					}
 				}
 			} catch (Throwable t) {
-                LOG.warn("ScheduledBuildIndexTask failed!", t);
+                LOGGER.warn("ScheduledBuildIndexTask failed!", t);
 			} finally {
 				if (scanner != null) {
 					try {
 						scanner.close();
 					} catch (IOException ignored) {
-                        LOG.debug("ScheduledBuildIndexTask can't close scanner.", ignored);
+                        LOGGER.debug("ScheduledBuildIndexTask can't close scanner.", ignored);
 					}
 				}
 				if (conn != null) {
 					try {
 						conn.close();
 					} catch (SQLException ignored) {
-                        LOG.debug("ScheduledBuildIndexTask can't close connection", ignored);
+                        LOGGER.debug("ScheduledBuildIndexTask can't close connection", ignored);
 					}
 				}
 			}
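
One hunk in MetaDataRegionObserver above turns LOG.warn(exception.getStackTrace())
into LOGGER.warn(exception.getStackTrace().toString()). Note that toString() on the
StackTraceElement[] array prints only an array identity string, not the trace;
passing the throwable as the final argument lets slf4j render the full stack trace.
A minimal sketch, with a hypothetical handle() method:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class ThrowableLoggingSketch {
        private static final Logger LOGGER =
                LoggerFactory.getLogger(ThrowableLoggingSketch.class);

        public void handle(Exception exception) {
            // Prints something like "[Ljava.lang.StackTraceElement;@1b6d3586":
            LOGGER.warn(exception.getStackTrace().toString());
            // Prints the message followed by the full stack trace:
            LOGGER.warn("Exception while truncating stats", exception);
        }
    }
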
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
index aec28de..1ab6e6d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
@@ -132,7 +132,8 @@ public class PhoenixAccessController extends BaseMetaDataEndpointObserver {
         this.accessCheckEnabled = conf.getBoolean(QueryServices.PHOENIX_ACLS_ENABLED,
                 QueryServicesOptions.DEFAULT_PHOENIX_ACLS_ENABLED);
         if (!this.accessCheckEnabled) {
-            LOGGER.warn("PhoenixAccessController has been loaded with authorization checks disabled.");
+            LOGGER.warn(
+                    "PhoenixAccessController has been loaded with authorization checks disabled.");
         }
         if (env instanceof PhoenixMetaDataControllerEnvironment) {
             this.env = (PhoenixMetaDataControllerEnvironment)env;
@@ -601,7 +602,8 @@ public class PhoenixAccessController extends BaseMetaDataEndpointObserver {
               }
             }
         } else if (LOGGER.isDebugEnabled()) {
-            LOGGER.debug("No permissions found for table=" + table + " or namespace=" + table.getNamespaceAsString());
+            LOGGER.debug("No permissions found for table=" +
+                    table + " or namespace=" + table.getNamespaceAsString());
         }
         return false;
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/TaskRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/TaskRegionObserver.java
index a6c5328..a48d82f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/TaskRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/TaskRegionObserver.java
@@ -36,26 +36,22 @@ import com.google.common.base.Strings;
 import com.google.common.collect.ImmutableMap;
 import com.google.gson.JsonObject;
 import com.google.gson.JsonParser;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.RegionObserver;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTable.TaskType;
-
 import org.apache.phoenix.schema.task.Task;
 import org.apache.phoenix.util.EnvironmentEdgeManager;
 import org.apache.phoenix.util.QueryUtil;
-
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Coprocessor for task related operations. This coprocessor would only be registered
@@ -63,7 +59,7 @@ import org.apache.phoenix.util.QueryUtil;
  */
 
 public class TaskRegionObserver implements RegionObserver, RegionCoprocessor {
-    public static final Log LOG = LogFactory.getLog(TaskRegionObserver.class);
+    public static final Logger LOGGER = LoggerFactory.getLogger(TaskRegionObserver.class);
 
     protected ScheduledThreadPoolExecutor executor = new ScheduledThreadPoolExecutor(TaskType.values().length);
     private long timeInterval = QueryServicesOptions.DEFAULT_TASK_HANDLING_INTERVAL_MS;
@@ -141,12 +137,6 @@ public class TaskRegionObserver implements RegionObserver, RegionCoprocessor {
     public void postOpen(ObserverContext<RegionCoprocessorEnvironment> e) {
         final RegionCoprocessorEnvironment env = e.getEnvironment();
 
-        // turn off verbose deprecation logging
-        Logger deprecationLogger = Logger.getLogger("org.apache.hadoop.conf.Configuration.deprecation");
-        if (deprecationLogger != null) {
-            deprecationLogger.setLevel(Level.WARN);
-        }
-
         SelfHealingTask task = new SelfHealingTask(e.getEnvironment(), timeMaxInterval);
         executor.scheduleWithFixedDelay(task, initialDelay, timeInterval, TimeUnit.MILLISECONDS);
     }
@@ -175,7 +165,7 @@ public class TaskRegionObserver implements RegionObserver, RegionCoprocessor {
                     try {
                         TaskType taskType = taskRecord.getTaskType();
                         if (!classMap.containsKey(taskType)) {
-                            LOG.warn("Don't know how to execute task type: " + taskType.name());
+                            LOGGER.warn("Don't know how to execute task type: " + taskType.name());
                             continue;
                         }
 
@@ -228,7 +218,7 @@ public class TaskRegionObserver implements RegionObserver, RegionCoprocessor {
 
                     }
                     catch (Throwable t) {
-                        LOG.warn("Exception while running self healingtask. " +
+                        LOGGER.warn("Exception while running self healingtask. " +
                                 "It will be retried in the next system task table scan : " +
                                 " taskType : " + taskRecord.getTaskType().name() +
                                 taskRecord.getSchemaName()  + "." + taskRecord.getTableName() +
@@ -237,13 +227,13 @@ public class TaskRegionObserver implements RegionObserver, RegionCoprocessor {
                     }
                 }
             } catch (Throwable t) {
-                LOG.error("SelfHealingTask failed!", t);
+                LOGGER.error("SelfHealingTask failed!", t);
             } finally {
                 if (connForTask != null) {
                     try {
                         connForTask.close();
                     } catch (SQLException ignored) {
-                        LOG.debug("SelfHealingTask can't close connection", ignored);
+                        LOGGER.debug("SelfHealingTask can't close connection", ignored);
                     }
                 }
             }
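
Both MetaDataRegionObserver and TaskRegionObserver above also delete the block that
programmatically quieted Hadoop's configuration-deprecation logger. The slf4j API
deliberately exposes no setLevel(); level control belongs to the backend. A minimal
sketch of the removed log4j 1.x call and, in a comment, the equivalent backend
setting (assuming a log4j 1.x binding remains on the classpath):

    public class DeprecationLoggerSketch {
        public static void quietDeprecationWarnings() {
            // The log4j 1.x call removed by this commit; not reachable
            // through the slf4j facade:
            org.apache.log4j.Logger
                    .getLogger("org.apache.hadoop.conf.Configuration.deprecation")
                    .setLevel(org.apache.log4j.Level.WARN);
            // Equivalent backend configuration, e.g. in log4j.properties:
            //   log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN
        }
    }
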
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/tasks/DropChildViewsTask.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/tasks/DropChildViewsTask.java
index e8d5aed..79d90f0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/tasks/DropChildViewsTask.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/tasks/DropChildViewsTask.java
@@ -61,7 +61,8 @@ public class DropChildViewsTask extends BaseTask {
                 return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.SUCCESS, "");
             } else if (System.currentTimeMillis() < timeMaxInterval + timestamp.getTime()) {
                 // skip this task as it has not been expired and its parent table has not been dropped yet
-                LOGGER.info("Skipping a child view drop task. The parent table has not been dropped yet : " +
+                LOGGER.info("Skipping a child view drop task. " +
+                        "The parent table has not been dropped yet : " +
                         taskRecord.getSchemaName() + "." + taskRecord.getTableName() +
                         " with tenant id " + (tenantId == null ? " IS NULL" : tenantId) +
                         " and timestamp " + timestamp.toString());
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/tasks/IndexRebuildTask.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/tasks/IndexRebuildTask.java
index d42b4be..ad3e5db 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/tasks/IndexRebuildTask.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/tasks/IndexRebuildTask.java
@@ -165,7 +165,8 @@ public class IndexRebuildTask extends BaseTask  {
 
             if (job != null && job.isComplete()) {
                 if (job.isSuccessful()) {
-                    LOGGER.warn("IndexRebuildTask checkCurrentResult job is successful " + taskRecord.getTableName());
+                    LOGGER.warn("IndexRebuildTask checkCurrentResult job is successful "
+                            + taskRecord.getTableName());
                     return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.SUCCESS, "");
                 } else {
                     return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.FAIL,
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
index d3ed744..3f10ad0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
@@ -358,12 +358,14 @@ public abstract class BaseQueryPlan implements QueryPlan {
         }
         
         if (LOGGER.isDebugEnabled()) {
-            LOGGER.debug(LogUtil.addCustomAnnotations("Scan ready for iteration: " + scan, connection));
+            LOGGER.debug(LogUtil.addCustomAnnotations(
+                    "Scan ready for iteration: " + scan, connection));
         }
         
         ResultIterator iterator =  newIterator(scanGrouper, scan, caches);
         if (LOGGER.isDebugEnabled()) {
-            LOGGER.debug(LogUtil.addCustomAnnotations("Iterator ready: " + iterator, connection));
+            LOGGER.debug(LogUtil.addCustomAnnotations(
+                    "Iterator ready: " + iterator, connection));
         }
 
         // wrap the iterator so we start/end tracing as we expect
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
index 2117d22..e8f761a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
@@ -553,7 +553,8 @@ public class HashJoinPlan extends DelegateQueryPlan {
                     } else {
                         cacheId = Bytes.toBytes(RANDOM.nextLong());
                     }
-                    LOGGER.debug("Using cache ID " + Hex.encodeHexString(cacheId) + " for " + queryString);
+                    LOGGER.debug("Using cache ID " + Hex.encodeHexString(cacheId) +
+                            " for " + queryString);
                     if (cache == null) {
                         LOGGER.debug("Making RPC to add cache " + Hex.encodeHexString(cacheId));
                         cache = parent.hashClient.addHashCache(ranges, cacheId, iterator,
@@ -566,7 +567,8 @@ public class HashJoinPlan extends DelegateQueryPlan {
                                 - parent.firstJobEndTime.get()) > parent.maxServerCacheTimeToLive) {
                             LOGGER.warn(addCustomAnnotations(
                                 "Hash plan [" + index
-                                        + "] execution seems too slow. Earlier hash cache(s) might have expired on servers.",
+                                        + "] execution seems too slow. Earlier" +
+                                        " hash cache(s) might have expired on servers.",
                                 parent.delegate.getContext().getConnection()));
                         }
                     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/CollationKeyFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/CollationKeyFunction.java
index b2fd132..f5cbdc4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/CollationKeyFunction.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/CollationKeyFunction.java
@@ -125,7 +125,8 @@ public class CollationKeyFunction extends ScalarFunction {
 		byte[] collationKeyByteArray = collator.getCollationKey(inputString).toByteArray();
 
 		if (LOGGER.isTraceEnabled()) {
-			LOGGER.trace("CollationKey bytes: " + VarBinaryFormatter.INSTANCE.format(collationKeyByteArray));
+			LOGGER.trace("CollationKey bytes: " +
+					VarBinaryFormatter.INSTANCE.format(collationKeyByteArray));
 		}
 
 		ptr.set(collationKeyByteArray);
@@ -167,7 +168,8 @@ public class CollationKeyFunction extends ScalarFunction {
 		}
 
 		if (LOGGER.isTraceEnabled()) {
-			LOGGER.trace(String.format("Collator: [strength: %d, decomposition: %d], Special-Upper-Case: %s",
+			LOGGER.trace(String.format(
+					"Collator: [strength: %d, decomposition: %d], Special-Upper-Case: %s",
 					collator.getStrength(), collator.getDecomposition(),
 					BooleanUtils.isTrue(useSpecialUpperCaseCollator)));
 		}
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
index 668d0a6..9e27768 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
@@ -322,7 +322,8 @@ public class Indexer implements RegionObserver, RegionCoprocessor {
           long duration = EnvironmentEdgeManager.currentTimeMillis() - start;
           if (duration >= slowIndexPrepareThreshold) {
               if (LOGGER.isDebugEnabled()) {
-                  LOGGER.debug(getCallTooSlowMessage("preIncrementAfterRowLock", duration, slowPreIncrementThreshold));
+                  LOGGER.debug(getCallTooSlowMessage("preIncrementAfterRowLock",
+                          duration, slowPreIncrementThreshold));
               }
               metricSource.incrementSlowDuplicateKeyCheckCalls();
           }
@@ -346,7 +347,8 @@ public class Indexer implements RegionObserver, RegionCoprocessor {
           long duration = EnvironmentEdgeManager.currentTimeMillis() - start;
           if (duration >= slowIndexPrepareThreshold) {
               if (LOGGER.isDebugEnabled()) {
-                  LOGGER.debug(getCallTooSlowMessage("preBatchMutate", duration, slowIndexPrepareThreshold));
+                  LOGGER.debug(getCallTooSlowMessage("preBatchMutate",
+                          duration, slowIndexPrepareThreshold));
               }
               metricSource.incrementNumSlowIndexPrepareCalls();
           }
@@ -498,7 +500,8 @@ public class Indexer implements RegionObserver, RegionCoprocessor {
           long duration = EnvironmentEdgeManager.currentTimeMillis() - start;
           if (duration >= slowIndexPrepareThreshold) {
               if (LOGGER.isDebugEnabled()) {
-                  LOGGER.debug(getCallTooSlowMessage("indexPrepare", duration, slowIndexPrepareThreshold));
+                  LOGGER.debug(getCallTooSlowMessage(
+                          "indexPrepare", duration, slowIndexPrepareThreshold));
               }
               metricSource.incrementNumSlowIndexPrepareCalls();
           }
@@ -569,7 +572,8 @@ public class Indexer implements RegionObserver, RegionCoprocessor {
            long duration = EnvironmentEdgeManager.currentTimeMillis() - start;
            if (duration >= slowIndexWriteThreshold) {
                if (LOGGER.isDebugEnabled()) {
-                   LOGGER.debug(getCallTooSlowMessage("postBatchMutateIndispensably", duration, slowIndexWriteThreshold));
+                   LOGGER.debug(getCallTooSlowMessage("postBatchMutateIndispensably",
+                           duration, slowIndexWriteThreshold));
                }
                metricSource.incrementNumSlowIndexWriteCalls();
            }
@@ -609,7 +613,8 @@ public class Indexer implements RegionObserver, RegionCoprocessor {
           long duration = EnvironmentEdgeManager.currentTimeMillis() - start;
           if (duration >= slowIndexWriteThreshold) {
               if (LOGGER.isDebugEnabled()) {
-                  LOGGER.debug(getCallTooSlowMessage("indexWrite", duration, slowIndexWriteThreshold));
+                  LOGGER.debug(getCallTooSlowMessage("indexWrite",
+                          duration, slowIndexWriteThreshold));
               }
               metricSource.incrementNumSlowIndexWriteCalls();
           }
@@ -717,7 +722,8 @@ public class Indexer implements RegionObserver, RegionCoprocessor {
           long duration = EnvironmentEdgeManager.currentTimeMillis() - start;
           if (duration >= slowPreWALRestoreThreshold) {
               if (LOGGER.isDebugEnabled()) {
-                  LOGGER.debug(getCallTooSlowMessage("preWALRestore", duration, slowPreWALRestoreThreshold));
+                  LOGGER.debug(getCallTooSlowMessage("preWALRestore",
+                          duration, slowPreWALRestoreThreshold));
               }
               metricSource.incrementNumSlowPreWALRestoreCalls();
           }
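Every Indexer hunk above rewraps the same slow-call pattern: time the operation, and only when it exceeds its threshold build the "call too slow" message under a debug guard and bump the corresponding metric. A minimal sketch of that pattern, with System.currentTimeMillis() standing in for Phoenix's EnvironmentEdgeManager and an illustrative threshold value and operation name:

    long slowIndexPrepareThreshold = 3_000; // ms; illustrative value
    long start = System.currentTimeMillis();
    try {
        doIndexPrepare(); // hypothetical operation being timed
    } finally {
        long duration = System.currentTimeMillis() - start;
        if (duration >= slowIndexPrepareThreshold) {
            // The guard keeps message concatenation off the common path.
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug("indexPrepare took " + duration + " ms, threshold "
                        + slowIndexPrepareThreshold + " ms");
            }
            metricSource.incrementNumSlowIndexPrepareCalls(); // as in the hunks above
        }
    }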
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
index a39782e..a027f54 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
@@ -202,7 +202,8 @@ public class IndexManagementUtil {
             throw e1;
         }
         catch (Throwable e1) {
-            LOGGER.info("Rethrowing " + e1 + " as a " + IndexBuildingFailureException.class.getSimpleName());
+            LOGGER.info("Rethrowing " + e1 + " as a " +
+                    IndexBuildingFailureException.class.getSimpleName());
             throw new IndexBuildingFailureException("Failed to build index for unexpected reason!", e1);
         }
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
index 04152a3..cd7c483 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
@@ -18,7 +18,6 @@ import org.apache.phoenix.hbase.index.table.HTableInterfaceReference;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-
 import com.google.common.collect.Multimap;
 
 /**
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java
index d7ffd33..eb6194c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java
@@ -114,7 +114,8 @@ public class RecoveryIndexWriter extends IndexWriter {
             ImmutableBytesPtr ptr = new ImmutableBytesPtr(tableName);
             HTableInterfaceReference table = tables.get(ptr);
             if (nonExistingTablesList.contains(table)) {
-                LOGGER.debug("Edits found for non existing table: " + table.getTableName() + " so skipping it!!");
+                LOGGER.debug("Edits found for non existing table: " +
+                        table.getTableName() + " so skipping it!!");
                 continue;
             }
             if (table == null) {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
index 9cf7d96..66e4250 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
@@ -282,25 +282,29 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
                         MetaDataMutationResult result = IndexUtil.updateIndexState(indexTableName, minTimeStamp,
                                 systemTable, newState);
                         if (result.getMutationCode() == MutationCode.TABLE_NOT_FOUND) {
-                            LOGGER.info("Index " + indexTableName + " has been dropped. Ignore uncommitted mutations");
+                            LOGGER.info("Index " + indexTableName +
+                                    " has been dropped. Ignore uncommitted mutations");
                             continue;
                         }
                         if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) {
                             if (leaveIndexActive) {
-                                LOGGER.warn("Attempt to update INDEX_DISABLE_TIMESTAMP " + " failed with code = "
+                                LOGGER.warn("Attempt to update INDEX_DISABLE_TIMESTAMP "
+                                        + " failed with code = "
                                         + result.getMutationCode());
                                 // If we're not disabling the index, then we don't want to throw as throwing
                                 // will lead to the RS being shutdown.
                                 if (blockDataTableWritesOnFailure) { throw new DoNotRetryIOException(
                                         "Attempt to update INDEX_DISABLE_TIMESTAMP failed."); }
                             } else {
-                                LOGGER.warn("Attempt to disable index " + indexTableName + " failed with code = "
-                                        + result.getMutationCode() + ". Will use default failure policy instead.");
+                                LOGGER.warn("Attempt to disable index " + indexTableName +
+                                        " failed with code = " + result.getMutationCode() +
+                                        ". Will use default failure policy instead.");
                                 throw new DoNotRetryIOException("Attempt to disable " + indexTableName + " failed.");
                             }
                         }
-                        LOGGER.info("Successfully update INDEX_DISABLE_TIMESTAMP for " + indexTableName
-                            + " due to an exception while writing updates. indexState=" + newState,
+                        LOGGER.info("Successfully update INDEX_DISABLE_TIMESTAMP for " +
+                                        indexTableName + " due to an exception while" +
+                                        " writing updates. indexState=" + newState,
                         cause);
                     } catch (Throwable t) {
                         if (t instanceof Exception) {
@@ -351,7 +355,8 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
                                         mutation.getRow().length - offset));
                 String indexTableName = localIndexNames.get(new ImmutableBytesWritable(viewId));
                 if (indexTableName == null) {
-                    LOGGER.error("Unable to find local index on " + ref.getTableName() + " with viewID of " + Bytes.toStringBinary(viewId));
+                    LOGGER.error("Unable to find local index on " + ref.getTableName() +
+                            " with viewID of " + Bytes.toStringBinary(viewId));
                 } else {
                     indexTableNames.add(indexTableName);
                 }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java
index 0ed9ecd..5412e52 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java
@@ -67,7 +67,6 @@ public abstract class PhoenixEmbeddedDriver implements Driver, SQLCloseable {
     /**
      * The protocol for Phoenix Network Client 
      */ 
-    private static final org.slf4j.Logger LOGGER = LoggerFactory.getLogger(PhoenixEmbeddedDriver.class);
     private final static String DNC_JDBC_PROTOCOL_SUFFIX = "//";
     private final static String DRIVER_NAME = "PhoenixEmbeddedDriver";
     private static final String TERMINATOR = "" + PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR;
@@ -634,8 +633,8 @@ public abstract class PhoenixEmbeddedDriver implements Driver, SQLCloseable {
                 throw getMalFormedUrlException(url);
             }
             String znodeParent = config.get(HConstants.ZOOKEEPER_ZNODE_PARENT);
-            LOGGER.debug("Getting default jdbc connection url " + quorum + ":" +
-                    port + ":" + znodeParent);
+            LOGGER.debug("Getting default jdbc connection url "
+                    + quorum + ":" + port + ":" + znodeParent);
             return new ConnectionInfo(quorum, port, znodeParent);
         }
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
index 5580672..0df11ae 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
@@ -313,7 +313,8 @@ public class PhoenixStatement implements Statement, SQLCloseable {
                         ResultIterator resultIterator = plan.iterator();
                         if (LOGGER.isDebugEnabled()) {
                             String explainPlan = QueryUtil.getExplainPlan(resultIterator);
-                            LOGGER.debug(LogUtil.addCustomAnnotations("Explain plan: " + explainPlan, connection));
+                            LOGGER.debug(LogUtil.addCustomAnnotations(
+                                    "Explain plan: " + explainPlan, connection));
                         }
                         StatementContext context = plan.getContext();
                         context.setQueryLogger(queryLogger);
@@ -339,7 +340,8 @@ public class PhoenixStatement implements Statement, SQLCloseable {
                     catch (MetaDataEntityNotFoundException e) {
                         if(doRetryOnMetaNotFoundError && e.getTableName()!=null){
                             if(LOGGER.isDebugEnabled())
-                                LOGGER.debug("Reloading table "+ e.getTableName()+" data from server");
+                                LOGGER.debug("Reloading table "
+                                        + e.getTableName()+" data from server");
                             if(new MetaDataClient(connection).updateCache(connection.getTenantId(),
                                 e.getSchemaName(), e.getTableName(), true).wasUpdated()){
                                 //TODO we can log retry count and error for debugging in LOG table
@@ -425,7 +427,8 @@ public class PhoenixStatement implements Statement, SQLCloseable {
                             catch (MetaDataEntityNotFoundException e) {
                                 if(doRetryOnMetaNotFoundError && e.getTableName()!=null){
                                     if(LOGGER.isDebugEnabled())
-                                        LOGGER.debug("Reloading table "+ e.getTableName()+" data from server");
+                                        LOGGER.debug("Reloading table "+ e.getTableName()
+                                                +" data from server");
                                     if(new MetaDataClient(connection).updateCache(connection.getTenantId(),
                                         e.getSchemaName(), e.getTableName(), true).wasUpdated()){
                                         return executeMutation(stmt, false);
@@ -1797,7 +1800,8 @@ public class PhoenixStatement implements Statement, SQLCloseable {
     @Override
     public ResultSet executeQuery(String sql) throws SQLException {
         if (LOGGER.isDebugEnabled()) {
-            LOGGER.debug(LogUtil.addCustomAnnotations("Execute query: " + sql, connection));
+            LOGGER.debug(LogUtil.addCustomAnnotations(
+                    "Execute query: " + sql, connection));
         }
         
         CompilableStatement stmt = parseStatement(sql);
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/log/QueryLoggerDisruptor.java b/phoenix-core/src/main/java/org/apache/phoenix/log/QueryLoggerDisruptor.java
index c4f227a..1d3ebc9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/log/QueryLoggerDisruptor.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/log/QueryLoggerDisruptor.java
@@ -76,8 +76,9 @@ public class QueryLoggerDisruptor implements Closeable{
 
         final QueryLogDetailsEventHandler[] handlers = { new QueryLogDetailsEventHandler(configuration) };
         disruptor.handleEventsWith(handlers);
-        LOGGER.info("Starting  QueryLoggerDisruptor for with ringbufferSize=" + disruptor.getRingBuffer().getBufferSize()
-                + ", waitStrategy=" + waitStrategy.getClass().getSimpleName() + ", " + "exceptionHandler="
+        LOGGER.info("Starting  QueryLoggerDisruptor for with ringbufferSize=" +
+                disruptor.getRingBuffer().getBufferSize() + ", waitStrategy=" +
+                waitStrategy.getClass().getSimpleName() + ", " + "exceptionHandler="
                 + errorHandler + "...");
         disruptor.start();
         
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/OrphanViewTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/OrphanViewTool.java
index 7334afc..3e999c1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/OrphanViewTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/OrphanViewTool.java
@@ -884,7 +884,8 @@ public class OrphanViewTool extends Configured implements Tool {
             }
             return 0;
         } catch (Exception ex) {
-            LOGGER.error("Orphan View Tool : An exception occurred " + ExceptionUtils.getMessage(ex) + " at:\n" +
+            LOGGER.error("Orphan View Tool : An exception occurred "
+                    + ExceptionUtils.getMessage(ex) + " at:\n" +
                     ExceptionUtils.getStackTrace(ex));
             return -1;
         } finally {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordReader.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordReader.java
index 54d2750..87f848f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordReader.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordReader.java
@@ -113,7 +113,8 @@ public class PhoenixRecordReader<T extends DBWritable> extends RecordReader<Null
         final PhoenixInputSplit pSplit = (PhoenixInputSplit)split;
         final List<Scan> scans = pSplit.getScans();
         try {
-            LOGGER.info("Generating iterators for " + scans.size() + " scans in keyrange: " + pSplit.getKeyRange());
+            LOGGER.info("Generating iterators for " + scans.size() + " scans in keyrange: "
+                    + pSplit.getKeyRange());
             List<PeekingResultIterator> iterators = Lists.newArrayListWithExpectedSize(scans.size());
             StatementContext ctx = queryPlan.getContext();
             ReadMetricQueue readMetrics = ctx.getReadMetricsQueue();
@@ -161,7 +162,8 @@ public class PhoenixRecordReader<T extends DBWritable> extends RecordReader<Null
 
             this.resultSet = new PhoenixResultSet(this.resultIterator, queryPlan.getProjector().cloneIfNecessary(), queryPlan.getContext());
         } catch (SQLException e) {
-            LOGGER.error(String.format(" Error [%s] initializing PhoenixRecordReader. ",e.getMessage()));
+            LOGGER.error(String.format(" Error [%s] initializing PhoenixRecordReader. ",
+                    e.getMessage()));
             Throwables.propagate(e);
         }
    }
@@ -182,7 +184,8 @@ public class PhoenixRecordReader<T extends DBWritable> extends RecordReader<Null
             value.readFields(resultSet);
             return true;
         } catch (SQLException e) {
-            LOGGER.error(String.format(" Error [%s] occurred while iterating over the resultset. ",e.getMessage()));
+            LOGGER.error(String.format(" Error [%s] occurred while iterating over the resultset. ",
+                    e.getMessage()));
             throw new RuntimeException(e);
         }
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
index baae79c..48cf095 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
@@ -665,7 +665,9 @@ public class IndexTool extends Configured implements Tool {
                     int autosplitNumRegions = nOpt == null ? DEFAULT_AUTOSPLIT_NUM_REGIONS : Integer.parseInt(nOpt);
                     String rateOpt = cmdLine.getOptionValue(SPLIT_INDEX_OPTION.getOpt());
                     double samplingRate = rateOpt == null ? DEFAULT_SPLIT_SAMPLING_RATE : Double.parseDouble(rateOpt);
-                    LOGGER.info(String.format("Will split index %s , autosplit=%s , autoSplitNumRegions=%s , samplingRate=%s", indexTable, autosplit, autosplitNumRegions, samplingRate));
+                    LOGGER.info(String.format("Will split index %s , autosplit=%s ," +
+                            " autoSplitNumRegions=%s , samplingRate=%s", indexTable,
+                            autosplit, autosplitNumRegions, samplingRate));
                     splitIndexTable(connection.unwrap(PhoenixConnection.class), autosplit, autosplitNumRegions, samplingRate, configuration);
                 }
             }
@@ -686,7 +688,8 @@ public class IndexTool extends Configured implements Tool {
                 job.submit();
                 return 0;
             }
-            LOGGER.info("Running Index Build in Foreground. Waits for the build to complete. This may take a long time!.");
+            LOGGER.info("Running Index Build in Foreground. Waits for the build to complete." +
+                    " This may take a long time!.");
             boolean result = job.waitForCompletion(true);
             
             if (result) {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectReducer.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectReducer.java
index 0813620..5424407 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectReducer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectReducer.java
@@ -49,7 +49,8 @@ import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.MAPREDU
 public class PhoenixIndexImportDirectReducer extends
         Reducer<ImmutableBytesWritable, IntWritable, NullWritable, NullWritable> {
 
-    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixIndexImportDirectReducer.class);
+    private static final Logger LOGGER =
+            LoggerFactory.getLogger(PhoenixIndexImportDirectReducer.class);
 
     @Override
     protected void cleanup(Context context) throws IOException, InterruptedException{
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexPartialBuildMapper.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexPartialBuildMapper.java
index aa7ea95..6b693a6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexPartialBuildMapper.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexPartialBuildMapper.java
@@ -59,7 +59,8 @@ import com.google.common.collect.Lists;
  */
 public class PhoenixIndexPartialBuildMapper extends TableMapper<ImmutableBytesWritable, IntWritable> {
 
-    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixIndexPartialBuildMapper.class);
+    private static final Logger LOGGER =
+            LoggerFactory.getLogger(PhoenixIndexPartialBuildMapper.class);
 
     private PhoenixConnection connection;
 
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixServerBuildIndexMapper.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixServerBuildIndexMapper.java
index 0544d02..35173bc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixServerBuildIndexMapper.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixServerBuildIndexMapper.java
@@ -43,8 +43,6 @@ import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.util.ColumnInfo;
 import org.apache.phoenix.util.PhoenixRuntime;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * Mapper that does not do much as regions servers actually build the index from the data table regions directly
@@ -52,8 +50,6 @@ import org.slf4j.LoggerFactory;
 public class PhoenixServerBuildIndexMapper extends
         Mapper<NullWritable, PhoenixIndexDBWritable, ImmutableBytesWritable, IntWritable> {
 
-    private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixServerBuildIndexMapper.class);
-
     @Override
     protected void setup(final Context context) throws IOException, InterruptedException {
         super.setup(context);
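The two hunks above merely rewrap per-class logger declarations, while this one removes a logger that was never used. For reference, a sketch of the standard slf4j per-class declaration that the rest of this commit settles on (the class name here is illustrative):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class SomeIndexMapper {
        // One static logger per class, named after the class itself.
        private static final Logger LOGGER =
                LoggerFactory.getLogger(SomeIndexMapper.class);
    }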
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/automation/PhoenixMRJobSubmitter.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/automation/PhoenixMRJobSubmitter.java
index e00d6db..7433189 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/automation/PhoenixMRJobSubmitter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/automation/PhoenixMRJobSubmitter.java
@@ -237,7 +237,8 @@ public class PhoenixMRJobSubmitter {
 
         if (!ZKBasedMasterElectionUtil.acquireLock(zookeeperWatcher, PHOENIX_LOCKS_PARENT,
             AUTO_INDEX_BUILD_LOCK_NAME)) {
-            LOGGER.info("Some other node is already running Automated Index Build. Skipping execution!");
+            LOGGER.info("Some other node is already running Automated Index Build." +
+                    " Skipping execution!");
             return -1;
         }
         // 1) Query Phoenix SYSTEM.CATALOG table to get a list of all candidate indexes to be built
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalMetricRegistriesAdapter.java b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalMetricRegistriesAdapter.java
index 111a9df..0f1c8c2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalMetricRegistriesAdapter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalMetricRegistriesAdapter.java
@@ -48,7 +48,8 @@ import org.slf4j.LoggerFactory;
  */
 public class GlobalMetricRegistriesAdapter {
 
-    private static final Logger LOGGER = LoggerFactory.getLogger(GlobalMetricRegistriesAdapter.class);
+    private static final Logger LOGGER =
+            LoggerFactory.getLogger(GlobalMetricRegistriesAdapter.class);
     private static GlobalMetricRegistriesAdapter INSTANCE = new GlobalMetricRegistriesAdapter();
 
     private GlobalMetricRegistriesAdapter() {
@@ -86,7 +87,8 @@ public class GlobalMetricRegistriesAdapter {
 
         private void registerToDefaultMetricsSystem() {
             MetricRegistryInfo info = registry.getMetricRegistryInfo();
-            LOGGER.info("Registering " + info.getMetricsJmxContext() + " " + info.getMetricsDescription() + " into DefaultMetricsSystem");
+            LOGGER.info("Registering " + info.getMetricsJmxContext() +
+                    " " + info.getMetricsDescription() + " into DefaultMetricsSystem");
             DefaultMetricsSystem.instance().register(info.getMetricsJmxContext(), info.getMetricsDescription(), this);
         }
 
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 27fb417..77b541a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -288,7 +288,8 @@ import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
 
 public class ConnectionQueryServicesImpl extends DelegateQueryServices implements ConnectionQueryServices {
-    private static final Logger LOGGER = LoggerFactory.getLogger(ConnectionQueryServicesImpl.class);
+    private static final Logger LOGGER =
+            LoggerFactory.getLogger(ConnectionQueryServicesImpl.class);
     private static final int INITIAL_CHILD_SERVICES_CAPACITY = 100;
     private static final int DEFAULT_OUT_OF_ORDER_MUTATIONS_WAIT_TIME_MS = 1000;
     private static final int TTL_FOR_MUTEX = 15 * 60; // 15min
@@ -450,7 +451,8 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
         try {
             this.connection = HBaseFactoryProvider.getHConnectionFactory().createConnection(this.config);
             GLOBAL_HCONNECTIONS_COUNTER.increment();
-            LOGGER.info("HConnection established. Stacktrace for informational purposes: " + connection + " " +  LogUtil.getCallerStackTrace());
+            LOGGER.info("HConnection established. Stacktrace for informational purposes: "
+                    + connection + " " + LogUtil.getCallerStackTrace());
         } catch (IOException e) {
             throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ESTABLISH_CONNECTION)
             .setRootCause(e).build().buildException();
@@ -685,7 +687,9 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                             mutator.mutate(metaData);
                             break;
                         } else if (table.getSequenceNumber() >= tableSeqNum) {
-                            LOGGER.warn("Attempt to cache older version of " + tableName + ": current= " + table.getSequenceNumber() + ", new=" + tableSeqNum);
+                            LOGGER.warn("Attempt to cache older version of " + tableName +
+                                    ": current= " + table.getSequenceNumber() +
+                                    ", new=" + tableSeqNum);
                             break;
                         }
                     } catch (TableNotFoundException e) {
@@ -694,7 +698,9 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                     // We waited long enough - just remove the table from the cache
                     // and the next time it's used it'll be pulled over from the server.
                     if (waitTime <= 0) {
-                        LOGGER.warn("Unable to update meta data repo within " + (DEFAULT_OUT_OF_ORDER_MUTATIONS_WAIT_TIME_MS/1000) + " seconds for " + tableName);
+                        LOGGER.warn("Unable to update meta data repo within " +
+                                (DEFAULT_OUT_OF_ORDER_MUTATIONS_WAIT_TIME_MS/1000) +
+                                " seconds for " + tableName);
                         // There will never be a parentTableName here, as that would only
                         // be non null for an index an we never add/remove columns from an index.
                         metaData.removeTable(tenantId, tableName, null, HConstants.LATEST_TIMESTAMP);
@@ -2770,7 +2776,8 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
             stmt.executeUpdate();
             metaConnection.commit();
         } catch (NewerTableAlreadyExistsException e) {
-            LOGGER.warn("Table already modified at this timestamp, so assuming column already nullable: " + columnName);
+            LOGGER.warn("Table already modified at this timestamp," +
+                    " so assuming column already nullable: " + columnName);
         } catch (SQLException e) {
             LOGGER.warn("Add column failed due to:" + e);
             sqlE = e;
@@ -2802,7 +2809,8 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
         try {
             metaConnection.createStatement().executeUpdate("ALTER TABLE " + tableName + " ADD " + (addIfNotExists ? " IF NOT EXISTS " : "") + columns );
         } catch (NewerTableAlreadyExistsException e) {
-            LOGGER.warn("Table already modified at this timestamp, so assuming add of these columns already done: " + columns);
+            LOGGER.warn("Table already modified at this timestamp," +
+                    " so assuming add of these columns already done: " + columns);
         } catch (SQLException e) {
             LOGGER.warn("Add column failed due to:" + e);
             sqlE = e;
@@ -2953,7 +2961,8 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                                     if (inspectIfAnyExceptionInChain(e, Collections
                                             .<Class<? extends Exception>> singletonList(AccessDeniedException.class))) {
                                         // Pass
-                                        LOGGER.warn("Could not check for Phoenix SYSTEM tables, assuming they exist and are properly configured");
+                                        LOGGER.warn("Could not check for Phoenix SYSTEM tables," +
+                                                " assuming they exist and are properly configured");
                                         checkClientServerCompatibility(SchemaUtil.getPhysicalName(SYSTEM_CATALOG_NAME_BYTES, getProps()).getName());
                                         success = true;
                                     } else if (inspectIfAnyExceptionInChain(e,
@@ -3062,7 +3071,8 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                 // Ignore TableExistsException as another client might beat us during upgrade.
                 // Ignore AccessDeniedException, as it may be possible underpriviliged user trying to use the connection
                 // which doesn't required upgrade.
-                LOGGER.debug("Ignoring exception while creating mutex table during connection initialization: "
+                LOGGER.debug("Ignoring exception while creating mutex table" +
+                        " during connection initialization: "
                         + Throwables.getStackTraceAsString(e));
             } else {
                 throw e;
@@ -3244,14 +3254,17 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                 List<String> tablesNeedingUpgrade = UpgradeUtil
                   .getPhysicalTablesWithDescRowKey(conn);
                 if (!tablesNeedingUpgrade.isEmpty()) {
-                    LOGGER.warn("The following tables require upgrade due to a bug causing the row key to be incorrect for descending columns and ascending BINARY columns (PHOENIX-2067 and PHOENIX-2120):\n"
+                    LOGGER.warn("The following tables require upgrade due to a bug " +
+                            "causing the row key to be incorrect for descending columns " +
+                            "and ascending BINARY columns (PHOENIX-2067 and PHOENIX-2120):\n"
                       + Joiner.on(' ').join(tablesNeedingUpgrade)
                       + "\nTo upgrade issue the \"bin/psql.py -u\" command.");
                 }
                 List<String> unsupportedTables = UpgradeUtil
                   .getPhysicalTablesWithDescVarbinaryRowKey(conn);
                 if (!unsupportedTables.isEmpty()) {
-                    LOGGER.warn("The following tables use an unsupported VARBINARY DESC construct and need to be changed:\n"
+                    LOGGER.warn("The following tables use an unsupported " +
+                            "VARBINARY DESC construct and need to be changed:\n"
                       + Joiner.on(' ').join(unsupportedTables));
                 }
             } catch (Exception ex) {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
index 58b09bf..2b1cfd6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
@@ -66,7 +66,8 @@ import com.google.common.collect.Maps;
  */
 public class DefaultStatisticsCollector implements StatisticsCollector {
 
-    private static final Logger LOGGER = LoggerFactory.getLogger(DefaultStatisticsCollector.class);
+    private static final Logger LOGGER =
+            LoggerFactory.getLogger(DefaultStatisticsCollector.class);
     
     final Map<ImmutableBytesPtr, Pair<Long, GuidePostsInfoBuilder>> guidePostsInfoWriterMap = Maps.newHashMap();
     private final Table htable;
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java
index 5d53c3b..bdc66ba 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java
@@ -150,24 +150,28 @@ public class StatisticsScanner implements InternalScanner {
                 ArrayList<Mutation> mutations = new ArrayList<Mutation>();
 
                 if (LOGGER.isDebugEnabled()) {
-                    LOGGER.debug("Deleting the stats for the region " + regionInfo.getRegionNameAsString()
+                    LOGGER.debug("Deleting the stats for the region "
+                            + regionInfo.getRegionNameAsString()
                             + " as part of major compaction");
                 }
                 getStatisticsWriter().deleteStatsForRegion(region, tracker, family, mutations);
                 if (LOGGER.isDebugEnabled()) {
-                    LOGGER.debug("Adding new stats for the region " + regionInfo.getRegionNameAsString()
+                    LOGGER.debug("Adding new stats for the region " +
+                            regionInfo.getRegionNameAsString()
                             + " as part of major compaction");
                 }
                 getStatisticsWriter().addStats(tracker, family,
                         mutations, tracker.getGuidePostDepth());
                 if (LOGGER.isDebugEnabled()) {
-                    LOGGER.debug("Committing new stats for the region " + regionInfo.getRegionNameAsString()
+                    LOGGER.debug("Committing new stats for the region " +
+                            regionInfo.getRegionNameAsString()
                             + " as part of major compaction");
                 }
                 getStatisticsWriter().commitStats(mutations, tracker);
             } catch (IOException e) {
                 if (isConnectionClosed()) {
-                    LOGGER.debug("Ignoring error updating statistics because region is closing/closed");
+                    LOGGER.debug(
+                            "Ignoring error updating statistics because region is closing/closed");
                 } else {
                     LOGGER.error("Failed to update statistics table!", e);
                     toThrow = e;
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceReader.java b/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceReader.java
index 88cc642..5d8d844 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceReader.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceReader.java
@@ -222,8 +222,8 @@ public class TraceReader {
             }
         }
         if (cols.size() < count) {
-            LOGGER.error(addCustomAnnotations("Missing tags! Expected " + count + ", but only got " + cols.size()
-                    + " tags from rquest " + request));
+            LOGGER.error(addCustomAnnotations("Missing tags! Expected " + count +
+                    ", but only got " + cols.size() + " tags from rquest " + request));
         }
         return cols;
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/EquiDepthStreamHistogram.java b/phoenix-core/src/main/java/org/apache/phoenix/util/EquiDepthStreamHistogram.java
index d042fac..9cadab9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/EquiDepthStreamHistogram.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/EquiDepthStreamHistogram.java
@@ -176,7 +176,8 @@ public class EquiDepthStreamHistogram {
             smallerBar.incrementCount(countToDistribute);
         }
         if (LOGGER.isTraceEnabled()) {
-            LOGGER.trace(String.format("Split orig=%s , newLeft=%s , newRight=%s", origBar, newLeft, newRight));
+            LOGGER.trace(String.format("Split orig=%s , newLeft=%s , newRight=%s",
+                    origBar, newLeft, newRight));
         }
         bars.remove(origBar);
         bars.add(newLeft);