Posted to commits@hive.apache.org by ha...@apache.org on 2015/10/28 16:45:54 UTC

[06/14] hive git commit: HIVE-12237 : Use slf4j as logging facade

http://git-wip-us.apache.org/repos/asf/hive/blob/55337444/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/EmbeddedLockManager.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/EmbeddedLockManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/EmbeddedLockManager.java
index 7d7e7c0..20e1147 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/EmbeddedLockManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/EmbeddedLockManager.java
@@ -18,8 +18,8 @@
 
 package org.apache.hadoop.hive.ql.lockmgr;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.lockmgr.HiveLockObject.HiveLockObjectData;
 import org.apache.hadoop.hive.ql.metadata.*;
@@ -33,7 +33,7 @@ import java.util.concurrent.locks.ReentrantLock;
  */
 public class EmbeddedLockManager implements HiveLockManager {
 
-  private static final Log LOG = LogFactory.getLog("EmbeddedHiveLockManager");
+  private static final Logger LOG = LoggerFactory.getLogger("EmbeddedHiveLockManager");
 
   private final Node root = new Node();
 
@@ -46,41 +46,50 @@ public class EmbeddedLockManager implements HiveLockManager {
   public EmbeddedLockManager() {
   }
 
+  @Override
   public void setContext(HiveLockManagerCtx ctx) throws LockException {
     this.ctx = ctx;
     refresh();
   }
 
+  @Override
   public HiveLock lock(HiveLockObject key, HiveLockMode mode, boolean keepAlive)
       throws LockException {
     return lock(key, mode, numRetriesForLock, sleepTime);
   }
 
+  @Override
   public List<HiveLock> lock(List<HiveLockObj> objs, boolean keepAlive) throws LockException {
     return lock(objs, numRetriesForLock, sleepTime);
   }
 
+  @Override
   public void unlock(HiveLock hiveLock) throws LockException {
     unlock(hiveLock, numRetriesForUnLock, sleepTime);
   }
 
+  @Override
   public void releaseLocks(List<HiveLock> hiveLocks) {
     releaseLocks(hiveLocks, numRetriesForUnLock, sleepTime);
   }
 
+  @Override
   public List<HiveLock> getLocks(boolean verifyTablePartitions, boolean fetchData)
       throws LockException {
     return getLocks(verifyTablePartitions, fetchData, ctx.getConf());
   }
 
+  @Override
   public List<HiveLock> getLocks(HiveLockObject key, boolean verifyTablePartitions,
       boolean fetchData) throws LockException {
     return getLocks(key, verifyTablePartitions, fetchData, ctx.getConf());
   }
 
+  @Override
   public void prepareRetry() {
   }
 
+  @Override
   public void refresh() {
     HiveConf conf = ctx.getConf();
     sleepTime = conf.getTimeVar(
@@ -149,6 +158,7 @@ public class EmbeddedLockManager implements HiveLockManager {
 
   private void sortLocks(List<HiveLockObj> objs) {
     Collections.sort(objs, new Comparator<HiveLockObj>() {
+      @Override
       public int compare(HiveLockObj o1, HiveLockObj o2) {
         int cmp = o1.getName().compareTo(o2.getName());
         if (cmp == 0) {
@@ -186,7 +196,7 @@ public class EmbeddedLockManager implements HiveLockManager {
       try {
         unlock(locked, numRetriesForUnLock, sleepTime);
       } catch (LockException e) {
-        LOG.info(e);
+        LOG.info("Failed to unlock ", e);
       }
     }
   }
@@ -242,6 +252,7 @@ public class EmbeddedLockManager implements HiveLockManager {
     }
   }
 
+  @Override
   public void close() {
     root.lock.lock();
     try {

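The EmbeddedLockManager hunk above shows the whole migration pattern: the commons-logging imports and LogFactory.getLog(...) calls become org.slf4j Logger/LoggerFactory, and missing @Override annotations are added while the file is touched. One API difference forces a real code change: commons-logging accepts a bare Throwable (LOG.info(e)), while every slf4j logging method takes a message String first, which is why LOG.info(e) becomes LOG.info("Failed to unlock ", e). A minimal sketch of the resulting style (the class below is illustrative, not from this commit):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class Slf4jMigrationSketch {
      // Same named-logger style as the diff: LoggerFactory.getLogger(String)
      private static final Logger LOG = LoggerFactory.getLogger("EmbeddedHiveLockManager");

      void release(AutoCloseable lock) {
        try {
          lock.close();
        } catch (Exception e) {
          // commons-logging allowed LOG.info(e); slf4j requires a message first
          LOG.info("Failed to unlock", e);
        }
      }
    }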
http://git-wip-us.apache.org/repos/asf/hive/blob/55337444/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/zookeeper/CuratorFrameworkSingleton.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/zookeeper/CuratorFrameworkSingleton.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/zookeeper/CuratorFrameworkSingleton.java
index fbf2a01..6482f3b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/zookeeper/CuratorFrameworkSingleton.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/zookeeper/CuratorFrameworkSingleton.java
@@ -20,8 +20,8 @@ package org.apache.hadoop.hive.ql.lockmgr.zookeeper;
 
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.curator.framework.CuratorFramework;
 import org.apache.curator.framework.CuratorFrameworkFactory;
 import org.apache.curator.retry.ExponentialBackoffRetry;
@@ -31,7 +31,7 @@ import org.apache.hadoop.hive.ql.util.ZooKeeperHiveHelper;
 public class CuratorFrameworkSingleton {
   private static HiveConf conf = null;
   private static CuratorFramework sharedClient = null;
-  static final Log LOG = LogFactory.getLog("CuratorFrameworkSingleton");
+  static final Logger LOG = LoggerFactory.getLogger("CuratorFrameworkSingleton");
   static {
     // Add shutdown hook.
     Runtime.getRuntime().addShutdownHook(new Thread() {

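CuratorFrameworkSingleton keeps a string-named logger, which slf4j supports directly. Note that LoggerFactory.getLogger(String) and LoggerFactory.getLogger(Class) are not interchangeable for logger configuration: the class form names the logger by the fully qualified class name, while the string here yields the bare name "CuratorFrameworkSingleton". A small illustration (hypothetical class, not from this commit):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class LoggerNamingSketch {
      // Logger named "CuratorFrameworkSingleton", as in the diff above
      static final Logger BY_NAME = LoggerFactory.getLogger("CuratorFrameworkSingleton");

      // Logger named by the fully qualified class name, e.g.
      // "org.apache.hadoop.hive.ql.lockmgr.zookeeper.CuratorFrameworkSingleton" in Hive
      static final Logger BY_CLASS = LoggerFactory.getLogger(LoggerNamingSketch.class);
    }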
http://git-wip-us.apache.org/repos/asf/hive/blob/55337444/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/zookeeper/ZooKeeperHiveLockManager.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/zookeeper/ZooKeeperHiveLockManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/zookeeper/ZooKeeperHiveLockManager.java
index 7c7a8d1..e10061b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/zookeeper/ZooKeeperHiveLockManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/zookeeper/ZooKeeperHiveLockManager.java
@@ -20,8 +20,6 @@ package org.apache.hadoop.hive.ql.lockmgr.zookeeper;
 
 import com.google.common.annotations.VisibleForTesting;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.common.metrics.common.Metrics;
 import org.apache.hadoop.hive.common.metrics.common.MetricsConstant;
 import org.apache.hadoop.hive.common.metrics.common.MetricsFactory;
@@ -34,6 +32,8 @@ import org.apache.hadoop.hive.ql.session.SessionState.LogHelper;
 import org.apache.zookeeper.CreateMode;
 import org.apache.zookeeper.KeeperException;
 import org.apache.curator.framework.CuratorFramework;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.net.InetAddress;
 import java.util.*;
@@ -43,7 +43,7 @@ import java.util.regex.Pattern;
 
 public class ZooKeeperHiveLockManager implements HiveLockManager {
   HiveLockManagerCtx ctx;
-  public static final Log LOG = LogFactory.getLog("ZooKeeperHiveLockManager");
+  public static final Logger LOG = LoggerFactory.getLogger("ZooKeeperHiveLockManager");
   static final private LogHelper console = new LogHelper(LOG);
 
   private static CuratorFramework curatorFramework;
@@ -73,6 +73,7 @@ public class ZooKeeperHiveLockManager implements HiveLockManager {
    * @param ctx  The lock manager context (containing the Hive configuration file)
    * Start the ZooKeeper client based on the zookeeper cluster specified in the conf.
    **/
+  @Override
   public void setContext(HiveLockManagerCtx ctx) throws LockException {
     this.ctx = ctx;
     HiveConf conf = ctx.getConf();
@@ -143,6 +144,7 @@ public class ZooKeeperHiveLockManager implements HiveLockManager {
   * Acquire all the locks. Release all the locks and return null if any lock
    * could not be acquired.
    **/
+  @Override
   public List<HiveLock> lock(List<HiveLockObj> lockObjects,
       boolean keepAlive) throws LockException
   {
@@ -208,6 +210,7 @@ public class ZooKeeperHiveLockManager implements HiveLockManager {
    *          list of hive locks to be released Release all the locks specified. If some of the
    *          locks have already been released, ignore them
    **/
+  @Override
   public void releaseLocks(List<HiveLock> hiveLocks) {
     if (hiveLocks != null) {
       int len = hiveLocks.size();
@@ -233,6 +236,7 @@ public class ZooKeeperHiveLockManager implements HiveLockManager {
    *          Whether the lock is to be persisted after the statement Acquire the
    *          lock. Return null if a conflicting lock is present.
    **/
+  @Override
   public ZooKeeperHiveLock lock(HiveLockObject key, HiveLockMode mode,
       boolean keepAlive) throws LockException {
     return lock(key, mode, keepAlive, false);
@@ -429,6 +433,7 @@ public class ZooKeeperHiveLockManager implements HiveLockManager {
   }
 
   /* Remove the lock specified */
+  @Override
   public void unlock(HiveLock hiveLock) throws LockException {
     unlockWithRetry(hiveLock, parent);
   }
@@ -533,12 +538,14 @@ public class ZooKeeperHiveLockManager implements HiveLockManager {
   }
 
   /* Get all locks */
+  @Override
   public List<HiveLock> getLocks(boolean verifyTablePartition, boolean fetchData)
     throws LockException {
     return getLocks(ctx.getConf(), null, parent, verifyTablePartition, fetchData);
   }
 
   /* Get all locks for a particular object */
+  @Override
   public List<HiveLock> getLocks(HiveLockObject key, boolean verifyTablePartitions,
                                  boolean fetchData) throws LockException {
     return getLocks(ctx.getConf(), key, parent, verifyTablePartitions, fetchData);
@@ -621,7 +628,7 @@ public class ZooKeeperHiveLockManager implements HiveLockManager {
           }
         }
         obj.setData(data);
-        HiveLock lck = (HiveLock)(new ZooKeeperHiveLock(curChild, obj, mode));
+        HiveLock lck = new ZooKeeperHiveLock(curChild, obj, mode);
         locks.add(lck);
       }
     }
@@ -659,6 +666,7 @@ public class ZooKeeperHiveLockManager implements HiveLockManager {
   }
 
   /* Release all transient locks, by simply closing the client */
+  @Override
   public void close() throws LockException {
   try {
 

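This commit only swaps the logging facade, but slf4j's parameterized messages are the main payoff: {} placeholders are formatted only when the level is enabled, so hot paths such as lock acquisition no longer need string concatenation behind isDebugEnabled() guards. An illustrative follow-up (not part of this diff):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class ParameterizedLoggingSketch {
      private static final Logger LOG = LoggerFactory.getLogger(ParameterizedLoggingSketch.class);

      void acquired(String zkPath, String mode) {
        // Pre-slf4j style: concatenation runs even when DEBUG is off
        if (LOG.isDebugEnabled()) {
          LOG.debug("Acquired lock " + zkPath + " in mode " + mode);
        }
        // slf4j style: arguments are only formatted when DEBUG is enabled
        LOG.debug("Acquired lock {} in mode {}", zkPath, mode);
      }
    }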
http://git-wip-us.apache.org/repos/asf/hive/blob/55337444/ql/src/java/org/apache/hadoop/hive/ql/metadata/DummyPartition.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/DummyPartition.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/DummyPartition.java
index 282b284..aec0e4d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/DummyPartition.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/DummyPartition.java
@@ -23,8 +23,8 @@ import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 
 /**
@@ -37,8 +37,8 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema;
 public class DummyPartition extends Partition {
 
   @SuppressWarnings("nls")
-  static final private Log LOG = LogFactory
-      .getLog("hive.ql.metadata.DummyPartition");
+  private static final Logger LOG = LoggerFactory
+      .getLogger("hive.ql.metadata.DummyPartition");
 
   private String name;
   private LinkedHashMap<String, String> partSpec;

http://git-wip-us.apache.org/repos/asf/hive/blob/55337444/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index a2dea67..cef297a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -20,8 +20,8 @@ package org.apache.hadoop.hive.ql.metadata;
 
 import com.google.common.collect.Sets;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -139,7 +139,7 @@ import static org.apache.hadoop.hive.serde.serdeConstants.STRING_TYPE_NAME;
 @SuppressWarnings({"deprecation", "rawtypes"})
 public class Hive {
 
-  static final private Log LOG = LogFactory.getLog("hive.ql.metadata.Hive");
+  static final private Logger LOG = LoggerFactory.getLogger("hive.ql.metadata.Hive");
 
   private HiveConf conf = null;
   private IMetaStoreClient metaStoreClient;
@@ -2647,13 +2647,13 @@ private void constructOneLBLocationMap(FileStatus fSta,
     try {
       destFs = destf.getFileSystem(conf);
     } catch (IOException e) {
-      LOG.error(e);
+      LOG.error("Failed to get dest fs", e);
       throw new HiveException(e.getMessage(), e);
     }
     try {
       srcFs = srcf.getFileSystem(conf);
     } catch (IOException e) {
-      LOG.error(e);
+      LOG.error("Failed to get dest fs", e);
       throw new HiveException(e.getMessage(), e);
     }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/55337444/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java
index fa0abad..10fa561 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java
@@ -25,8 +25,8 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -46,7 +46,7 @@ import org.apache.thrift.TException;
  */
 public class HiveMetaStoreChecker {
 
-  public static final Log LOG = LogFactory.getLog(HiveMetaStoreChecker.class);
+  public static final Logger LOG = LoggerFactory.getLogger(HiveMetaStoreChecker.class);
 
   private final Hive hive;
   private final HiveConf conf;

http://git-wip-us.apache.org/repos/asf/hive/blob/55337444/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveUtils.java
index 719728d..feb471a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveUtils.java
@@ -21,8 +21,8 @@ package org.apache.hadoop.hive.ql.metadata;
 import java.util.ArrayList;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.common.JavaUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -107,7 +107,7 @@ public final class HiveUtils {
   static final byte[] ctrlABytes = "\u0001".getBytes();
 
 
-  public static final Log LOG = LogFactory.getLog(HiveUtils.class);
+  public static final Logger LOG = LoggerFactory.getLogger(HiveUtils.class);
 
 
   public static Text escapeText(Text text) {

http://git-wip-us.apache.org/repos/asf/hive/blob/55337444/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
index 9f9b5bc..06f5223 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
@@ -27,8 +27,8 @@ import java.util.List;
 import java.util.Map;
 import java.util.Properties;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -56,8 +56,8 @@ import org.apache.hadoop.mapred.OutputFormat;
 public class Partition implements Serializable {
 
   @SuppressWarnings("nls")
-  static final private Log LOG = LogFactory
-      .getLog("hive.ql.metadata.Partition");
+  private static final Logger LOG = LoggerFactory
+      .getLogger("hive.ql.metadata.Partition");
 
   private Table table;
   private org.apache.hadoop.hive.metastore.api.Partition tPartition;

http://git-wip-us.apache.org/repos/asf/hive/blob/55337444/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
index 6091c3f..7af9d85 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
@@ -31,13 +31,7 @@ import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hive.common.FileUtils;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.HiveMetaHook;
 import org.apache.hadoop.hive.metastore.HiveMetaHookLoader;
 import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
@@ -55,9 +49,7 @@ import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
 import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.PartitionsStatsRequest;
 import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
-import org.apache.hadoop.hive.metastore.api.TableStatsRequest;
 import org.apache.hadoop.hive.metastore.api.UnknownDBException;
 import org.apache.hadoop.hive.metastore.api.UnknownTableException;
 import org.apache.hadoop.hive.ql.session.SessionState;
@@ -108,7 +100,7 @@ public class SessionHiveMetaStoreClient extends HiveMetaStoreClient implements I
         deleteTempTableColumnStatsForTable(dbname, name);
       } catch (NoSuchObjectException err){
         // No stats to delete, forgivable error.
-        LOG.info(err);
+        LOG.info("Object not found in metastore", err);
       }
       dropTempTable(table, deleteData, envContext);
       return;
@@ -426,7 +418,7 @@ public class SessionHiveMetaStoreClient extends HiveMetaStoreClient implements I
         deleteTempTableColumnStatsForTable(dbname, tbl_name);
       } catch (NoSuchObjectException err){
         // No stats to delete, forgivable error.
-        LOG.info(err);
+        LOG.info("Object not found in metastore",err);
       }
     }
   }
@@ -536,14 +528,6 @@ public class SessionHiveMetaStoreClient extends HiveMetaStoreClient implements I
     return ss.getTempTableColStats().get(lookupName);
   }
 
-  private static List<ColumnStatisticsObj> copyColumnStatisticsObjList(Map<String, ColumnStatisticsObj> csoMap) {
-    List<ColumnStatisticsObj> retval = new ArrayList<ColumnStatisticsObj>(csoMap.size());
-    for (ColumnStatisticsObj cso : csoMap.values()) {
-      retval.add(new ColumnStatisticsObj(cso));
-    }
-    return retval;
-  }
-
   private List<ColumnStatisticsObj> getTempTableColumnStats(String dbName, String tableName,
       List<String> colNames) {
     Map<String, ColumnStatisticsObj> tableColStats =

http://git-wip-us.apache.org/repos/asf/hive/blob/55337444/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
index 3d1ca93..68e0731 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
@@ -19,8 +19,8 @@
 package org.apache.hadoop.hive.ql.metadata;
 
 import org.apache.commons.lang3.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -76,7 +76,7 @@ public class Table implements Serializable {
 
   private static final long serialVersionUID = 1L;
 
-  static final private Log LOG = LogFactory.getLog("hive.ql.metadata.Table");
+  static final private Logger LOG = LoggerFactory.getLogger("hive.ql.metadata.Table");
 
   private org.apache.hadoop.hive.metastore.api.Table tTable;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/55337444/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java
index 92dc81c..75c2dd9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java
@@ -29,8 +29,8 @@ import java.util.Map;
 import java.util.Set;
 
 import org.apache.commons.lang.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -49,7 +49,7 @@ import org.codehaus.jackson.map.ObjectMapper;
  * json.
  */
 public class JsonMetaDataFormatter implements MetaDataFormatter {
-  private static final Log LOG = LogFactory.getLog(JsonMetaDataFormatter.class);
+  private static final Logger LOG = LoggerFactory.getLogger(JsonMetaDataFormatter.class);
 
   /**
    * Convert the map to a JSON string.

http://git-wip-us.apache.org/repos/asf/hive/blob/55337444/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java
index a9e500a..b5dc0b4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java
@@ -27,8 +27,8 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -49,7 +49,7 @@ import org.apache.hadoop.hive.ql.session.SessionState;
  * simple lines of text.
  */
 class TextMetaDataFormatter implements MetaDataFormatter {
-  private static final Log LOG = LogFactory.getLog(TextMetaDataFormatter.class);
+  private static final Logger LOG = LoggerFactory.getLogger(TextMetaDataFormatter.class);
 
   private static final int separator = Utilities.tabCode;
   private static final int terminator = Utilities.newLineCode;

http://git-wip-us.apache.org/repos/asf/hive/blob/55337444/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java
index bc22307..7cf0357 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java
@@ -30,8 +30,6 @@ import java.util.Map.Entry;
 import java.util.Set;
 import java.util.Stack;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -60,8 +58,6 @@ import org.apache.hadoop.hive.ql.plan.OperatorDesc;
  * this transformation does bucket map join optimization.
  */
 abstract public class AbstractBucketJoinProc implements NodeProcessor {
-  private static final Log LOG =
-      LogFactory.getLog(AbstractBucketJoinProc.class.getName());
 
   protected ParseContext pGraphContext;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/55337444/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AvgPartitionSizeBasedBigTableSelectorForAutoSMJ.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AvgPartitionSizeBasedBigTableSelectorForAutoSMJ.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AvgPartitionSizeBasedBigTableSelectorForAutoSMJ.java
index 843d069..c40caf7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AvgPartitionSizeBasedBigTableSelectorForAutoSMJ.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AvgPartitionSizeBasedBigTableSelectorForAutoSMJ.java
@@ -22,8 +22,8 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.JoinOperator;
 import org.apache.hadoop.hive.ql.exec.TableScanOperator;
@@ -43,8 +43,8 @@ public class AvgPartitionSizeBasedBigTableSelectorForAutoSMJ
     extends SizeBasedBigTableSelectorForAutoSMJ
     implements BigTableSelectorForAutoSMJ {
 
-  private static final Log LOG = LogFactory
-      .getLog(AvgPartitionSizeBasedBigTableSelectorForAutoSMJ.class.getName());
+  private static final Logger LOG = LoggerFactory
+      .getLogger(AvgPartitionSizeBasedBigTableSelectorForAutoSMJ.class.getName());
 
   public int getBigTablePosition(ParseContext parseCtx, JoinOperator joinOp,
       Set<Integer> bigTableCandidates)

http://git-wip-us.apache.org/repos/asf/hive/blob/55337444/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketJoinProcCtx.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketJoinProcCtx.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketJoinProcCtx.java
index d84762e..9b396d5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketJoinProcCtx.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketJoinProcCtx.java
@@ -22,16 +22,16 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.JoinOperator;
 import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
 import org.apache.hadoop.hive.ql.metadata.Partition;
 
 public class BucketJoinProcCtx implements NodeProcessorCtx {
-  private static final Log LOG =
-    LogFactory.getLog(BucketJoinProcCtx.class.getName());
+  private static final Logger LOG =
+    LoggerFactory.getLogger(BucketJoinProcCtx.class.getName());
 
   private final HiveConf conf;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/55337444/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketMapJoinOptimizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketMapJoinOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketMapJoinOptimizer.java
index 6f35b87..750427a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketMapJoinOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketMapJoinOptimizer.java
@@ -23,8 +23,8 @@ import java.util.List;
 import java.util.Map;
 import java.util.Stack;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.ql.exec.MapJoinOperator;
 import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
 import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
@@ -43,7 +43,7 @@ import org.apache.hadoop.hive.ql.parse.SemanticException;
  */
 public class BucketMapJoinOptimizer implements Transform {
 
-  private static final Log LOG = LogFactory.getLog(GroupByOptimizer.class
+  private static final Logger LOG = LoggerFactory.getLogger(GroupByOptimizer.class
       .getName());
 
   public BucketMapJoinOptimizer() {

http://git-wip-us.apache.org/repos/asf/hive/blob/55337444/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java
index e63c527..78bce23 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java
@@ -28,8 +28,8 @@ import java.util.Map;
 import java.util.Set;
 import java.util.Stack;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.ql.exec.AbstractMapJoinOperator;
 import org.apache.hadoop.hive.ql.exec.ColumnInfo;
 import org.apache.hadoop.hive.ql.exec.CommonJoinOperator;
@@ -83,7 +83,7 @@ import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
  * Factory for generating the different node processors used by ColumnPruner.
  */
 public final class ColumnPrunerProcFactory {
-  protected static final Log LOG = LogFactory.getLog(ColumnPrunerProcFactory.class.getName());
+  protected static final Logger LOG = LoggerFactory.getLogger(ColumnPrunerProcFactory.class.getName());
   private ColumnPrunerProcFactory() {
     // prevent instantiation
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/55337444/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagate.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagate.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagate.java
index aacded6..136b5e1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagate.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagate.java
@@ -24,8 +24,8 @@ import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
 import org.apache.hadoop.hive.ql.exec.FilterOperator;
 import org.apache.hadoop.hive.ql.exec.GroupByOperator;
@@ -61,7 +61,7 @@ import org.apache.hadoop.hive.ql.parse.SemanticException;
  */
 public class ConstantPropagate implements Transform {
 
-  private static final Log LOG = LogFactory.getLog(ConstantPropagate.class);
+  private static final Logger LOG = LoggerFactory.getLogger(ConstantPropagate.class);
   protected ParseContext pGraphContext;
   private ConstantPropagateOption constantPropagateOption;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/55337444/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcCtx.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcCtx.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcCtx.java
index d0b10c3..0a61f12 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcCtx.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcCtx.java
@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License. You may obtain a copy of the License at
- * 
+ *
  * http://www.apache.org/licenses/LICENSE-2.0
- * 
+ *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -27,7 +27,8 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
 
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.ql.exec.ColumnInfo;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.RowSchema;
@@ -37,7 +38,7 @@ import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
 
 /**
  * This class implements the processor context for Constant Propagate.
- * 
+ *
  * ConstantPropagateProcCtx keeps track of propagated constants in a column->const map for each
 * operator, enabling constants to be resolved across operators.
  */
@@ -49,8 +50,8 @@ public class ConstantPropagateProcCtx implements NodeProcessorCtx {
                // if one of the child conditions is true/false.
   };
 
-  private static final org.apache.commons.logging.Log LOG = LogFactory
-      .getLog(ConstantPropagateProcCtx.class);
+  private static final Logger LOG = LoggerFactory
+      .getLogger(ConstantPropagateProcCtx.class);
 
   private final Map<Operator<? extends Serializable>, Map<ColumnInfo, ExprNodeDesc>> opToConstantExprs;
   private final Set<Operator<? extends Serializable>> opToDelete;
@@ -73,10 +74,10 @@ public class ConstantPropagateProcCtx implements NodeProcessorCtx {
 
   /**
    * Resolve a ColumnInfo based on given RowResolver.
-   * 
+   *
    * @param ci
    * @param rr
-   * @param parentRR 
+   * @param parentRR
    * @return
    * @throws SemanticException
    */
@@ -104,11 +105,11 @@ public class ConstantPropagateProcCtx implements NodeProcessorCtx {
 
   /**
    * Get propagated constant map from parents.
-   * 
+   *
    * Traverse all parents of current operator, if there is propagated constant (determined by
    * assignment expression like column=constant value), resolve the column using RowResolver and add
    * it to current constant map.
-   * 
+   *
    * @param op
    *        operator getting the propagated constants.
    * @return map of ColumnInfo to ExprNodeDesc. The values of that map must be either

http://git-wip-us.apache.org/repos/asf/hive/blob/55337444/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java
index 25156b2..b18b5af 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java
@@ -29,8 +29,8 @@ import java.util.Set;
 import java.util.Stack;
 
 import org.apache.commons.lang.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.ColumnInfo;
 import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
@@ -107,7 +107,7 @@ import com.google.common.collect.Lists;
  * Factory for generating the different node processors used by ConstantPropagate.
  */
 public final class ConstantPropagateProcFactory {
-  protected static final Log LOG = LogFactory.getLog(ConstantPropagateProcFactory.class.getName());
+  protected static final Logger LOG = LoggerFactory.getLogger(ConstantPropagateProcFactory.class.getName());
   protected static Set<Class<?>> propagatableUdfs = new HashSet<Class<?>>();
 
   static {

http://git-wip-us.apache.org/repos/asf/hive/blob/55337444/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java
index e63de7a..ea89cf0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java
@@ -27,8 +27,8 @@ import java.util.Map;
 import java.util.Set;
 import java.util.Stack;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.common.JavaUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.AppMasterEventOperator;
@@ -76,7 +76,7 @@ import com.google.common.collect.ImmutableSet;
  */
 public class ConvertJoinMapJoin implements NodeProcessor {
 
-  private static final Log LOG = LogFactory.getLog(ConvertJoinMapJoin.class.getName());
+  private static final Logger LOG = LoggerFactory.getLogger(ConvertJoinMapJoin.class.getName());
 
   @SuppressWarnings({ "unchecked", "rawtypes" })
   private static final Set<Class<? extends Operator<?>>> COSTLY_OPERATORS =

http://git-wip-us.apache.org/repos/asf/hive/blob/55337444/ql/src/java/org/apache/hadoop/hive/ql/optimizer/DynamicPartitionPruningOptimization.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/DynamicPartitionPruningOptimization.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/DynamicPartitionPruningOptimization.java
index f475926..292d375 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/DynamicPartitionPruningOptimization.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/DynamicPartitionPruningOptimization.java
@@ -26,8 +26,8 @@ import java.util.List;
 import java.util.Map;
 import java.util.Stack;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.ql.exec.FilterOperator;
@@ -74,7 +74,7 @@ import org.apache.hadoop.hive.ql.plan.SelectDesc;
  */
 public class DynamicPartitionPruningOptimization implements NodeProcessor {
 
-  static final private Log LOG = LogFactory.getLog(DynamicPartitionPruningOptimization.class
+  static final private Logger LOG = LoggerFactory.getLogger(DynamicPartitionPruningOptimization.class
       .getName());
 
   public static class DynamicPartitionPrunerProc implements NodeProcessor {

http://git-wip-us.apache.org/repos/asf/hive/blob/55337444/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java
index e5b9c2b..dcdc9ba 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java
@@ -25,8 +25,8 @@ import java.util.List;
 import java.util.Map;
 import java.util.Stack;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.FetchTask;
@@ -48,7 +48,7 @@ import org.apache.hadoop.hive.ql.plan.OperatorDesc;
  */
 public class GenMRFileSink1 implements NodeProcessor {
 
-  static final private Log LOG = LogFactory.getLog(GenMRFileSink1.class.getName());
+  private static final Logger LOG = LoggerFactory.getLogger(GenMRFileSink1.class.getName());
 
   public GenMRFileSink1() {
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/55337444/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
index c22c35f..de5cb3a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
@@ -34,8 +34,8 @@ import java.util.Map.Entry;
 import java.util.Properties;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -122,10 +122,10 @@ import com.google.common.collect.Interner;
  * map-reduce tasks.
  */
 public final class GenMapRedUtils {
-  private static Log LOG;
+  private static Logger LOG;
 
   static {
-    LOG = LogFactory.getLog("org.apache.hadoop.hive.ql.optimizer.GenMapRedUtils");
+    LOG = LoggerFactory.getLogger("org.apache.hadoop.hive.ql.optimizer.GenMapRedUtils");
   }
 
   public static boolean needsTagging(ReduceWork rWork) {

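GenMapRedUtils carries its logger assignment over in a static initializer block, a leftover from the commons-logging version. Since the hunk already touches both lines, a direct final field would be the more idiomatic slf4j form; a possible cleanup (not part of this commit):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public final class GenMapRedUtilsLoggerSketch {
      // Replaces the "private static Logger LOG;" plus "static { LOG = ...; }" pair
      private static final Logger LOG =
          LoggerFactory.getLogger("org.apache.hadoop.hive.ql.optimizer.GenMapRedUtils");
    }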
http://git-wip-us.apache.org/repos/asf/hive/blob/55337444/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GlobalLimitOptimizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GlobalLimitOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GlobalLimitOptimizer.java
index 41bb84c..6b04d92 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GlobalLimitOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GlobalLimitOptimizer.java
@@ -22,8 +22,8 @@ import java.util.Collection;
 import java.util.Map;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.ql.Context;
 import org.apache.hadoop.hive.ql.exec.FilterOperator;
 import org.apache.hadoop.hive.ql.exec.GroupByOperator;
@@ -63,7 +63,7 @@ import com.google.common.collect.Multimap;
  */
 public class GlobalLimitOptimizer implements Transform {
 
-  private final Log LOG = LogFactory.getLog(GlobalLimitOptimizer.class.getName());
+  private final Logger LOG = LoggerFactory.getLogger(GlobalLimitOptimizer.class.getName());
 
   public ParseContext transform(ParseContext pctx) throws SemanticException {
     Context ctx = pctx.getContext();

http://git-wip-us.apache.org/repos/asf/hive/blob/55337444/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java
index ce3f59a..f758776 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java
@@ -28,8 +28,8 @@ import java.util.Map;
 import java.util.Set;
 import java.util.Stack;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.ql.exec.ColumnInfo;
@@ -71,7 +71,7 @@ import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator.Mode;
  */
 public class GroupByOptimizer implements Transform {
 
-  private static final Log LOG = LogFactory.getLog(GroupByOptimizer.class
+  private static final Logger LOG = LoggerFactory.getLogger(GroupByOptimizer.class
       .getName());
 
   public GroupByOptimizer() {

http://git-wip-us.apache.org/repos/asf/hive/blob/55337444/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IdentityProjectRemover.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IdentityProjectRemover.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IdentityProjectRemover.java
index 135b90b..114c683 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IdentityProjectRemover.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IdentityProjectRemover.java
@@ -27,8 +27,8 @@ import java.util.Stack;
 import com.google.common.base.Predicates;
 import com.google.common.collect.Iterators;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.FilterOperator;
 import org.apache.hadoop.hive.ql.exec.LateralViewForwardOperator;
@@ -70,7 +70,7 @@ import org.apache.hadoop.hive.ql.plan.OperatorDesc;
  */
 public class IdentityProjectRemover implements Transform {
 
-  private static final Log LOG = LogFactory.getLog(IdentityProjectRemover.class);
+  private static final Logger LOG = LoggerFactory.getLogger(IdentityProjectRemover.class);
   @Override
   public ParseContext transform(ParseContext pctx) throws SemanticException {
     // 0. We check the conditions to apply this transformation,

http://git-wip-us.apache.org/repos/asf/hive/blob/55337444/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java
index 0b30258..95b7755 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java
@@ -25,8 +25,8 @@ import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hive.common.FileUtils;
@@ -58,7 +58,7 @@ import org.apache.hadoop.hive.ql.session.SessionState;
  */
 public final class IndexUtils {
 
-  private static final Log LOG = LogFactory.getLog(IndexWhereProcessor.class.getName());
+  private static final Logger LOG = LoggerFactory.getLogger(IndexWhereProcessor.class.getName());
 
   private IndexUtils(){
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/55337444/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java
index f8f2b7b..b4276e4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java
@@ -29,8 +29,8 @@ import java.util.Map;
 import java.util.Set;
 import java.util.Stack;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.ObjectPair;
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -92,7 +92,7 @@ public class MapJoinProcessor implements Transform {
   // (column type + column name). The column name is not really used anywhere, but it
   // needs to be passed. Use the string defined below for that.
   private static final String MAPJOINKEY_FIELDPREFIX = "mapjoinkey";
-  private static final Log LOG = LogFactory.getLog(MapJoinProcessor.class.getName());
+  private static final Logger LOG = LoggerFactory.getLogger(MapJoinProcessor.class.getName());
 
   public MapJoinProcessor() {
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/55337444/ql/src/java/org/apache/hadoop/hive/ql/optimizer/OperatorComparatorFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/OperatorComparatorFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/OperatorComparatorFactory.java
index da4d190..1da9164 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/OperatorComparatorFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/OperatorComparatorFactory.java
@@ -23,8 +23,8 @@ import java.util.Map;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Maps;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.ql.exec.CollectOperator;
 import org.apache.hadoop.hive.ql.exec.CommonMergeJoinOperator;
 import org.apache.hadoop.hive.ql.exec.DemuxOperator;
@@ -75,7 +75,7 @@ import org.apache.hadoop.hive.ql.plan.UDTFDesc;
 
 public class OperatorComparatorFactory {
   private static final Map<Class<?>, OperatorComparator> comparatorMapping = Maps.newHashMap();
-  private static final Log LOG = LogFactory.getLog(OperatorComparatorFactory.class);
+  private static final Logger LOG = LoggerFactory.getLogger(OperatorComparatorFactory.class);
 
   static {
     comparatorMapping.put(TableScanOperator.class, new TableScanOperatorComparator());
@@ -549,4 +549,4 @@ public class OperatorComparatorFactory {
     }
     return true;
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/55337444/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
index 25c9618..7ee5081 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
@@ -22,8 +22,8 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.optimizer.calcite.translator.HiveOpConverterPostProc;
 import org.apache.hadoop.hive.ql.optimizer.correlation.CorrelationOptimizer;
@@ -52,7 +52,7 @@ import com.google.common.collect.Sets;
 public class Optimizer {
   private ParseContext pctx;
   private List<Transform> transformations;
-  private static final Log LOG = LogFactory.getLog(Optimizer.class.getName());
+  private static final Logger LOG = LoggerFactory.getLogger(Optimizer.class.getName());
 
   /**
    * Create the list of transformations.

http://git-wip-us.apache.org/repos/asf/hive/blob/55337444/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PointLookupOptimizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PointLookupOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PointLookupOptimizer.java
index d83636d..4799b4d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PointLookupOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PointLookupOptimizer.java
@@ -29,8 +29,8 @@ import java.util.Set;
 import java.util.Stack;
 
 import org.apache.calcite.util.Pair;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.ql.exec.Description;
 import org.apache.hadoop.hive.ql.exec.FilterOperator;
 import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
@@ -73,7 +73,7 @@ import com.google.common.collect.ListMultimap;
  */
 public class PointLookupOptimizer implements Transform {
 
-  private static final Log LOG = LogFactory.getLog(PointLookupOptimizer.class);
+  private static final Logger LOG = LoggerFactory.getLogger(PointLookupOptimizer.class);
   private static final String IN_UDF =
           GenericUDFIn.class.getAnnotation(Description.class).name();
   private static final String STRUCT_UDF =

http://git-wip-us.apache.org/repos/asf/hive/blob/55337444/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PrunerUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PrunerUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PrunerUtils.java
index 5d375f6..1fc9d8e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PrunerUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PrunerUtils.java
@@ -23,8 +23,6 @@ import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.ql.exec.FilterOperator;
 import org.apache.hadoop.hive.ql.exec.TableScanOperator;
 import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
@@ -50,12 +48,6 @@ import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
  *
  */
 public final class PrunerUtils {
-  private static Log LOG;
-
-  static {
-    LOG = LogFactory.getLog("org.apache.hadoop.hive.ql.optimizer.PrunerUtils");
-  }
-
   private PrunerUtils() {
     //prevent instantiation
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/55337444/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ReduceSinkMapJoinProc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ReduceSinkMapJoinProc.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ReduceSinkMapJoinProc.java
index 757ff5e..d5c3a2d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ReduceSinkMapJoinProc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ReduceSinkMapJoinProc.java
@@ -27,8 +27,8 @@ import java.util.Map;
 import java.util.Set;
 import java.util.Stack;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.HashTableDummyOperator;
 import org.apache.hadoop.hive.ql.exec.MapJoinOperator;
@@ -67,7 +67,7 @@ import static org.apache.hadoop.hive.ql.plan.ReduceSinkDesc.ReducerTraits.FIXED;
 
 public class ReduceSinkMapJoinProc implements NodeProcessor {
 
-  private final static Log LOG = LogFactory.getLog(ReduceSinkMapJoinProc.class.getName());
+  private final static Logger LOG = LoggerFactory.getLogger(ReduceSinkMapJoinProc.class.getName());
 
   /* (non-Javadoc)
    * This processor addresses the RS-MJ case that occurs in tez on the small/hash

http://git-wip-us.apache.org/repos/asf/hive/blob/55337444/ql/src/java/org/apache/hadoop/hive/ql/optimizer/RemoveDynamicPruningBySize.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/RemoveDynamicPruningBySize.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/RemoveDynamicPruningBySize.java
index 1567326..d8b76e1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/RemoveDynamicPruningBySize.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/RemoveDynamicPruningBySize.java
@@ -20,8 +20,8 @@ package org.apache.hadoop.hive.ql.optimizer;
 
 import java.util.Stack;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.ql.exec.AppMasterEventOperator;
 import org.apache.hadoop.hive.ql.lib.Node;
@@ -39,7 +39,7 @@ import org.apache.hadoop.hive.ql.plan.DynamicPruningEventDesc;
  */
 public class RemoveDynamicPruningBySize implements NodeProcessor {
 
-  static final private Log LOG = LogFactory.getLog(RemoveDynamicPruningBySize.class.getName());
+  static final private Logger LOG = LoggerFactory.getLogger(RemoveDynamicPruningBySize.class.getName());
 
   @Override
   public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procContext,

http://git-wip-us.apache.org/repos/asf/hive/blob/55337444/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java
index 37f9473..2c473b0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java
@@ -27,8 +27,8 @@ import java.util.LinkedHashMap;
 import java.util.Map;
 import java.util.Stack;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -86,8 +86,8 @@ public class SamplePruner implements Transform {
   }
 
   // The log
-  private static final Log LOG = LogFactory
-      .getLog("hive.ql.optimizer.SamplePruner");
+  private static final Logger LOG = LoggerFactory
+      .getLogger("hive.ql.optimizer.SamplePruner");
 
   /*
    * (non-Javadoc)

http://git-wip-us.apache.org/repos/asf/hive/blob/55337444/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SetReducerParallelism.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SetReducerParallelism.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SetReducerParallelism.java
index e9fdeb0..60a8604 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SetReducerParallelism.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SetReducerParallelism.java
@@ -22,8 +22,8 @@ import java.util.Collection;
 import java.util.EnumSet;
 import java.util.Stack;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator;
@@ -48,7 +48,7 @@ import static org.apache.hadoop.hive.ql.plan.ReduceSinkDesc.ReducerTraits.UNIFOR
  */
 public class SetReducerParallelism implements NodeProcessor {
 
-  static final private Log LOG = LogFactory.getLog(SetReducerParallelism.class.getName());
+  private static final Logger LOG = LoggerFactory.getLogger(SetReducerParallelism.class.getName());
 
   @SuppressWarnings("unchecked")
   @Override

http://git-wip-us.apache.org/repos/asf/hive/blob/55337444/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SimpleFetchOptimizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SimpleFetchOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SimpleFetchOptimizer.java
index 2af6f9a..588f407 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SimpleFetchOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SimpleFetchOptimizer.java
@@ -27,8 +27,8 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -89,7 +89,7 @@ import org.apache.hadoop.mapred.JobConf;
  */
 public class SimpleFetchOptimizer implements Transform {
 
-  private final Log LOG = LogFactory.getLog(SimpleFetchOptimizer.class.getName());
+  private final Logger LOG = LoggerFactory.getLogger(SimpleFetchOptimizer.class.getName());
 
   @Override
   public ParseContext transform(ParseContext pctx) throws SemanticException {

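Because the migration preserves each field's original modifiers, SimpleFetchOptimizer keeps a non-static, per-instance logger. With slf4j this is functionally harmless, since the standard bindings cache loggers by name and every SimpleFetchOptimizer object sees the same Logger, but the static form is the more common idiom; a sketch of that form, not part of this patch:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class SimpleFetchOptimizer {
      // Static form: one field per class instead of one reference per instance.
      private static final Logger LOG =
          LoggerFactory.getLogger(SimpleFetchOptimizer.class);
    }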
http://git-wip-us.apache.org/repos/asf/hive/blob/55337444/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SkewJoinOptimizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SkewJoinOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SkewJoinOptimizer.java
index dc885ab..e8c7486 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SkewJoinOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SkewJoinOptimizer.java
@@ -28,8 +28,8 @@ import java.util.Map.Entry;
 import java.util.Set;
 import java.util.Stack;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.ql.exec.JoinOperator;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.OperatorFactory;
@@ -77,7 +77,7 @@ import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
  */
 public class SkewJoinOptimizer implements Transform {
 
-  private static final Log LOG = LogFactory.getLog(SkewJoinOptimizer.class.getName());
+  private static final Logger LOG = LoggerFactory.getLogger(SkewJoinOptimizer.class.getName());
 
   public static class SkewJoinProc implements NodeProcessor {
     private ParseContext parseContext;

http://git-wip-us.apache.org/repos/asf/hive/blob/55337444/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java
index d58c24d..e2a0eae 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java
@@ -26,8 +26,8 @@ import java.util.Map;
 import java.util.Set;
 import java.util.Stack;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.common.ObjectPair;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
@@ -109,7 +109,7 @@ public class SortedDynPartitionOptimizer implements Transform {
 
   class SortedDynamicPartitionProc implements NodeProcessor {
 
-    private final Log LOG = LogFactory.getLog(SortedDynPartitionOptimizer.class);
+    private final Logger LOG = LoggerFactory.getLogger(SortedDynPartitionOptimizer.class);
     protected ParseContext parseCtx;
 
     public SortedDynamicPartitionProc(ParseContext pCtx) {

http://git-wip-us.apache.org/repos/asf/hive/blob/55337444/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedMergeBucketMapJoinOptimizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedMergeBucketMapJoinOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedMergeBucketMapJoinOptimizer.java
index 51f1b74..5aeeeb8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedMergeBucketMapJoinOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedMergeBucketMapJoinOptimizer.java
@@ -23,8 +23,8 @@ import java.util.LinkedHashMap;
 import java.util.Map;
 import java.util.Stack;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.JoinOperator;
 import org.apache.hadoop.hive.ql.exec.MapJoinOperator;
@@ -46,8 +46,8 @@ import org.apache.hadoop.hive.ql.plan.OperatorDesc;
 //try to replace a bucket map join with a sorted merge map join
 public class SortedMergeBucketMapJoinOptimizer implements Transform {
 
-  private static final Log LOG = LogFactory
-      .getLog(SortedMergeBucketMapJoinOptimizer.class.getName());
+  private static final Logger LOG = LoggerFactory
+      .getLogger(SortedMergeBucketMapJoinOptimizer.class.getName());
 
   public SortedMergeBucketMapJoinOptimizer() {
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/55337444/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SparkRemoveDynamicPruningBySize.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SparkRemoveDynamicPruningBySize.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SparkRemoveDynamicPruningBySize.java
index 3742857..a6bf3af 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SparkRemoveDynamicPruningBySize.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SparkRemoveDynamicPruningBySize.java
@@ -20,8 +20,8 @@ package org.apache.hadoop.hive.ql.optimizer;
 
 import java.util.Stack;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.lib.Node;
@@ -40,7 +40,7 @@ import org.apache.hadoop.hive.ql.parse.spark.SparkPartitionPruningSinkOperator;
  */
 public class SparkRemoveDynamicPruningBySize implements NodeProcessor {
 
-  static final private Log LOG = LogFactory.getLog(SparkRemoveDynamicPruningBySize.class.getName());
+  static final private Logger LOG = LoggerFactory.getLogger(SparkRemoveDynamicPruningBySize.class.getName());
 
   @Override
   public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procContext,
@@ -70,4 +70,4 @@ public class SparkRemoveDynamicPruningBySize implements NodeProcessor {
     }
     return false;
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/55337444/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
index aa204c7..ffe706e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
@@ -27,8 +27,8 @@ import java.util.Map;
 import java.util.Set;
 import java.util.Stack;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.common.type.HiveDecimal;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
@@ -94,7 +94,7 @@ public class StatsOptimizer implements Transform {
   //       a time; this could be improved - get all necessary columns in advance, then use local.
   // TODO: [HIVE-6292] aggregations could be done directly in metastore. Hive over MySQL!
 
-  private static final Log Log = LogFactory.getLog(StatsOptimizer.class);
+  private static final Logger Logger = LoggerFactory.getLogger(StatsOptimizer.class);
 
   @Override
   public ParseContext transform(ParseContext pctx) throws SemanticException {
@@ -333,23 +333,23 @@ public class StatsOptimizer implements Transform {
               StatType type = getType(desc.getTypeString());
               if(!tbl.isPartitioned()) {
                 if (!StatsSetupConst.areStatsUptoDate(tbl.getParameters())) {
-                  Log.debug("Stats for table : " + tbl.getTableName() + " are not upto date.");
+                  Logger.debug("Stats for table : " + tbl.getTableName() + " are not upto date.");
                   return null;
                 }
                 rowCnt = Long.parseLong(tbl.getProperty(StatsSetupConst.ROW_COUNT));
                 if (rowCnt < 1) {
-                  Log.debug("Table doesn't have upto date stats " + tbl.getTableName());
+                  Logger.debug("Table doesn't have upto date stats " + tbl.getTableName());
                   return null;
                 }
                 List<ColumnStatisticsObj> stats = hive.getMSC().getTableColumnStatistics(
                     tbl.getDbName(),tbl.getTableName(), Lists.newArrayList(colName));
                 if (stats.isEmpty()) {
-                  Log.debug("No stats for " + tbl.getTableName() + " column " + colName);
+                  Logger.debug("No stats for " + tbl.getTableName() + " column " + colName);
                   return null;
                 }
                 Long nullCnt = getNullcountFor(type, stats.get(0).getStatsData());
                 if (null == nullCnt) {
-                  Log.debug("Unsupported type: " + desc.getTypeString() + " encountered in " +
+                  Logger.debug("Unsupported type: " + desc.getTypeString() + " encountered in " +
                       "metadata optimizer for column : " + colName);
                   return null;
                 } else {
@@ -360,13 +360,13 @@ public class StatsOptimizer implements Transform {
                     tsOp.getConf().getAlias(), tsOp).getPartitions();
                 for (Partition part : parts) {
                   if (!StatsSetupConst.areStatsUptoDate(part.getParameters())) {
-                    Log.debug("Stats for part : " + part.getSpec() + " are not upto date.");
+                    Logger.debug("Stats for part : " + part.getSpec() + " are not upto date.");
                     return null;
                   }
                   Long partRowCnt = Long.parseLong(part.getParameters()
                       .get(StatsSetupConst.ROW_COUNT));
                   if (partRowCnt < 1) {
-                    Log.debug("Partition doesn't have upto date stats " + part.getSpec());
+                    Logger.debug("Partition doesn't have upto date stats " + part.getSpec());
                     return null;
                   }
                   rowCnt += partRowCnt;
@@ -381,7 +381,7 @@ public class StatsOptimizer implements Transform {
                   if (statData == null) return null;
                   Long nullCnt = getNullcountFor(type, statData);
                   if (nullCnt == null) {
-                    Log.debug("Unsupported type: " + desc.getTypeString() + " encountered in " +
+                    Logger.debug("Unsupported type: " + desc.getTypeString() + " encountered in " +
                         "metadata optimizer for column : " + colName);
                     return null;
                   } else {
@@ -397,13 +397,13 @@ public class StatsOptimizer implements Transform {
             StatType type = getType(colDesc.getTypeString());
             if(!tbl.isPartitioned()) {
               if (!StatsSetupConst.areStatsUptoDate(tbl.getParameters())) {
-                Log.debug("Stats for table : " + tbl.getTableName() + " are not upto date.");
+                Logger.debug("Stats for table : " + tbl.getTableName() + " are not upto date.");
                 return null;
               }
               List<ColumnStatisticsObj> stats = hive.getMSC().getTableColumnStatistics(
                   tbl.getDbName(),tbl.getTableName(), Lists.newArrayList(colName));
               if (stats.isEmpty()) {
-                Log.debug("No stats for " + tbl.getTableName() + " column " + colName);
+                Logger.debug("No stats for " + tbl.getTableName() + " column " + colName);
                 return null;
               }
               ColumnStatisticsData statData = stats.get(0).getStatsData();
@@ -431,7 +431,7 @@ public class StatsOptimizer implements Transform {
                 }
                 default:
                   // unsupported type
-                  Log.debug("Unsupported type: " + colDesc.getTypeString() + " encountered in " +
+                  Logger.debug("Unsupported type: " + colDesc.getTypeString() + " encountered in " +
                       "metadata optimizer for column : " + colName);
                   return null;
               }
@@ -493,7 +493,7 @@ public class StatsOptimizer implements Transform {
                   break;
                 }
                 default:
-                  Log.debug("Unsupported type: " + colDesc.getTypeString() + " encountered in " +
+                  Logger.debug("Unsupported type: " + colDesc.getTypeString() + " encountered in " +
                       "metadata optimizer for column : " + colName);
                   return null;
               }
@@ -504,7 +504,7 @@ public class StatsOptimizer implements Transform {
             StatType type = getType(colDesc.getTypeString());
             if (!tbl.isPartitioned()) {
               if (!StatsSetupConst.areStatsUptoDate(tbl.getParameters())) {
-                Log.debug("Stats for table : " + tbl.getTableName() + " are not upto date.");
+                Logger.debug("Stats for table : " + tbl.getTableName() + " are not upto date.");
                 return null;
               }
               ColumnStatisticsData statData = hive.getMSC().getTableColumnStatistics(
@@ -533,7 +533,7 @@ public class StatsOptimizer implements Transform {
                   break;
                 }
                 default: // unsupported type
-                  Log.debug("Unsupported type: " + colDesc.getTypeString() + " encountered in " +
+                  Logger.debug("Unsupported type: " + colDesc.getTypeString() + " encountered in " +
                       "metadata optimizer for column : " + colName);
                   return null;
               }
@@ -594,14 +594,14 @@ public class StatsOptimizer implements Transform {
                   break;
                 }
                 default: // unsupported type
-                  Log.debug("Unsupported type: " + colDesc.getTypeString() + " encountered in " +
+                  Logger.debug("Unsupported type: " + colDesc.getTypeString() + " encountered in " +
                       "metadata optimizer for column : " + colName);
                   return null;
 
               }
             }
           } else { // Unsupported aggregation.
-            Log.debug("Unsupported aggregation for metadata optimizer: "
+            Logger.debug("Unsupported aggregation for metadata optimizer: "
                 + aggr.getGenericUDAFName());
             return null;
           }
@@ -645,17 +645,17 @@ public class StatsOptimizer implements Transform {
       } catch (Exception e) {
         // this is best effort optimization, bail out in error conditions and
         // try generate and execute slower plan
-        Log.debug("Failed to optimize using metadata optimizer", e);
+        Logger.debug("Failed to optimize using metadata optimizer", e);
         return null;
       }
     }
 
     private ColumnStatisticsData validateSingleColStat(List<ColumnStatisticsObj> statObj) {
       if (statObj.size() > 1) {
-        Log.error("More than one stat for a single column!");
+        Logger.error("More than one stat for a single column!");
         return null;
       } else if (statObj.isEmpty()) {
-        Log.debug("No stats for some partition and column");
+        Logger.debug("No stats for some partition and column");
         return null;
       }
       return statObj.get(0).getStatsData();
@@ -666,7 +666,7 @@ public class StatsOptimizer implements Transform {
       List<String> partNames = new ArrayList<String>(parts.size());
       for (Partition part : parts) {
         if (!StatsSetupConst.areStatsUptoDate(part.getParameters())) {
-          Log.debug("Stats for part : " + part.getSpec() + " are not upto date.");
+          Logger.debug("Stats for part : " + part.getSpec() + " are not upto date.");
           return null;
         }
         partNames.add(part.getName());
@@ -674,7 +674,7 @@ public class StatsOptimizer implements Transform {
       Map<String, List<ColumnStatisticsObj>> result = hive.getMSC().getPartitionColumnStatistics(
           tbl.getDbName(), tbl.getTableName(), partNames, Lists.newArrayList(colName));
       if (result.size() != parts.size()) {
-        Log.debug("Received " + result.size() + " stats for " + parts.size() + " partitions");
+        Logger.debug("Received " + result.size() + " stats for " + parts.size() + " partitions");
         return null;
       }
       return result.values();
@@ -691,7 +691,7 @@ public class StatsOptimizer implements Transform {
           }
           long partRowCnt = Long.parseLong(part.getParameters().get(StatsSetupConst.ROW_COUNT));
           if (partRowCnt < 1) {
-            Log.debug("Partition doesn't have upto date stats " + part.getSpec());
+            Logger.debug("Partition doesn't have upto date stats " + part.getSpec());
             return null;
           }
           rowCnt += partRowCnt;
@@ -704,7 +704,7 @@ public class StatsOptimizer implements Transform {
         if (rowCnt < 1) {
           // if rowCnt < 1 than its either empty table or table on which stats are not
           //  computed We assume the worse and don't attempt to optimize.
-          Log.debug("Table doesn't have upto date stats " + tbl.getTableName());
+          Logger.debug("Table doesn't have upto date stats " + tbl.getTableName());
           rowCnt = null;
         }
       }

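The StatsOptimizer change is a mechanical rename, including the field itself (Log to Logger, which shadows the org.slf4j.Logger type name yet still compiles, because a variable takes precedence over a type in an expression), so all of the message strings above keep eager string concatenation. slf4j also supports {} placeholders, which defer message formatting until the level is known to be enabled; a sketch of that idiom, using a hypothetical class and method:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class StatsLoggingSketch {
      private static final Logger LOG =
          LoggerFactory.getLogger(StatsLoggingSketch.class);

      void report(String tableName, String colName) {
        // Concatenation builds the message even when DEBUG is disabled:
        LOG.debug("No stats for " + tableName + " column " + colName);
        // Placeholders skip the formatting cost unless DEBUG is enabled:
        LOG.debug("No stats for {} column {}", tableName, colName);
      }
    }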
http://git-wip-us.apache.org/repos/asf/hive/blob/55337444/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
index 8e6621a..1cccc77 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
@@ -54,8 +54,8 @@ import org.apache.calcite.sql.validate.SqlValidatorUtil;
 import org.apache.calcite.util.ImmutableBitSet;
 import org.apache.calcite.util.Pair;
 import org.apache.calcite.util.Util;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveMultiJoin;
@@ -80,7 +80,7 @@ import com.google.common.collect.Sets;
 
 public class HiveCalciteUtil {
 
-  private static final Log LOG = LogFactory.getLog(HiveCalciteUtil.class);
+  private static final Logger LOG = LoggerFactory.getLogger(HiveCalciteUtil.class);
 
 
   /**

http://git-wip-us.apache.org/repos/asf/hive/blob/55337444/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptUtil.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptUtil.java
index 0e282b8..b4e7d47 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptUtil.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptUtil.java
@@ -16,8 +16,8 @@ import org.apache.calcite.sql.SqlKind;
 import org.apache.calcite.sql.SqlOperator;
 import org.apache.calcite.sql.fun.SqlStdOperatorTable;
 import org.apache.calcite.util.ImmutableBitSet;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
 import org.apache.hadoop.hive.ql.optimizer.calcite.translator.TypeConverter;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
@@ -26,7 +26,7 @@ import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
 
 public class HiveRelOptUtil extends RelOptUtil {
 
-  private static final Log LOG = LogFactory.getLog(HiveRelOptUtil.class);
+  private static final Logger LOG = LoggerFactory.getLogger(HiveRelOptUtil.class);
 
 
   /**

http://git-wip-us.apache.org/repos/asf/hive/blob/55337444/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java
index 1bd241b..cce3588 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java
@@ -39,8 +39,8 @@ import org.apache.calcite.rel.type.RelDataType;
 import org.apache.calcite.rel.type.RelDataTypeField;
 import org.apache.calcite.rex.RexNode;
 import org.apache.calcite.util.ImmutableBitSet;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
@@ -79,8 +79,8 @@ public class RelOptHiveTable extends RelOptAbstractTable {
   Map<String, PrunedPartitionList>                partitionCache;
   AtomicInteger                                   noColsMissingStats;
 
-  protected static final Log                      LOG             = LogFactory
-                                                                      .getLog(RelOptHiveTable.class
+  protected static final Logger                      LOG             = LoggerFactory
+                                                                      .getLogger(RelOptHiveTable.class
                                                                           .getName());
 
   public RelOptHiveTable(RelOptSchema calciteSchema, String qualifiedTblName,

http://git-wip-us.apache.org/repos/asf/hive/blob/55337444/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveCostModel.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveCostModel.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveCostModel.java
index 4e3b654..d15d885 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveCostModel.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveCostModel.java
@@ -23,8 +23,8 @@ import org.apache.calcite.plan.RelOptCost;
 import org.apache.calcite.plan.RelOptUtil;
 import org.apache.calcite.rel.RelCollation;
 import org.apache.calcite.rel.RelDistribution;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveAggregate;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveJoin;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan;
@@ -36,7 +36,7 @@ import com.google.common.collect.ImmutableList;
  */
 public abstract class HiveCostModel {
 
-  private static final Log LOG = LogFactory.getLog(HiveCostModel.class);
+  private static final Logger LOG = LoggerFactory.getLogger(HiveCostModel.class);
 
   private final Set<JoinAlgorithm> joinAlgorithms;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/55337444/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveOnTezCostModel.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveOnTezCostModel.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveOnTezCostModel.java
index e9f1d96..61a3a64 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveOnTezCostModel.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveOnTezCostModel.java
@@ -29,8 +29,8 @@ import org.apache.calcite.rel.metadata.RelMetadataQuery;
 import org.apache.calcite.util.ImmutableBitSet;
 import org.apache.calcite.util.ImmutableIntList;
 import org.apache.calcite.util.Pair;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil.JoinPredicateInfo;
@@ -51,7 +51,7 @@ public class HiveOnTezCostModel extends HiveCostModel {
 
   private static HiveAlgorithmsUtil algoUtils;
 
-  private static transient final Log LOG = LogFactory.getLog(HiveOnTezCostModel.class);
+  private static transient final Logger LOG = LoggerFactory.getLogger(HiveOnTezCostModel.class);
 
   synchronized public static HiveOnTezCostModel getCostModel(HiveConf conf) {
     if (INSTANCE == null) {

http://git-wip-us.apache.org/repos/asf/hive/blob/55337444/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveInsertExchange4JoinRule.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveInsertExchange4JoinRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveInsertExchange4JoinRule.java
index 39c69a4..d6e3915 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveInsertExchange4JoinRule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveInsertExchange4JoinRule.java
@@ -30,8 +30,8 @@ import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.core.Exchange;
 import org.apache.calcite.rel.core.Join;
 import org.apache.calcite.rex.RexNode;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil.JoinLeafPredicateInfo;
@@ -55,8 +55,8 @@ import com.google.common.collect.Sets;
  */
 public class HiveInsertExchange4JoinRule extends RelOptRule {
 
-  protected static transient final Log LOG = LogFactory
-      .getLog(HiveInsertExchange4JoinRule.class);
+  protected static transient final Logger LOG = LoggerFactory
+      .getLogger(HiveInsertExchange4JoinRule.class);
 
   /** Rule that creates Exchange operators under a MultiJoin operator. */
   public static final HiveInsertExchange4JoinRule EXCHANGE_BELOW_MULTIJOIN =