Posted to commits@hive.apache.org by we...@apache.org on 2017/05/30 21:17:10 UTC

[01/12] hive git commit: HIVE-16755 : LLAP IO: incorrect assert may trigger in tests (Sergey Shelukhin, reviewed by Prasanth Jayachandran)

Repository: hive
Updated Branches:
  refs/heads/hive-14535 52e0f8f34 -> 144efb0fc


HIVE-16755 : LLAP IO: incorrect assert may trigger in tests (Sergey Shelukhin, reviewed by Prasanth Jayachandran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/4a7bc89f
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/4a7bc89f
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/4a7bc89f

Branch: refs/heads/hive-14535
Commit: 4a7bc89f92244a3079f57e5c0b0fbd86f7fb7ec8
Parents: 2fa4dc2
Author: sergey <se...@apache.org>
Authored: Thu May 25 15:58:56 2017 -0700
Committer: sergey <se...@apache.org>
Committed: Thu May 25 15:58:56 2017 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/4a7bc89f/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
index 03bc3ce..9693826 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
@@ -378,7 +378,9 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
                 orcReader.getSchema(), orcReader.getWriterVersion());
             counters.incrTimeCounter(LlapIOCounters.HDFS_TIME_NS, startTimeHdfs);
             if (hasFileId && metadataCache != null) {
-              stripeMetadata = metadataCache.putStripeMetadata(stripeMetadata);
+              OrcStripeMetadata newMetadata = metadataCache.putStripeMetadata(stripeMetadata);
+              isFoundInCache = newMetadata != stripeMetadata; // May be cached concurrently.
+              stripeMetadata = newMetadata;
               if (LlapIoImpl.ORC_LOGGER.isTraceEnabled()) {
                 LlapIoImpl.ORC_LOGGER.trace("Caching stripe {} metadata with includes: {}",
                     stripeKey.stripeIx, DebugUtils.toString(globalIncludes));
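
The fix relies on a common pattern for concurrent caches: the put call returns whichever instance actually ends up in the cache, which may be one inserted by another thread for the same stripe, so the caller adopts the returned object and detects the concurrent insert by reference comparison. A minimal sketch of that pattern, assuming a plain ConcurrentHashMap rather than Hive's LLAP metadata cache API:

    import java.util.concurrent.ConcurrentHashMap;

    class CanonicalCache<K, V> {
      private final ConcurrentHashMap<K, V> cache = new ConcurrentHashMap<>();

      // Returns the instance that is actually cached: either the caller's value,
      // or the one another thread managed to cache first under the same key.
      V putIfAbsentCanonical(K key, V value) {
        V previous = cache.putIfAbsent(key, value);
        return previous != null ? previous : value;
      }
    }

    // Caller side, mirroring the hunk above (names illustrative):
    //   OrcStripeMetadata cached = cache.putIfAbsentCanonical(key, stripeMetadata);
    //   boolean foundInCache = cached != stripeMetadata;  // someone cached it concurrently
    //   stripeMetadata = cached;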


[09/12] hive git commit: HIVE-16779 : CachedStore leaks PersistenceManager resources (Daniel Dai, via Thejas Nair)

Posted by we...@apache.org.
HIVE-16779 : CachedStore leaks PersistenceManager resources (Daniel Dai, via Thejas Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/bbf0629a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/bbf0629a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/bbf0629a

Branch: refs/heads/hive-14535
Commit: bbf0629a5a2e43531c4fd5e17d727497e89d267d
Parents: a18e772
Author: Thejas M Nair <th...@hortonworks.com>
Authored: Sun May 28 11:44:59 2017 -0700
Committer: Thejas M Nair <th...@hortonworks.com>
Committed: Sun May 28 11:44:59 2017 -0700

----------------------------------------------------------------------
 .../hive/metastore/cache/CachedStore.java       | 25 ++++++++++++++------
 .../hadoop/hive/ql/session/SessionState.java    |  5 +++-
 2 files changed, 22 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/bbf0629a/metastore/src/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
index f00f08f..590e9ce 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
@@ -116,7 +116,7 @@ public class CachedStore implements RawStore, Configurable {
   private static ReentrantReadWriteLock partitionColStatsCacheLock = new ReentrantReadWriteLock(
       true);
   private static AtomicBoolean isPartitionColStatsCacheDirty = new AtomicBoolean(false);
-  RawStore rawStore;
+  RawStore rawStore = null;
   Configuration conf;
   private PartitionExpressionProxy expressionProxy = null;
   // Default value set to 100 milliseconds for test purpose
@@ -197,11 +197,13 @@ public class CachedStore implements RawStore, Configurable {
   public void setConf(Configuration conf) {
     String rawStoreClassName = HiveConf.getVar(conf, HiveConf.ConfVars.METASTORE_CACHED_RAW_STORE_IMPL,
         ObjectStore.class.getName());
-    try {
-      rawStore = ((Class<? extends RawStore>) MetaStoreUtils.getClass(
-          rawStoreClassName)).newInstance();
-    } catch (Exception e) {
-      throw new RuntimeException("Cannot instantiate " + rawStoreClassName, e);
+    if (rawStore == null) {
+      try {
+        rawStore = ((Class<? extends RawStore>) MetaStoreUtils.getClass(
+            rawStoreClassName)).newInstance();
+      } catch (Exception e) {
+        throw new RuntimeException("Cannot instantiate " + rawStoreClassName, e);
+      }
     }
     rawStore.setConf(conf);
     Configuration oldConf = this.conf;
@@ -330,8 +332,9 @@ public class CachedStore implements RawStore, Configurable {
       String rawStoreClassName =
           HiveConf.getVar(cachedStore.conf, HiveConf.ConfVars.METASTORE_CACHED_RAW_STORE_IMPL,
               ObjectStore.class.getName());
+      RawStore rawStore = null;
       try {
-        RawStore rawStore =
+        rawStore =
             ((Class<? extends RawStore>) MetaStoreUtils.getClass(rawStoreClassName)).newInstance();
         rawStore.setConf(cachedStore.conf);
         List<String> dbNames = rawStore.getAllDatabases();
@@ -356,6 +359,14 @@ public class CachedStore implements RawStore, Configurable {
         LOG.error("Updating CachedStore: error getting database names", e);
       } catch (InstantiationException | IllegalAccessException e) {
         throw new RuntimeException("Cannot instantiate " + rawStoreClassName, e);
+      } finally {
+        try {
+          if (rawStore != null) {
+            rawStore.shutdown();
+          }
+        } catch (Exception e) {
+          LOG.error("Error shutting down RawStore", e);
+        }
       }
     }
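
The finally block added here follows the standard declare-outside, create-inside, release-in-finally shape, so the temporary RawStore created by the background cache-update thread is shut down (and its PersistenceManager released) on every exit path, including failed refreshes. A minimal sketch of that shape, with a hypothetical Store interface standing in for RawStore:

    import java.util.function.Supplier;

    interface Store {
      void refresh() throws Exception;
      void shutdown();
    }

    class CacheRefresherSketch {
      static void refreshOnce(Supplier<Store> factory) {
        Store store = null;
        try {
          store = factory.get();
          store.refresh();
        } catch (Exception e) {
          // log and return; the periodic refresh thread stays alive
        } finally {
          if (store != null) {
            try {
              store.shutdown();  // releases the underlying persistence resources
            } catch (Exception e) {
              // log only; a cleanup failure must not mask the original outcome
            }
          }
        }
      }
    }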
 

http://git-wip-us.apache.org/repos/asf/hive/blob/bbf0629a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
index 479a938..d7592bb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
@@ -56,6 +56,7 @@ import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.ObjectStore;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+import org.apache.hadoop.hive.metastore.cache.CachedStore;
 import org.apache.hadoop.hive.ql.MapRedStats;
 import org.apache.hadoop.hive.ql.exec.Registry;
 import org.apache.hadoop.hive.ql.exec.Utilities;
@@ -1684,7 +1685,9 @@ public class SessionState {
       Hive threadLocalHive = Hive.get(sessionConf);
       if ((threadLocalHive != null) && (threadLocalHive.getMSC() != null)
           && (threadLocalHive.getMSC().isLocalMetaStore())) {
-        if (sessionConf.getVar(ConfVars.METASTORE_RAW_STORE_IMPL).equals(ObjectStore.class.getName())) {
+        if (sessionConf.getVar(ConfVars.METASTORE_RAW_STORE_IMPL).equals(ObjectStore.class.getName())
+            || sessionConf.getVar(ConfVars.METASTORE_RAW_STORE_IMPL).equals(CachedStore.class.getName()) &&
+            sessionConf.getVar(ConfVars.METASTORE_CACHED_RAW_STORE_IMPL).equals(ObjectStore.class.getName())) {
           ObjectStore.unCacheDataNucleusClassLoaders();
         }
       }
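
Because && binds more tightly than || in Java, the new condition groups as "ObjectStore directly, or CachedStore delegating to ObjectStore". A restatement with the grouping made explicit, using illustrative parameter names rather than the HiveConf lookups:

    class StoreSelectionSketch {
      // Equivalent grouping of the condition above; parameters are illustrative.
      static boolean shouldUncacheDataNucleusLoaders(String rawStoreImpl,
          String cachedRawStoreImpl, String objectStoreName, String cachedStoreName) {
        boolean rawIsObjectStore  = rawStoreImpl.equals(objectStoreName);
        boolean rawIsCachedStore  = rawStoreImpl.equals(cachedStoreName);
        boolean cachedRawIsObject = cachedRawStoreImpl.equals(objectStoreName);
        return rawIsObjectStore || (rawIsCachedStore && cachedRawIsObject);
      }
    }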


[07/12] hive git commit: HIVE-16765: ParquetFileReader should be closed to avoid resource leak (Colin Ma, reviewed by Ferdinand Xu)

Posted by we...@apache.org.
HIVE-16765: ParquetFileReader should be closed to avoid resource leak (Colin Ma, reviewed by Ferdinand Xu)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/3330403a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/3330403a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/3330403a

Branch: refs/heads/hive-14535
Commit: 3330403a7dea206c85828dfd5319a7ae0d9bcef5
Parents: ca80968
Author: Ferdinand Xu <ch...@intel.com>
Authored: Sat May 27 07:35:40 2017 +0800
Committer: Ferdinand Xu <ch...@intel.com>
Committed: Sat May 27 07:35:40 2017 +0800

----------------------------------------------------------------------
 .../hive/ql/io/parquet/vector/VectorizedParquetRecordReader.java  | 3 +++
 1 file changed, 3 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/3330403a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/VectorizedParquetRecordReader.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/VectorizedParquetRecordReader.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/VectorizedParquetRecordReader.java
index 96d3847..65b4398 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/VectorizedParquetRecordReader.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/VectorizedParquetRecordReader.java
@@ -259,6 +259,9 @@ public class VectorizedParquetRecordReader extends ParquetRecordReaderBase
 
   @Override
   public void close() throws IOException {
+    if (reader != null) {
+      reader.close();
+    }
   }
 
   @Override
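
The added close() body is the usual guarded close for a reader that may never have been opened: check for null, then close to release the underlying file handles. A minimal sketch, with a plain Closeable standing in for the ParquetFileReader field:

    import java.io.Closeable;
    import java.io.IOException;

    class GuardedCloseSketch implements Closeable {
      private Closeable reader;  // may remain null if initialization failed early

      @Override
      public void close() throws IOException {
        if (reader != null) {    // nothing to release if the reader was never opened
          reader.close();
        }
      }
    }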


[12/12] hive git commit: HIVE-14671 : Merge branch 'master' into hive-14535 (Wei Zheng)

Posted by we...@apache.org.
HIVE-14671 : Merge branch 'master' into hive-14535 (Wei Zheng)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/144efb0f
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/144efb0f
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/144efb0f

Branch: refs/heads/hive-14535
Commit: 144efb0fcc8b74484c860e3559666a313641c93d
Parents: 52e0f8f 4cd4251
Author: Wei Zheng <we...@apache.org>
Authored: Tue May 30 14:16:03 2017 -0700
Committer: Wei Zheng <we...@apache.org>
Committed: Tue May 30 14:16:03 2017 -0700

----------------------------------------------------------------------
 .../hive/common/jsonexplain/DagJsonParser.java  |   6 +-
 .../java/org/apache/hive/http/HttpServer.java   |   1 +
 .../hive/http/Log4j2ConfiguratorServlet.java    | 275 +++++++++++++++++++
 .../hive/druid/DruidStorageHandlerUtils.java    |  34 ++-
 .../hadoop/hive/druid/io/DruidOutputFormat.java |   7 +-
 .../hadoop/hive/druid/io/DruidRecordWriter.java |   2 +-
 .../serde/HiveDruidSerializationModule.java     |  37 +++
 .../serde/PeriodGranularitySerializer.java      |  54 ++++
 .../hive/druid/DerbyConnectorTestUtility.java   |   4 +-
 .../hadoop/hive/druid/TestDruidSerDe.java       |   2 +-
 .../hive/druid/TestDruidStorageHandler.java     |   2 +-
 .../TestHiveDruidQueryBasedInputFormat.java     |  38 +--
 .../hive/ql/io/TestDruidRecordWriter.java       |  11 +-
 .../listener/DbNotificationListener.java        |  11 +-
 .../listener/TestDbNotificationListener.java    |  28 +-
 .../hive/ql/parse/TestReplicationScenarios.java | 130 +++++++++
 .../apache/hadoop/hive/llap/LlapDaemonInfo.java |  14 +-
 llap-server/bin/runLlapDaemon.sh                |   2 +-
 .../hive/llap/daemon/impl/LlapDaemon.java       |   8 +-
 .../hadoop/hive/llap/daemon/impl/QueryInfo.java |   2 +
 .../llap/daemon/impl/TaskExecutorService.java   |  42 ++-
 .../llap/daemon/impl/TaskRunnerCallable.java    |  14 +-
 .../llap/io/encoded/OrcEncodedDataReader.java   |   4 +-
 .../hive/llap/metrics/LlapDaemonJvmInfo.java    |   2 +
 .../hive/llap/metrics/LlapDaemonJvmMetrics.java |  35 ++-
 .../hive/llap/daemon/MiniLlapCluster.java       |   2 +-
 .../llap/tezplugins/LlapTaskCommunicator.java   |   5 +-
 .../upgrade/mysql/041-HIVE-16556.mysql.sql      |   2 +-
 .../hive/metastore/cache/CachedStore.java       |  25 +-
 .../hive/metastore/events/InsertEvent.java      |  44 ++-
 .../hive/metastore/messaging/InsertMessage.java |  24 +-
 .../metastore/messaging/MessageFactory.java     |  10 +-
 .../messaging/json/JSONInsertMessage.java       |  51 +++-
 .../messaging/json/JSONMessageFactory.java      |  10 +-
 pom.xml                                         |   2 +-
 .../hive/ql/exec/AppMasterEventOperator.java    |  14 +-
 .../hadoop/hive/ql/exec/CommonJoinOperator.java |   2 +-
 .../hadoop/hive/ql/exec/DemuxOperator.java      |  26 +-
 .../hadoop/hive/ql/exec/FileSinkOperator.java   |  20 +-
 .../hadoop/hive/ql/exec/GroupByOperator.java    |  17 +-
 .../hive/ql/exec/HashTableSinkOperator.java     |   4 +-
 .../hadoop/hive/ql/exec/JoinOperator.java       |   2 +-
 .../hadoop/hive/ql/exec/MapJoinOperator.java    |   8 +-
 .../apache/hadoop/hive/ql/exec/MapOperator.java |  10 +-
 .../apache/hadoop/hive/ql/exec/MuxOperator.java |   8 +-
 .../apache/hadoop/hive/ql/exec/Operator.java    |  49 ++--
 .../hive/ql/exec/OrcFileMergeOperator.java      |   6 +-
 .../hadoop/hive/ql/exec/ReduceSinkOperator.java |  12 +-
 .../hadoop/hive/ql/exec/SMBMapJoinOperator.java |   8 +-
 .../hadoop/hive/ql/exec/ScriptOperator.java     |   8 +-
 .../hadoop/hive/ql/exec/SelectOperator.java     |   2 +-
 .../hadoop/hive/ql/exec/TableScanOperator.java  |   8 +-
 .../hadoop/hive/ql/exec/UnionOperator.java      |   2 +-
 .../hadoop/hive/ql/exec/mr/ExecReducer.java     |  12 +-
 .../hadoop/hive/ql/exec/mr/ObjectCache.java     |   5 +-
 .../ql/exec/spark/SparkMapRecordHandler.java    |   7 +-
 .../ql/exec/spark/SparkReduceRecordHandler.java |   9 +-
 .../ql/exec/tez/ColumnarSplitSizeEstimator.java |   5 +-
 .../tez/HostAffinitySplitLocationProvider.java  |   2 +-
 .../hive/ql/exec/tez/LlapObjectCache.java       |  12 +-
 .../hive/ql/exec/tez/RecordProcessor.java       |   6 -
 .../mapjoin/VectorMapJoinCommonOperator.java    |  12 +-
 .../VectorMapJoinGenerateResultOperator.java    |   8 +-
 .../VectorMapJoinInnerBigOnlyLongOperator.java  |   8 +-
 ...ctorMapJoinInnerBigOnlyMultiKeyOperator.java |   8 +-
 ...VectorMapJoinInnerBigOnlyStringOperator.java |   8 +-
 .../mapjoin/VectorMapJoinInnerLongOperator.java |   8 +-
 .../VectorMapJoinInnerMultiKeyOperator.java     |   8 +-
 .../VectorMapJoinInnerStringOperator.java       |   8 +-
 .../VectorMapJoinLeftSemiLongOperator.java      |   8 +-
 .../VectorMapJoinLeftSemiMultiKeyOperator.java  |   8 +-
 .../VectorMapJoinLeftSemiStringOperator.java    |   8 +-
 ...ectorMapJoinOuterGenerateResultOperator.java |  12 +-
 .../mapjoin/VectorMapJoinOuterLongOperator.java |  10 +-
 .../VectorMapJoinOuterMultiKeyOperator.java     |  10 +-
 .../VectorMapJoinOuterStringOperator.java       |  10 +-
 .../fast/VectorMapJoinFastBytesHashTable.java   |   6 +-
 .../fast/VectorMapJoinFastLongHashTable.java    |   6 +-
 .../VectorReduceSinkCommonOperator.java         |   8 +-
 .../hadoop/hive/ql/io/orc/ExternalCache.java    |   3 +-
 .../hadoop/hive/ql/io/orc/OrcInputFormat.java   |  11 +-
 .../vector/VectorizedParquetRecordReader.java   |   3 +
 .../stats/annotation/StatsRulesProcFactory.java |  56 ++--
 .../parse/repl/dump/events/InsertHandler.java   |  28 +-
 .../hadoop/hive/ql/session/SessionState.java    |   5 +-
 .../results/clientpositive/llap/cte_mat_3.q.out |   2 +-
 .../results/clientpositive/llap/cte_mat_4.q.out |   4 +-
 .../results/clientpositive/llap/cte_mat_5.q.out |   2 +-
 .../spark/spark_explainuser_1.q.out             |  24 +-
 .../hadoop/hive/serde2/lazy/LazyBinary.java     |   5 +-
 90 files changed, 1022 insertions(+), 479 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/144efb0f/metastore/src/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/144efb0f/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
----------------------------------------------------------------------
diff --cc ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
index 351059b,3e09432..004aaf7
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
@@@ -305,90 -265,8 +303,90 @@@ public class FileSinkOperator extends T
        }
      }
  
 -    public Stat getStat() {
 -      return stat;
 +    public void configureDynPartPath(String dirName, String childSpecPathDynLinkedPartitions) {
 +      dirName = (childSpecPathDynLinkedPartitions == null) ? dirName :
 +        dirName + Path.SEPARATOR + childSpecPathDynLinkedPartitions;
 +      tmpPath = new Path(tmpPath, dirName);
 +      if (taskOutputTempPath != null) {
 +        taskOutputTempPath = new Path(taskOutputTempPath, dirName);
 +      }
 +    }
 +
 +    public void initializeBucketPaths(int filesIdx, String taskId, boolean isNativeTable,
 +        boolean isSkewedStoredAsSubDirectories) {
 +      if (isNativeTable) {
 +        String extension = Utilities.getFileExtension(jc, isCompressed, hiveOutputFormat);
 +        if (!isMmTable) {
 +          if (!bDynParts && !isSkewedStoredAsSubDirectories) {
 +            finalPaths[filesIdx] = getFinalPath(taskId, parent, extension);
 +          } else {
 +            finalPaths[filesIdx] = getFinalPath(taskId, tmpPath, extension);
 +          }
 +          outPaths[filesIdx] = getTaskOutPath(taskId);
 +        } else {
 +          String subdirPath = AcidUtils.deltaSubdir(txnId, txnId, stmtId);
 +          if (unionPath != null) {
 +            // Create the union directory inside the MM directory.
 +            subdirPath += Path.SEPARATOR + unionPath;
 +          }
 +          subdirPath += Path.SEPARATOR + taskId;
 +          if (conf.isMerge()) {
 +            // Make sure we don't collide with the source files.
 +            // MM tables don't support concat so we don't expect the merge of merged files.
 +            subdirPath += ".merged";
 +          }
 +          Path finalPath = null;
 +          if (!bDynParts && !isSkewedStoredAsSubDirectories) {
 +            finalPath = getFinalPath(subdirPath, specPath, extension);
 +          } else {
 +            // Note: tmpPath here has the correct partition key
 +            finalPath = getFinalPath(subdirPath, tmpPath, extension);
 +          }
 +          // In the cases that have multi-stage insert, e.g. a "hive.skewjoin.key"-based skew join,
 +          // it can happen that we want multiple commits into the same directory from different
 +          // tasks (not just task instances). In non-MM case, Utilities.renameOrMoveFiles ensures
 +          // unique names. We could do the same here, but this will still cause the old file to be
 +          // deleted because it has not been committed in /this/ FSOP. We are going to fail to be
 +          // safe. Potentially, we could implement some partial commit between stages, if this
 +          // affects some less obscure scenario.
 +          try {
 +            FileSystem fpfs = finalPath.getFileSystem(hconf);
 +            if (fpfs.exists(finalPath)) throw new RuntimeException(finalPath + " already exists");
 +          } catch (IOException e) {
 +            throw new RuntimeException(e);
 +          }
 +          finalPaths[filesIdx] = finalPath;
 +          outPaths[filesIdx] = finalPath;
 +        }
-         if (isInfoEnabled) {
++        if (LOG.isInfoEnabled()) {
 +          LOG.info("Final Path: FS " + finalPaths[filesIdx]);
-           if (isInfoEnabled && !isMmTable) {
++          if (LOG.isInfoEnabled() && !isMmTable) {
 +            LOG.info("Writing to temp file: FS " + outPaths[filesIdx]);
 +          }
 +        }
 +      } else {
 +        finalPaths[filesIdx] = outPaths[filesIdx] = specPath;
 +      }
 +    }
 +
 +    public Path getTmpPath() {
 +      return tmpPath;
 +    }
 +
 +    public Path getTaskOutputTempPath() {
 +      return taskOutputTempPath;
 +    }
 +
 +    public void addToStat(String statType, long amount) {
 +      if ("rowCount".equals(statType)) {
 +        Utilities.LOG14535.info("Adding " + statType + " = " + amount + " to " + System.identityHashCode(this));
 +      }
 +      stat.addToStat(statType, amount);
 +    }
 +
 +    public Collection<String> getStoredStats() {
 +      Utilities.LOG14535.info("Getting stats from " + System.identityHashCode(this));
 +      return stat.getStoredStats();
      }
    } // class FSPaths
  
@@@ -715,12 -578,30 +713,12 @@@
    protected void createBucketForFileIdx(FSPaths fsp, int filesIdx)
        throws HiveException {
      try {
 -      if (isNativeTable) {
 -        fsp.finalPaths[filesIdx] = fsp.getFinalPath(taskId, fsp.tmpPath, null);
 -        if (LOG.isInfoEnabled()) {
 -          LOG.info("Final Path: FS " + fsp.finalPaths[filesIdx]);
 -        }
 -        fsp.outPaths[filesIdx] = fsp.getTaskOutPath(taskId);
 -        if (LOG.isInfoEnabled()) {
 -          LOG.info("Writing to temp file: FS " + fsp.outPaths[filesIdx]);
 -        }
 -      } else {
 -        fsp.finalPaths[filesIdx] = fsp.outPaths[filesIdx] = specPath;
 -      }
 -      // The reason to keep these instead of using
 -      // OutputFormat.getRecordWriter() is that
 -      // getRecordWriter does not give us enough control over the file name that
 -      // we create.
 -      String extension = Utilities.getFileExtension(jc, isCompressed, hiveOutputFormat);
 -      if (!bDynParts && !this.isSkewedStoredAsSubDirectories) {
 -        fsp.finalPaths[filesIdx] = fsp.getFinalPath(taskId, parent, extension);
 -      } else {
 -        fsp.finalPaths[filesIdx] = fsp.getFinalPath(taskId, fsp.tmpPath, extension);
 -      }
 +      fsp.initializeBucketPaths(filesIdx, taskId, isNativeTable, isSkewedStoredAsSubDirectories);
 +      Utilities.LOG14535.info("createBucketForFileIdx " + filesIdx + ": final path " + fsp.finalPaths[filesIdx]
 +          + "; out path " + fsp.outPaths[filesIdx] +" (spec path " + specPath + ", tmp path "
 +          + fsp.getTmpPath() + ", task " + taskId + ")"/*, new Exception()*/);
  
-       if (isInfoEnabled) {
+       if (LOG.isInfoEnabled()) {
          LOG.info("New Final Path: FS " + fsp.finalPaths[filesIdx]);
        }
  
@@@ -860,12 -736,12 +858,12 @@@
        if (conf.isGatherStats() && !isCollectRWStats) {
          SerDeStats stats = serializer.getSerDeStats();
          if (stats != null) {
 -          fpaths.stat.addToStat(StatsSetupConst.RAW_DATA_SIZE, stats.getRawDataSize());
 +          fpaths.addToStat(StatsSetupConst.RAW_DATA_SIZE, stats.getRawDataSize());
          }
 -        fpaths.stat.addToStat(StatsSetupConst.ROW_COUNT, 1);
 +        fpaths.addToStat(StatsSetupConst.ROW_COUNT, 1);
        }
  
-       if ((++numRows == cntr) && isLogInfoEnabled) {
+       if ((++numRows == cntr) && LOG.isInfoEnabled()) {
          cntr = logEveryNRows == 0 ? cntr * 10 : numRows + logEveryNRows;
          if (cntr < 0 || numRows < 0) {
            cntr = 0;
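
The hunk above keeps FileSinkOperator's progress-logging cadence: when logEveryNRows is 0 the threshold grows tenfold each time it is hit (rows 1, 10, 100, ...), otherwise it advances by a fixed stride, with a reset guarding against overflow. A sketch of that cadence in isolation; the counter handling after the overflow guard is illustrative, since the hunk is truncated here:

    class RowLogCadenceSketch {
      private long numRows = 0;
      private long cntr = 1;
      private final long logEveryNRows;

      RowLogCadenceSketch(long logEveryNRows) { this.logEveryNRows = logEveryNRows; }

      // Returns true when a progress message should be emitted for this row.
      boolean shouldLog() {
        if (++numRows != cntr) {
          return false;
        }
        cntr = (logEveryNRows == 0) ? cntr * 10 : numRows + logEveryNRows;
        if (cntr < 0 || numRows < 0) {  // overflow guard, as in the original
          cntr = 1;
          numRows = 0;
        }
        return true;
      }
    }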

http://git-wip-us.apache.org/repos/asf/hive/blob/144efb0f/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/144efb0f/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/144efb0f/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java
----------------------------------------------------------------------
diff --cc ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java
index a845b50,d9547b9..44e50e2
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java
@@@ -130,9 -126,8 +130,9 @@@ public class OrcFileMergeOperator exten
            options.bufferSize(compressBuffSize).enforceBufferSize();
          }
  
 +        Path outPath = getOutPath();
          outWriter = OrcFile.createWriter(outPath, options);
-         if (isLogDebugEnabled) {
+         if (LOG.isDebugEnabled()) {
            LOG.info("ORC merge file output path: " + outPath);
          }
        }

http://git-wip-us.apache.org/repos/asf/hive/blob/144efb0f/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/144efb0f/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
----------------------------------------------------------------------


[05/12] hive git commit: HIVE-16285: Servlet for dynamically configuring log levels (Prasanth Jayachandran, reviewed by Siddharth Seth, Gopal V)

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyLongOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyLongOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyLongOperator.java
index 43f3951..84edff2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyLongOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyLongOperator.java
@@ -154,7 +154,7 @@ public class VectorMapJoinInnerBigOnlyLongOperator extends VectorMapJoinInnerBig
       final int inputLogicalSize = batch.size;
 
       if (inputLogicalSize == 0) {
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " empty");
         }
         return;
@@ -212,7 +212,7 @@ public class VectorMapJoinInnerBigOnlyLongOperator extends VectorMapJoinInnerBig
          * Common repeated join result processing.
          */
 
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name());
         }
         finishInnerBigOnlyRepeated(batch, joinResult, hashMultiSetResults[0]);
@@ -222,7 +222,7 @@ public class VectorMapJoinInnerBigOnlyLongOperator extends VectorMapJoinInnerBig
          * NOT Repeating.
          */
 
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated");
         }
 
@@ -377,7 +377,7 @@ public class VectorMapJoinInnerBigOnlyLongOperator extends VectorMapJoinInnerBig
           }
         }
 
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug(CLASS_NAME +
               " allMatchs " + intArrayToRangesString(allMatchs, allMatchCount) +
               " equalKeySeriesValueCounts " + longArrayToRangesString(equalKeySeriesValueCounts, equalKeySeriesCount) +

http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyMultiKeyOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyMultiKeyOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyMultiKeyOperator.java
index 95fb0c2..7fe875b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyMultiKeyOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyMultiKeyOperator.java
@@ -161,7 +161,7 @@ public class VectorMapJoinInnerBigOnlyMultiKeyOperator extends VectorMapJoinInne
       final int inputLogicalSize = batch.size;
 
       if (inputLogicalSize == 0) {
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " empty");
         }
         return;
@@ -227,7 +227,7 @@ public class VectorMapJoinInnerBigOnlyMultiKeyOperator extends VectorMapJoinInne
          * Common repeated join result processing.
          */
 
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name());
         }
         finishInnerBigOnlyRepeated(batch, joinResult, hashMultiSetResults[0]);
@@ -237,7 +237,7 @@ public class VectorMapJoinInnerBigOnlyMultiKeyOperator extends VectorMapJoinInne
          * NOT Repeating.
          */
 
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated");
         }
 
@@ -386,7 +386,7 @@ public class VectorMapJoinInnerBigOnlyMultiKeyOperator extends VectorMapJoinInne
           }
         }
 
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug(CLASS_NAME +
               " allMatchs " + intArrayToRangesString(allMatchs, allMatchCount) +
               " equalKeySeriesValueCounts " + longArrayToRangesString(equalKeySeriesValueCounts, equalKeySeriesCount) +

http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyStringOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyStringOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyStringOperator.java
index 044e3e6..3869b91 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyStringOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyStringOperator.java
@@ -145,7 +145,7 @@ public class VectorMapJoinInnerBigOnlyStringOperator extends VectorMapJoinInnerB
       final int inputLogicalSize = batch.size;
 
       if (inputLogicalSize == 0) {
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " empty");
         }
         return;
@@ -205,7 +205,7 @@ public class VectorMapJoinInnerBigOnlyStringOperator extends VectorMapJoinInnerB
          * Common repeated join result processing.
          */
 
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name());
         }
         finishInnerBigOnlyRepeated(batch, joinResult, hashMultiSetResults[0]);
@@ -215,7 +215,7 @@ public class VectorMapJoinInnerBigOnlyStringOperator extends VectorMapJoinInnerB
          * NOT Repeating.
          */
 
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated");
         }
 
@@ -363,7 +363,7 @@ public class VectorMapJoinInnerBigOnlyStringOperator extends VectorMapJoinInnerB
           }
         }
 
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug(CLASS_NAME +
               " allMatchs " + intArrayToRangesString(allMatchs, allMatchCount) +
               " equalKeySeriesValueCounts " + longArrayToRangesString(equalKeySeriesValueCounts, equalKeySeriesCount) +

http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerLongOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerLongOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerLongOperator.java
index c85e1d8..b88a14d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerLongOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerLongOperator.java
@@ -152,7 +152,7 @@ public class VectorMapJoinInnerLongOperator extends VectorMapJoinInnerGenerateRe
       final int inputLogicalSize = batch.size;
 
       if (inputLogicalSize == 0) {
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " empty");
         }
         return;
@@ -210,7 +210,7 @@ public class VectorMapJoinInnerLongOperator extends VectorMapJoinInnerGenerateRe
          * Common repeated join result processing.
          */
 
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name());
         }
         finishInnerRepeated(batch, joinResult, hashMapResults[0]);
@@ -220,7 +220,7 @@ public class VectorMapJoinInnerLongOperator extends VectorMapJoinInnerGenerateRe
          * NOT Repeating.
          */
 
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated");
         }
 
@@ -374,7 +374,7 @@ public class VectorMapJoinInnerLongOperator extends VectorMapJoinInnerGenerateRe
           }
         }
 
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug(CLASS_NAME +
               " allMatchs " + intArrayToRangesString(allMatchs,allMatchCount) +
               " equalKeySeriesHashMapResultIndices " + intArrayToRangesString(equalKeySeriesHashMapResultIndices, equalKeySeriesCount) +

http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerMultiKeyOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerMultiKeyOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerMultiKeyOperator.java
index a108cd0..6dc6be8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerMultiKeyOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerMultiKeyOperator.java
@@ -158,7 +158,7 @@ public class VectorMapJoinInnerMultiKeyOperator extends VectorMapJoinInnerGenera
       final int inputLogicalSize = batch.size;
 
       if (inputLogicalSize == 0) {
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " empty");
         }
         return;
@@ -224,7 +224,7 @@ public class VectorMapJoinInnerMultiKeyOperator extends VectorMapJoinInnerGenera
          * Common repeated join result processing.
          */
 
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name());
         }
         finishInnerRepeated(batch, joinResult, hashMapResults[0]);
@@ -234,7 +234,7 @@ public class VectorMapJoinInnerMultiKeyOperator extends VectorMapJoinInnerGenera
          * NOT Repeating.
          */
 
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated");
         }
 
@@ -382,7 +382,7 @@ public class VectorMapJoinInnerMultiKeyOperator extends VectorMapJoinInnerGenera
           }
         }
 
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug(CLASS_NAME +
               " allMatchs " + intArrayToRangesString(allMatchs,allMatchCount) +
               " equalKeySeriesHashMapResultIndices " + intArrayToRangesString(equalKeySeriesHashMapResultIndices, equalKeySeriesCount) +

http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerStringOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerStringOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerStringOperator.java
index 3211d7d..64e4f9c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerStringOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerStringOperator.java
@@ -143,7 +143,7 @@ public class VectorMapJoinInnerStringOperator extends VectorMapJoinInnerGenerate
       final int inputLogicalSize = batch.size;
 
       if (inputLogicalSize == 0) {
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " empty");
         }
         return;
@@ -199,7 +199,7 @@ public class VectorMapJoinInnerStringOperator extends VectorMapJoinInnerGenerate
          * Common repeated join result processing.
          */
 
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name());
         }
         finishInnerRepeated(batch, joinResult, hashMapResults[0]);
@@ -209,7 +209,7 @@ public class VectorMapJoinInnerStringOperator extends VectorMapJoinInnerGenerate
          * NOT Repeating.
          */
 
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated");
         }
 
@@ -356,7 +356,7 @@ public class VectorMapJoinInnerStringOperator extends VectorMapJoinInnerGenerate
           }
         }
 
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug(CLASS_NAME +
               " allMatchs " + intArrayToRangesString(allMatchs,allMatchCount) +
               " equalKeySeriesHashMapResultIndices " + intArrayToRangesString(equalKeySeriesHashMapResultIndices, equalKeySeriesCount) +

http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiLongOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiLongOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiLongOperator.java
index b02e6fd..2a3f8b9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiLongOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiLongOperator.java
@@ -154,7 +154,7 @@ public class VectorMapJoinLeftSemiLongOperator extends VectorMapJoinLeftSemiGene
       final int inputLogicalSize = batch.size;
 
       if (inputLogicalSize == 0) {
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " empty");
         }
         return;
@@ -212,7 +212,7 @@ public class VectorMapJoinLeftSemiLongOperator extends VectorMapJoinLeftSemiGene
          * Common repeated join result processing.
          */
 
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name());
         }
         finishLeftSemiRepeated(batch, joinResult, hashSetResults[0]);
@@ -222,7 +222,7 @@ public class VectorMapJoinLeftSemiLongOperator extends VectorMapJoinLeftSemiGene
          * NOT Repeating.
          */
 
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated");
         }
 
@@ -370,7 +370,7 @@ public class VectorMapJoinLeftSemiLongOperator extends VectorMapJoinLeftSemiGene
           }
         }
 
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug(CLASS_NAME +
               " allMatchs " + intArrayToRangesString(allMatchs, allMatchCount) +
               " spills " + intArrayToRangesString(spills, spillCount) +

http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiMultiKeyOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiMultiKeyOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiMultiKeyOperator.java
index 36b8f3f..2c7c30c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiMultiKeyOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiMultiKeyOperator.java
@@ -160,7 +160,7 @@ public class VectorMapJoinLeftSemiMultiKeyOperator extends VectorMapJoinLeftSemi
       final int inputLogicalSize = batch.size;
 
       if (inputLogicalSize == 0) {
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " empty");
         }
         return;
@@ -227,7 +227,7 @@ public class VectorMapJoinLeftSemiMultiKeyOperator extends VectorMapJoinLeftSemi
          * Common repeated join result processing.
          */
 
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name());
         }
         finishLeftSemiRepeated(batch, joinResult, hashSetResults[0]);
@@ -237,7 +237,7 @@ public class VectorMapJoinLeftSemiMultiKeyOperator extends VectorMapJoinLeftSemi
          * NOT Repeating.
          */
 
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated");
         }
 
@@ -382,7 +382,7 @@ public class VectorMapJoinLeftSemiMultiKeyOperator extends VectorMapJoinLeftSemi
           }
         }
 
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug(CLASS_NAME +
               " allMatchs " + intArrayToRangesString(allMatchs, allMatchCount) +
               " spills " + intArrayToRangesString(spills, spillCount) +

http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiStringOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiStringOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiStringOperator.java
index 0b3de0a..e00dfc7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiStringOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiStringOperator.java
@@ -145,7 +145,7 @@ public class VectorMapJoinLeftSemiStringOperator extends VectorMapJoinLeftSemiGe
       final int inputLogicalSize = batch.size;
 
       if (inputLogicalSize == 0) {
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " empty");
         }
         return;
@@ -202,7 +202,7 @@ public class VectorMapJoinLeftSemiStringOperator extends VectorMapJoinLeftSemiGe
          * Common repeated join result processing.
          */
 
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name());
         }
         finishLeftSemiRepeated(batch, joinResult, hashSetResults[0]);
@@ -212,7 +212,7 @@ public class VectorMapJoinLeftSemiStringOperator extends VectorMapJoinLeftSemiGe
          * NOT Repeating.
          */
 
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated");
         }
 
@@ -353,7 +353,7 @@ public class VectorMapJoinLeftSemiStringOperator extends VectorMapJoinLeftSemiGe
           }
         }
 
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug(CLASS_NAME +
               " allMatchs " + intArrayToRangesString(allMatchs, allMatchCount) +
               " spills " + intArrayToRangesString(spills, spillCount) +

http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterGenerateResultOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterGenerateResultOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterGenerateResultOperator.java
index 0e2d65a..1b1a3db 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterGenerateResultOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterGenerateResultOperator.java
@@ -442,7 +442,7 @@ public abstract class VectorMapJoinOuterGenerateResultOperator
       int nonSpillCount = subtractFromInputSelected(
               inputSelectedInUse, inputLogicalSize, spills, spillCount, nonSpills);
 
-      if (isLogDebugEnabled) {
+      if (LOG.isDebugEnabled()) {
         LOG.debug("finishOuter spillCount > 0" +
             " nonSpills " + intArrayToRangesString(nonSpills, nonSpillCount));
       }
@@ -458,7 +458,7 @@ public abstract class VectorMapJoinOuterGenerateResultOperator
         noMatchCount = subtract(nonSpills, nonSpillCount, allMatchs, allMatchCount,
                 noMatchs);
 
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug("finishOuter spillCount > 0" +
               " noMatchs " + intArrayToRangesString(noMatchs, noMatchCount));
         }
@@ -473,7 +473,7 @@ public abstract class VectorMapJoinOuterGenerateResultOperator
         noMatchCount = subtractFromInputSelected(
             inputSelectedInUse, inputLogicalSize, allMatchs, allMatchCount, noMatchs);
 
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug("finishOuter spillCount == 0" +
               " noMatchs " + intArrayToRangesString(noMatchs, noMatchCount));
         }
@@ -507,7 +507,7 @@ public abstract class VectorMapJoinOuterGenerateResultOperator
       batch.size = numSel;
       batch.selectedInUse = true;
 
-      if (isLogDebugEnabled) {
+      if (LOG.isDebugEnabled()) {
         LOG.debug("finishOuter allMatchCount > 0" +
             " batch.selected " + intArrayToRangesString(batch.selected, batch.size));
       }
@@ -525,7 +525,7 @@ public abstract class VectorMapJoinOuterGenerateResultOperator
         int mergeCount = sortMerge(
                 noMatchs, noMatchCount, batch.selected, batch.size, merged);
     
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug("finishOuter noMatchCount > 0 && batch.size > 0" +
               " merged " + intArrayToRangesString(merged, mergeCount));
         }
@@ -543,7 +543,7 @@ public abstract class VectorMapJoinOuterGenerateResultOperator
         batch.size = noMatchCount;
         batch.selectedInUse = true;
 
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug("finishOuter noMatchCount > 0 && batch.size == 0" +
               " batch.selected " + intArrayToRangesString(batch.selected, batch.size));
         }

http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterLongOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterLongOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterLongOperator.java
index 72309e8..cb0ec96 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterLongOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterLongOperator.java
@@ -144,7 +144,7 @@ public class VectorMapJoinOuterLongOperator extends VectorMapJoinOuterGenerateRe
       final int inputLogicalSize = batch.size;
 
       if (inputLogicalSize == 0) {
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " empty");
         }
         return;
@@ -173,7 +173,7 @@ public class VectorMapJoinOuterLongOperator extends VectorMapJoinOuterGenerateRe
           ve.evaluate(batch);
         }
         someRowsFilteredOut = (batch.size != inputLogicalSize);
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           if (batch.selectedInUse) {
             if (inputSelectedInUse) {
               LOG.debug(CLASS_NAME +
@@ -246,7 +246,7 @@ public class VectorMapJoinOuterLongOperator extends VectorMapJoinOuterGenerateRe
          * Common repeated join result processing.
          */
 
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name());
         }
         finishOuterRepeated(batch, joinResult, hashMapResults[0], someRowsFilteredOut,
@@ -257,7 +257,7 @@ public class VectorMapJoinOuterLongOperator extends VectorMapJoinOuterGenerateRe
          * NOT Repeating.
          */
 
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated");
         }
 
@@ -427,7 +427,7 @@ public class VectorMapJoinOuterLongOperator extends VectorMapJoinOuterGenerateRe
           }
         }
 
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter +
               " allMatchs " + intArrayToRangesString(allMatchs,allMatchCount) +
               " equalKeySeriesHashMapResultIndices " + intArrayToRangesString(equalKeySeriesHashMapResultIndices, equalKeySeriesCount) +

http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterMultiKeyOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterMultiKeyOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterMultiKeyOperator.java
index a4fc7d3..4d9c302 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterMultiKeyOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterMultiKeyOperator.java
@@ -149,7 +149,7 @@ public class VectorMapJoinOuterMultiKeyOperator extends VectorMapJoinOuterGenera
       final int inputLogicalSize = batch.size;
 
       if (inputLogicalSize == 0) {
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " empty");
         }
         return;
@@ -178,7 +178,7 @@ public class VectorMapJoinOuterMultiKeyOperator extends VectorMapJoinOuterGenera
           ve.evaluate(batch);
         }
         someRowsFilteredOut = (batch.size != inputLogicalSize);
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           if (batch.selectedInUse) {
             if (inputSelectedInUse) {
               LOG.debug(CLASS_NAME +
@@ -265,7 +265,7 @@ public class VectorMapJoinOuterMultiKeyOperator extends VectorMapJoinOuterGenera
          * Common repeated join result processing.
          */
 
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name());
         }
         finishOuterRepeated(batch, joinResult, hashMapResults[0], someRowsFilteredOut,
@@ -276,7 +276,7 @@ public class VectorMapJoinOuterMultiKeyOperator extends VectorMapJoinOuterGenera
          * NOT Repeating.
          */
 
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated");
         }
 
@@ -445,7 +445,7 @@ public class VectorMapJoinOuterMultiKeyOperator extends VectorMapJoinOuterGenera
           }
         }
 
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter +
               " allMatchs " + intArrayToRangesString(allMatchs,allMatchCount) +
               " equalKeySeriesHashMapResultIndices " + intArrayToRangesString(equalKeySeriesHashMapResultIndices, equalKeySeriesCount) +

http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterStringOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterStringOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterStringOperator.java
index 6e7e5cb..f1a5c2e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterStringOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterStringOperator.java
@@ -134,7 +134,7 @@ public class VectorMapJoinOuterStringOperator extends VectorMapJoinOuterGenerate
       final int inputLogicalSize = batch.size;
 
       if (inputLogicalSize == 0) {
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " empty");
         }
         return;
@@ -163,7 +163,7 @@ public class VectorMapJoinOuterStringOperator extends VectorMapJoinOuterGenerate
           ve.evaluate(batch);
         }
         someRowsFilteredOut = (batch.size != inputLogicalSize);
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           if (batch.selectedInUse) {
             if (inputSelectedInUse) {
               LOG.debug(CLASS_NAME +
@@ -234,7 +234,7 @@ public class VectorMapJoinOuterStringOperator extends VectorMapJoinOuterGenerate
          * Common repeated join result processing.
          */
 
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name());
         }
         finishOuterRepeated(batch, joinResult, hashMapResults[0], someRowsFilteredOut,
@@ -245,7 +245,7 @@ public class VectorMapJoinOuterStringOperator extends VectorMapJoinOuterGenerate
          * NOT Repeating.
          */
 
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated");
         }
 
@@ -413,7 +413,7 @@ public class VectorMapJoinOuterStringOperator extends VectorMapJoinOuterGenerate
           }
         }
 
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter +
               " allMatchs " + intArrayToRangesString(allMatchs,allMatchCount) +
               " equalKeySeriesHashMapResultIndices " + intArrayToRangesString(equalKeySeriesHashMapResultIndices, equalKeySeriesCount) +

http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashTable.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashTable.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashTable.java
index 10bc902..89087e1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashTable.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashTable.java
@@ -40,8 +40,6 @@ public abstract class VectorMapJoinFastBytesHashTable
 
   private static final Logger LOG = LoggerFactory.getLogger(VectorMapJoinFastBytesHashTable.class);
 
-  private final boolean isLogDebugEnabled = LOG.isDebugEnabled();
-
   protected VectorMapJoinFastKeyStore keyStore;
 
   protected BytesWritable testKeyBytesWritable;
@@ -90,7 +88,7 @@ public abstract class VectorMapJoinFastBytesHashTable
     }
 
     if (largestNumberOfSteps < i) {
-      if (isLogDebugEnabled) {
+      if (LOG.isDebugEnabled()) {
         LOG.debug("Probed " + i + " slots (the longest so far) to find space");
       }
       largestNumberOfSteps = i;
@@ -144,7 +142,7 @@ public abstract class VectorMapJoinFastBytesHashTable
         }
 
         if (newLargestNumberOfSteps < i) {
-          if (isLogDebugEnabled) {
+          if (LOG.isDebugEnabled()) {
             LOG.debug("Probed " + i + " slots (the longest so far) to find space");
           }
           newLargestNumberOfSteps = i;

http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashTable.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashTable.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashTable.java
index 54e667c..f610653 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashTable.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashTable.java
@@ -47,8 +47,6 @@ public abstract class VectorMapJoinFastLongHashTable
 
   public static final Logger LOG = LoggerFactory.getLogger(VectorMapJoinFastLongHashTable.class);
 
-  private transient final boolean isLogDebugEnabled = LOG.isDebugEnabled();
-
   private final HashTableKeyType hashTableKeyType;
 
   private final boolean isOuterJoin;
@@ -131,7 +129,7 @@ public abstract class VectorMapJoinFastLongHashTable
     }
 
     if (largestNumberOfSteps < i) {
-      if (isLogDebugEnabled) {
+      if (LOG.isDebugEnabled()) {
         LOG.debug("Probed " + i + " slots (the longest so far) to find space");
       }
       largestNumberOfSteps = i;
@@ -195,7 +193,7 @@ public abstract class VectorMapJoinFastLongHashTable
         }
 
         if (newLargestNumberOfSteps < i) {
-          if (isLogDebugEnabled) {
+          if (LOG.isDebugEnabled()) {
             LOG.debug("Probed " + i + " slots (the longest so far) to find space");
           }
           newLargestNumberOfSteps = i;

http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkCommonOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkCommonOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkCommonOperator.java
index 99819cf..496af0b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkCommonOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkCommonOperator.java
@@ -257,7 +257,7 @@ public abstract class VectorReduceSinkCommonOperator extends TerminalOperator<Re
   protected void initializeOp(Configuration hconf) throws HiveException {
     super.initializeOp(hconf);
 
-    if (isLogDebugEnabled) {
+    if (LOG.isDebugEnabled()) {
       LOG.debug("useUniformHash " + vectorReduceSinkInfo.getUseUniformHash());
   
       LOG.debug("reduceSinkKeyColumnMap " +
@@ -315,7 +315,7 @@ public abstract class VectorReduceSinkCommonOperator extends TerminalOperator<Re
     reduceSkipTag = conf.getSkipTag();
     reduceTagByte = (byte) conf.getTag();
 
-    if (isLogInfoEnabled) {
+    if (LOG.isInfoEnabled()) {
       LOG.info("Using tag = " + (int) reduceTagByte);
     }
 
@@ -409,7 +409,7 @@ public abstract class VectorReduceSinkCommonOperator extends TerminalOperator<Re
     // forward is not called
     if (null != out) {
       numRows++;
-      if (isLogInfoEnabled) {
+      if (LOG.isInfoEnabled()) {
         if (numRows == cntr) {
           cntr = logEveryNRows == 0 ? cntr * 10 : numRows + logEveryNRows;
           if (cntr < 0 || numRows < 0) {
@@ -438,7 +438,7 @@ public abstract class VectorReduceSinkCommonOperator extends TerminalOperator<Re
     super.closeOp(abort);
     out = null;
     reducerHash = null;
-    if (isLogInfoEnabled) {
+    if (LOG.isInfoEnabled()) {
       LOG.info(toString() + ": records written - " + numRows);
     }
     recordCounter.set(numRows);

http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ExternalCache.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ExternalCache.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ExternalCache.java
index 9299306..58ea8f7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ExternalCache.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ExternalCache.java
@@ -54,7 +54,6 @@ import com.google.common.collect.Lists;
 /** Metastore-based footer cache storing serialized footers. Also has a local cache. */
 public class ExternalCache implements FooterCache {
   private static final Logger LOG = LoggerFactory.getLogger(ExternalCache.class);
-  private static boolean isDebugEnabled = LOG.isDebugEnabled();
 
   private final LocalCache localCache;
   private final ExternalFooterCachesByConf externalCacheSrc;
@@ -194,7 +193,7 @@ public class ExternalCache implements FooterCache {
       Long fileId = file.getFileId();
       if (fileId == null) {
         if (!isInTest) {
-          if (!isWarnLogged || isDebugEnabled) {
+          if (!isWarnLogged || LOG.isDebugEnabled()) {
             LOG.warn("Not using metastore cache because fileId is missing: " + fs.getPath());
             isWarnLogged = true;
           }

http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
index 8fb7211..0ef7c75 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
@@ -158,7 +158,6 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
   }
 
   private static final Logger LOG = LoggerFactory.getLogger(OrcInputFormat.class);
-  private static final boolean isDebugEnabled = LOG.isDebugEnabled();
   static final HadoopShims SHIMS = ShimLoader.getHadoopShims();
 
   private static final long DEFAULT_MIN_SPLIT_SIZE = 16 * 1024 * 1024;
@@ -1682,7 +1681,7 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
             allowSyntheticFileIds);
 
         for (SplitStrategy<?> splitStrategy : splitStrategies) {
-          if (isDebugEnabled) {
+          if (LOG.isDebugEnabled()) {
             LOG.debug("Split strategy: {}", splitStrategy);
           }
 
@@ -1725,7 +1724,7 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
           + context.numFilesCounter.get());
     }
 
-    if (isDebugEnabled) {
+    if (LOG.isDebugEnabled()) {
       for (OrcSplit split : splits) {
         LOG.debug(split + " projected_columns_uncompressed_size: "
             + split.getColumnarProjectionSize());
@@ -1795,7 +1794,7 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
   @Override
   public InputSplit[] getSplits(JobConf job,
                                 int numSplits) throws IOException {
-    if (isDebugEnabled) {
+    if (LOG.isDebugEnabled()) {
       LOG.debug("getSplits started");
     }
     Configuration conf = job;
@@ -1805,7 +1804,7 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
     }
     List<OrcSplit> result = generateSplitsInfo(conf,
         new Context(conf, numSplits, createExternalCaches()));
-    if (isDebugEnabled) {
+    if (LOG.isDebugEnabled()) {
       LOG.debug("getSplits finished");
     }
     return result.toArray(new InputSplit[result.size()]);
@@ -2100,7 +2099,7 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
     for (int i = 0; i < includeStripe.length; ++i) {
       includeStripe[i] = (i >= stripeStats.size()) ||
           isStripeSatisfyPredicate(stripeStats.get(i), sarg, filterColumns, evolution);
-      if (isDebugEnabled && !includeStripe[i]) {
+      if (LOG.isDebugEnabled() && !includeStripe[i]) {
         LOG.debug("Eliminating ORC stripe-" + i + " of file '" + filePath
             + "'  as it did not satisfy predicate condition.");
       }

http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
index 6844713..fc6adaf 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
@@ -103,8 +103,6 @@ import com.google.common.collect.Sets;
 public class StatsRulesProcFactory {
 
   private static final Logger LOG = LoggerFactory.getLogger(StatsRulesProcFactory.class.getName());
-  private static final boolean isDebugEnabled = LOG.isDebugEnabled();
-
 
   /**
    * Collect basic statistics like number of rows, data size and column level statistics from the
@@ -130,7 +128,7 @@ public class StatsRulesProcFactory {
         Statistics stats = StatsUtils.collectStatistics(aspCtx.getConf(), partList, table, tsop);
         tsop.setStatistics(stats.clone());
 
-        if (isDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug("[0] STATS-" + tsop.toString() + " (" + table.getTableName() + "): " +
               stats.extendedToString());
         }
@@ -194,14 +192,14 @@ public class StatsRulesProcFactory {
           }
           sop.setStatistics(stats);
 
-          if (isDebugEnabled) {
+          if (LOG.isDebugEnabled()) {
             LOG.debug("[0] STATS-" + sop.toString() + ": " + stats.extendedToString());
           }
         } else {
           if (parentStats != null) {
             sop.setStatistics(parentStats.clone());
 
-            if (isDebugEnabled) {
+            if (LOG.isDebugEnabled()) {
               LOG.debug("[1] STATS-" + sop.toString() + ": " + parentStats.extendedToString());
             }
           }
@@ -291,7 +289,7 @@ public class StatsRulesProcFactory {
               updateStats(st, newNumRows, true, fop);
             }
 
-            if (isDebugEnabled) {
+            if (LOG.isDebugEnabled()) {
               LOG.debug("[0] STATS-" + fop.toString() + ": " + st.extendedToString());
             }
           } else {
@@ -301,7 +299,7 @@ public class StatsRulesProcFactory {
               updateStats(st, newNumRows, false, fop);
             }
 
-            if (isDebugEnabled) {
+            if (LOG.isDebugEnabled()) {
               LOG.debug("[1] STATS-" + fop.toString() + ": " + st.extendedToString());
             }
           }
@@ -321,7 +319,7 @@ public class StatsRulesProcFactory {
       Statistics andStats = null;
 
       if (stats.getNumRows() <= 1 || stats.getDataSize() <= 0) {
-        if (isDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug("Estimating row count for " + pred + " Original num rows: " + stats.getNumRows() +
               " Original data size: " + stats.getDataSize() + " New num rows: 1");
         }
@@ -403,7 +401,7 @@ public class StatsRulesProcFactory {
         }
       }
 
-      if (isDebugEnabled) {
+      if (LOG.isDebugEnabled()) {
         LOG.debug("Estimating row count for " + pred + " Original num rows: " + stats.getNumRows() +
             " New num rows: " + newNumRows);
       }
@@ -1057,7 +1055,7 @@ public class StatsRulesProcFactory {
         parallelism = (int) Math.ceil((double) inputSize / maxSplitSize);
       }
 
-      if (isDebugEnabled) {
+      if (LOG.isDebugEnabled()) {
         LOG.debug("STATS-" + gop.toString() + ": inputSize: " + inputSize + " maxSplitSize: " +
             maxSplitSize + " parallelism: " + parallelism + " containsGroupingSet: " +
             containsGroupingSet + " sizeOfGroupingSet: " + sizeOfGroupingSet);
@@ -1070,7 +1068,7 @@ public class StatsRulesProcFactory {
           // check if map side aggregation is possible or not based on column stats
           hashAgg = checkMapSideAggregation(gop, colStats, conf);
 
-          if (isDebugEnabled) {
+          if (LOG.isDebugEnabled()) {
             LOG.debug("STATS-" + gop.toString() + " hashAgg: " + hashAgg);
           }
 
@@ -1109,7 +1107,7 @@ public class StatsRulesProcFactory {
           if (ndvProduct == 0) {
             ndvProduct = parentNumRows / 2;
 
-            if (isDebugEnabled) {
+            if (LOG.isDebugEnabled()) {
               LOG.debug("STATS-" + gop.toString() + ": ndvProduct became 0 as some column does not" +
                   " have stats. ndvProduct changed to: " + ndvProduct);
             }
@@ -1124,14 +1122,14 @@ public class StatsRulesProcFactory {
                     (StatsUtils.safeMult(parentNumRows, sizeOfGroupingSet)) / 2,
                     StatsUtils.safeMult(StatsUtils.safeMult(ndvProduct, parallelism), sizeOfGroupingSet));
 
-                if (isDebugEnabled) {
+                if (LOG.isDebugEnabled()) {
                   LOG.debug("[Case 4] STATS-" + gop.toString() + ": cardinality: " + cardinality);
                 }
               } else {
                 // Case 3: column stats, hash aggregation, NO grouping sets
                 cardinality = Math.min(parentNumRows / 2, StatsUtils.safeMult(ndvProduct, parallelism));
 
-                if (isDebugEnabled) {
+                if (LOG.isDebugEnabled()) {
                   LOG.debug("[Case 3] STATS-" + gop.toString() + ": cardinality: " + cardinality);
                 }
               }
@@ -1140,14 +1138,14 @@ public class StatsRulesProcFactory {
                 // Case 6: column stats, NO hash aggregation, grouping sets
                 cardinality = StatsUtils.safeMult(parentNumRows, sizeOfGroupingSet);
 
-                if (isDebugEnabled) {
+                if (LOG.isDebugEnabled()) {
                   LOG.debug("[Case 6] STATS-" + gop.toString() + ": cardinality: " + cardinality);
                 }
               } else {
                 // Case 5: column stats, NO hash aggregation, NO grouping sets
                 cardinality = parentNumRows;
 
-                if (isDebugEnabled) {
+                if (LOG.isDebugEnabled()) {
                   LOG.debug("[Case 5] STATS-" + gop.toString() + ": cardinality: " + cardinality);
                 }
               }
@@ -1166,14 +1164,14 @@ public class StatsRulesProcFactory {
               sizeOfGroupingSet = mGop.getConf().getListGroupingSets().size();
               cardinality = Math.min(parentNumRows, StatsUtils.safeMult(ndvProduct, sizeOfGroupingSet));
 
-              if (isDebugEnabled) {
+              if (LOG.isDebugEnabled()) {
                 LOG.debug("[Case 8] STATS-" + gop.toString() + ": cardinality: " + cardinality);
               }
             } else {
               // Case 9: column stats, NO grouping sets
               cardinality = Math.min(parentNumRows, ndvProduct);
 
-              if (isDebugEnabled) {
+              if (LOG.isDebugEnabled()) {
                 LOG.debug("[Case 9] STATS-" + gop.toString() + ": cardinality: " + cardinality);
               }
             }
@@ -1196,14 +1194,14 @@ public class StatsRulesProcFactory {
                 // Case 2: NO column stats, NO hash aggregation, grouping sets
                 cardinality = StatsUtils.safeMult(parentNumRows, sizeOfGroupingSet);
 
-                if (isDebugEnabled) {
+                if (LOG.isDebugEnabled()) {
                   LOG.debug("[Case 2] STATS-" + gop.toString() + ": cardinality: " + cardinality);
                 }
               } else {
                 // Case 1: NO column stats, NO hash aggregation, NO grouping sets
                 cardinality = parentNumRows;
 
-                if (isDebugEnabled) {
+                if (LOG.isDebugEnabled()) {
                   LOG.debug("[Case 1] STATS-" + gop.toString() + ": cardinality: " + cardinality);
                 }
               }
@@ -1212,7 +1210,7 @@ public class StatsRulesProcFactory {
               // Case 7: NO column stats
               cardinality = parentNumRows / 2;
 
-              if (isDebugEnabled) {
+              if (LOG.isDebugEnabled()) {
                 LOG.debug("[Case 7] STATS-" + gop.toString() + ": cardinality: " + cardinality);
               }
             }
@@ -1263,7 +1261,7 @@ public class StatsRulesProcFactory {
 
         gop.setStatistics(stats);
 
-        if (isDebugEnabled && stats != null) {
+        if (LOG.isDebugEnabled() && stats != null) {
           LOG.debug("[0] STATS-" + gop.toString() + ": " + stats.extendedToString());
         }
       } catch (CloneNotSupportedException e) {
@@ -1548,7 +1546,7 @@ public class StatsRulesProcFactory {
         updateColStats(conf, stats, newRowCount, jop, rowCountParents);
         jop.setStatistics(stats);
 
-        if (isDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug("[0] STATS-" + jop.toString() + ": " + stats.extendedToString());
         }
       } else {
@@ -1606,7 +1604,7 @@ public class StatsRulesProcFactory {
         wcStats.setDataSize(newDataSize);
         jop.setStatistics(wcStats);
 
-        if (isDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug("[1] STATS-" + jop.toString() + ": " + wcStats.extendedToString());
         }
       }
@@ -1641,7 +1639,7 @@ public class StatsRulesProcFactory {
         newNumRows = getCardinality(parents, pkPos, csPK, csFKs, jop);
 
         // some debug information
-        if (isDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           List<String> parentIds = Lists.newArrayList();
 
           // print primary key containing parents
@@ -2060,7 +2058,7 @@ public class StatsRulesProcFactory {
           }
           lop.setStatistics(stats);
 
-          if (isDebugEnabled) {
+          if (LOG.isDebugEnabled()) {
             LOG.debug("[0] STATS-" + lop.toString() + ": " + stats.extendedToString());
           }
         } else {
@@ -2079,7 +2077,7 @@ public class StatsRulesProcFactory {
             }
             lop.setStatistics(wcStats);
 
-            if (isDebugEnabled) {
+            if (LOG.isDebugEnabled()) {
               LOG.debug("[1] STATS-" + lop.toString() + ": " + wcStats.extendedToString());
             }
           }
@@ -2147,7 +2145,7 @@ public class StatsRulesProcFactory {
             outStats.setColumnStats(colStats);
           }
           rop.setStatistics(outStats);
-          if (isDebugEnabled) {
+          if (LOG.isDebugEnabled()) {
             LOG.debug("[0] STATS-" + rop.toString() + ": " + outStats.extendedToString());
           }
         } catch (CloneNotSupportedException e) {
@@ -2192,7 +2190,7 @@ public class StatsRulesProcFactory {
                   stats.addToColumnStats(colStats);
                   op.getConf().setStatistics(stats);
 
-                  if (isDebugEnabled) {
+                  if (LOG.isDebugEnabled()) {
                     LOG.debug("[0] STATS-" + op.toString() + ": " + stats.extendedToString());
                   }
                 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyBinary.java
----------------------------------------------------------------------
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyBinary.java b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyBinary.java
index aa19d09..d43aa35 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyBinary.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyBinary.java
@@ -27,8 +27,7 @@ import org.apache.hadoop.io.BytesWritable;
 public class LazyBinary extends LazyPrimitive<LazyBinaryObjectInspector, BytesWritable> {
 
   private static final Logger LOG = LoggerFactory.getLogger(LazyBinary.class);
-  private static final boolean DEBUG_LOG_ENABLED = LOG.isDebugEnabled();
-  
+
   public LazyBinary(LazyBinaryObjectInspector oi) {
     super(oi);
     data = new BytesWritable();
@@ -56,7 +55,7 @@ public class LazyBinary extends LazyPrimitive<LazyBinaryObjectInspector, BytesWr
   // todo this should be configured in serde
   public static byte[] decodeIfNeeded(byte[] recv) {
     boolean arrayByteBase64 = Base64.isArrayByteBase64(recv);
-    if (DEBUG_LOG_ENABLED && arrayByteBase64) {
+    if (LOG.isDebugEnabled() && arrayByteBase64) {
       LOG.debug("Data only contains Base64 alphabets only so try to decode the data.");
     }
     return arrayByteBase64 ? Base64.decodeBase64(recv) : recv;
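
For reference, a minimal sketch of the guarded-logging pattern this series of hunks converges on: the debug check is made against the logger at call time instead of being cached in a static or instance field, so a log level changed after class initialization is still honored. The class name and message below are illustrative only, not taken from the patch.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class GuardedLoggingSketch {
  private static final Logger LOG = LoggerFactory.getLogger(GuardedLoggingSketch.class);

  // Pattern removed by the patch: the flag is evaluated once and never
  // reflects later changes to the logging configuration.
  // private static final boolean isLogDebugEnabled = LOG.isDebugEnabled();

  void process(int batchCounter) {
    // Preferred form: ask the logger at call time; the string concatenation
    // is still skipped when DEBUG is off because the guard short-circuits it.
    if (LOG.isDebugEnabled()) {
      LOG.debug("batch #" + batchCounter + " processed");
    }
  }
}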


[10/12] hive git commit: HIVE-16727 : REPL DUMP for insert event shouldn't fail if the table is already dropped. (Sankar Hariappan via Thejas Nair)

Posted by we...@apache.org.
HIVE-16727 : REPL DUMP for insert event shouldn't fail if the table is already dropped. (Sankar Hariappan via Thejas Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/8dcc78a2
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/8dcc78a2
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/8dcc78a2

Branch: refs/heads/hive-14535
Commit: 8dcc78a21488a5fe2ec9e42084a61bf38653ddd6
Parents: bbf0629
Author: Sankar Hariappan <ma...@gmail.com>
Authored: Mon May 29 23:37:07 2017 -0700
Committer: Thejas M Nair <th...@hortonworks.com>
Committed: Mon May 29 23:37:07 2017 -0700

----------------------------------------------------------------------
 .../listener/DbNotificationListener.java        |  11 +-
 .../listener/TestDbNotificationListener.java    |  28 +---
 .../hive/ql/parse/TestReplicationScenarios.java | 130 +++++++++++++++++++
 .../hive/metastore/events/InsertEvent.java      |  44 +++----
 .../hive/metastore/messaging/InsertMessage.java |  24 ++--
 .../metastore/messaging/MessageFactory.java     |  10 +-
 .../messaging/json/JSONInsertMessage.java       |  51 +++++---
 .../messaging/json/JSONMessageFactory.java      |  10 +-
 .../parse/repl/dump/events/InsertHandler.java   |  28 ++--
 9 files changed, 231 insertions(+), 105 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/8dcc78a2/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
----------------------------------------------------------------------
diff --git a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
index 6f96e1d..e598a6b 100644
--- a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
+++ b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
@@ -437,13 +437,14 @@ public class DbNotificationListener extends MetaStoreEventListener {
   }
   @Override
   public void onInsert(InsertEvent insertEvent) throws MetaException {
+    Table tableObj = insertEvent.getTableObj();
     NotificationEvent event =
-        new NotificationEvent(0, now(), EventType.INSERT.toString(), msgFactory.buildInsertMessage(
-            insertEvent.getDb(), insertEvent.getTable(), insertEvent.getPartitionKeyValues(), insertEvent.isReplace(),
+        new NotificationEvent(0, now(), EventType.INSERT.toString(), msgFactory.buildInsertMessage(tableObj,
+                insertEvent.getPartitionObj(), insertEvent.isReplace(),
             new FileChksumIterator(insertEvent.getFiles(), insertEvent.getFileChecksums()))
-            .toString());
-    event.setDbName(insertEvent.getDb());
-    event.setTableName(insertEvent.getTable());
+                .toString());
+    event.setDbName(tableObj.getDbName());
+    event.setTableName(tableObj.getTableName());
     process(event, insertEvent);
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/8dcc78a2/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java
----------------------------------------------------------------------
diff --git a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java
index 2168a67..808c9c7 100644
--- a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java
+++ b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java
@@ -1227,8 +1227,9 @@ public class TestDbNotificationListener {
     FieldSchema partCol1 = new FieldSchema("ds", "string", "no comment");
     List<FieldSchema> partCols = new ArrayList<FieldSchema>();
     List<String> partCol1Vals = Arrays.asList("today");
-    LinkedHashMap<String, String> partKeyVals = new LinkedHashMap<String, String>();
-    partKeyVals.put("ds", "today");
+    List<String> partKeyVals = new ArrayList<String>();
+    partKeyVals.add("today");
+
     partCols.add(partCol1);
     Table table =
         new Table(tblName, defaultDbName, tblOwner, startTime, startTime, 0, sd, partCols,
@@ -1264,9 +1265,9 @@ public class TestDbNotificationListener {
     // Parse the message field
     verifyInsert(event, defaultDbName, tblName);
     InsertMessage insertMessage = md.getInsertMessage(event.getMessage());
-    Map<String,String> partKeyValsFromNotif = insertMessage.getPartitionKeyValues();
+    List<String> ptnValues = insertMessage.getPtnObj().getValues();
 
-    assertMapEquals(partKeyVals, partKeyValsFromNotif);
+    assertEquals(partKeyVals, ptnValues);
 
     // Verify the eventID was passed to the non-transactional listener
     MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.INSERT, firstEventId + 3);
@@ -1528,31 +1529,16 @@ public class TestDbNotificationListener {
     InsertMessage insertMsg = md.getInsertMessage(event.getMessage());
     System.out.println("InsertMessage: " + insertMsg.toString());
     if (dbName != null ){
-      assertEquals(dbName, insertMsg.getDB());
+      assertEquals(dbName, insertMsg.getTableObj().getDbName());
     }
     if (tblName != null){
-      assertEquals(tblName, insertMsg.getTable());
+      assertEquals(tblName, insertMsg.getTableObj().getTableName());
     }
     // Should have files
     Iterator<String> files = insertMsg.getFiles().iterator();
     assertTrue(files.hasNext());
   }
 
-
-  private void assertMapEquals(Map<String, String> map1, Map<String, String> map2) {
-    // non ordered, non-classed map comparison - use sparingly instead of assertEquals
-    // only if you're sure that the order does not matter.
-    if ((map1 == null) || (map2 == null)){
-      assertNull(map1);
-      assertNull(map2);
-    }
-    assertEquals(map1.size(),map2.size());
-    for (String k : map1.keySet()){
-      assertTrue(map2.containsKey(k));
-      assertEquals(map1.get(k), map2.get(k));
-    }
-  }
-
   @Test
   public void cleanupNotifs() throws Exception {
     Database db = new Database("cleanup1", "no description", "file:/tmp", emptyParameters);

http://git-wip-us.apache.org/repos/asf/hive/blob/8dcc78a2/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
index 21f09ae..766d858 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
@@ -1282,6 +1282,136 @@ public class TestReplicationScenarios {
   }
 
   @Test
+  public void testIncrementalInsertDropUnpartitionedTable() throws IOException {
+    String testName = "incrementalInsertDropUnpartitionedTable";
+    String dbName = createDB(testName);
+    run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE");
+
+    advanceDumpDir();
+    run("REPL DUMP " + dbName);
+    String replDumpLocn = getResult(0, 0);
+    String replDumpId = getResult(0, 1, true);
+    LOG.info("Bootstrap-Dump: Dumped to {} with id {}", replDumpLocn, replDumpId);
+    run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'");
+
+    String[] unptn_data = new String[] { "eleven", "twelve" };
+
+    run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[0] + "')");
+    run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[1] + "')");
+    verifySetup("SELECT a from " + dbName + ".unptned ORDER BY a", unptn_data);
+
+    run("CREATE TABLE " + dbName + ".unptned_tmp AS SELECT * FROM " + dbName + ".unptned");
+    verifySetup("SELECT a from " + dbName + ".unptned_tmp ORDER BY a", unptn_data);
+
+    // Get the last repl ID corresponding to all insert/alter/create events except DROP.
+    advanceDumpDir();
+    run("REPL DUMP " + dbName + " FROM " + replDumpId);
+    String lastDumpIdWithoutDrop = getResult(0, 1);
+
+    // Drop all the tables
+    run("DROP TABLE " + dbName + ".unptned");
+    run("DROP TABLE " + dbName + ".unptned_tmp");
+    verifyFail("SELECT * FROM " + dbName + ".unptned");
+    verifyFail("SELECT * FROM " + dbName + ".unptned_tmp");
+
+    // Dump all the events except DROP
+    advanceDumpDir();
+    run("REPL DUMP " + dbName + " FROM " + replDumpId + " TO " + lastDumpIdWithoutDrop);
+    String incrementalDumpLocn = getResult(0, 0);
+    String incrementalDumpId = getResult(0, 1, true);
+    LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId);
+    replDumpId = incrementalDumpId;
+
+    // Need to find the tables and data as drop is not part of this dump
+    run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'");
+    verifyRun("SELECT a from " + dbName + "_dupe.unptned ORDER BY a", unptn_data);
+    verifyRun("SELECT a from " + dbName + "_dupe.unptned_tmp ORDER BY a", unptn_data);
+
+    // Dump the drop events and check if tables are getting dropped in target as well
+    advanceDumpDir();
+    run("REPL DUMP " + dbName + " FROM " + replDumpId);
+    incrementalDumpLocn = getResult(0, 0);
+    incrementalDumpId = getResult(0, 1, true);
+    LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId);
+    replDumpId = incrementalDumpId;
+
+    run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'");
+    verifyFail("SELECT * FROM " + dbName + ".unptned");
+    verifyFail("SELECT * FROM " + dbName + ".unptned_tmp");
+  }
+
+  @Test
+  public void testIncrementalInsertDropPartitionedTable() throws IOException {
+    String testName = "incrementalInsertDropPartitionedTable";
+    String dbName = createDB(testName);
+    run("CREATE TABLE " + dbName + ".ptned(a string) PARTITIONED BY (b int) STORED AS TEXTFILE");
+
+    advanceDumpDir();
+    run("REPL DUMP " + dbName);
+    String replDumpLocn = getResult(0, 0);
+    String replDumpId = getResult(0, 1, true);
+    LOG.info("Bootstrap-Dump: Dumped to {} with id {}", replDumpLocn, replDumpId);
+    run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'");
+
+    String[] ptn_data_1 = new String[] { "fifteen", "fourteen", "thirteen" };
+    String[] ptn_data_2 = new String[] { "fifteen", "seventeen", "sixteen" };
+
+    run("INSERT INTO TABLE " + dbName + ".ptned partition(b=1) values('" + ptn_data_1[0] + "')");
+    run("INSERT INTO TABLE " + dbName + ".ptned partition(b=1) values('" + ptn_data_1[1] + "')");
+    run("INSERT INTO TABLE " + dbName + ".ptned partition(b=1) values('" + ptn_data_1[2] + "')");
+
+    run("ALTER TABLE " + dbName + ".ptned ADD PARTITION (b=20)");
+    run("ALTER TABLE " + dbName + ".ptned RENAME PARTITION (b=20) TO PARTITION (b=2");
+    run("INSERT INTO TABLE " + dbName + ".ptned partition(b=2) values('" + ptn_data_2[0] + "')");
+    run("INSERT INTO TABLE " + dbName + ".ptned partition(b=2) values('" + ptn_data_2[1] + "')");
+    run("INSERT INTO TABLE " + dbName + ".ptned partition(b=2) values('" + ptn_data_2[2] + "')");
+    verifySetup("SELECT a from " + dbName + ".ptned where (b=1) ORDER BY a", ptn_data_1);
+    verifySetup("SELECT a from " + dbName + ".ptned where (b=2) ORDER BY a", ptn_data_2);
+
+    run("CREATE TABLE " + dbName + ".ptned_tmp AS SELECT * FROM " + dbName + ".ptned");
+    verifySetup("SELECT a from " + dbName + ".ptned_tmp where (b=1) ORDER BY a", ptn_data_1);
+    verifySetup("SELECT a from " + dbName + ".ptned_tmp where (b=2) ORDER BY a", ptn_data_2);
+
+    // Get the last repl ID corresponding to all insert/alter/create events except DROP.
+    advanceDumpDir();
+    run("REPL DUMP " + dbName + " FROM " + replDumpId);
+    String lastDumpIdWithoutDrop = getResult(0, 1);
+
+    // Drop all the tables
+    run("DROP TABLE " + dbName + ".ptned_tmp");
+    run("DROP TABLE " + dbName + ".ptned");
+    verifyFail("SELECT * FROM " + dbName + ".ptned_tmp");
+    verifyFail("SELECT * FROM " + dbName + ".ptned");
+
+    // Dump all the events except DROP
+    advanceDumpDir();
+    run("REPL DUMP " + dbName + " FROM " + replDumpId + " TO " + lastDumpIdWithoutDrop);
+    String incrementalDumpLocn = getResult(0, 0);
+    String incrementalDumpId = getResult(0, 1, true);
+    LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId);
+    replDumpId = incrementalDumpId;
+
+    // Need to find the tables and data as drop is not part of this dump
+    run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'");
+    verifyRun("SELECT a from " + dbName + "_dupe.ptned where (b=1) ORDER BY a", ptn_data_1);
+    verifyRun("SELECT a from " + dbName + "_dupe.ptned where (b=2) ORDER BY a", ptn_data_2);
+    verifyRun("SELECT a from " + dbName + "_dupe.ptned_tmp where (b=1) ORDER BY a", ptn_data_1);
+    verifyRun("SELECT a from " + dbName + "_dupe.ptned_tmp where (b=2) ORDER BY a", ptn_data_2);
+
+    // Dump the drop events and check if tables are getting dropped in target as well
+    advanceDumpDir();
+    run("REPL DUMP " + dbName + " FROM " + replDumpId);
+    incrementalDumpLocn = getResult(0, 0);
+    incrementalDumpId = getResult(0, 1, true);
+    LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId);
+    replDumpId = incrementalDumpId;
+
+    run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'");
+    verifyFail("SELECT * FROM " + dbName + ".ptned_tmp");
+    verifyFail("SELECT * FROM " + dbName + ".ptned");
+  }
+
+  @Test
   public void testViewsReplication() throws IOException {
     String testName = "viewsReplication";
     String dbName = createDB(testName);

http://git-wip-us.apache.org/repos/asf/hive/blob/8dcc78a2/metastore/src/java/org/apache/hadoop/hive/metastore/events/InsertEvent.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/events/InsertEvent.java b/metastore/src/java/org/apache/hadoop/hive/metastore/events/InsertEvent.java
index dff1195..c33ade1 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/events/InsertEvent.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/events/InsertEvent.java
@@ -24,20 +24,16 @@ import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler;
 import org.apache.hadoop.hive.metastore.api.InsertEventRequestData;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.Table;
 
 import java.util.ArrayList;
-import java.util.LinkedHashMap;
 import java.util.List;
-import java.util.Map;
 
 public class InsertEvent extends ListenerEvent {
 
-  // Note that this event is fired from the client, so rather than having full metastore objects
-  // we have just the string names, but that's fine for what we need.
-  private final String db;
-  private final String table;
-  private final Map<String, String> keyValues;
+  private final Table tableObj;
+  private final Partition ptnObj;
   private final boolean replace;
   private final List<String> files;
   private List<String> fileChecksums = new ArrayList<String>();
@@ -55,42 +51,36 @@ public class InsertEvent extends ListenerEvent {
       InsertEventRequestData insertData, boolean status, HMSHandler handler) throws MetaException,
       NoSuchObjectException {
     super(status, handler);
-    this.db = db;
-    this.table = table;
 
-    // If replace flag is not set by caller, then by default set it to true to maintain backward compatibility
-    this.replace = (insertData.isSetReplace() ? insertData.isReplace() : true);
-    this.files = insertData.getFilesAdded();
     GetTableRequest req = new GetTableRequest(db, table);
     req.setCapabilities(HiveMetaStoreClient.TEST_VERSION);
-    Table t = handler.get_table_req(req).getTable();
-    keyValues = new LinkedHashMap<String, String>();
+    this.tableObj = handler.get_table_req(req).getTable();
     if (partVals != null) {
-      for (int i = 0; i < partVals.size(); i++) {
-        keyValues.put(t.getPartitionKeys().get(i).getName(), partVals.get(i));
-      }
+      this.ptnObj = handler.get_partition(db, table, partVals);
+    } else {
+      this.ptnObj = null;
     }
+
+    // If replace flag is not set by caller, then by default set it to true to maintain backward compatibility
+    this.replace = (insertData.isSetReplace() ? insertData.isReplace() : true);
+    this.files = insertData.getFilesAdded();
     if (insertData.isSetFilesAddedChecksum()) {
       fileChecksums = insertData.getFilesAddedChecksum();
     }
   }
 
-  public String getDb() {
-    return db;
-  }
-
   /**
-   * @return The table.
+   * @return Table object
    */
-  public String getTable() {
-    return table;
+  public Table getTableObj() {
+    return tableObj;
   }
 
   /**
-   * @return List of values for the partition keys.
+   * @return Partition object
    */
-  public Map<String, String> getPartitionKeyValues() {
-    return keyValues;
+  public Partition getPartitionObj() {
+    return ptnObj;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hive/blob/8dcc78a2/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/InsertMessage.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/InsertMessage.java b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/InsertMessage.java
index 6d146e0..6505c67 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/InsertMessage.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/InsertMessage.java
@@ -19,7 +19,8 @@
 
 package org.apache.hadoop.hive.metastore.messaging;
 
-import java.util.Map;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.Table;
 
 /**
  * HCat message sent when an insert is done to a table or partition.
@@ -43,19 +44,26 @@ public abstract class InsertMessage extends EventMessage {
   public abstract boolean isReplace();
 
   /**
-   * Get the map of partition keyvalues.  Will be null if this insert is to a table and not a
-   * partition.
-   * @return Map of partition keyvalues, or null.
-   */
-  public abstract Map<String,String> getPartitionKeyValues();
-
-  /**
    * Get list of file name and checksum created as a result of this DML operation
    *
    * @return The iterable of files
    */
   public abstract Iterable<String> getFiles();
 
+  /**
+   * Get the table object associated with the insert
+   *
+   * @return The Table object associated with the insert
+   */
+  public abstract Table getTableObj() throws Exception;
+
+  /**
+   * Get the partition object associated with the insert
+   *
+   * @return The Partition object if the table is partitioned, otherwise null.
+   */
+  public abstract Partition getPtnObj() throws Exception;
+
   @Override
   public EventMessage checkValid() {
     if (getTable() == null)

http://git-wip-us.apache.org/repos/asf/hive/blob/8dcc78a2/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/MessageFactory.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/MessageFactory.java b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/MessageFactory.java
index 1bd52a8..9437e8b 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/MessageFactory.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/MessageFactory.java
@@ -29,7 +29,6 @@ import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.util.ReflectionUtils;
 
 import java.util.Iterator;
-import java.util.Map;
 
 /**
  * Abstract Factory for the construction of HCatalog message instances.
@@ -229,14 +228,13 @@ public abstract class MessageFactory {
   /**
    * Factory method for building insert message
    *
-   * @param db Name of the database the insert occurred in
-   * @param table Name of the table the insert occurred in
-   * @param partVals Partition values for the partition that the insert occurred in, may be null if
+   * @param tableObj Table object for the table the insert occurred in
+   * @param ptnObj Partition object for the partition the insert occurred in, may be null if
    *          the insert was done into a non-partitioned table
    * @param replace Flag to represent if INSERT OVERWRITE or INSERT INTO
    * @param files Iterator of file created
    * @return instance of InsertMessage
    */
-  public abstract InsertMessage buildInsertMessage(String db, String table,
-      Map<String, String> partVals, boolean replace, Iterator<String> files);
+  public abstract InsertMessage buildInsertMessage(Table tableObj, Partition ptnObj,
+                                                   boolean replace, Iterator<String> files);
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/8dcc78a2/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONInsertMessage.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONInsertMessage.java b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONInsertMessage.java
index c059d47..18a15f5 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONInsertMessage.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONInsertMessage.java
@@ -19,14 +19,16 @@
 
 package org.apache.hadoop.hive.metastore.messaging.json;
 
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.messaging.InsertMessage;
+import org.apache.thrift.TException;
 import org.codehaus.jackson.annotate.JsonProperty;
 
 import com.google.common.collect.Lists;
 
 import java.util.Iterator;
 import java.util.List;
-import java.util.Map;
 
 /**
  * JSON implementation of InsertMessage
@@ -34,7 +36,7 @@ import java.util.Map;
 public class JSONInsertMessage extends InsertMessage {
 
   @JsonProperty
-  String server, servicePrincipal, db, table;
+  String server, servicePrincipal, db, table, tableObjJson, ptnObjJson;
 
   @JsonProperty
   Long timestamp;
@@ -45,25 +47,39 @@ public class JSONInsertMessage extends InsertMessage {
   @JsonProperty
   List<String> files;
 
-  @JsonProperty
-  Map<String, String> partKeyVals;
-
   /**
    * Default constructor, needed for Jackson.
    */
   public JSONInsertMessage() {
   }
 
-  public JSONInsertMessage(String server, String servicePrincipal, String db, String table,
-      Map<String, String> partKeyVals, boolean replace, Iterator<String> fileIter, Long timestamp) {
+  public JSONInsertMessage(String server, String servicePrincipal, Table tableObj, Partition ptnObj,
+                           boolean replace, Iterator<String> fileIter, Long timestamp) {
     this.server = server;
     this.servicePrincipal = servicePrincipal;
-    this.db = db;
-    this.table = table;
+
+    if (null == tableObj) {
+      throw new IllegalArgumentException("Table not valid.");
+    }
+
+    this.db = tableObj.getDbName();
+    this.table = tableObj.getTableName();
+
+    try {
+      this.tableObjJson = JSONMessageFactory.createTableObjJson(tableObj);
+      if (null != ptnObj) {
+        this.ptnObjJson = JSONMessageFactory.createPartitionObjJson(ptnObj);
+      } else {
+        this.ptnObjJson = null;
+      }
+    } catch (TException e) {
+      throw new IllegalArgumentException("Could not serialize: ", e);
+    }
+
     this.timestamp = timestamp;
     this.replace = Boolean.toString(replace);
-    this.partKeyVals = partKeyVals;
     this.files = Lists.newArrayList(fileIter);
+
     checkValid();
   }
 
@@ -78,11 +94,6 @@ public class JSONInsertMessage extends InsertMessage {
   }
 
   @Override
-  public Map<String, String> getPartitionKeyValues() {
-    return partKeyVals;
-  }
-
-  @Override
   public Iterable<String> getFiles() {
     return files;
   }
@@ -106,6 +117,16 @@ public class JSONInsertMessage extends InsertMessage {
   public boolean isReplace() { return Boolean.parseBoolean(replace); }
 
   @Override
+  public Table getTableObj() throws Exception {
+    return (Table) JSONMessageFactory.getTObj(tableObjJson,Table.class);
+  }
+
+  @Override
+  public Partition getPtnObj() throws Exception {
+    return ((null == ptnObjJson) ? null : (Partition) JSONMessageFactory.getTObj(ptnObjJson, Partition.class));
+  }
+
+  @Override
   public String toString() {
     try {
       return JSONMessageDeserializer.mapper.writeValueAsString(this);

http://git-wip-us.apache.org/repos/asf/hive/blob/8dcc78a2/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONMessageFactory.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONMessageFactory.java b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONMessageFactory.java
index 04a4041..a4c31f2 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONMessageFactory.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONMessageFactory.java
@@ -28,10 +28,6 @@ import javax.annotation.Nullable;
 
 import com.google.common.collect.Iterables;
 
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.Function;
 import org.apache.hadoop.hive.metastore.api.Index;
@@ -165,9 +161,9 @@ public class JSONMessageFactory extends MessageFactory {
   }
 
   @Override
-  public InsertMessage buildInsertMessage(String db, String table, Map<String, String> partKeyVals, boolean replace,
-      Iterator<String> fileIter) {
-    return new JSONInsertMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, db, table, partKeyVals, replace, fileIter, now());
+  public InsertMessage buildInsertMessage(Table tableObj, Partition partObj,
+                                          boolean replace, Iterator<String> fileIter) {
+    return new JSONInsertMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, tableObj, partObj, replace, fileIter, now());
   }
 
   private long now() {

http://git-wip-us.apache.org/repos/asf/hive/blob/8dcc78a2/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/InsertHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/InsertHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/InsertHandler.java
index f514fb2..956bb08 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/InsertHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/InsertHandler.java
@@ -23,18 +23,15 @@ import org.apache.hadoop.hive.metastore.api.NotificationEvent;
 import org.apache.hadoop.hive.metastore.messaging.InsertMessage;
 import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.parse.EximUtil;
-import org.apache.thrift.TException;
+import org.apache.hadoop.hive.ql.parse.repl.DumpType;
+import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData;
 
 import java.io.BufferedWriter;
 import java.io.IOException;
 import java.io.OutputStreamWriter;
 import java.util.Collections;
 import java.util.List;
-import java.util.Map;
 
-import org.apache.hadoop.hive.ql.parse.repl.DumpType;
-
-import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData;
 
 class InsertHandler extends AbstractEventHandler {
 
@@ -45,11 +42,10 @@ class InsertHandler extends AbstractEventHandler {
   @Override
   public void handle(Context withinContext) throws Exception {
     InsertMessage insertMsg = deserializer.getInsertMessage(event.getMessage());
-    org.apache.hadoop.hive.ql.metadata.Table qlMdTable = tableObject(withinContext, insertMsg);
-    Map<String, String> partSpec = insertMsg.getPartitionKeyValues();
+    org.apache.hadoop.hive.ql.metadata.Table qlMdTable = tableObject(insertMsg);
     List<Partition> qlPtns = null;
-    if (qlMdTable.isPartitioned() && !partSpec.isEmpty()) {
-      qlPtns = Collections.singletonList(withinContext.db.getPartition(qlMdTable, partSpec, false));
+    if (qlMdTable.isPartitioned() && (null != insertMsg.getPtnObj())) {
+      qlPtns = Collections.singletonList(partitionObject(qlMdTable, insertMsg));
     }
     Path metaDataPath = new Path(withinContext.eventRoot, EximUtil.METADATA_NAME);
 
@@ -88,13 +84,13 @@ class InsertHandler extends AbstractEventHandler {
     dmd.write();
   }
 
-  private org.apache.hadoop.hive.ql.metadata.Table tableObject(
-      Context withinContext, InsertMessage insertMsg) throws TException {
-    return new org.apache.hadoop.hive.ql.metadata.Table(
-        withinContext.db.getMSC().getTable(
-            insertMsg.getDB(), insertMsg.getTable()
-        )
-    );
+  private org.apache.hadoop.hive.ql.metadata.Table tableObject(InsertMessage insertMsg) throws Exception {
+    return new org.apache.hadoop.hive.ql.metadata.Table(insertMsg.getTableObj());
+  }
+
+  private org.apache.hadoop.hive.ql.metadata.Partition partitionObject(
+          org.apache.hadoop.hive.ql.metadata.Table qlMdTable, InsertMessage insertMsg) throws Exception {
+    return new org.apache.hadoop.hive.ql.metadata.Partition(qlMdTable, insertMsg.getPtnObj());
   }
 
   private BufferedWriter writer(Context withinContext, Path dataPath) throws IOException {
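
A minimal sketch of producing and consuming the reworked insert message under this patch. Only buildInsertMessage, getTableObj, getPtnObj, getFiles and the thrift Table/Partition accessors come from the patch; the surrounding class, the method names, and the way the factory, table, partition and file iterator are obtained are placeholders.

import java.util.Iterator;

import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.messaging.InsertMessage;
import org.apache.hadoop.hive.metastore.messaging.MessageFactory;

public class InsertMessageSketch {

  // Producer side: the full metastore objects are embedded in the message,
  // so a later reader does not have to look the table up again (it may have
  // been dropped by the time the event is dumped).
  static String buildPayload(MessageFactory factory, Table table, Partition ptn,
      boolean replace, Iterator<String> files) {
    InsertMessage msg = factory.buildInsertMessage(table, ptn, replace, files);
    return msg.toString(); // serialized form stored with the notification event
  }

  // Consumer side: the table and (optional) partition are recovered from the
  // message itself rather than from the metastore.
  static void describe(InsertMessage msg) throws Exception {
    Table t = msg.getTableObj();
    System.out.println("insert into " + t.getDbName() + "." + t.getTableName());
    Partition p = msg.getPtnObj(); // null for unpartitioned tables
    if (p != null) {
      System.out.println("partition values: " + p.getValues());
    }
    for (String file : msg.getFiles()) {
      System.out.println("file: " + file);
    }
  }
}

InsertHandler above uses the same accessors to rebuild ql-level Table and Partition objects without a metastore round trip, which is what lets REPL DUMP proceed after the table has already been dropped.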


[11/12] hive git commit: HIVE-16507: Hive Explain User-Level may print out "Vertex dependency in root stage" twice (Sahil Takiar, reviewed by Pengcheng Xiong)

Posted by we...@apache.org.
HIVE-16507: Hive Explain User-Level may print out "Vertex dependency in root stage" twice (Sahil Takiar, reviewed by Pengcheng Xiong)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/4cd42513
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/4cd42513
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/4cd42513

Branch: refs/heads/hive-14535
Commit: 4cd425132e54d7c48aa87fb8509b6011f5139862
Parents: 8dcc78a
Author: Pengcheng Xiong <px...@hortonworks.com>
Authored: Tue May 30 10:27:42 2017 -0700
Committer: Pengcheng Xiong <px...@hortonworks.com>
Committed: Tue May 30 10:27:42 2017 -0700

----------------------------------------------------------------------
 .../hive/common/jsonexplain/DagJsonParser.java  |  6 ++++-
 .../results/clientpositive/llap/cte_mat_3.q.out |  2 +-
 .../results/clientpositive/llap/cte_mat_4.q.out |  4 ++--
 .../results/clientpositive/llap/cte_mat_5.q.out |  2 +-
 .../spark/spark_explainuser_1.q.out             | 24 ++++++++++----------
 5 files changed, 21 insertions(+), 17 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/4cd42513/common/src/java/org/apache/hadoop/hive/common/jsonexplain/DagJsonParser.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/DagJsonParser.java b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/DagJsonParser.java
index 61746c8..f4a9248 100644
--- a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/DagJsonParser.java
+++ b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/DagJsonParser.java
@@ -117,7 +117,11 @@ public abstract class DagJsonParser implements JsonParser {
     // print out the vertex dependency in root stage
     for (Stage candidate : this.stages.values()) {
       if (candidate.tezStageDependency != null && candidate.tezStageDependency.size() > 0) {
-        printer.println("Vertex dependency in root stage");
+        if (candidate.parentStages.size() == 0) {
+          printer.println("Vertex dependency in root stage");
+        } else {
+          printer.println("Vertex dependency in " + candidate.externalName);
+        }
         for (Entry<Vertex, List<Connection>> entry : candidate.tezStageDependency.entrySet()) {
           StringBuilder sb = new StringBuilder();
           sb.append(entry.getKey().name);
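
A standalone sketch of the header selection this hunk introduces; Stage here is a stand-in holding only the two fields the decision reads, not the real jsonexplain class.

import java.util.List;

public class VertexDependencyHeaderSketch {

  // Stand-in for the parser's stage with just the fields the decision reads.
  static class Stage {
    List<Stage> parentStages;
    String externalName;
  }

  // Root stages keep the original header; any other stage is labelled with
  // its own name, so "Vertex dependency in root stage" is printed at most once.
  static String header(Stage candidate) {
    if (candidate.parentStages.size() == 0) {
      return "Vertex dependency in root stage";
    }
    return "Vertex dependency in " + candidate.externalName;
  }
}

The q.out updates below show the effect: stages that have parents now print their own name (for example Stage-4 or Stage-1) instead of repeating the root-stage header.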

http://git-wip-us.apache.org/repos/asf/hive/blob/4cd42513/ql/src/test/results/clientpositive/llap/cte_mat_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/cte_mat_3.q.out b/ql/src/test/results/clientpositive/llap/cte_mat_3.q.out
index cb88f0c..31c5959 100644
--- a/ql/src/test/results/clientpositive/llap/cte_mat_3.q.out
+++ b/ql/src/test/results/clientpositive/llap/cte_mat_3.q.out
@@ -12,7 +12,7 @@ on a.key=b.key
 POSTHOOK: type: QUERY
 Plan optimized by CBO.
 
-Vertex dependency in root stage
+Vertex dependency in Stage-4
 Reducer 3 <- Map 2 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
 
 Stage-3

http://git-wip-us.apache.org/repos/asf/hive/blob/4cd42513/ql/src/test/results/clientpositive/llap/cte_mat_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/cte_mat_4.q.out b/ql/src/test/results/clientpositive/llap/cte_mat_4.q.out
index 1c5be08..c23e985 100644
--- a/ql/src/test/results/clientpositive/llap/cte_mat_4.q.out
+++ b/ql/src/test/results/clientpositive/llap/cte_mat_4.q.out
@@ -51,7 +51,7 @@ on a.key=b.key
 POSTHOOK: type: QUERY
 Plan optimized by CBO.
 
-Vertex dependency in root stage
+Vertex dependency in Stage-4
 Reducer 3 <- Map 2 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
 
 Stage-3
@@ -201,7 +201,7 @@ on a.key=b.key
 POSTHOOK: type: QUERY
 Plan optimized by CBO.
 
-Vertex dependency in root stage
+Vertex dependency in Stage-4
 Reducer 3 <- Map 2 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
 
 Stage-3

http://git-wip-us.apache.org/repos/asf/hive/blob/4cd42513/ql/src/test/results/clientpositive/llap/cte_mat_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/cte_mat_5.q.out b/ql/src/test/results/clientpositive/llap/cte_mat_5.q.out
index 0c22b72..6fa2615 100644
--- a/ql/src/test/results/clientpositive/llap/cte_mat_5.q.out
+++ b/ql/src/test/results/clientpositive/llap/cte_mat_5.q.out
@@ -75,7 +75,7 @@ on a.colnum=b.key
 POSTHOOK: type: QUERY
 Plan optimized by CBO.
 
-Vertex dependency in root stage
+Vertex dependency in Stage-4
 Reducer 3 <- Map 2 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
 
 Stage-3

http://git-wip-us.apache.org/repos/asf/hive/blob/4cd42513/ql/src/test/results/clientpositive/spark/spark_explainuser_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/spark_explainuser_1.q.out b/ql/src/test/results/clientpositive/spark/spark_explainuser_1.q.out
index ca0910a..fd29d6a 100644
--- a/ql/src/test/results/clientpositive/spark/spark_explainuser_1.q.out
+++ b/ql/src/test/results/clientpositive/spark/spark_explainuser_1.q.out
@@ -228,7 +228,7 @@ select src1.key as k1, src1.value as v1,
 POSTHOOK: type: QUERY
 Plan not optimized by CBO.
 
-Vertex dependency in root stage
+Vertex dependency in Stage-1
 Reducer 2 <- Map 1 (PARTITION-LEVEL SORT)
 Reducer 3 <- Reducer 2 (GROUP)
 
@@ -2821,7 +2821,7 @@ FROM (select x.key AS key, count(1) AS cnt
 POSTHOOK: type: QUERY
 Plan optimized by CBO.
 
-Vertex dependency in root stage
+Vertex dependency in Stage-1
 Reducer 3 <- Map 2 (GROUP)
 Reducer 4 <- Reducer 3 (GROUP)
 
@@ -4178,7 +4178,7 @@ sort by j.p_name)
 POSTHOOK: type: QUERY
 Plan not optimized by CBO.
 
-Vertex dependency in root stage
+Vertex dependency in Stage-1
 Reducer 2 <- Map 1 (PARTITION-LEVEL SORT)
 Reducer 3 <- Reducer 2 (PARTITION-LEVEL SORT)
 
@@ -4400,7 +4400,7 @@ order by p_name
 POSTHOOK: type: QUERY
 Plan not optimized by CBO.
 
-Vertex dependency in root stage
+Vertex dependency in Stage-1
 Reducer 2 <- Map 1 (PARTITION-LEVEL SORT)
 
 Stage-0
@@ -4725,7 +4725,7 @@ order by p_name
 POSTHOOK: type: QUERY
 Plan not optimized by CBO.
 
-Vertex dependency in root stage
+Vertex dependency in Stage-1
 Reducer 2 <- Map 1 (PARTITION-LEVEL SORT)
 Reducer 3 <- Reducer 2 (PARTITION-LEVEL SORT)
 
@@ -5484,7 +5484,7 @@ POSTHOOK: query: explain FROM T1 a JOIN src c ON c.key+1=a.key select /*+ STREAM
 POSTHOOK: type: QUERY
 Plan optimized by CBO.
 
-Vertex dependency in root stage
+Vertex dependency in Stage-1
 Reducer 3 <- Map 2 (GROUP)
 Reducer 4 <- Reducer 3 (GROUP)
 
@@ -5606,7 +5606,7 @@ POSTHOOK: query: explain select /*+ mapjoin(k)*/ sum(hash(k.key)), sum(hash(v.va
 POSTHOOK: type: QUERY
 Plan optimized by CBO.
 
-Vertex dependency in root stage
+Vertex dependency in Stage-1
 Reducer 2 <- Map 1 (GROUP)
 Reducer 3 <- Reducer 2 (GROUP)
 
@@ -5654,7 +5654,7 @@ POSTHOOK: query: explain select sum(hash(k.key)), sum(hash(v.val)) from T1 k joi
 POSTHOOK: type: QUERY
 Plan optimized by CBO.
 
-Vertex dependency in root stage
+Vertex dependency in Stage-1
 Reducer 2 <- Map 1 (GROUP)
 Reducer 3 <- Reducer 2 (GROUP)
 
@@ -5702,7 +5702,7 @@ POSTHOOK: query: explain select count(1) from  T1 a join T1 b on a.key = b.key
 POSTHOOK: type: QUERY
 Plan optimized by CBO.
 
-Vertex dependency in root stage
+Vertex dependency in Stage-1
 Reducer 2 <- Map 1 (GROUP)
 Reducer 3 <- Reducer 2 (GROUP)
 
@@ -5748,7 +5748,7 @@ POSTHOOK: query: explain FROM T1 a LEFT OUTER JOIN T2 c ON c.key+1=a.key select
 POSTHOOK: type: QUERY
 Plan optimized by CBO.
 
-Vertex dependency in root stage
+Vertex dependency in Stage-1
 Reducer 2 <- Map 1 (GROUP)
 Reducer 3 <- Reducer 2 (GROUP)
 
@@ -5792,7 +5792,7 @@ POSTHOOK: query: explain FROM T1 a RIGHT OUTER JOIN T2 c ON c.key+1=a.key select
 POSTHOOK: type: QUERY
 Plan optimized by CBO.
 
-Vertex dependency in root stage
+Vertex dependency in Stage-1
 Reducer 3 <- Map 2 (GROUP)
 Reducer 4 <- Reducer 3 (GROUP)
 
@@ -5881,7 +5881,7 @@ POSTHOOK: query: explain select /*+ mapjoin(v)*/ sum(hash(k.key)), sum(hash(v.va
 POSTHOOK: type: QUERY
 Plan optimized by CBO.
 
-Vertex dependency in root stage
+Vertex dependency in Stage-1
 Reducer 2 <- Map 1 (GROUP)
 Reducer 3 <- Reducer 2 (GROUP)
 


[02/12] hive git commit: HIVE-16474: Upgrade Druid version to 0.10 (Nishant Bangarwa, reviewed by Jesus Camacho Rodriguez)

Posted by we...@apache.org.
HIVE-16474: Upgrade Druid version to 0.10 (Nishant Bangarwa, reviewed by Jesus Camacho Rodriguez)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/bd3889e9
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/bd3889e9
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/bd3889e9

Branch: refs/heads/hive-14535
Commit: bd3889e9f6e887231c796263c5e8b76eaca2fc3a
Parents: 4a7bc89
Author: Nishant Bangarwa <ni...@gmail.com>
Authored: Fri May 26 09:21:10 2017 +0100
Committer: Jesus Camacho Rodriguez <jc...@apache.org>
Committed: Fri May 26 09:21:10 2017 +0100

----------------------------------------------------------------------
 .../hive/druid/DruidStorageHandlerUtils.java    | 34 ++++++++----
 .../hadoop/hive/druid/io/DruidOutputFormat.java |  7 ++-
 .../hadoop/hive/druid/io/DruidRecordWriter.java |  2 +-
 .../serde/HiveDruidSerializationModule.java     | 37 ++++++++++++++
 .../serde/PeriodGranularitySerializer.java      | 54 ++++++++++++++++++++
 .../hive/druid/DerbyConnectorTestUtility.java   |  4 +-
 .../hadoop/hive/druid/TestDruidSerDe.java       |  2 +-
 .../hive/druid/TestDruidStorageHandler.java     |  2 +-
 .../TestHiveDruidQueryBasedInputFormat.java     | 38 ++++++++------
 .../hive/ql/io/TestDruidRecordWriter.java       | 11 ++--
 pom.xml                                         |  2 +-
 11 files changed, 151 insertions(+), 42 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/bd3889e9/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
index adf013b..0e33836 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
@@ -17,8 +17,16 @@
  */
 package org.apache.hadoop.hive.druid;
 
+import com.fasterxml.jackson.core.JsonGenerator;
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.core.util.VersionUtil;
+import com.fasterxml.jackson.databind.InjectableValues;
+import com.fasterxml.jackson.databind.JsonSerializer;
 import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.SerializerProvider;
 import com.fasterxml.jackson.databind.jsontype.NamedType;
+import com.fasterxml.jackson.databind.jsontype.TypeSerializer;
+import com.fasterxml.jackson.databind.module.SimpleModule;
 import com.fasterxml.jackson.dataformat.smile.SmileFactory;
 import com.google.common.base.Throwables;
 import com.google.common.collect.ImmutableList;
@@ -39,6 +47,7 @@ import io.druid.metadata.MetadataStorageTablesConfig;
 import io.druid.metadata.SQLMetadataConnector;
 import io.druid.metadata.storage.mysql.MySQLConnector;
 import io.druid.query.BaseQuery;
+import io.druid.query.select.SelectQueryConfig;
 import io.druid.segment.IndexIO;
 import io.druid.segment.IndexMergerV9;
 import io.druid.segment.column.ColumnConfig;
@@ -50,7 +59,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.common.JavaUtils;
+import org.apache.hadoop.hive.druid.serde.HiveDruidSerializationModule;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.io.retry.RetryPolicies;
 import org.apache.hadoop.io.retry.RetryProxy;
@@ -75,17 +84,12 @@ import java.io.InputStream;
 import java.io.InputStreamReader;
 import java.io.OutputStream;
 import java.io.Reader;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
 import java.net.InetAddress;
 import java.net.URL;
-import java.net.URLDecoder;
 import java.net.UnknownHostException;
 import java.sql.SQLException;
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.Enumeration;
-import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
@@ -93,10 +97,6 @@ import java.util.Set;
 import java.util.TimeZone;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
-import java.util.zip.ZipEntry;
-import java.util.zip.ZipFile;
-
-import static org.apache.hadoop.hive.ql.exec.Utilities.jarFinderGetJar;
 
 /**
  * Utils class for Druid storage handler.
@@ -117,6 +117,20 @@ public final class DruidStorageHandlerUtils {
    */
   public static final ObjectMapper SMILE_MAPPER = new DefaultObjectMapper(new SmileFactory());
 
+  static
+  {
+    // This is needed for serde of PagingSpec as it uses JacksonInject for injecting SelectQueryConfig
+    InjectableValues.Std injectableValues = new InjectableValues.Std().addValue(
+        SelectQueryConfig.class,
+        new SelectQueryConfig(false)
+    );
+    JSON_MAPPER.setInjectableValues(injectableValues);
+    SMILE_MAPPER.setInjectableValues(injectableValues);
+    HiveDruidSerializationModule hiveDruidSerializationModule = new HiveDruidSerializationModule();
+    JSON_MAPPER.registerModule(hiveDruidSerializationModule);
+    SMILE_MAPPER.registerModule(hiveDruidSerializationModule);
+  }
+
   private static final int NUM_RETRIES = 8;
 
   private static final int SECONDS_BETWEEN_RETRIES = 2;
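
A quick aside on the Jackson mechanics used by the static block above: InjectableValues supplies the SelectQueryConfig that PagingSpec expects via JacksonInject, and registerModule() installs the custom serializers. As a minimal, self-contained sketch of that module-registration pattern (using a hypothetical Widget type that is not part of Hive or Druid), consider:

  import java.io.IOException;

  import com.fasterxml.jackson.core.JsonGenerator;
  import com.fasterxml.jackson.databind.JsonSerializer;
  import com.fasterxml.jackson.databind.ObjectMapper;
  import com.fasterxml.jackson.databind.SerializerProvider;
  import com.fasterxml.jackson.databind.module.SimpleModule;

  public class ModuleRegistrationSketch {
    // Widget is a placeholder type used only for this illustration.
    static class Widget {
      final String name;
      Widget(String name) { this.name = name; }
    }

    // Overrides serialization for Widget, analogous to what
    // HiveDruidSerializationModule does for PeriodGranularity.
    static class WidgetSerializer extends JsonSerializer<Widget> {
      @Override
      public void serialize(Widget value, JsonGenerator gen, SerializerProvider serializers)
          throws IOException {
        gen.writeStartObject();
        gen.writeStringField("widgetName", value.name);
        gen.writeEndObject();
      }
    }

    public static void main(String[] args) throws IOException {
      ObjectMapper mapper = new ObjectMapper();
      SimpleModule module = new SimpleModule("WidgetModule");
      module.addSerializer(Widget.class, new WidgetSerializer());
      mapper.registerModule(module);
      // Prints {"widgetName":"w1"}
      System.out.println(mapper.writeValueAsString(new Widget("w1")));
    }
  }

Registering the module on both JSON_MAPPER and SMILE_MAPPER, as the patch does, keeps the two wire formats consistent.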

http://git-wip-us.apache.org/repos/asf/hive/blob/bd3889e9/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidOutputFormat.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidOutputFormat.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidOutputFormat.java
index fbdd4c9..31db86a 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidOutputFormat.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidOutputFormat.java
@@ -21,7 +21,6 @@ import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Lists;
-import com.metamx.common.Granularity;
 import io.druid.data.input.impl.DimensionSchema;
 import io.druid.data.input.impl.DimensionsSpec;
 import io.druid.data.input.impl.InputRowParser;
@@ -29,7 +28,7 @@ import io.druid.data.input.impl.MapInputRowParser;
 import io.druid.data.input.impl.StringDimensionSchema;
 import io.druid.data.input.impl.TimeAndDimsParseSpec;
 import io.druid.data.input.impl.TimestampSpec;
-import io.druid.granularity.QueryGranularity;
+import io.druid.java.util.common.granularity.Granularity;
 import io.druid.query.aggregation.AggregatorFactory;
 import io.druid.query.aggregation.DoubleSumAggregatorFactory;
 import io.druid.query.aggregation.LongSumAggregatorFactory;
@@ -106,8 +105,8 @@ public class DruidOutputFormat<K, V> implements HiveOutputFormat<K, DruidWritabl
             hdfsDataSegmentPusherConfig, jc, DruidStorageHandlerUtils.JSON_MAPPER);
 
     final GranularitySpec granularitySpec = new UniformGranularitySpec(
-            Granularity.valueOf(segmentGranularity),
-            QueryGranularity.fromString(
+            Granularity.fromString(segmentGranularity),
+            Granularity.fromString(
                     tableProperties.getProperty(Constants.DRUID_QUERY_GRANULARITY) == null
                             ? "NONE"
                             : tableProperties.getProperty(Constants.DRUID_QUERY_GRANULARITY)),

http://git-wip-us.apache.org/repos/asf/hive/blob/bd3889e9/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidRecordWriter.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidRecordWriter.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidRecordWriter.java
index 8d22df6..e97f588 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidRecordWriter.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidRecordWriter.java
@@ -25,10 +25,10 @@ import com.google.common.base.Suppliers;
 import com.google.common.base.Throwables;
 import com.google.common.collect.FluentIterable;
 import com.google.common.collect.Lists;
-import com.metamx.common.Granularity;
 import io.druid.data.input.Committer;
 import io.druid.data.input.InputRow;
 import io.druid.data.input.MapBasedInputRow;
+import io.druid.java.util.common.granularity.Granularity;
 import io.druid.segment.indexing.DataSchema;
 import io.druid.segment.indexing.RealtimeTuningConfig;
 import io.druid.segment.loading.DataSegmentPusher;

http://git-wip-us.apache.org/repos/asf/hive/blob/bd3889e9/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/HiveDruidSerializationModule.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/HiveDruidSerializationModule.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/HiveDruidSerializationModule.java
new file mode 100644
index 0000000..0d56fc5
--- /dev/null
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/HiveDruidSerializationModule.java
@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.druid.serde;
+
+import io.druid.java.util.common.granularity.PeriodGranularity;
+
+import com.fasterxml.jackson.core.util.VersionUtil;
+import com.fasterxml.jackson.databind.module.SimpleModule;
+
+/**
+ * This class is used to define/override any serde behavior for classes from druid.
+ * Currently it is used to override the default behavior when serializing PeriodGranularity to include user timezone.
+ */
+public class HiveDruidSerializationModule extends SimpleModule {
+  private static final String NAME = "HiveDruidSerializationModule";
+  private static final VersionUtil VERSION_UTIL = new VersionUtil() {};
+
+  public HiveDruidSerializationModule() {
+    super(NAME, VERSION_UTIL.version());
+    addSerializer(PeriodGranularity.class, new PeriodGranularitySerializer());
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/bd3889e9/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/PeriodGranularitySerializer.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/PeriodGranularitySerializer.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/PeriodGranularitySerializer.java
new file mode 100644
index 0000000..3ea4727
--- /dev/null
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/PeriodGranularitySerializer.java
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.druid.serde;
+
+import io.druid.java.util.common.granularity.PeriodGranularity;
+
+import com.fasterxml.jackson.core.JsonGenerator;
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.JsonSerializer;
+import com.fasterxml.jackson.databind.SerializerProvider;
+import com.fasterxml.jackson.databind.jsontype.TypeSerializer;
+
+import org.joda.time.DateTimeZone;
+
+import java.io.IOException;
+
+public class PeriodGranularitySerializer extends JsonSerializer<PeriodGranularity> {
+
+  @Override
+  public void serialize(PeriodGranularity granularity, JsonGenerator jsonGenerator,
+          SerializerProvider serializerProvider) throws IOException, JsonProcessingException {
+    // Set timezone based on user timezone if origin is not already set
+    // as it is default Hive time semantics to consider user timezone.
+    PeriodGranularity granularityWithUserTimezone = new PeriodGranularity(
+            granularity.getPeriod(),
+            granularity.getOrigin(),
+            DateTimeZone.getDefault()
+    );
+    granularityWithUserTimezone.serialize(jsonGenerator, serializerProvider);
+  }
+
+  @Override
+  public void serializeWithType(PeriodGranularity value, JsonGenerator gen,
+          SerializerProvider serializers, TypeSerializer typeSer) throws IOException {
+    serialize(value, gen, serializers);
+  }
+}
+
+

http://git-wip-us.apache.org/repos/asf/hive/blob/bd3889e9/druid-handler/src/test/org/apache/hadoop/hive/druid/DerbyConnectorTestUtility.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/test/org/apache/hadoop/hive/druid/DerbyConnectorTestUtility.java b/druid-handler/src/test/org/apache/hadoop/hive/druid/DerbyConnectorTestUtility.java
index f9304a5..627f078 100644
--- a/druid-handler/src/test/org/apache/hadoop/hive/druid/DerbyConnectorTestUtility.java
+++ b/druid-handler/src/test/org/apache/hadoop/hive/druid/DerbyConnectorTestUtility.java
@@ -23,6 +23,8 @@ import com.google.common.base.Suppliers;
 import io.druid.metadata.MetadataStorageConnectorConfig;
 import io.druid.metadata.MetadataStorageTablesConfig;
 import io.druid.metadata.storage.derby.DerbyConnector;
+import io.druid.metadata.storage.derby.DerbyMetadataStorage;
+
 import org.junit.Assert;
 import org.junit.rules.ExternalResource;
 import org.skife.jdbi.v2.DBI;
@@ -46,7 +48,7 @@ public class DerbyConnectorTestUtility extends DerbyConnector {
           Supplier<MetadataStorageTablesConfig> dbTables,
           String jdbcUri
   ) {
-    super(config, dbTables, new DBI(jdbcUri + ";create=true"));
+    super(new DerbyMetadataStorage(config.get()), config, dbTables, new DBI(jdbcUri + ";create=true"));
     this.jdbcUri = jdbcUri;
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/bd3889e9/druid-handler/src/test/org/apache/hadoop/hive/druid/TestDruidSerDe.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/test/org/apache/hadoop/hive/druid/TestDruidSerDe.java b/druid-handler/src/test/org/apache/hadoop/hive/druid/TestDruidSerDe.java
index a67afdb..1bd5d84 100644
--- a/druid-handler/src/test/org/apache/hadoop/hive/druid/TestDruidSerDe.java
+++ b/druid-handler/src/test/org/apache/hadoop/hive/druid/TestDruidSerDe.java
@@ -554,7 +554,7 @@ public class TestDruidSerDe {
     Query<?> query = null;
     DruidQueryRecordReader<?, ?> reader = null;
     List<?> resultsList = null;
-    ObjectMapper mapper = new DefaultObjectMapper();
+    ObjectMapper mapper = DruidStorageHandlerUtils.JSON_MAPPER;
     switch (queryType) {
       case Query.TIMESERIES:
         query = mapper.readValue(jsonQuery, TimeseriesQuery.class);

http://git-wip-us.apache.org/repos/asf/hive/blob/bd3889e9/druid-handler/src/test/org/apache/hadoop/hive/druid/TestDruidStorageHandler.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/test/org/apache/hadoop/hive/druid/TestDruidStorageHandler.java b/druid-handler/src/test/org/apache/hadoop/hive/druid/TestDruidStorageHandler.java
index 1fe155a..dca558e 100644
--- a/druid-handler/src/test/org/apache/hadoop/hive/druid/TestDruidStorageHandler.java
+++ b/druid-handler/src/test/org/apache/hadoop/hive/druid/TestDruidStorageHandler.java
@@ -193,7 +193,7 @@ public class TestDruidStorageHandler {
     LocalFileSystem localFileSystem = FileSystem.getLocal(config);
 
     Path segmentOutputPath = JobHelper
-            .makeSegmentOutputPath(new Path(segmentRootPath), localFileSystem, dataSegment);
+            .makeFileNamePath(new Path(segmentRootPath), localFileSystem, dataSegment, JobHelper.INDEX_ZIP);
     Path indexPath = new Path(segmentOutputPath, "index.zip");
     DataSegment dataSegmentWithLoadspect = DataSegment.builder(dataSegment).loadSpec(
             ImmutableMap.<String, Object>of("path", indexPath)).build();

http://git-wip-us.apache.org/repos/asf/hive/blob/bd3889e9/druid-handler/src/test/org/apache/hadoop/hive/druid/TestHiveDruidQueryBasedInputFormat.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/test/org/apache/hadoop/hive/druid/TestHiveDruidQueryBasedInputFormat.java b/druid-handler/src/test/org/apache/hadoop/hive/druid/TestHiveDruidQueryBasedInputFormat.java
index bb4011b..2aeb279 100644
--- a/druid-handler/src/test/org/apache/hadoop/hive/druid/TestHiveDruidQueryBasedInputFormat.java
+++ b/druid-handler/src/test/org/apache/hadoop/hive/druid/TestHiveDruidQueryBasedInputFormat.java
@@ -147,8 +147,9 @@ public class TestHiveDruidQueryBasedInputFormat extends TestCase {
           + "\"dataSource\":{\"type\":\"table\",\"name\":\"sample_datasource\"},"
           + "\"intervals\":{\"type\":\"LegacySegmentSpec\",\"intervals\":[\"2012-01-01T00:00:00.000-08:00/2012-01-03T00:00:00.000-08:00\"]},"
           + "\"descending\":true,"
+          + "\"virtualColumns\":[],"
           + "\"filter\":null,"
-          + "\"granularity\":{\"type\":\"duration\",\"duration\":86400000,\"origin\":\"1969-12-31T16:00:00.000-08:00\"},"
+          + "\"granularity\":{\"type\":\"period\",\"period\":\"P1D\",\"timeZone\":\"America/Los_Angeles\",\"origin\":null},"
           + "\"aggregations\":[],"
           + "\"postAggregations\":[],"
           + "\"context\":null}, [localhost:8082]}]";
@@ -178,14 +179,15 @@ public class TestHiveDruidQueryBasedInputFormat extends TestCase {
   private static final String TOPN_QUERY_SPLIT =
       "[HiveDruidSplit{{\"queryType\":\"topN\","
           + "\"dataSource\":{\"type\":\"table\",\"name\":\"sample_data\"},"
-          + "\"dimension\":{\"type\":\"LegacyDimensionSpec\",\"dimension\":\"sample_dim\",\"outputName\":\"sample_dim\"},"
+          + "\"virtualColumns\":[],"
+          + "\"dimension\":{\"type\":\"LegacyDimensionSpec\",\"dimension\":\"sample_dim\",\"outputName\":\"sample_dim\",\"outputType\":\"STRING\"},"
           + "\"metric\":{\"type\":\"LegacyTopNMetricSpec\",\"metric\":\"count\"},"
           + "\"threshold\":5,"
           + "\"intervals\":{\"type\":\"LegacySegmentSpec\",\"intervals\":[\"2013-08-31T00:00:00.000-07:00/2013-09-03T00:00:00.000-07:00\"]},"
           + "\"filter\":null,"
           + "\"granularity\":{\"type\":\"all\"},"
-          + "\"aggregations\":[{\"type\":\"longSum\",\"name\":\"count\",\"fieldName\":\"count\"},"
-          + "{\"type\":\"doubleSum\",\"name\":\"some_metric\",\"fieldName\":\"some_metric\"}],"
+          + "\"aggregations\":[{\"type\":\"longSum\",\"name\":\"count\",\"fieldName\":\"count\",\"expression\":null},"
+          + "{\"type\":\"doubleSum\",\"name\":\"some_metric\",\"fieldName\":\"some_metric\",\"expression\":null}],"
           + "\"postAggregations\":[],"
           + "\"context\":null,"
           + "\"descending\":false}, [localhost:8082]}]";
@@ -209,12 +211,13 @@ public class TestHiveDruidQueryBasedInputFormat extends TestCase {
       "[HiveDruidSplit{{\"queryType\":\"groupBy\","
           + "\"dataSource\":{\"type\":\"table\",\"name\":\"sample_datasource\"},"
           + "\"intervals\":{\"type\":\"LegacySegmentSpec\",\"intervals\":[\"2012-01-01T00:00:00.000-08:00/2012-01-03T00:00:00.000-08:00\"]},"
+          + "\"virtualColumns\":[],"
           + "\"filter\":null,"
-          + "\"granularity\":{\"type\":\"duration\",\"duration\":86400000,\"origin\":\"1969-12-31T16:00:00.000-08:00\"},"
-          + "\"dimensions\":[{\"type\":\"LegacyDimensionSpec\",\"dimension\":\"country\",\"outputName\":\"country\"},"
-          + "{\"type\":\"LegacyDimensionSpec\",\"dimension\":\"device\",\"outputName\":\"device\"}],"
-          + "\"aggregations\":[{\"type\":\"longSum\",\"name\":\"total_usage\",\"fieldName\":\"user_count\"},"
-          + "{\"type\":\"doubleSum\",\"name\":\"data_transfer\",\"fieldName\":\"data_transfer\"}],"
+          + "\"granularity\":{\"type\":\"period\",\"period\":\"P1D\",\"timeZone\":\"America/Los_Angeles\",\"origin\":null},"
+          + "\"dimensions\":[{\"type\":\"LegacyDimensionSpec\",\"dimension\":\"country\",\"outputName\":\"country\",\"outputType\":\"STRING\"},"
+          + "{\"type\":\"LegacyDimensionSpec\",\"dimension\":\"device\",\"outputName\":\"device\",\"outputType\":\"STRING\"}],"
+          + "\"aggregations\":[{\"type\":\"longSum\",\"name\":\"total_usage\",\"fieldName\":\"user_count\",\"expression\":null},"
+          + "{\"type\":\"doubleSum\",\"name\":\"data_transfer\",\"fieldName\":\"data_transfer\",\"expression\":null}],"
           + "\"postAggregations\":[],"
           + "\"having\":null,"
           + "\"limitSpec\":{\"type\":\"default\",\"columns\":[{\"dimension\":\"country\",\"direction\":\"ascending\",\"dimensionOrder\":{\"type\":\"lexicographic\"}},"
@@ -238,15 +241,16 @@ public class TestHiveDruidQueryBasedInputFormat extends TestCase {
           + "\"descending\":false,"
           + "\"filter\":null,"
           + "\"granularity\":{\"type\":\"all\"},"
-          + "\"dimensions\":[{\"type\":\"LegacyDimensionSpec\",\"dimension\":\"robot\",\"outputName\":\"robot\"},"
-          + "{\"type\":\"LegacyDimensionSpec\",\"dimension\":\"namespace\",\"outputName\":\"namespace\"},"
-          + "{\"type\":\"LegacyDimensionSpec\",\"dimension\":\"anonymous\",\"outputName\":\"anonymous\"},"
-          + "{\"type\":\"LegacyDimensionSpec\",\"dimension\":\"unpatrolled\",\"outputName\":\"unpatrolled\"},"
-          + "{\"type\":\"LegacyDimensionSpec\",\"dimension\":\"page\",\"outputName\":\"page\"},"
-          + "{\"type\":\"LegacyDimensionSpec\",\"dimension\":\"language\",\"outputName\":\"language\"},"
-          + "{\"type\":\"LegacyDimensionSpec\",\"dimension\":\"newpage\",\"outputName\":\"newpage\"},"
-          + "{\"type\":\"LegacyDimensionSpec\",\"dimension\":\"user\",\"outputName\":\"user\"}],"
+          + "\"dimensions\":[{\"type\":\"LegacyDimensionSpec\",\"dimension\":\"robot\",\"outputName\":\"robot\",\"outputType\":\"STRING\"},"
+          + "{\"type\":\"LegacyDimensionSpec\",\"dimension\":\"namespace\",\"outputName\":\"namespace\",\"outputType\":\"STRING\"},"
+          + "{\"type\":\"LegacyDimensionSpec\",\"dimension\":\"anonymous\",\"outputName\":\"anonymous\",\"outputType\":\"STRING\"},"
+          + "{\"type\":\"LegacyDimensionSpec\",\"dimension\":\"unpatrolled\",\"outputName\":\"unpatrolled\",\"outputType\":\"STRING\"},"
+          + "{\"type\":\"LegacyDimensionSpec\",\"dimension\":\"page\",\"outputName\":\"page\",\"outputType\":\"STRING\"},"
+          + "{\"type\":\"LegacyDimensionSpec\",\"dimension\":\"language\",\"outputName\":\"language\",\"outputType\":\"STRING\"},"
+          + "{\"type\":\"LegacyDimensionSpec\",\"dimension\":\"newpage\",\"outputName\":\"newpage\",\"outputType\":\"STRING\"},"
+          + "{\"type\":\"LegacyDimensionSpec\",\"dimension\":\"user\",\"outputName\":\"user\",\"outputType\":\"STRING\"}],"
           + "\"metrics\":[\"count\",\"added\",\"delta\",\"variation\",\"deleted\"],"
+          + "\"virtualColumns\":[],"
           + "\"pagingSpec\":{\"pagingIdentifiers\":{},\"threshold\":5,\"fromNext\":false},"
           + "\"context\":{\"druid.query.fetch\":true}}, [localhost:8082]}]";
 

http://git-wip-us.apache.org/repos/asf/hive/blob/bd3889e9/druid-handler/src/test/org/apache/hadoop/hive/ql/io/TestDruidRecordWriter.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/test/org/apache/hadoop/hive/ql/io/TestDruidRecordWriter.java b/druid-handler/src/test/org/apache/hadoop/hive/ql/io/TestDruidRecordWriter.java
index d9e01fe..d5b217a 100644
--- a/druid-handler/src/test/org/apache/hadoop/hive/ql/io/TestDruidRecordWriter.java
+++ b/druid-handler/src/test/org/apache/hadoop/hive/ql/io/TestDruidRecordWriter.java
@@ -23,7 +23,6 @@ import com.google.common.base.Function;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Lists;
-import com.metamx.common.Granularity;
 import io.druid.data.input.Firehose;
 import io.druid.data.input.InputRow;
 import io.druid.data.input.impl.DimensionSchema;
@@ -33,7 +32,7 @@ import io.druid.data.input.impl.MapInputRowParser;
 import io.druid.data.input.impl.StringDimensionSchema;
 import io.druid.data.input.impl.TimeAndDimsParseSpec;
 import io.druid.data.input.impl.TimestampSpec;
-import io.druid.granularity.QueryGranularities;
+import io.druid.java.util.common.granularity.Granularities;
 import io.druid.query.aggregation.AggregatorFactory;
 import io.druid.query.aggregation.LongSumAggregatorFactory;
 import io.druid.query.aggregation.hyperloglog.HyperUniquesAggregatorFactory;
@@ -110,7 +109,7 @@ public class TestDruidRecordWriter {
           )
   );
 
-  // This test need this patch https://github.com/druid-io/druid/pull/3483
+  // This test fails due to conflict of guava classes with hive-exec jar.
   @Ignore
   @Test
   public void testWrite() throws IOException, SegmentLoadingException {
@@ -136,7 +135,7 @@ public class TestDruidRecordWriter {
                     new HyperUniquesAggregatorFactory("unique_hosts", "unique_hosts")
             },
             new UniformGranularitySpec(
-                    Granularity.DAY, QueryGranularities.NONE, ImmutableList.of(INTERVAL_FULL)
+                    Granularities.DAY, Granularities.NONE, ImmutableList.of(INTERVAL_FULL)
             ),
             objectMapper
     );
@@ -167,7 +166,7 @@ public class TestDruidRecordWriter {
               ) {
                 return new DruidWritable(ImmutableMap.<String, Object>builder().putAll(input)
                         .put(Constants.DRUID_TIMESTAMP_GRANULARITY_COL_NAME,
-                                Granularity.DAY.truncate(
+                                Granularities.DAY.bucketStart(
                                         new DateTime((long) input
                                                 .get(DruidTable.DEFAULT_TIMESTAMP_COLUMN)))
                                         .getMillis()
@@ -194,7 +193,7 @@ public class TestDruidRecordWriter {
             ImmutableList.of("host"),
             ImmutableList.of("visited_sum", "unique_hosts"),
             null,
-            QueryGranularities.NONE
+            Granularities.NONE
     );
 
     List<InputRow> rows = Lists.newArrayList();

http://git-wip-us.apache.org/repos/asf/hive/blob/bd3889e9/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 30fa50b..e3ff84f 100644
--- a/pom.xml
+++ b/pom.xml
@@ -137,7 +137,7 @@
     <derby.version>10.10.2.0</derby.version>
     <dropwizard.version>3.1.0</dropwizard.version>
     <dropwizard-metrics-hadoop-metrics2-reporter.version>0.1.2</dropwizard-metrics-hadoop-metrics2-reporter.version>
-    <druid.version>0.9.2</druid.version>
+    <druid.version>0.10.0</druid.version>
     <guava.version>14.0.1</guava.version>
     <groovy.version>2.4.4</groovy.version>
     <h2database.version>1.3.166</h2database.version>


[03/12] hive git commit: HIVE-16745 Syntax error in mysql patch script for HIVE-16556

Posted by we...@apache.org.
HIVE-16745 Syntax error in mysql patch script for HIVE-16556


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/573a1815
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/573a1815
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/573a1815

Branch: refs/heads/hive-14535
Commit: 573a1815fa78cb7759b217adad99a123fbcf4710
Parents: bd3889e
Author: Naveen Gangam <ng...@apache.org>
Authored: Fri May 26 15:15:12 2017 -0400
Committer: Naveen Gangam <ng...@apache.org>
Committed: Fri May 26 15:15:12 2017 -0400

----------------------------------------------------------------------
 metastore/scripts/upgrade/mysql/041-HIVE-16556.mysql.sql | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/573a1815/metastore/scripts/upgrade/mysql/041-HIVE-16556.mysql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mysql/041-HIVE-16556.mysql.sql b/metastore/scripts/upgrade/mysql/041-HIVE-16556.mysql.sql
index f8b97bb..848d546 100644
--- a/metastore/scripts/upgrade/mysql/041-HIVE-16556.mysql.sql
+++ b/metastore/scripts/upgrade/mysql/041-HIVE-16556.mysql.sql
@@ -5,5 +5,5 @@ CREATE TABLE IF NOT EXISTS `METASTORE_DB_PROPERTIES` (
   `PROPERTY_KEY` varchar(255) NOT NULL,
   `PROPERTY_VALUE` varchar(1000) NOT NULL,
   `DESCRIPTION` varchar(1000),
- PRIMARY KEY(`PROPERTY_KEY`),
+ PRIMARY KEY(`PROPERTY_KEY`)
 ) ENGINE=InnoDB DEFAULT CHARSET=latin1;


[08/12] hive git commit: HIVE-16777. LLAP: Use separate tokens and UGI instances when an external client is used. (Siddharth Seth, reviewed by Sergey Shelukhin)

Posted by we...@apache.org.
HIVE-16777. LLAP: Use separate tokens and UGI instances when an external client is used. (Siddharth Seth, reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/a18e7728
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/a18e7728
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/a18e7728

Branch: refs/heads/hive-14535
Commit: a18e7728144d525d14bb8e1d407d251c98cad956
Parents: 3330403
Author: Siddharth Seth <ss...@apache.org>
Authored: Sat May 27 10:26:30 2017 -0700
Committer: Siddharth Seth <ss...@apache.org>
Committed: Sat May 27 10:26:30 2017 -0700

----------------------------------------------------------------------
 .../hadoop/hive/llap/daemon/impl/QueryInfo.java       |  2 ++
 .../hive/llap/daemon/impl/TaskRunnerCallable.java     | 14 +++++++++++++-
 2 files changed, 15 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/a18e7728/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryInfo.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryInfo.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryInfo.java
index a6d9d54..6c891c9 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryInfo.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryInfo.java
@@ -99,6 +99,8 @@ public class QueryInfo {
     final InetSocketAddress address =
         NetUtils.createSocketAddrForHost(amNodeId.getHostname(), amNodeId.getPort());
     SecurityUtil.setTokenService(appToken, address);
+    // TODO Caching this and re-using across submissions breaks AM recovery, since the
+    // new AM may run on a different host/port.
   }
 
   public QueryIdentifier getQueryIdentifier() {

http://git-wip-us.apache.org/repos/asf/hive/blob/a18e7728/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
index 7d7fd23..ceca1ad 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hive.ql.io.IOContextMap;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -219,7 +220,18 @@ public class TaskRunnerCallable extends CallableWithNdc<TaskRunner2Result> {
           TezCommonUtils.convertJobTokenToBytes(jobToken));
       Multimap<String, String> startedInputsMap = createStartedInputMap(vertex);
 
-      final UserGroupInformation taskOwner = fragmentInfo.getQueryInfo().getUmbilicalUgi();
+      final UserGroupInformation taskOwner;
+      if (!vertex.getIsExternalSubmission()) {
+        taskOwner = fragmentInfo.getQueryInfo().getUmbilicalUgi();
+      } else {
+        // Temporary, till the external interface makes use of a single connection per
+        // instance.
+        taskOwner = UserGroupInformation.createRemoteUser(vertex.getTokenIdentifier());
+        taskOwner.addToken(jobToken);
+        final InetSocketAddress address =
+            NetUtils.createSocketAddrForHost(request.getAmHost(), request.getAmPort());
+        SecurityUtil.setTokenService(jobToken, address);
+      }
       if (LOG.isDebugEnabled()) {
         LOG.debug("taskOwner hashCode:" + taskOwner.hashCode());
       }
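
The essence of the change above is that an externally submitted fragment gets its own UserGroupInformation and its own copy of the job token, instead of reusing the cached umbilical UGI. A hedged, self-contained sketch of that Hadoop pattern follows; the token contents, user name, and AM address are placeholders rather than values taken from the Hive code:

  import java.net.InetSocketAddress;

  import org.apache.hadoop.io.Text;
  import org.apache.hadoop.net.NetUtils;
  import org.apache.hadoop.security.SecurityUtil;
  import org.apache.hadoop.security.UserGroupInformation;
  import org.apache.hadoop.security.token.Token;
  import org.apache.hadoop.security.token.TokenIdentifier;

  public class ExternalUgiSketch {
    public static void main(String[] args) {
      // Placeholder token; a real job token is handed over by the submitting client.
      Token<TokenIdentifier> jobToken =
          new Token<>(new byte[0], new byte[0], new Text("placeholder_kind"), new Text(""));

      // Point the token's service at the AM address for this submission.
      InetSocketAddress amAddress = NetUtils.createSocketAddrForHost("am-host.example.com", 30002);
      SecurityUtil.setTokenService(jobToken, amAddress);

      // One remote-user UGI per submission, not a shared cached instance.
      UserGroupInformation taskOwner = UserGroupInformation.createRemoteUser("query-id-0001");
      taskOwner.addToken(jobToken);
    }
  }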


[06/12] hive git commit: HIVE-16285: Servlet for dynamically configuring log levels (Prasanth Jayachandran reviewed by Siddharth Seth, Gopal V)

Posted by we...@apache.org.
HIVE-16285: Servlet for dynamically configuring log levels (Prasanth Jayachandran reviewed by Siddharth Seth, Gopal V)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ca80968e
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ca80968e
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ca80968e

Branch: refs/heads/hive-14535
Commit: ca80968e039382b8def51adb2a4520e76c89f7fb
Parents: c3c6175
Author: Prasanth Jayachandran <pr...@apache.org>
Authored: Fri May 26 15:30:53 2017 -0700
Committer: Prasanth Jayachandran <pr...@apache.org>
Committed: Fri May 26 15:30:53 2017 -0700

----------------------------------------------------------------------
 .../java/org/apache/hive/http/HttpServer.java   |   1 +
 .../hive/http/Log4j2ConfiguratorServlet.java    | 275 +++++++++++++++++++
 .../llap/daemon/impl/TaskExecutorService.java   |  42 ++-
 .../llap/tezplugins/LlapTaskCommunicator.java   |   5 +-
 .../hive/ql/exec/AppMasterEventOperator.java    |  14 +-
 .../hadoop/hive/ql/exec/CommonJoinOperator.java |   2 +-
 .../hadoop/hive/ql/exec/DemuxOperator.java      |  26 +-
 .../hadoop/hive/ql/exec/FileSinkOperator.java   |  20 +-
 .../hadoop/hive/ql/exec/GroupByOperator.java    |  17 +-
 .../hive/ql/exec/HashTableSinkOperator.java     |   4 +-
 .../hadoop/hive/ql/exec/JoinOperator.java       |   2 +-
 .../hadoop/hive/ql/exec/MapJoinOperator.java    |   8 +-
 .../apache/hadoop/hive/ql/exec/MapOperator.java |  10 +-
 .../apache/hadoop/hive/ql/exec/MuxOperator.java |   8 +-
 .../apache/hadoop/hive/ql/exec/Operator.java    |  49 ++--
 .../hive/ql/exec/OrcFileMergeOperator.java      |   6 +-
 .../hadoop/hive/ql/exec/ReduceSinkOperator.java |  12 +-
 .../hadoop/hive/ql/exec/SMBMapJoinOperator.java |   8 +-
 .../hadoop/hive/ql/exec/ScriptOperator.java     |   8 +-
 .../hadoop/hive/ql/exec/SelectOperator.java     |   2 +-
 .../hadoop/hive/ql/exec/TableScanOperator.java  |   8 +-
 .../hadoop/hive/ql/exec/UnionOperator.java      |   2 +-
 .../hadoop/hive/ql/exec/mr/ExecReducer.java     |  12 +-
 .../hadoop/hive/ql/exec/mr/ObjectCache.java     |   5 +-
 .../ql/exec/spark/SparkMapRecordHandler.java    |   7 +-
 .../ql/exec/spark/SparkReduceRecordHandler.java |   9 +-
 .../ql/exec/tez/ColumnarSplitSizeEstimator.java |   5 +-
 .../tez/HostAffinitySplitLocationProvider.java  |   2 +-
 .../hive/ql/exec/tez/LlapObjectCache.java       |  12 +-
 .../hive/ql/exec/tez/RecordProcessor.java       |   6 -
 .../mapjoin/VectorMapJoinCommonOperator.java    |  12 +-
 .../VectorMapJoinGenerateResultOperator.java    |   8 +-
 .../VectorMapJoinInnerBigOnlyLongOperator.java  |   8 +-
 ...ctorMapJoinInnerBigOnlyMultiKeyOperator.java |   8 +-
 ...VectorMapJoinInnerBigOnlyStringOperator.java |   8 +-
 .../mapjoin/VectorMapJoinInnerLongOperator.java |   8 +-
 .../VectorMapJoinInnerMultiKeyOperator.java     |   8 +-
 .../VectorMapJoinInnerStringOperator.java       |   8 +-
 .../VectorMapJoinLeftSemiLongOperator.java      |   8 +-
 .../VectorMapJoinLeftSemiMultiKeyOperator.java  |   8 +-
 .../VectorMapJoinLeftSemiStringOperator.java    |   8 +-
 ...ectorMapJoinOuterGenerateResultOperator.java |  12 +-
 .../mapjoin/VectorMapJoinOuterLongOperator.java |  10 +-
 .../VectorMapJoinOuterMultiKeyOperator.java     |  10 +-
 .../VectorMapJoinOuterStringOperator.java       |  10 +-
 .../fast/VectorMapJoinFastBytesHashTable.java   |   6 +-
 .../fast/VectorMapJoinFastLongHashTable.java    |   6 +-
 .../VectorReduceSinkCommonOperator.java         |   8 +-
 .../hadoop/hive/ql/io/orc/ExternalCache.java    |   3 +-
 .../hadoop/hive/ql/io/orc/OrcInputFormat.java   |  11 +-
 .../stats/annotation/StatsRulesProcFactory.java |  56 ++--
 .../hadoop/hive/serde2/lazy/LazyBinary.java     |   5 +-
 52 files changed, 528 insertions(+), 288 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/common/src/java/org/apache/hive/http/HttpServer.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hive/http/HttpServer.java b/common/src/java/org/apache/hive/http/HttpServer.java
index 0bc0032..7368a91 100644
--- a/common/src/java/org/apache/hive/http/HttpServer.java
+++ b/common/src/java/org/apache/hive/http/HttpServer.java
@@ -425,6 +425,7 @@ public class HttpServer {
     addServlet("jmx", "/jmx", JMXJsonServlet.class);
     addServlet("conf", "/conf", ConfServlet.class);
     addServlet("stacks", "/stacks", StackServlet.class);
+    addServlet("conflog", "/conflog", Log4j2ConfiguratorServlet.class);
 
     for (Pair<String, Class<? extends HttpServlet>> p : b.servlets) {
       addServlet(p.getFirst(), "/" + p.getFirst(), p.getSecond());

http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/common/src/java/org/apache/hive/http/Log4j2ConfiguratorServlet.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hive/http/Log4j2ConfiguratorServlet.java b/common/src/java/org/apache/hive/http/Log4j2ConfiguratorServlet.java
new file mode 100644
index 0000000..8042f21
--- /dev/null
+++ b/common/src/java/org/apache/hive/http/Log4j2ConfiguratorServlet.java
@@ -0,0 +1,275 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hive.http;
+
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.stream.Collectors;
+
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import org.apache.logging.log4j.Level;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.core.LoggerContext;
+import org.apache.logging.log4j.core.config.Configuration;
+import org.apache.logging.log4j.core.config.LoggerConfig;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * A servlet to configure log4j2.
+ * <br/>
+ * HTTP GET returns all loggers and it's log level in JSON formatted response.
+ * <br/>
+ * HTTP POST is used for configuring the loggers. POST data should be in the same format as GET's response.
+ * To configure (add/update existing loggers), use HTTP POST with logger names and level in the following JSON format.
+ *
+ * <br/>
+ * <pre>
+ * <code>{
+ *  "loggers": [ {
+ *    "logger" : "",
+ *    "level" : "INFO"
+ *  }, {
+ *    "logger" : "LlapIoOrc",
+ *    "level" : "WARN"
+ *  }, {
+ *    "logger" : "org.apache.zookeeper.server.NIOServerCnxn",
+ *    "level" : "WARN"
+ *  }]
+ * }<code>
+ * </pre>
+ *
+ * <br/>
+ * Example usage:
+ * <li>
+ *    Returns all loggers with levels in JSON format:
+ *    <pre>
+ *      curl http://hostname:port/conflog
+ *    </pre>
+ * </li>
+ * <li>
+ *    Set root logger to INFO:
+ *    <pre>
+ *      curl -v -H "Content-Type: application/json" -X POST -d '{ "loggers" : [ { "logger" : "", "level" : "INFO" } ] }'
+ *      http://hostname:port/conflog
+ *    </pre>
+ * </li>
+ * <li>
+ *    Set logger with level:
+ *    <pre>
+ *      curl -v -H "Content-Type: application/json" -X POST -d '{ "loggers" : [
+ *      { "logger" : "LlapIoOrc", "level" : "INFO" } ] }' http://hostname:port/conflog
+ *    </pre>
+ * </li>
+ * <li>
+ *    Set log level for all classes under a package:
+ *    <pre>
+ *      curl -v -H "Content-Type: application/json" -X POST -d '{ "loggers" : [
+ *      { "logger" : "org.apache.orc", "level" : "INFO" } ] }' http://hostname:port/conflog
+ *    </pre>
+ * </li>
+ * <li>
+ *    Set log levels for multiple loggers:
+ *    <pre>
+ *      curl -v -H "Content-Type: application/json" -X POST -d '{ "loggers" : [ { "logger" : "", "level" : "INFO" },
+ *      { "logger" : "LlapIoOrc", "level" : "WARN" },
+ *      { "logger" : "org.apache.hadoop.hive.llap.daemon.impl.LlapDaemon", "level" : "INFO" },
+ *      { "logger" : "org.apache.orc", "level" : "INFO" } ] }' http://hostname:port/conflog
+ *    </pre>
+ * </li>
+ * <br/>
+ * Response Status Codes:
+ * <br/>
+ * <li>200 - OK : If the POST data is valid and if the request succeeds or if GET request succeeds.</li>
+ * <li>401 - UNAUTHORIZED : If the user does not have privileges to access instrumentation servlets.
+ *                      Refer <code>hadoop.security.instrumentation.requires.admin</code> config for more info.</li>
+ * <li>400 - BAD_REQUEST : If the POST data is not a valid JSON.</li>
+ * <li>500 - INTERNAL_SERVER_ERROR : If GET requests throws any IOException during JSON output generation.</li>
+ */
+public class Log4j2ConfiguratorServlet extends HttpServlet {
+  private static final long serialVersionUID = 1L;
+  private static final Logger LOG = LoggerFactory.getLogger(Log4j2ConfiguratorServlet.class);
+  private static final String ACCESS_CONTROL_ALLOW_METHODS = "Access-Control-Allow-Methods";
+  private static final String ALLOWED_METHODS = "POST, GET";
+  private static final String ACCESS_CONTROL_ALLOW_ORIGIN = "Access-Control-Allow-Origin";
+  private static final String CONTENT_TYPE_JSON_UTF8 = "application/json; charset=utf8";
+
+  private transient LoggerContext context;
+  private transient Configuration conf;
+
+  private static class ConfLoggers {
+    private List<ConfLogger> loggers;
+
+    public ConfLoggers() {
+      this.loggers = new ArrayList<>();
+    }
+
+    public List<ConfLogger> getLoggers() {
+      return loggers;
+    }
+
+    public void setLoggers(final List<ConfLogger> loggers) {
+      this.loggers = loggers;
+    }
+  }
+
+  private static class ConfLogger {
+    private String logger;
+    private String level;
+
+    // no-arg ctor required for JSON deserialization
+    public ConfLogger() {
+      this(null, null);
+    }
+
+    public ConfLogger(String logger, String level) {
+      this.logger = logger;
+      this.level = level;
+    }
+
+    public String getLogger() {
+      return logger == null ? logger : logger.trim();
+    }
+
+    public void setLogger(final String logger) {
+      this.logger = logger;
+    }
+
+    public String getLevel() {
+      return level == null ? level : level.trim().toUpperCase();
+    }
+
+    public void setLevel(final String level) {
+      this.level = level;
+    }
+  }
+
+  /**
+   * Initialize this servlet.
+   */
+  @Override
+  public void init() throws ServletException {
+    context = (LoggerContext) LogManager.getContext(false);
+    conf = context.getConfiguration();
+  }
+
+  @Override
+  public void doGet(HttpServletRequest request, HttpServletResponse response)
+    throws ServletException, IOException {
+    if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(),
+      request, response)) {
+      response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
+      return;
+    }
+
+    setResponseHeader(response);
+
+    // list the loggers and their levels
+    listLoggers(response);
+  }
+
+  private void setResponseHeader(final HttpServletResponse response) {
+    response.setContentType(CONTENT_TYPE_JSON_UTF8);
+    response.setHeader(ACCESS_CONTROL_ALLOW_METHODS, ALLOWED_METHODS);
+    response.setHeader(ACCESS_CONTROL_ALLOW_ORIGIN, "*");
+  }
+
+  @Override
+  protected void doPost(final HttpServletRequest request, final HttpServletResponse response)
+    throws ServletException, IOException {
+    if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(),
+      request, response)) {
+      response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
+      return;
+    }
+    setResponseHeader(response);
+
+    String dataJson = request.getReader().lines().collect(Collectors.joining());
+    ObjectMapper objectMapper = new ObjectMapper();
+    try {
+      ConfLoggers confLoggers = objectMapper.readValue(dataJson, ConfLoggers.class);
+      configureLogger(confLoggers);
+    } catch (IOException e) {
+      response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
+      LOG.error("Error configuring log4j2 via /conflog endpoint.", e);
+      return;
+    }
+
+    response.setStatus(HttpServletResponse.SC_OK);
+  }
+
+  private void configureLogger(final ConfLoggers confLoggers) {
+    if (confLoggers != null) {
+      for (ConfLogger logger : confLoggers.getLoggers()) {
+        String loggerName = logger.getLogger();
+        Level logLevel = Level.getLevel(logger.getLevel());
+        if (logLevel == null) {
+          LOG.warn("Invalid log level: {} for logger: {}. Ignoring reconfiguration.", logger.getLevel(), loggerName);
+          continue;
+        }
+
+        LoggerConfig loggerConfig = conf.getLoggerConfig(loggerName);
+        // if the logger name is not found, root logger is returned. We don't want to change root logger level
+        // since the user either requested a new logger or specified invalid input. In that case, we add the
+        // logger that the user requested.
+        if (!loggerName.equals(LogManager.ROOT_LOGGER_NAME) &&
+          loggerConfig.getName().equals(LogManager.ROOT_LOGGER_NAME)) {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Requested logger ({}) not found. Adding as new logger with {} level", loggerName, logLevel);
+          }
+          // requested logger not found. Add the new logger with the requested level
+          conf.addLogger(loggerName, new LoggerConfig(loggerName, logLevel, true));
+        } else {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Updating logger ({}) to {} level", loggerName, logLevel);
+          }
+          // update the log level for the specified logger
+          loggerConfig.setLevel(logLevel);
+        }
+      }
+      context.updateLoggers(conf);
+    }
+  }
+
+  private void listLoggers(final HttpServletResponse response) throws IOException {
+    PrintWriter writer = null;
+    try {
+      writer = response.getWriter();
+      ConfLoggers confLoggers = new ConfLoggers();
+      Collection<LoggerConfig> loggerConfigs = conf.getLoggers().values();
+      loggerConfigs.forEach(lc -> confLoggers.getLoggers().add(new ConfLogger(lc.getName(), lc.getLevel().toString())));
+      ObjectMapper objectMapper = new ObjectMapper();
+      objectMapper.writerWithDefaultPrettyPrinter().writeValue(writer, confLoggers);
+    } catch (IOException e) {
+      LOG.error("Caught an exception while processing Log4j2 configuration request", e);
+      response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
+      return;
+    } finally {
+      if (writer != null) {
+        writer.close();
+      }
+    }
+    response.setStatus(HttpServletResponse.SC_OK);
+  }
+}
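
To complement the curl examples in the servlet's javadoc above, here is a minimal JDK-only client sketch for the same POST endpoint; the host and port are placeholders for wherever the daemon's web UI exposes /conflog:

  import java.io.OutputStream;
  import java.net.HttpURLConnection;
  import java.net.URL;
  import java.nio.charset.StandardCharsets;

  public class ConflogClientSketch {
    public static void main(String[] args) throws Exception {
      // Same JSON shape the servlet returns from GET.
      String payload = "{ \"loggers\" : [ { \"logger\" : \"org.apache.orc\", \"level\" : \"WARN\" } ] }";
      URL url = new URL("http://localhost:15002/conflog");   // placeholder host:port
      HttpURLConnection conn = (HttpURLConnection) url.openConnection();
      conn.setRequestMethod("POST");
      conn.setRequestProperty("Content-Type", "application/json");
      conn.setDoOutput(true);
      try (OutputStream out = conn.getOutputStream()) {
        out.write(payload.getBytes(StandardCharsets.UTF_8));
      }
      // Expect 200 on success, 400 for malformed JSON, 401 without instrumentation access.
      System.out.println("HTTP " + conn.getResponseCode());
    }
  }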

http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java
index 7f8c947..dd459b1 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java
@@ -84,8 +84,6 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
 public class TaskExecutorService extends AbstractService
     implements Scheduler<TaskRunnerCallable>, SchedulerFragmentCompletingListener {
   private static final Logger LOG = LoggerFactory.getLogger(TaskExecutorService.class);
-  private static final boolean isInfoEnabled = LOG.isInfoEnabled();
-  private static final boolean isDebugEnabled = LOG.isDebugEnabled();
   private static final String TASK_EXECUTOR_THREAD_NAME_FORMAT = "Task-Executor-%d";
   private static final String WAIT_QUEUE_SCHEDULER_THREAD_NAME_FORMAT = "Wait-Queue-Scheduler-%d";
   private static final long PREEMPTION_KILL_GRACE_MS = 500; // 500ms
@@ -294,7 +292,7 @@ public class TaskExecutorService extends AbstractService
             // (numSlotsAvailable can go negative, if the callback after the thread completes is delayed)
             boolean shouldWait = numSlotsAvailable.get() <= 0 && lastKillTimeMs == null;
             if (task.getTaskRunnerCallable().canFinish()) {
-              if (isDebugEnabled) {
+              if (LOG.isDebugEnabled()) {
                 LOG.debug("Attempting to schedule task {}, canFinish={}. Current state: "
                     + "preemptionQueueSize={}, numSlotsAvailable={}, waitQueueSize={}",
                     task.getRequestId(), task.getTaskRunnerCallable().canFinish(),
@@ -335,7 +333,7 @@ public class TaskExecutorService extends AbstractService
                 lock.wait(PREEMPTION_KILL_GRACE_SLEEP_MS);
               }
             } else {
-              if (isDebugEnabled && lastKillTimeMs != null) {
+              if (LOG.isDebugEnabled() && lastKillTimeMs != null) {
                 LOG.debug("Grace period ended for the previous kill; preemtping more tasks");
               }
               if (handleScheduleAttemptedRejection(task)) {
@@ -406,18 +404,18 @@ public class TaskExecutorService extends AbstractService
       if (evictedTask == null || !evictedTask.equals(taskWrapper)) {
         knownTasks.put(taskWrapper.getRequestId(), taskWrapper);
         taskWrapper.setIsInWaitQueue(true);
-        if (isDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug("{} added to wait queue. Current wait queue size={}", task.getRequestId(),
               waitQueue.size());
         }
 
         result = evictedTask == null ? SubmissionState.ACCEPTED : SubmissionState.EVICTED_OTHER;
 
-        if (isDebugEnabled && evictedTask != null) {
+        if (LOG.isDebugEnabled() && evictedTask != null) {
           LOG.debug("Eviction: {} {} {}", taskWrapper, result, evictedTask);
         }
       } else {
-        if (isInfoEnabled) {
+        if (LOG.isInfoEnabled()) {
           LOG.info(
               "wait queue full, size={}. numSlotsAvailable={}, runningFragmentCount={}. {} not added",
               waitQueue.size(), numSlotsAvailable.get(), runningFragmentCount.get(), task.getRequestId());
@@ -426,7 +424,7 @@ public class TaskExecutorService extends AbstractService
 
         result = SubmissionState.REJECTED;
 
-        if (isDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug("{} is {} as wait queue is full", taskWrapper.getRequestId(), result);
         }
         if (metrics != null) {
@@ -440,7 +438,7 @@ public class TaskExecutorService extends AbstractService
       // after some other submission has evicted it.
       boolean stateChanged = !taskWrapper.maybeRegisterForFinishedStateNotifications(canFinish);
       if (stateChanged) {
-        if (isDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug("Finishable state of {} updated to {} during registration for state updates",
               taskWrapper.getRequestId(), !canFinish);
         }
@@ -455,12 +453,12 @@ public class TaskExecutorService extends AbstractService
     // Register for state change notifications so that the waitQueue can be re-ordered correctly
     // if the fragment moves in or out of the finishable state.
 
-    if (isDebugEnabled) {
+    if (LOG.isDebugEnabled()) {
       LOG.debug("Wait Queue: {}", waitQueue);
     }
 
     if (evictedTask != null) {
-      if (isInfoEnabled) {
+      if (LOG.isInfoEnabled()) {
         LOG.info("{} evicted from wait queue in favor of {} because of lower priority",
             evictedTask.getRequestId(), task.getRequestId());
       }
@@ -503,7 +501,7 @@ public class TaskExecutorService extends AbstractService
       // Can be null since the task may have completed meanwhile.
       if (taskWrapper != null) {
         if (taskWrapper.isInWaitQueue()) {
-          if (isDebugEnabled) {
+          if (LOG.isDebugEnabled()) {
             LOG.debug("Removing {} from waitQueue", fragmentId);
           }
           taskWrapper.setIsInWaitQueue(false);
@@ -514,7 +512,7 @@ public class TaskExecutorService extends AbstractService
           }
         }
         if (taskWrapper.isInPreemptionQueue()) {
-          if (isDebugEnabled) {
+          if (LOG.isDebugEnabled()) {
             LOG.debug("Removing {} from preemptionQueue", fragmentId);
           }
           removeFromPreemptionQueue(taskWrapper);
@@ -558,7 +556,7 @@ public class TaskExecutorService extends AbstractService
   @VisibleForTesting
   /** Assumes the epic lock is already taken. */
   void tryScheduleUnderLock(final TaskWrapper taskWrapper) throws RejectedExecutionException {
-    if (isInfoEnabled) {
+    if (LOG.isInfoEnabled()) {
       LOG.info("Attempting to execute {}", taskWrapper);
     }
     ListenableFuture<TaskRunner2Result> future = executorService.submit(
@@ -572,7 +570,7 @@ public class TaskExecutorService extends AbstractService
     Futures.addCallback(future, wrappedCallback, executionCompletionExecutorService);
 
     boolean canFinish = taskWrapper.getTaskRunnerCallable().canFinish();
-    if (isDebugEnabled) {
+    if (LOG.isDebugEnabled()) {
       LOG.debug("{} scheduled for execution. canFinish={}", taskWrapper.getRequestId(), canFinish);
     }
 
@@ -580,7 +578,7 @@ public class TaskExecutorService extends AbstractService
     // to the tasks are not ready yet, the task is eligible for pre-emptable.
     if (enablePreemption) {
       if (!canFinish) {
-        if (isInfoEnabled) {
+        if (LOG.isInfoEnabled()) {
           LOG.info("{} is not finishable. Adding it to pre-emption queue",
               taskWrapper.getRequestId());
         }
@@ -596,7 +594,7 @@ public class TaskExecutorService extends AbstractService
   private boolean handleScheduleAttemptedRejection(TaskWrapper taskWrapper) {
     if (enablePreemption && taskWrapper.getTaskRunnerCallable().canFinish()
         && !preemptionQueue.isEmpty()) {
-      if (isDebugEnabled) {
+      if (LOG.isDebugEnabled()) {
         LOG.debug("Preemption Queue: " + preemptionQueue);
       }
 
@@ -610,7 +608,7 @@ public class TaskExecutorService extends AbstractService
               pRequest.getRequestId());
           continue; // Try something else.
         }
-        if (isInfoEnabled) {
+        if (LOG.isInfoEnabled()) {
           LOG.info("Invoking kill task for {} due to pre-emption to run {}",
               pRequest.getRequestId(), taskWrapper.getRequestId());
         }
@@ -767,7 +765,7 @@ public class TaskExecutorService extends AbstractService
       if (enablePreemption) {
         String state = reason == null ? "FAILED" : reason.name();
         boolean removed = removeFromPreemptionQueueUnlocked(taskWrapper);
-        if (removed && isInfoEnabled) {
+        if (removed && LOG.isInfoEnabled()) {
           TaskRunnerCallable trc = taskWrapper.getTaskRunnerCallable();
           LOG.info(TaskRunnerCallable.getTaskIdentifierString(trc.getRequest(),
               trc.getVertexSpec(), trc.getQueryId()) + " request " + state + "! Removed from preemption list.");
@@ -778,7 +776,7 @@ public class TaskExecutorService extends AbstractService
       if (metrics != null) {
         metrics.setNumExecutorsAvailable(numSlotsAvailable.get());
       }
-      if (isDebugEnabled) {
+      if (LOG.isDebugEnabled()) {
         LOG.debug("Task {} complete. WaitQueueSize={}, numSlotsAvailable={}, preemptionQueueSize={}",
           taskWrapper.getRequestId(), waitQueue.size(), numSlotsAvailable.get(),
           preemptionQueue.size());
@@ -831,7 +829,7 @@ public class TaskExecutorService extends AbstractService
   public void shutDown(boolean awaitTermination) {
     if (!isShutdown.getAndSet(true)) {
       if (awaitTermination) {
-        if (isDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug("awaitTermination: " + awaitTermination + " shutting down task executor" +
               " service gracefully");
         }
@@ -839,7 +837,7 @@ public class TaskExecutorService extends AbstractService
         shutdownExecutor(executorService);
         shutdownExecutor(executionCompletionExecutorService);
       } else {
-        if (isDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug("awaitTermination: " + awaitTermination + " shutting down task executor" +
               " service immediately");
         }

http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java
----------------------------------------------------------------------
diff --git a/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java b/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java
index 18ce03c..ff00aba 100644
--- a/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java
+++ b/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java
@@ -102,7 +102,6 @@ public class LlapTaskCommunicator extends TezTaskCommunicatorImpl {
 
   private static final Logger LOG = LoggerFactory.getLogger(LlapTaskCommunicator.class);
 
-  private static final boolean isInfoEnabled = LOG.isInfoEnabled();
   private static final String RESOURCE_URI_STR = "/ws/v1/applicationhistory";
   private static final Joiner JOINER = Joiner.on("");
   private static final Joiner PATH_JOINER = Joiner.on("/");
@@ -598,7 +597,7 @@ public class LlapTaskCommunicator extends TezTaskCommunicatorImpl {
     Long old = knownNodeMap.putIfAbsent(nodeId,
         TimeUnit.MILLISECONDS.convert(System.nanoTime(), TimeUnit.NANOSECONDS));
     if (old == null) {
-      if (isInfoEnabled) {
+      if (LOG.isInfoEnabled()) {
         LOG.info("Added new known node: {}", nodeId);
       }
     }
@@ -609,7 +608,7 @@ public class LlapTaskCommunicator extends TezTaskCommunicatorImpl {
     PingingNodeInfo ni = new PingingNodeInfo(currentTs);
     PingingNodeInfo old = pingedNodeMap.put(nodeId, ni);
     if (old == null) {
-      if (isInfoEnabled) {
+      if (LOG.isInfoEnabled()) {
         LOG.info("Added new pinging node: [{}]", nodeId);
       }
     } else {
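
The TaskExecutorService and LlapTaskCommunicator changes above are representative of the rest of this commit: fields that cached the logger's enabled state when the class was initialized are removed in favor of checking the level on each call, so that a level changed at runtime (for example through the servlet shown earlier) is actually observed. A minimal before/after sketch of the pattern, with illustrative names:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LevelCheckSketch {
  private static final Logger LOG = LoggerFactory.getLogger(LevelCheckSketch.class);

  // Before: evaluated once at class initialization, so a later runtime
  // change to the logger level is never seen through this flag.
  private static final boolean IS_DEBUG_ENABLED = LOG.isDebugEnabled();

  void before(Object task) {
    if (IS_DEBUG_ENABLED) {
      LOG.debug("Scheduling {}", task);
    }
  }

  // After: the level is consulted on every call, so it tracks runtime updates.
  void after(Object task) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Scheduling {}", task);
    }
  }
}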

http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/AppMasterEventOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/AppMasterEventOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/AppMasterEventOperator.java
index bf30ef1..b151a1d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/AppMasterEventOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/AppMasterEventOperator.java
@@ -93,9 +93,9 @@ public class AppMasterEventOperator extends Operator<AppMasterEventDesc> {
       Writable writableRow = serializer.serialize(row, rowInspector);
       writableRow.write(buffer);
       if (buffer.getLength() > MAX_SIZE) {
-	if (isLogInfoEnabled) {
-	  LOG.info("Disabling AM events. Buffer size too large: " + buffer.getLength());
-	}
+        if (LOG.isInfoEnabled()) {
+          LOG.info("Disabling AM events. Buffer size too large: " + buffer.getLength());
+        }
         hasReachedMaxSize = true;
         buffer = null;
       }
@@ -103,7 +103,7 @@ public class AppMasterEventOperator extends Operator<AppMasterEventDesc> {
       throw new HiveException(e);
     }
 
-    if (isLogDebugEnabled) {
+    if (LOG.isDebugEnabled()) {
       LOG.debug("AppMasterEvent: " + row);
     }
     forward(row, rowInspector);
@@ -130,9 +130,9 @@ public class AppMasterEventOperator extends Operator<AppMasterEventDesc> {
           InputInitializerEvent.create(vertexName, inputName,
               ByteBuffer.wrap(payload, 0, payload.length));
 
-      if (isLogInfoEnabled) {
-	LOG.info("Sending Tez event to vertex = " + vertexName + ", input = " + inputName
-	    + ". Payload size = " + payload.length);
+      if (LOG.isInfoEnabled()) {
+        LOG.info("Sending Tez event to vertex = " + vertexName + ", input = " + inputName
+          + ". Payload size = " + payload.length);
       }
 
       context.getTezProcessorContext().sendEvents(Collections.singletonList(event));

http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java
index df1898e..07fd653 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java
@@ -383,7 +383,7 @@ public abstract class CommonJoinOperator<T extends JoinDesc> extends
       joinEmitInterval = -1;
     }
 
-    if (isLogInfoEnabled) {
+    if (LOG.isInfoEnabled()) {
       LOG.info("JOIN " + outputObjInspector.getTypeName() + " totalsz = " + totalSz);
     }
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/DemuxOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DemuxOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DemuxOperator.java
index c184742..a9f2218 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DemuxOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DemuxOperator.java
@@ -188,7 +188,7 @@ public class DemuxOperator extends Operator<DemuxDesc>
       }
       newChildOperatorsTag[i] = toArray(childOperatorTags);
     }
-    if (isLogInfoEnabled) {
+    if (LOG.isInfoEnabled()) {
       LOG.info("newChildOperatorsTag " + Arrays.toString(newChildOperatorsTag));
     }
 
@@ -214,15 +214,14 @@ public class DemuxOperator extends Operator<DemuxDesc>
   @Override
   protected void initializeChildren(Configuration hconf) throws HiveException {
     state = State.INIT;
-    if (isLogInfoEnabled) {
+    if (LOG.isInfoEnabled()) {
       LOG.info("Operator " + id + " " + getName() + " initialized");
       LOG.info("Initializing children of " + id + " " + getName());
     }
     for (int i = 0; i < childOperatorsArray.length; i++) {
-      if (isLogInfoEnabled) {
-	LOG.info("Initializing child " + i + " " + childOperatorsArray[i].getIdentifier() + " " +
-	    childOperatorsArray[i].getName() +
-	    " " + childInputObjInspectors[i].length);
+      if (LOG.isInfoEnabled()) {
+        LOG.info("Initializing child " + i + " " + childOperatorsArray[i].getIdentifier() + " " +
+          childOperatorsArray[i].getName() + " " + childInputObjInspectors[i].length);
       }
       // We need to initialize those MuxOperators first because if we first
       // initialize other operators, the states of all parents of those MuxOperators
@@ -247,10 +246,9 @@ public class DemuxOperator extends Operator<DemuxDesc>
       }
     }
     for (int i = 0; i < childOperatorsArray.length; i++) {
-      if (isLogInfoEnabled) {
-	LOG.info("Initializing child " + i + " " + childOperatorsArray[i].getIdentifier() + " " +
-	    childOperatorsArray[i].getName() +
-	    " " + childInputObjInspectors[i].length);
+      if (LOG.isInfoEnabled()) {
+        LOG.info("Initializing child " + i + " " + childOperatorsArray[i].getIdentifier() + " " +
+          childOperatorsArray[i].getName() + " " + childInputObjInspectors[i].length);
       }
       if (!(childOperatorsArray[i] instanceof MuxOperator)) {
         childOperatorsArray[i].initialize(hconf, childInputObjInspectors[i]);
@@ -275,7 +273,7 @@ public class DemuxOperator extends Operator<DemuxDesc>
     endGroupIfNecessary(currentChildIndex);
 
     int oldTag = newTagToOldTag[tag];
-    if (isLogDebugEnabled) {
+    if (LOG.isDebugEnabled()) {
       cntrs[tag]++;
       if (cntrs[tag] == nextCntrs[tag]) {
         LOG.debug(id + " (newTag, childIndex, oldTag)=(" + tag + ", " + currentChildIndex + ", "
@@ -311,9 +309,9 @@ public class DemuxOperator extends Operator<DemuxDesc>
       int newTag = i;
       int oldTag = newTagToOldTag[i];
       int childIndex = newTagToChildIndex[newTag];
-      if (isLogInfoEnabled) {
-	LOG.info(id + " (newTag, childIndex, oldTag)=(" + newTag + ", " + childIndex + ", "
-	    + oldTag + "),  forwarded " + cntrs[newTag] + " rows");
+      if (LOG.isInfoEnabled()) {
+        LOG.info(id + " (newTag, childIndex, oldTag)=(" + newTag + ", " + childIndex + ", "
+          + oldTag + "),  forwarded " + cntrs[newTag] + " rows");
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
index f3c571a..3e09432 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
@@ -93,8 +93,6 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
     Serializable {
 
   public static final Logger LOG = LoggerFactory.getLogger(FileSinkOperator.class);
-  private static final boolean isInfoEnabled = LOG.isInfoEnabled();
-  private static final boolean isDebugEnabled = LOG.isDebugEnabled();
 
   protected transient HashMap<String, FSPaths> valToPaths;
   protected transient int numDynParts;
@@ -160,7 +158,7 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
       finalPaths = new Path[numFiles];
       outWriters = new RecordWriter[numFiles];
       updaters = new RecordUpdater[numFiles];
-      if (isDebugEnabled) {
+      if (LOG.isDebugEnabled()) {
         LOG.debug("Created slots for  " + numFiles);
       }
       stat = new Stat();
@@ -378,7 +376,7 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
       serializer.initialize(unsetNestedColumnPaths(hconf), conf.getTableInfo().getProperties());
       outputClass = serializer.getSerializedClass();
 
-      if (isLogInfoEnabled) {
+      if (LOG.isInfoEnabled()) {
         LOG.info("Using serializer : " + serializer + " and formatter : " + hiveOutputFormat +
             (isCompressed ? " with compression" : ""));
       }
@@ -520,13 +518,13 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
       Set<Integer> seenBuckets = new HashSet<Integer>();
       for (int idx = 0; idx < totalFiles; idx++) {
         if (this.getExecContext() != null && this.getExecContext().getFileId() != null) {
-          if (isInfoEnabled) {
+          if (LOG.isInfoEnabled()) {
             LOG.info("replace taskId from execContext ");
           }
 
           taskId = Utilities.replaceTaskIdFromFilename(taskId, this.getExecContext().getFileId());
 
-          if (isInfoEnabled) {
+          if (LOG.isInfoEnabled()) {
             LOG.info("new taskId: FS " + taskId);
           }
 
@@ -582,11 +580,11 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
     try {
       if (isNativeTable) {
         fsp.finalPaths[filesIdx] = fsp.getFinalPath(taskId, fsp.tmpPath, null);
-        if (isInfoEnabled) {
+        if (LOG.isInfoEnabled()) {
           LOG.info("Final Path: FS " + fsp.finalPaths[filesIdx]);
         }
         fsp.outPaths[filesIdx] = fsp.getTaskOutPath(taskId);
-        if (isInfoEnabled) {
+        if (LOG.isInfoEnabled()) {
           LOG.info("Writing to temp file: FS " + fsp.outPaths[filesIdx]);
         }
       } else {
@@ -603,7 +601,7 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
         fsp.finalPaths[filesIdx] = fsp.getFinalPath(taskId, fsp.tmpPath, extension);
       }
 
-      if (isInfoEnabled) {
+      if (LOG.isInfoEnabled()) {
         LOG.info("New Final Path: FS " + fsp.finalPaths[filesIdx]);
       }
 
@@ -743,7 +741,7 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
         fpaths.stat.addToStat(StatsSetupConst.ROW_COUNT, 1);
       }
 
-      if ((++numRows == cntr) && isLogInfoEnabled) {
+      if ((++numRows == cntr) && LOG.isInfoEnabled()) {
         cntr = logEveryNRows == 0 ? cntr * 10 : numRows + logEveryNRows;
         if (cntr < 0 || numRows < 0) {
           cntr = 0;
@@ -778,7 +776,7 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
           fpaths.updaters[conf.getDpSortState().equals(DPSortState.PARTITION_BUCKET_SORTED) ? 0 : ++fpaths.acidFileOffset] = HiveFileFormatUtils.getAcidRecordUpdater(
               jc, conf.getTableInfo(), bucketNum, conf, fpaths.outPaths[conf.getDpSortState().equals(DPSortState.PARTITION_BUCKET_SORTED) ? 0 :fpaths.acidFileOffset],
               rowInspector, reporter, 0);
-          if (isDebugEnabled) {
+          if (LOG.isDebugEnabled()) {
             LOG.debug("Created updater for bucket number " + bucketNum + " using file " +
                 fpaths.outPaths[conf.getDpSortState().equals(DPSortState.PARTITION_BUCKET_SORTED) ? 0 :fpaths.acidFileOffset]);
           }

http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java
index af5e90f..9c5e7e2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java
@@ -750,7 +750,7 @@ public class GroupByOperator extends Operator<GroupByDesc> {
           flushHashTable(true);
           hashAggr = false;
         } else {
-          if (isLogTraceEnabled) {
+          if (LOG.isTraceEnabled()) {
             LOG.trace("Hash Aggr Enabled: #hash table = " + numRowsHashTbl
                 + " #total = " + numRowsInput + " reduction = " + 1.0
                 * (numRowsHashTbl / numRowsInput) + " minReduction = "
@@ -948,7 +948,7 @@ public class GroupByOperator extends Operator<GroupByDesc> {
       // Update the number of entries that can fit in the hash table
       numEntriesHashTable =
           (int) (maxHashTblMemory / (fixedRowSize + (totalVariableSize / numEntriesVarSize)));
-      if (isLogTraceEnabled) {
+      if (LOG.isTraceEnabled()) {
         LOG.trace("Hash Aggr: #hash table = " + numEntries
             + " #max in hash table = " + numEntriesHashTable);
       }
@@ -999,14 +999,14 @@ public class GroupByOperator extends Operator<GroupByDesc> {
       }
       hashAggregations.clear();
       hashAggregations = null;
-      if (isLogInfoEnabled) {
+      if (LOG.isInfoEnabled()) {
         LOG.info("Hash Table completed flushed");
       }
       return;
     }
 
     int oldSize = hashAggregations.size();
-    if (isLogInfoEnabled) {
+    if (LOG.isInfoEnabled()) {
       LOG.info("Hash Tbl flush: #hash table = " + oldSize);
     }
     Iterator<Map.Entry<KeyWrapper, AggregationBuffer[]>> iter = hashAggregations
@@ -1018,7 +1018,7 @@ public class GroupByOperator extends Operator<GroupByDesc> {
       iter.remove();
       numDel++;
       if (numDel * 10 >= oldSize) {
-        if (isLogInfoEnabled) {
+        if (LOG.isInfoEnabled()) {
           LOG.info("Hash Table flushed: new size = " + hashAggregations.size());
         }
         return;
@@ -1058,10 +1058,9 @@ public class GroupByOperator extends Operator<GroupByDesc> {
   public void flush() throws HiveException{
     try {
       if (hashAggregations != null) {
-	if (isLogInfoEnabled) {
-	  LOG.info("Begin Hash Table flush: size = "
-	      + hashAggregations.size());
-	}
+        if (LOG.isInfoEnabled()) {
+          LOG.info("Begin Hash Table flush: size = " + hashAggregations.size());
+        }
         Iterator iter = hashAggregations.entrySet().iterator();
         while (iter.hasNext()) {
           Map.Entry<KeyWrapper, AggregationBuffer[]> m = (Map.Entry) iter

http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java
index 3a366f6..f8ea701 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java
@@ -275,7 +275,7 @@ public class HashTableSinkOperator extends TerminalOperator<HashTableSinkDesc> i
   public void closeOp(boolean abort) throws HiveException {
     try {
       if (mapJoinTables == null) {
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug("mapJoinTables is null");
         }
       } else {
@@ -292,7 +292,7 @@ public class HashTableSinkOperator extends TerminalOperator<HashTableSinkDesc> i
   protected void flushToFile() throws IOException, HiveException {
     // get tmp file URI
     Path tmpURI = getExecContext().getLocalWork().getTmpPath();
-    if (isLogInfoEnabled) {
+    if (LOG.isInfoEnabled()) {
       LOG.info("Temp URI for side table: " + tmpURI);
     }
     for (byte tag = 0; tag < mapJoinTables.length; tag++) {

http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java
index 0282763..a4bca45 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java
@@ -113,7 +113,7 @@ public class JoinOperator extends CommonJoinOperator<JoinDesc> implements Serial
           storage[alias].clearRows();
         }
       } else {
-        if (isLogInfoEnabled && (sz == nextSz)) {
+        if (LOG.isInfoEnabled() && (sz == nextSz)) {
           // Print a message if we reached at least 1000 rows for a join operand
           // We won't print a message for the last join operand since the size
           // will never goes to joinEmitInterval.

http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
index 4971707..384e664 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
@@ -177,7 +177,7 @@ public class MapJoinOperator extends AbstractMapJoinOperator<MapJoinDesc> implem
        * requires changes in the Tez API with regard to finding bucket id and
        * also ability to schedule tasks to re-use containers that have cached the specific bucket.
        */
-      if (isLogDebugEnabled) {
+      if (LOG.isDebugEnabled()) {
         LOG.debug("This is not bucket map join, so cache");
       }
 
@@ -318,7 +318,7 @@ public class MapJoinOperator extends AbstractMapJoinOperator<MapJoinDesc> implem
     try {
       loader.load(mapJoinTables, mapJoinTableSerdes);
     } catch (HiveException e) {
-      if (isLogInfoEnabled) {
+      if (LOG.isInfoEnabled()) {
         LOG.info("Exception loading hash tables. Clearing partially loaded hash table containers.");
       }
 
@@ -558,7 +558,7 @@ public class MapJoinOperator extends AbstractMapJoinOperator<MapJoinDesc> implem
         }
       }
 
-      if (isLogInfoEnabled) {
+      if (LOG.isInfoEnabled()) {
         LOG.info("spilled: " + spilled + " abort: " + abort + ". Clearing spilled partitions.");
       }
 
@@ -572,7 +572,7 @@ public class MapJoinOperator extends AbstractMapJoinOperator<MapJoinDesc> implem
         && (this.getExecContext().getLocalWork().getInputFileChangeSensitive())
         && !(HiveConf.getVar(hconf, ConfVars.HIVE_EXECUTION_ENGINE).equals("spark")
             && SparkUtilities.isDedicatedCluster(hconf))) {
-      if (isLogInfoEnabled) {
+      if (LOG.isInfoEnabled()) {
         LOG.info("MR: Clearing all map join table containers.");
       }
       clearAllTableContainers();

http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java
index 2a46b30..d801ae7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java
@@ -428,7 +428,7 @@ public class MapOperator extends AbstractMapOperator {
 
       for (String alias : aliases) {
         Operator<? extends OperatorDesc> op = conf.getAliasToWork().get(alias);
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug("Adding alias " + alias + " to work list for file "
               + onefile);
         }
@@ -469,7 +469,7 @@ public class MapOperator extends AbstractMapOperator {
         if (prev != null && !prev.equals(context.rowObjectInspector)) {
           throw new HiveException("Conflict on row inspector for " + context.alias);
         }
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug("dump " + context.op + " " + context.rowObjectInspector.getTypeName());
         }
       }
@@ -509,7 +509,7 @@ public class MapOperator extends AbstractMapOperator {
     Path fpath = getExecContext().getCurrentInputPath();
     String nominalPath = getNominalPath(fpath);
     Map<Operator<?>, MapOpCtx> contexts = opCtxMap.get(nominalPath);
-    if (isLogInfoEnabled) {
+    if (LOG.isInfoEnabled()) {
       StringBuilder builder = new StringBuilder();
       for (MapOpCtx context : contexts.values()) {
         if (builder.length() > 0) {
@@ -517,7 +517,7 @@ public class MapOperator extends AbstractMapOperator {
         }
         builder.append(context.alias);
       }
-      if (isLogDebugEnabled) {
+      if (LOG.isDebugEnabled()) {
         LOG.debug("Processing alias(es) " + builder.toString() + " for file " + fpath);
       }
     }
@@ -567,7 +567,7 @@ public class MapOperator extends AbstractMapOperator {
 
   protected final void rowsForwarded(int childrenDone, int rows) {
     numRows += rows;
-    if (isLogInfoEnabled) {
+    if (LOG.isInfoEnabled()) {
       while (numRows >= cntr) {
         cntr = logEveryNRows == 0 ? cntr * 10 : numRows + logEveryNRows;
         if (cntr < 0 || numRows < 0) {

http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/MuxOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MuxOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MuxOperator.java
index 9849243..82d0017 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MuxOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MuxOperator.java
@@ -225,13 +225,13 @@ public class MuxOperator extends Operator<MuxDesc> implements Serializable{
   @Override
   protected void initializeChildren(Configuration hconf) throws HiveException {
     state = State.INIT;
-    if (isLogInfoEnabled) {
+    if (LOG.isInfoEnabled()) {
       LOG.info("Operator " + id + " " + getName() + " initialized");
     }
     if (childOperators == null || childOperators.isEmpty()) {
       return;
     }
-    if (isLogInfoEnabled) {
+    if (LOG.isInfoEnabled()) {
       LOG.info("Initializing children of " + id + " " + getName());
     }
     childOperatorsArray[0].initialize(hconf, outputObjectInspectors);
@@ -242,7 +242,7 @@ public class MuxOperator extends Operator<MuxDesc> implements Serializable{
 
   @Override
   public void process(Object row, int tag) throws HiveException {
-    if (isLogInfoEnabled) {
+    if (LOG.isInfoEnabled()) {
       cntrs[tag]++;
       if (cntrs[tag] == nextCntrs[tag]) {
         LOG.info(id + ", tag=" + tag + ", forwarding " + cntrs[tag] + " rows");
@@ -317,7 +317,7 @@ public class MuxOperator extends Operator<MuxDesc> implements Serializable{
 
   @Override
   protected void closeOp(boolean abort) throws HiveException {
-    if (isLogInfoEnabled) {
+    if (LOG.isInfoEnabled()) {
       for (int i = 0; i < numParents; i++) {
         LOG.info(id + ", tag=" + i + ", forwarded " + cntrs[i] + " rows");
       }

http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
index 8b04cd4..ffa5f41 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
@@ -219,9 +219,6 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
   protected transient OutputCollector out;
   protected transient final Logger LOG = LoggerFactory.getLogger(getClass().getName());
   protected transient final Logger PLOG = LoggerFactory.getLogger(Operator.class.getName()); // for simple disabling logs from all operators
-  protected transient final boolean isLogInfoEnabled = LOG.isInfoEnabled() && PLOG.isInfoEnabled();
-  protected transient final boolean isLogDebugEnabled = LOG.isDebugEnabled() && PLOG.isDebugEnabled();
-  protected transient final boolean isLogTraceEnabled = LOG.isTraceEnabled() && PLOG.isTraceEnabled();
   protected transient String alias;
   protected transient Reporter reporter;
   protected String id;
@@ -330,7 +327,7 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
       return;
     }
 
-    if (isLogInfoEnabled) {
+    if (LOG.isInfoEnabled()) {
       LOG.info("Initializing operator " + this);
     }
 
@@ -369,7 +366,7 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
           || childOperatorsArray.length != childOperators.size()) {
         throw new AssertionError("Internal error during operator initialization");
       }
-      if (isLogDebugEnabled) {
+      if (LOG.isDebugEnabled()) {
         LOG.debug("Initialization Done " + id + " " + getName());
       }
 
@@ -382,7 +379,7 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
       }
     }
 
-    if (isLogDebugEnabled) {
+    if (LOG.isDebugEnabled()) {
       LOG.debug("Initialization Done " + id + " " + getName() + " done is reset.");
     }
 
@@ -495,13 +492,13 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
    */
   protected void initializeChildren(Configuration hconf) throws HiveException {
     state = State.INIT;
-    if (isLogDebugEnabled) {
+    if (LOG.isDebugEnabled()) {
       LOG.debug("Operator " + id + " " + getName() + " initialized");
     }
     if (childOperators == null || childOperators.isEmpty()) {
       return;
     }
-    if (isLogDebugEnabled) {
+    if (LOG.isDebugEnabled()) {
       LOG.debug("Initializing children of " + id + " " + getName());
     }
     for (int i = 0; i < childOperatorsArray.length; i++) {
@@ -540,7 +537,7 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
    */
   protected void initialize(Configuration hconf, ObjectInspector inputOI,
       int parentId) throws HiveException {
-    if (isLogDebugEnabled) {
+    if (LOG.isDebugEnabled()) {
       LOG.debug("Initializing child " + id + " " + getName());
     }
     // Double the size of the array if needed
@@ -581,7 +578,7 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
   public abstract void process(Object row, int tag) throws HiveException;
 
   protected final void defaultStartGroup() throws HiveException {
-    if (isLogDebugEnabled) {
+    if (LOG.isDebugEnabled()) {
       LOG.debug("Starting group");
     }
 
@@ -589,20 +586,20 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
       return;
     }
 
-    if (isLogDebugEnabled) {
+    if (LOG.isDebugEnabled()) {
       LOG.debug("Starting group for children:");
     }
     for (Operator<? extends OperatorDesc> op : childOperators) {
       op.startGroup();
     }
 
-    if (isLogDebugEnabled) {
+    if (LOG.isDebugEnabled()) {
       LOG.debug("Start group Done");
     }
   }
 
   protected final void defaultEndGroup() throws HiveException {
-    if (isLogDebugEnabled) {
+    if (LOG.isDebugEnabled()) {
       LOG.debug("Ending group");
     }
 
@@ -610,14 +607,14 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
       return;
     }
 
-    if (isLogDebugEnabled) {
+    if (LOG.isDebugEnabled()) {
       LOG.debug("Ending group for children:");
     }
     for (Operator<? extends OperatorDesc> op : childOperators) {
       op.endGroup();
     }
 
-    if (isLogDebugEnabled) {
+    if (LOG.isDebugEnabled()) {
       LOG.debug("End group Done");
     }
   }
@@ -652,9 +649,9 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
         if(parent==null){
           continue;
         }
-	if (isLogDebugEnabled) {
-	  LOG.debug("allInitializedParentsAreClosed? parent.state = " + parent.state);
-	}
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("allInitializedParentsAreClosed? parent.state = " + parent.state);
+        }
         if (!(parent.state == State.CLOSE || parent.state == State.UNINIT)) {
           return false;
         }
@@ -667,7 +664,7 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
   // since it is called by its parents' main thread, so no
   // more than 1 thread should call this close() function.
   public void close(boolean abort) throws HiveException {
-    if (isLogDebugEnabled) {
+    if (LOG.isDebugEnabled()) {
       LOG.debug("close called for operator " + this);
     }
 
@@ -677,7 +674,7 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
 
     // check if all parents are finished
     if (!allInitializedParentsAreClosed()) {
-      if (isLogDebugEnabled) {
+      if (LOG.isDebugEnabled()) {
         LOG.debug("Not all parent operators are closed. Not closing.");
       }
       return;
@@ -686,7 +683,7 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
     // set state as CLOSE as long as all parents are closed
     // state == CLOSE doesn't mean all children are also in state CLOSE
     state = State.CLOSE;
-    if (isLogInfoEnabled) {
+    if (LOG.isDebugEnabled()) {
       LOG.info("Closing operator " + this);
     }
 
@@ -705,13 +702,13 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
       }
 
       for (Operator<? extends OperatorDesc> op : childOperators) {
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug("Closing child = " + op);
         }
         op.close(abort);
       }
 
-      if (isLogDebugEnabled) {
+      if (LOG.isDebugEnabled()) {
         LOG.debug(id + " Close done");
       }
     } catch (HiveException e) {
@@ -938,7 +935,7 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
   }
 
   public void logStats() {
-    if (isLogInfoEnabled && !statsMap.isEmpty()) {
+    if (LOG.isInfoEnabled() && !statsMap.isEmpty()) {
       StringBuilder sb = new StringBuilder();
       for (Map.Entry<String, LongWritable> e : statsMap.entrySet()) {
         sb.append(e.getKey()).append(":").append(e.getValue()).append(", ");
@@ -1364,7 +1361,7 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
   }
 
   public void setOpTraits(OpTraits metaInfo) {
-    if (isLogDebugEnabled) {
+    if (LOG.isInfoEnabled()) {
       LOG.debug("Setting traits (" + metaInfo + ") on " + this);
     }
     if (conf != null) {
@@ -1375,7 +1372,7 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
   }
 
   public void setStatistics(Statistics stats) {
-    if (isLogDebugEnabled) {
+    if (LOG.isInfoEnabled()) {
       LOG.debug("Setting stats (" + stats + ") on " + this);
     }
     if (conf != null) {
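
Many of the guards kept by this commit are arguably optional when the arguments are cheap, since SLF4J's parameterized form defers message formatting until the logger itself checks the level; the explicit isDebugEnabled() check mainly pays off when computing the arguments is expensive. A hedged illustration (expensiveDump is a hypothetical helper, not Hive code):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class GuardSketch {
  private static final Logger LOG = LoggerFactory.getLogger(GuardSketch.class);

  void log(Object row) {
    // Cheap argument: the parameterized call already skips formatting
    // when DEBUG is disabled, so no explicit guard is strictly needed.
    LOG.debug("Processing row {}", row);

    // Expensive argument: guard so the dump is only built when it will be logged.
    if (LOG.isDebugEnabled()) {
      LOG.debug("Row dump: {}", expensiveDump(row));
    }
  }

  private String expensiveDump(Object row) {
    return String.valueOf(row); // placeholder for a costly serialization
  }
}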

http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java
index e3cb765..d9547b9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java
@@ -100,7 +100,7 @@ public class OrcFileMergeOperator extends
       if (prevPath == null) {
         prevPath = k.getInputPath();
         reader = OrcFile.createReader(fs, k.getInputPath());
-        if (isLogInfoEnabled) {
+        if (LOG.isInfoEnabled()) {
           LOG.info("ORC merge file input path: " + k.getInputPath());
         }
       }
@@ -127,7 +127,7 @@ public class OrcFileMergeOperator extends
         }
 
         outWriter = OrcFile.createWriter(outPath, options);
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.info("ORC merge file output path: " + outPath);
         }
       }
@@ -152,7 +152,7 @@ public class OrcFileMergeOperator extends
       outWriter.appendStripe(buffer, 0, buffer.length, v.getStripeInformation(),
           v.getStripeStatistics());
 
-      if (isLogInfoEnabled) {
+      if (LOG.isInfoEnabled()) {
         LOG.info("Merged stripe from file " + k.getInputPath() + " [ offset : "
             + v.getStripeInformation().getOffset() + " length: "
             + v.getStripeInformation().getLength() + " row: "

http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java
index e03f4b7..92741ee 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java
@@ -167,7 +167,7 @@ public class ReduceSinkOperator extends TerminalOperator<ReduceSinkDesc>
 
       List<ExprNodeDesc> keys = conf.getKeyCols();
 
-      if (isLogDebugEnabled) {
+      if (LOG.isDebugEnabled()) {
         LOG.debug("keys size is " + keys.size());
         for (ExprNodeDesc k : keys) {
           LOG.debug("Key exprNodeDesc " + k.getExprString());
@@ -215,7 +215,7 @@ public class ReduceSinkOperator extends TerminalOperator<ReduceSinkDesc>
       tag = conf.getTag();
       tagByte[0] = (byte) tag;
       skipTag = conf.getSkipTag();
-      if (isLogInfoEnabled) {
+      if (LOG.isInfoEnabled()) {
         LOG.info("Using tag = " + tag);
       }
 
@@ -310,7 +310,7 @@ public class ReduceSinkOperator extends TerminalOperator<ReduceSinkDesc>
         // TODO: this is fishy - we init object inspectors based on first tag. We
         //       should either init for each tag, or if rowInspector doesn't really
         //       matter, then we can create this in ctor and get rid of firstRow.
-        if (isLogInfoEnabled) {
+        if (LOG.isInfoEnabled()) {
           LOG.info("keys are " + conf.getOutputKeyColumnNames() + " num distributions: " +
               conf.getNumDistributionKeys());
         }
@@ -461,7 +461,7 @@ public class ReduceSinkOperator extends TerminalOperator<ReduceSinkDesc>
       keyHashCode = ObjectInspectorUtils.getBucketHashCode(bucketFieldValues, partitionObjectInspectors);
     }
     int hashCode = buckNum < 0 ? keyHashCode : keyHashCode * 31 + buckNum;
-    if (isLogTraceEnabled) {
+    if (LOG.isTraceEnabled()) {
       LOG.trace("Going to return hash code " + hashCode);
     }
     return hashCode;
@@ -508,7 +508,7 @@ public class ReduceSinkOperator extends TerminalOperator<ReduceSinkDesc>
     if (null != out) {
       numRows++;
       runTimeNumRows++;
-      if (isLogInfoEnabled) {
+      if (LOG.isTraceEnabled()) {
         if (numRows == cntr) {
           cntr = logEveryNRows == 0 ? cntr * 10 : numRows + logEveryNRows;
           if (cntr < 0 || numRows < 0) {
@@ -543,7 +543,7 @@ public class ReduceSinkOperator extends TerminalOperator<ReduceSinkDesc>
     out = null;
     random = null;
     reducerHash = null;
-    if (isLogInfoEnabled) {
+    if (LOG.isTraceEnabled()) {
       LOG.info(toString() + ": records written - " + numRows);
     }
     recordCounter.set(numRows);

http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java
index 7c1e344..64aa744 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java
@@ -542,7 +542,7 @@ public class SMBMapJoinOperator extends AbstractMapJoinOperator<SMBJoinDesc> imp
     BucketMatcher bucketMatcher = ReflectionUtil.newInstance(bucketMatcherCls, null);
 
     getExecContext().setFileId(bucketMatcherCxt.createFileId(currentInputPath.toString()));
-    if (isLogInfoEnabled) {
+    if (LOG.isInfoEnabled()) {
       LOG.info("set task id: " + getExecContext().getFileId());
     }
 
@@ -768,9 +768,9 @@ public class SMBMapJoinOperator extends AbstractMapJoinOperator<SMBJoinDesc> imp
       }
       Integer current = top();
       if (current == null) {
-	if (isLogInfoEnabled) {
-	  LOG.info("MergeQueue forwarded " + counter + " rows");
-	}
+        if (LOG.isInfoEnabled()) {
+          LOG.info("MergeQueue forwarded " + counter + " rows");
+        }
         return null;
       }
       counter++;

http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java
index 4767af1..e15bbba 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java
@@ -300,7 +300,7 @@ public class ScriptOperator extends Operator<ScriptDesc> implements
   }
 
   void displayBrokenPipeInfo() {
-    if (isLogInfoEnabled) {
+    if (LOG.isInfoEnabled()) {
       LOG.info("The script did not consume all input data. This is considered as an error.");
       LOG.info("set " + HiveConf.ConfVars.ALLOWPARTIALCONSUMP.toString() + "=true; to ignore it.");
     }
@@ -346,7 +346,7 @@ public class ScriptOperator extends Operator<ScriptDesc> implements
         }
 
         String[] wrappedCmdArgs = addWrapper(cmdArgs);
-        if (isLogInfoEnabled) {
+        if (LOG.isInfoEnabled()) {
           LOG.info("Executing " + Arrays.asList(wrappedCmdArgs));
           LOG.info("tablename=" + tableName);
           LOG.info("partname=" + partitionName);
@@ -680,7 +680,7 @@ public class ScriptOperator extends Operator<ScriptDesc> implements
       long now = System.currentTimeMillis();
       // reporter is a member variable of the Operator class.
       if (now - lastReportTime > 60 * 1000 && reporter != null) {
-        if (isLogInfoEnabled) {
+        if (LOG.isInfoEnabled()) {
           LOG.info("ErrorStreamProcessor calling reporter.progress()");
         }
         lastReportTime = now;
@@ -738,7 +738,7 @@ public class ScriptOperator extends Operator<ScriptDesc> implements
           }
           proc.processLine(row);
         }
-        if (isLogInfoEnabled) {
+        if (LOG.isInfoEnabled()) {
           LOG.info("StreamThread " + name + " done");
         }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java
index 94af097..9e96126 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java
@@ -69,7 +69,7 @@ public class SelectOperator extends Operator<SelectDesc> implements Serializable
       eval = ExprNodeEvaluatorFactory.toCachedEvals(eval);
     }
     output = new Object[eval.length];
-    if (isLogInfoEnabled) {
+    if (LOG.isInfoEnabled()) {
       LOG.info("SELECT " + inputObjInspectors[0].getTypeName());
     }
     outputObjInspector = initEvaluatorsAndReturnStruct(eval, conf.getOutputColumnNames(),

http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java
index 68477ca..17f2efb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java
@@ -188,7 +188,7 @@ public class TableScanOperator extends Operator<TableScanDesc> implements
           values.add(o == null ? defaultPartitionName : o.toString());
         }
         partitionSpecs = FileUtils.makePartName(conf.getPartColumns(), values);
-        if (isLogInfoEnabled) {
+        if (LOG.isInfoEnabled()) {
           LOG.info("Stats Gathering found a new partition spec = " + partitionSpecs);
         }
       }
@@ -331,7 +331,7 @@ public class TableScanOperator extends Operator<TableScanDesc> implements
     sc.setStatsTmpDir(conf.getTmpStatsDir());
     if (!statsPublisher.connect(sc)) {
       // just return, stats gathering should not block the main query.
-      if (isLogInfoEnabled) {
+      if (LOG.isInfoEnabled()) {
         LOG.info("StatsPublishing error: cannot connect to database.");
       }
       if (isStatsReliable) {
@@ -355,8 +355,8 @@ public class TableScanOperator extends Operator<TableScanDesc> implements
           throw new HiveException(ErrorMsg.STATSPUBLISHER_PUBLISHING_ERROR.getErrorCodedMsg());
         }
       }
-      if (isLogInfoEnabled) {
-	LOG.info("publishing : " + key + " : " + statsToPublish.toString());
+      if (LOG.isInfoEnabled()) {
+        LOG.info("publishing : " + key + " : " + statsToPublish.toString());
       }
     }
     if (!statsPublisher.closeConnection(sc)) {

http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/UnionOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/UnionOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/UnionOperator.java
index 3df5533..99822a3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/UnionOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/UnionOperator.java
@@ -126,7 +126,7 @@ public class UnionOperator extends Operator<UnionDesc> implements Serializable {
       // to
       // create ObjectInspectors.
       needsTransform[p] = (inputObjInspectors[p] != outputObjInspector);
-      if (isLogInfoEnabled && needsTransform[p]) {
+      if (LOG.isInfoEnabled() && needsTransform[p]) {
         LOG.info("Union Operator needs to transform row from parent[" + p
             + "] from " + inputObjInspectors[p] + " to " + outputObjInspector);
       }

http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecReducer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecReducer.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecReducer.java
index 1dffff2..3af75d9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecReducer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecReducer.java
@@ -64,8 +64,6 @@ import org.apache.hadoop.util.StringUtils;
 public class ExecReducer extends MapReduceBase implements Reducer {
 
   private static final Logger LOG = LoggerFactory.getLogger("ExecReducer");
-  private static final boolean isInfoEnabled = LOG.isInfoEnabled();
-  private static final boolean isTraceEnabled = LOG.isTraceEnabled();
   private static final String PLAN_KEY = "__REDUCE_PLAN__";
 
   // Input value serde needs to be an array to support different SerDe
@@ -96,7 +94,7 @@ public class ExecReducer extends MapReduceBase implements Reducer {
     ObjectInspector[] valueObjectInspector = new ObjectInspector[Byte.MAX_VALUE];
     ObjectInspector keyObjectInspector;
 
-    if (isInfoEnabled) {
+    if (LOG.isInfoEnabled()) {
       try {
         LOG.info("conf classpath = "
             + Arrays.asList(((URLClassLoader) job.getClassLoader()).getURLs()));
@@ -190,7 +188,7 @@ public class ExecReducer extends MapReduceBase implements Reducer {
           groupKey = new BytesWritable();
         } else {
           // If a operator wants to do some work at the end of a group
-          if (isTraceEnabled) {
+          if (LOG.isTraceEnabled()) {
             LOG.trace("End Group");
           }
           reducer.endGroup();
@@ -207,7 +205,7 @@ public class ExecReducer extends MapReduceBase implements Reducer {
         }
 
         groupKey.set(keyWritable.get(), 0, keyWritable.getSize());
-        if (isTraceEnabled) {
+        if (LOG.isTraceEnabled()) {
           LOG.trace("Start Group");
         }
         reducer.startGroup();
@@ -263,14 +261,14 @@ public class ExecReducer extends MapReduceBase implements Reducer {
   public void close() {
 
     // No row was processed
-    if (oc == null && isTraceEnabled) {
+    if (oc == null && LOG.isTraceEnabled()) {
       LOG.trace("Close called without any rows processed");
     }
 
     try {
       if (groupKey != null) {
         // If a operator wants to do some work at the end of a group
-        if (isTraceEnabled) {
+        if (LOG.isTraceEnabled()) {
           LOG.trace("End Group");
         }
         reducer.endGroup();

http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ObjectCache.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ObjectCache.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ObjectCache.java
index cfe1750..5589a07 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ObjectCache.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ObjectCache.java
@@ -36,12 +36,11 @@ import org.apache.hadoop.hive.ql.metadata.HiveException;
 public class ObjectCache implements org.apache.hadoop.hive.ql.exec.ObjectCache {
 
   private static final Logger LOG = LoggerFactory.getLogger(ObjectCache.class.getName());
-  private static final boolean isDebugEnabled = LOG.isDebugEnabled();
 
   @Override
   public void release(String key) {
     // nothing to do
-    if (isDebugEnabled) {
+    if (LOG.isDebugEnabled()) {
       LOG.debug(key + " no longer needed");
     }
   }
@@ -54,7 +53,7 @@ public class ObjectCache implements org.apache.hadoop.hive.ql.exec.ObjectCache {
   @Override
   public <T> T retrieve(String key, Callable<T> fn) throws HiveException {
     try {
-      if (isDebugEnabled) {
+      if (LOG.isDebugEnabled()) {
         LOG.debug("Creating " + key);
       }
       return fn.call();

http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkMapRecordHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkMapRecordHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkMapRecordHandler.java
index 48dfedc..8333cf5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkMapRecordHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkMapRecordHandler.java
@@ -58,7 +58,6 @@ public class SparkMapRecordHandler extends SparkRecordHandler {
   private static final Logger LOG = LoggerFactory.getLogger(SparkMapRecordHandler.class);
   private AbstractMapOperator mo;
   private MapredLocalWork localWork = null;
-  private boolean isLogInfoEnabled = false;
   private ExecMapperContext execContext;
 
   @Override
@@ -66,8 +65,6 @@ public class SparkMapRecordHandler extends SparkRecordHandler {
     perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.SPARK_INIT_OPERATORS);
     super.init(job, output, reporter);
 
-    isLogInfoEnabled = LOG.isInfoEnabled();
-
     try {
       jc = job;
       execContext = new ExecMapperContext(jc);
@@ -134,7 +131,7 @@ public class SparkMapRecordHandler extends SparkRecordHandler {
       // Since there is no concept of a group, we don't invoke
       // startGroup/endGroup for a mapper
       mo.process((Writable) value);
-      if (isLogInfoEnabled) {
+      if (LOG.isInfoEnabled()) {
         logMemoryInfo();
       }
     } catch (Throwable e) {
@@ -182,7 +179,7 @@ public class SparkMapRecordHandler extends SparkRecordHandler {
         }
       }
 
-      if (isLogInfoEnabled) {
+      if (LOG.isInfoEnabled()) {
         logCloseInfo();
       }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkReduceRecordHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkReduceRecordHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkReduceRecordHandler.java
index 7eaad18..e473580 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkReduceRecordHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkReduceRecordHandler.java
@@ -75,7 +75,6 @@ public class SparkReduceRecordHandler extends SparkRecordHandler {
   private final Deserializer[] inputValueDeserializer = new Deserializer[Byte.MAX_VALUE];
   private final Object[] valueObject = new Object[Byte.MAX_VALUE];
   private final List<Object> row = new ArrayList<Object>(Utilities.reduceFieldNameList.size());
-  private final boolean isLogInfoEnabled = LOG.isInfoEnabled();
 
   // TODO: move to DynamicSerDe when it's ready
   private Deserializer inputKeyDeserializer;
@@ -338,7 +337,7 @@ public class SparkReduceRecordHandler extends SparkRecordHandler {
       row.clear();
       row.add(keyObject);
       row.add(valueObject[tag]);
-      if (isLogInfoEnabled) {
+      if (LOG.isInfoEnabled()) {
         logMemoryInfo();
       }
       try {
@@ -390,7 +389,7 @@ public class SparkReduceRecordHandler extends SparkRecordHandler {
           reducer.process(batch, tag);
           rowIdx = 0;
           batchBytes = 0;
-          if (isLogInfoEnabled) {
+          if (LOG.isInfoEnabled()) {
             logMemoryInfo();
           }
         }
@@ -399,7 +398,7 @@ public class SparkReduceRecordHandler extends SparkRecordHandler {
         VectorizedBatchUtil.setBatchSize(batch, rowIdx);
         reducer.process(batch, tag);
       }
-      if (isLogInfoEnabled) {
+      if (LOG.isInfoEnabled()) {
         logMemoryInfo();
       }
     } catch (Exception e) {
@@ -441,7 +440,7 @@ public class SparkReduceRecordHandler extends SparkRecordHandler {
         LOG.trace("End Group");
         reducer.endGroup();
       }
-      if (isLogInfoEnabled) {
+      if (LOG.isInfoEnabled()) {
         logCloseInfo();
       }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ColumnarSplitSizeEstimator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ColumnarSplitSizeEstimator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ColumnarSplitSizeEstimator.java
index ecd4ddc..b63b673 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ColumnarSplitSizeEstimator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ColumnarSplitSizeEstimator.java
@@ -31,7 +31,6 @@ import org.apache.hadoop.mapred.split.SplitSizeEstimator;
  */
 public class ColumnarSplitSizeEstimator implements SplitSizeEstimator {
   private static final Logger LOG = LoggerFactory.getLogger(ColumnarSplitSizeEstimator.class);
-  private static final boolean isDebugEnabled = LOG.isDebugEnabled();
 
   @Override
   public long getEstimatedSize(InputSplit inputSplit) throws IOException {
@@ -39,7 +38,7 @@ public class ColumnarSplitSizeEstimator implements SplitSizeEstimator {
 
     if (inputSplit instanceof ColumnarSplit) {
       colProjSize = ((ColumnarSplit) inputSplit).getColumnarProjectionSize();
-      if (isDebugEnabled) {
+      if (LOG.isDebugEnabled()) {
         LOG.debug("Estimated column projection size: " + colProjSize);
       }
     } else if (inputSplit instanceof HiveInputFormat.HiveInputSplit) {
@@ -47,7 +46,7 @@ public class ColumnarSplitSizeEstimator implements SplitSizeEstimator {
 
       if (innerSplit instanceof ColumnarSplit) {
         colProjSize = ((ColumnarSplit) innerSplit).getColumnarProjectionSize();
-        if (isDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug("Estimated column projection size: " + colProjSize);
         }
       }

http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HostAffinitySplitLocationProvider.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HostAffinitySplitLocationProvider.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HostAffinitySplitLocationProvider.java
index dcb985f..c5d96e5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HostAffinitySplitLocationProvider.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HostAffinitySplitLocationProvider.java
@@ -58,7 +58,7 @@ public class HostAffinitySplitLocationProvider implements SplitLocationProvider
   @Override
   public String[] getLocations(InputSplit split) throws IOException {
     if (!(split instanceof FileSplit)) {
-      if (isDebugEnabled) {
+      if (LOG.isDebugEnabled()) {
         LOG.debug("Split: " + split + " is not a FileSplit. Using default locations");
       }
       return split.getLocations();

http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/LlapObjectCache.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/LlapObjectCache.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/LlapObjectCache.java
index 1ce8ee9..b26e0eb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/LlapObjectCache.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/LlapObjectCache.java
@@ -44,8 +44,6 @@ public class LlapObjectCache implements org.apache.hadoop.hive.ql.exec.ObjectCac
 
   private static ExecutorService staticPool = Executors.newCachedThreadPool();
 
-  private static final boolean isLogDebugEnabled = LOG.isDebugEnabled();
-
   private final Cache<String, Object> registry = CacheBuilder.newBuilder().softValues().build();
 
   private final Map<String, ReentrantLock> locks = new HashMap<String, ReentrantLock>();
@@ -67,7 +65,7 @@ public class LlapObjectCache implements org.apache.hadoop.hive.ql.exec.ObjectCac
     lock.lock();
     try {
       value = (T) registry.getIfPresent(key);
-      if (value != null && isLogDebugEnabled) {
+      if (value != null && LOG.isDebugEnabled()) {
         LOG.debug("Found " + key + " in cache");
       }
       return value;
@@ -87,7 +85,7 @@ public class LlapObjectCache implements org.apache.hadoop.hive.ql.exec.ObjectCac
     try {
       value = (T) registry.getIfPresent(key);
       if (value != null) {
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug("Found " + key + " in cache");
         }
         return value;
@@ -109,7 +107,7 @@ public class LlapObjectCache implements org.apache.hadoop.hive.ql.exec.ObjectCac
       try {
         value = (T) registry.getIfPresent(key);
         if (value != null) {
-          if (isLogDebugEnabled) {
+          if (LOG.isDebugEnabled()) {
             LOG.debug("Found " + key + " in cache");
           }
           return value;
@@ -126,7 +124,7 @@ public class LlapObjectCache implements org.apache.hadoop.hive.ql.exec.ObjectCac
 
       lock.lock();
       try {
-        if (isLogDebugEnabled) {
+        if (LOG.isDebugEnabled()) {
           LOG.debug("Caching new object for key: " + key);
         }
 
@@ -153,7 +151,7 @@ public class LlapObjectCache implements org.apache.hadoop.hive.ql.exec.ObjectCac
 
   @Override
   public void remove(String key) {
-    if (isLogDebugEnabled) {
+    if (LOG.isDebugEnabled()) {
       LOG.debug("Removing key: " + key);
     }
     registry.invalidate(key);

http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/RecordProcessor.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/RecordProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/RecordProcessor.java
index 106a534..d16a97a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/RecordProcessor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/RecordProcessor.java
@@ -56,9 +56,6 @@ public abstract class RecordProcessor extends InterruptibleProcessing {
 
   public static final Logger l4j = LoggerFactory.getLogger(RecordProcessor.class);
 
-  // used to log memory usage periodically
-  protected boolean isLogInfoEnabled = false;
-  protected boolean isLogTraceEnabled = false;
   protected MRTaskReporter reporter;
 
   protected PerfLogger perfLogger = SessionState.getPerfLogger();
@@ -82,9 +79,6 @@ public abstract class RecordProcessor extends InterruptibleProcessing {
     this.inputs = inputs;
     this.outputs = outputs;
 
-    isLogInfoEnabled = l4j.isInfoEnabled();
-    isLogTraceEnabled = l4j.isTraceEnabled();
-
     checkAbortCondition();
 
     //log classpaths

http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java
index f854132..7b8e7ea 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java
@@ -280,7 +280,7 @@ private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
     outputProjection = projectionMapping.getOutputColumns();
     outputTypeInfos = projectionMapping.getTypeInfos();
 
-    if (isLogDebugEnabled) {
+    if (LOG.isDebugEnabled()) {
       int[] orderDisplayable = new int[order.length];
       for (int i = 0; i < order.length; i++) {
         orderDisplayable[i] = (int) order[i];
@@ -338,7 +338,7 @@ private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
    * columns and new scratch columns.
    */
   protected void setupVOutContext(List<String> outputColumnNames) {
-    if (isLogDebugEnabled) {
+    if (LOG.isDebugEnabled()) {
       LOG.debug(getLoggingPrefix() + " VectorMapJoinCommonOperator constructor outputColumnNames " + outputColumnNames);
     }
     if (outputColumnNames.size() != outputProjection.length) {
@@ -350,7 +350,7 @@ private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
       int outputColumn = outputProjection[i];
       vOutContext.addProjectionColumn(columnName, outputColumn);
 
-      if (isLogDebugEnabled) {
+      if (LOG.isDebugEnabled()) {
         LOG.debug(getLoggingPrefix() + " VectorMapJoinCommonOperator constructor addProjectionColumn " + i + " columnName " + columnName + " outputColumn " + outputColumn);
       }
     }
@@ -423,7 +423,7 @@ private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
     needCommonSetup = true;
     needHashTableSetup = true;
 
-    if (isLogDebugEnabled) {
+    if (LOG.isDebugEnabled()) {
       int[] currentScratchColumns = vOutContext.currentScratchColumns();
       LOG.debug(getLoggingPrefix() + " VectorMapJoinCommonOperator initializeOp currentScratchColumns " + Arrays.toString(currentScratchColumns));
 
@@ -515,7 +515,7 @@ private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
 
       overflowBatch.cols[outputColumn] = VectorizedBatchUtil.createColumnVector(typeInfo);
 
-      if (isLogDebugEnabled) {
+      if (LOG.isDebugEnabled()) {
         LOG.debug(getLoggingPrefix() + " VectorMapJoinCommonOperator initializeOp overflowBatch outputColumn " + outputColumn + " class " + overflowBatch.cols[outputColumn].getClass().getSimpleName());
       }
     }
@@ -526,7 +526,7 @@ private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
    */
   protected void commonSetup(VectorizedRowBatch batch) throws HiveException {
 
-    if (isLogDebugEnabled) {
+    if (LOG.isDebugEnabled()) {
       LOG.debug("VectorMapJoinInnerCommonOperator commonSetup begin...");
       displayBatchColumns(batch, "batch");
       displayBatchColumns(overflowBatch, "overflowBatch");

http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinGenerateResultOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinGenerateResultOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinGenerateResultOperator.java
index c4d5113..1c20d93 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinGenerateResultOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinGenerateResultOperator.java
@@ -544,7 +544,7 @@ public abstract class VectorMapJoinGenerateResultOperator extends VectorMapJoinC
     needHashTableSetup = true;
     LOG.info("Created " + vectorMapJoinHashTable.getClass().getSimpleName() + " from " + this.getClass().getSimpleName());
 
-    if (isLogDebugEnabled) {
+    if (LOG.isDebugEnabled()) {
       LOG.debug(CLASS_NAME + " reloadHashTable!");
     }
   }
@@ -553,7 +553,7 @@ public abstract class VectorMapJoinGenerateResultOperator extends VectorMapJoinC
   protected void reProcessBigTable(int partitionId)
       throws HiveException {
 
-    if (isLogDebugEnabled) {
+    if (LOG.isDebugEnabled()) {
       LOG.debug(CLASS_NAME + " reProcessBigTable enter...");
     }
 
@@ -607,7 +607,7 @@ public abstract class VectorMapJoinGenerateResultOperator extends VectorMapJoinC
       throw new HiveException(e);
     }
 
-    if (isLogDebugEnabled) {
+    if (LOG.isDebugEnabled()) {
       LOG.debug(CLASS_NAME + " reProcessBigTable exit! " + rowCount + " row processed and " + batchCount + " batches processed");
     }
   }
@@ -680,7 +680,7 @@ public abstract class VectorMapJoinGenerateResultOperator extends VectorMapJoinC
     if (!aborted && overflowBatch.size > 0) {
       forwardOverflow();
     }
-    if (isLogDebugEnabled) {
+    if (LOG.isDebugEnabled()) {
       LOG.debug("VectorMapJoinInnerLongOperator closeOp " + batchCounter + " batches processed");
     }
   }
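
All of the hunks above make the same mechanical change: cached isDebugEnabled / isLogInfoEnabled / isTraceEnabled fields are removed and the logger is queried directly at each call site. A minimal sketch of the before-and-after, assuming plain SLF4J; the LogGuardSketch class below is illustrative and not part of the patch:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LogGuardSketch {
  private static final Logger LOG = LoggerFactory.getLogger(LogGuardSketch.class);

  // Before: the level was captured once at class-initialization time, so a
  // log level changed at runtime was never picked up by these guards.
  // private static final boolean isDebugEnabled = LOG.isDebugEnabled();

  void release(String key) {
    // After: ask the logger at call time, as every hunk above now does.
    if (LOG.isDebugEnabled()) {
      LOG.debug(key + " no longer needed");
    }
  }
}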


[04/12] hive git commit: HIVE-16343: LLAP: Publish YARN's ProcFs based memory usage to metrics for monitoring (Prasanth Jayachandran reviewed by Siddharth Seth)

Posted by we...@apache.org.
HIVE-16343: LLAP: Publish YARN's ProcFs based memory usage to metrics for monitoring (Prasanth Jayachandran reviewed by Siddharth Seth)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/c3c6175f
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/c3c6175f
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/c3c6175f

Branch: refs/heads/hive-14535
Commit: c3c6175fb4dbee40cd5f742a49414bf9ecb2f54a
Parents: 573a181
Author: Prasanth Jayachandran <pr...@apache.org>
Authored: Fri May 26 14:16:19 2017 -0700
Committer: Prasanth Jayachandran <pr...@apache.org>
Committed: Fri May 26 14:16:19 2017 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hive/llap/LlapDaemonInfo.java | 14 +++++---
 llap-server/bin/runLlapDaemon.sh                |  2 +-
 .../hive/llap/daemon/impl/LlapDaemon.java       |  8 ++---
 .../hive/llap/metrics/LlapDaemonJvmInfo.java    |  2 ++
 .../hive/llap/metrics/LlapDaemonJvmMetrics.java | 35 +++++++++++++++++---
 .../hive/llap/daemon/MiniLlapCluster.java       |  2 +-
 6 files changed, 47 insertions(+), 16 deletions(-)
----------------------------------------------------------------------
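
Before the per-file diffs: a minimal standalone sketch of the ProcFs-based sampling that this change wires into LlapDaemonJvmMetrics, using the Hadoop YARN ResourceCalculatorProcessTree API the same way the hunks below do. The ProcMemSampler class, its main method, and the JVM_PID fallback are illustrative assumptions, not code from the patch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.util.ResourceCalculatorProcessTree;

public class ProcMemSampler {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // runLlapDaemon.sh (below) exports JVM_PID="$$" so the daemon knows its own pid.
    String pid = args.length > 0 ? args[0] : System.getenv("JVM_PID");
    Class<? extends ResourceCalculatorProcessTree> clazz = conf.getClass(
        YarnConfiguration.NM_CONTAINER_MON_PROCESS_TREE, null,
        ResourceCalculatorProcessTree.class);
    ResourceCalculatorProcessTree tree =
        ResourceCalculatorProcessTree.getResourceCalculatorProcessTree(pid, clazz, conf);
    if (tree == null) {
      // ProcFs is unavailable (e.g. non-Linux platform and no explicit class configured).
      System.err.println("No process-tree implementation available");
      return;
    }
    tree.setConf(conf);
    tree.updateProcessTree();                         // refresh the /proc snapshot
    System.out.println("RSS bytes:  " + tree.getRssMemorySize());
    System.out.println("VMEM bytes: " + tree.getVirtualMemorySize());
  }
}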


http://git-wip-us.apache.org/repos/asf/hive/blob/c3c6175f/llap-common/src/java/org/apache/hadoop/hive/llap/LlapDaemonInfo.java
----------------------------------------------------------------------
diff --git a/llap-common/src/java/org/apache/hadoop/hive/llap/LlapDaemonInfo.java b/llap-common/src/java/org/apache/hadoop/hive/llap/LlapDaemonInfo.java
index fa29b59..dae10c8 100644
--- a/llap-common/src/java/org/apache/hadoop/hive/llap/LlapDaemonInfo.java
+++ b/llap-common/src/java/org/apache/hadoop/hive/llap/LlapDaemonInfo.java
@@ -25,12 +25,13 @@ public enum LlapDaemonInfo {
 
   private static final class LlapDaemonInfoHolder {
     public LlapDaemonInfoHolder(int numExecutors, long executorMemory, long cacheSize,
-        boolean isDirectCache, boolean isLlapIo) {
+      boolean isDirectCache, boolean isLlapIo, final String pid) {
       this.numExecutors = numExecutors;
       this.executorMemory = executorMemory;
       this.cacheSize = cacheSize;
       this.isDirectCache = isDirectCache;
       this.isLlapIo = isLlapIo;
+      this.PID = pid;
     }
 
     final int numExecutors;
@@ -38,6 +39,7 @@ public enum LlapDaemonInfo {
     final long cacheSize;
     final boolean isDirectCache;
     final boolean isLlapIo;
+    final String PID;
   }
 
   // add more variables as required
@@ -51,13 +53,14 @@ public enum LlapDaemonInfo {
     long ioMemoryBytes = HiveConf.getSizeVar(daemonConf, ConfVars.LLAP_IO_MEMORY_MAX_SIZE);
     boolean isDirectCache = HiveConf.getBoolVar(daemonConf, ConfVars.LLAP_ALLOCATOR_DIRECT);
     boolean isLlapIo = HiveConf.getBoolVar(daemonConf, HiveConf.ConfVars.LLAP_IO_ENABLED, true);
-    initialize(appName, numExecutors, executorMemoryBytes, ioMemoryBytes, isDirectCache, isLlapIo);
+    String pid = System.getenv("JVM_PID");
+    initialize(appName, numExecutors, executorMemoryBytes, ioMemoryBytes, isDirectCache, isLlapIo, pid);
   }
 
   public static void initialize(String appName, int numExecutors, long executorMemoryBytes,
-      long ioMemoryBytes, boolean isDirectCache, boolean isLlapIo) {
+    long ioMemoryBytes, boolean isDirectCache, boolean isLlapIo, final String pid) {
     INSTANCE.dataRef.set(new LlapDaemonInfoHolder(numExecutors, executorMemoryBytes, ioMemoryBytes,
-        isDirectCache, isLlapIo));
+        isDirectCache, isLlapIo, pid));
   }
 
   public boolean isLlap() {
@@ -89,4 +92,7 @@ public enum LlapDaemonInfo {
     return dataRef.get().isLlapIo;
   }
 
+  public String getPID() {
+    return dataRef.get().PID;
+  }
 }
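
For context, LlapDaemonInfo is an enum singleton that publishes an immutable holder through an atomic reference, and the hunk above simply adds the pid to that holder. A rough sketch of the pattern, with illustrative class and field names that are not taken from the patch:

import java.util.concurrent.atomic.AtomicReference;

public enum DaemonInfoSketch {
  INSTANCE;

  // Immutable snapshot of everything the daemon needs to expose.
  private static final class Holder {
    final int numExecutors;
    final String pid;
    Holder(int numExecutors, String pid) {
      this.numExecutors = numExecutors;
      this.pid = pid;
    }
  }

  private final AtomicReference<Holder> dataRef = new AtomicReference<>();

  // Called once at startup; publishing a fresh Holder atomically replaces the old one.
  public static void initialize(int numExecutors, String pid) {
    INSTANCE.dataRef.set(new Holder(numExecutors, pid));
  }

  public String getPid() {
    return dataRef.get().pid;   // readers always see the last published snapshot
  }
}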

http://git-wip-us.apache.org/repos/asf/hive/blob/c3c6175f/llap-server/bin/runLlapDaemon.sh
----------------------------------------------------------------------
diff --git a/llap-server/bin/runLlapDaemon.sh b/llap-server/bin/runLlapDaemon.sh
index 82c2cc5..5a0c10e 100755
--- a/llap-server/bin/runLlapDaemon.sh
+++ b/llap-server/bin/runLlapDaemon.sh
@@ -127,6 +127,6 @@ LLAP_DAEMON_OPTS="${LLAP_DAEMON_OPTS} -Dllap.daemon.log.file=${LLAP_DAEMON_LOG_F
 LLAP_DAEMON_OPTS="${LLAP_DAEMON_OPTS} -Dllap.daemon.root.logger=${LLAP_DAEMON_LOGGER}"
 LLAP_DAEMON_OPTS="${LLAP_DAEMON_OPTS} -Dllap.daemon.log.level=${LLAP_DAEMON_LOG_LEVEL}"
 
+export JVM_PID="$$"
 exec "$JAVA" -Dproc_llapdaemon -Xms${LLAP_DAEMON_HEAPSIZE}m -Xmx${LLAP_DAEMON_HEAPSIZE}m ${LLAP_DAEMON_OPTS} -classpath "$CLASSPATH" $CLASS "$@"
 
-

http://git-wip-us.apache.org/repos/asf/hive/blob/c3c6175f/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
index cfca3f7..68ef200 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
@@ -14,8 +14,6 @@
 
 package org.apache.hadoop.hive.llap.daemon.impl;
 
-import org.apache.hadoop.hive.llap.LlapOutputFormatService;
-
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
 import java.lang.management.MemoryPoolMXBean;
@@ -42,6 +40,7 @@ import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.llap.DaemonId;
 import org.apache.hadoop.hive.llap.LlapDaemonInfo;
+import org.apache.hadoop.hive.llap.LlapOutputFormatService;
 import org.apache.hadoop.hive.llap.LlapUtil;
 import org.apache.hadoop.hive.llap.configuration.LlapDaemonConfiguration;
 import org.apache.hadoop.hive.llap.daemon.ContainerRunner;
@@ -91,7 +90,6 @@ import com.google.common.primitives.Ints;
 public class LlapDaemon extends CompositeService implements ContainerRunner, LlapDaemonMXBean {
 
   private static final Logger LOG = LoggerFactory.getLogger(LlapDaemon.class);
-
   private final Configuration shuffleHandlerConf;
   private final SecretManager secretManager;
   private final LlapProtocolServerImpl server;
@@ -245,7 +243,7 @@ public class LlapDaemon extends CompositeService implements ContainerRunner, Lla
     pauseMonitor.start();
     String displayNameJvm = "LlapDaemonJvmMetrics-" + hostName;
     String sessionId = MetricsUtils.getUUID();
-    LlapDaemonJvmMetrics.create(displayNameJvm, sessionId);
+    LlapDaemonJvmMetrics.create(displayNameJvm, sessionId, daemonConf);
     String displayName = "LlapDaemonExecutorMetrics-" + hostName;
     daemonConf.set("llap.daemon.metrics.sessionid", sessionId);
     String[] strIntervals = HiveConf.getTrimmedStringsVar(daemonConf,
@@ -539,7 +537,7 @@ public class LlapDaemon extends CompositeService implements ContainerRunner, Lla
 
       llapDaemon.init(daemonConf);
       llapDaemon.start();
-      LOG.info("Started LlapDaemon");
+      LOG.info("Started LlapDaemon with PID: {}", LlapDaemonInfo.INSTANCE.getPID());
       // Relying on the RPC threads to keep the service alive.
     } catch (Throwable t) {
       // TODO Replace this with a ExceptionHandler / ShutdownHook

http://git-wip-us.apache.org/repos/asf/hive/blob/c3c6175f/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonJvmInfo.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonJvmInfo.java b/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonJvmInfo.java
index efbddaa..11ac5a4 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonJvmInfo.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonJvmInfo.java
@@ -38,6 +38,8 @@ public enum LlapDaemonJvmInfo implements MetricsInfo {
   LlapDaemonMappedBufferMemoryUsed("Estimate of memory that JVM is using for mapped byte buffers in bytes"),
   LlapDaemonOpenFileDescriptorCount("Number of open file descriptors"),
   LlapDaemonMaxFileDescriptorCount("Maximum number of file descriptors"),
+  LlapDaemonResidentSetSize("Resident memory (RSS) used by llap daemon process in bytes"),
+  LlapDaemonVirtualMemorySize("Virtual memory (VMEM) used by llap daemon process in bytes")
   ;
 
   private final String desc;

http://git-wip-us.apache.org/repos/asf/hive/blob/c3c6175f/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonJvmMetrics.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonJvmMetrics.java b/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonJvmMetrics.java
index cfb8729..be25846 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonJvmMetrics.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonJvmMetrics.java
@@ -26,6 +26,8 @@ import static org.apache.hadoop.hive.llap.metrics.LlapDaemonJvmInfo.LlapDaemonMa
 import static org.apache.hadoop.hive.llap.metrics.LlapDaemonJvmInfo.LlapDaemonMappedBufferTotalCapacity;
 import static org.apache.hadoop.hive.llap.metrics.LlapDaemonJvmInfo.LlapDaemonMaxFileDescriptorCount;
 import static org.apache.hadoop.hive.llap.metrics.LlapDaemonJvmInfo.LlapDaemonOpenFileDescriptorCount;
+import static org.apache.hadoop.hive.llap.metrics.LlapDaemonJvmInfo.LlapDaemonResidentSetSize;
+import static org.apache.hadoop.hive.llap.metrics.LlapDaemonJvmInfo.LlapDaemonVirtualMemorySize;
 import static org.apache.hadoop.metrics2.impl.MsInfo.ProcessName;
 import static org.apache.hadoop.metrics2.impl.MsInfo.SessionId;
 
@@ -34,12 +36,17 @@ import java.lang.management.ManagementFactory;
 import java.lang.management.OperatingSystemMXBean;
 import java.util.List;
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.llap.LlapDaemonInfo;
+import org.apache.hadoop.hive.llap.daemon.impl.LlapDaemon;
 import org.apache.hadoop.metrics2.MetricsCollector;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.metrics2.MetricsSource;
 import org.apache.hadoop.metrics2.MetricsSystem;
 import org.apache.hadoop.metrics2.annotation.Metrics;
 import org.apache.hadoop.metrics2.lib.MetricsRegistry;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.util.ResourceCalculatorProcessTree;
 
 import com.sun.management.UnixOperatingSystemMXBean;
 
@@ -51,24 +58,34 @@ public class LlapDaemonJvmMetrics implements MetricsSource {
   private final String name;
   private final String sessionId;
   private final MetricsRegistry registry;
+  private final ResourceCalculatorProcessTree processTree;
+  private final String daemonPid = LlapDaemonInfo.INSTANCE.getPID();
 
-  private LlapDaemonJvmMetrics(String displayName, String sessionId) {
+  private LlapDaemonJvmMetrics(String displayName, String sessionId, final Configuration conf) {
     this.name = displayName;
     this.sessionId = sessionId;
+    Class<? extends ResourceCalculatorProcessTree> clazz = conf.getClass(YarnConfiguration.NM_CONTAINER_MON_PROCESS_TREE,
+      null, ResourceCalculatorProcessTree.class);
+    this.processTree = ResourceCalculatorProcessTree.getResourceCalculatorProcessTree("" + daemonPid, clazz, conf);
+    if (processTree != null) {
+      this.processTree.setConf(conf);
+    }
     this.registry = new MetricsRegistry("LlapDaemonJvmRegistry");
     this.registry.tag(ProcessName, MetricsUtils.METRICS_PROCESS_NAME).tag(SessionId, sessionId);
   }
 
-  public static LlapDaemonJvmMetrics create(String displayName, String sessionId) {
+  public static LlapDaemonJvmMetrics create(String displayName, String sessionId,
+    final Configuration conf) {
     MetricsSystem ms = LlapMetricsSystem.instance();
-    return ms.register(displayName, "LlapDaemon JVM Metrics", new LlapDaemonJvmMetrics(displayName, sessionId));
+    return ms.register(displayName, "LlapDaemon JVM Metrics",
+      new LlapDaemonJvmMetrics(displayName, sessionId, conf));
   }
 
   @Override
   public void getMetrics(MetricsCollector collector, boolean b) {
     MetricsRecordBuilder rb = collector.addRecord(LlapDaemonJVMMetrics)
         .setContext("jvm")
-        .tag(ProcessName, MetricsUtils.METRICS_PROCESS_NAME)
+        .tag(ProcessName, MetricsUtils.METRICS_PROCESS_NAME + "(PID: " + daemonPid + ")")
         .tag(SessionId, sessionId);
     getJvmMetrics(rb);
   }
@@ -100,6 +117,12 @@ public class LlapDaemonJvmMetrics implements MetricsSource {
       openFileHandles = ((UnixOperatingSystemMXBean) os).getOpenFileDescriptorCount();
       maxFileHandles = ((UnixOperatingSystemMXBean) os).getMaxFileDescriptorCount();
     }
+    long rss = 0;
+    long vmem = 0;
+    if (processTree != null) {
+      rss = processTree.getRssMemorySize();
+      vmem = processTree.getVirtualMemorySize();
+    }
     rb.addGauge(LlapDaemonDirectBufferCount, directBufferCount)
       .addGauge(LlapDaemonDirectBufferTotalCapacity, directBufferTotalCapacity)
       .addGauge(LlapDaemonDirectBufferMemoryUsed, directBufferMemoryUsed)
@@ -107,7 +130,9 @@ public class LlapDaemonJvmMetrics implements MetricsSource {
       .addGauge(LlapDaemonMappedBufferTotalCapacity, mappedBufferTotalCapacity)
       .addGauge(LlapDaemonMappedBufferMemoryUsed, mappedBufferMemoryUsed)
       .addGauge(LlapDaemonOpenFileDescriptorCount, openFileHandles)
-      .addGauge(LlapDaemonMaxFileDescriptorCount, maxFileHandles);
+      .addGauge(LlapDaemonMaxFileDescriptorCount, maxFileHandles)
+      .addGauge(LlapDaemonResidentSetSize, rss)
+      .addGauge(LlapDaemonVirtualMemorySize, vmem);
   }
 
   public String getName() {

http://git-wip-us.apache.org/repos/asf/hive/blob/c3c6175f/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/MiniLlapCluster.java
----------------------------------------------------------------------
diff --git a/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/MiniLlapCluster.java b/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/MiniLlapCluster.java
index 6f1305e..6af230e 100644
--- a/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/MiniLlapCluster.java
+++ b/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/MiniLlapCluster.java
@@ -121,7 +121,7 @@ public class MiniLlapCluster extends AbstractService {
     this.ioBytesPerService = ioBytesPerService;
 
     LlapDaemonInfo.initialize("mini-llap-cluster", numExecutorsPerService, execMemoryPerService,
-        ioBytesPerService, ioIsDirect, llapIoEnabled);
+        ioBytesPerService, ioIsDirect, llapIoEnabled, "-1");
 
     // Setup Local Dirs
     localDirs = new String[numLocalDirs];