Posted to commits@hive.apache.org by bs...@apache.org on 2019/09/09 08:55:50 UTC

[hive] branch master updated: HIVE-22168: Remove very expensive logging from the llap cache hotpath (Slim B via Jesus Camacho Rodriguez)

This is an automated email from the ASF dual-hosted git repository.

bslim pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
     new 1f10d58  HIVE-22168: Remove very expensive logging from the llap cache hotpath (Slim B via Jesus Camacho Rodriguez)
1f10d58 is described below

commit 1f10d587620769c1647262b1956880296f95f0cd
Author: Slim Bouguerra <bs...@apache.org>
AuthorDate: Mon Sep 9 09:55:41 2019 +0100

    HIVE-22168: Remove very expensive logging from the llap cache hotpath (Slim B via Jesus Camacho Rodriguez)
---
 .../java/org/apache/hadoop/hive/llap/LlapCacheAwareFs.java   |  4 ++--
 .../hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java     | 12 ++++++------
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/ql/src/java/org/apache/hadoop/hive/llap/LlapCacheAwareFs.java b/ql/src/java/org/apache/hadoop/hive/llap/LlapCacheAwareFs.java
index 8370aa6..af04a51 100644
--- a/ql/src/java/org/apache/hadoop/hive/llap/LlapCacheAwareFs.java
+++ b/ql/src/java/org/apache/hadoop/hive/llap/LlapCacheAwareFs.java
@@ -213,8 +213,8 @@ public class LlapCacheAwareFs extends FileSystem {
           return new CacheChunk(buffer, startOffset, endOffset);
         }
       }, gotAllData);
-      if (LOG.isInfoEnabled()) {
-        LOG.info("Buffers after cache " + RecordReaderUtils.stringifyDiskRanges(drl));
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Buffers after cache " + RecordReaderUtils.stringifyDiskRanges(drl));
       }
       if (gotAllData.value) {
         long sizeRead = 0;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
index 346ab5c..241a300 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
@@ -592,15 +592,15 @@ class EncodedReaderImpl implements EncodedReader {
       long stripeOffset, boolean hasFileId, IdentityHashMap<ByteBuffer, Boolean> toRelease)
           throws IOException {
     DiskRangeList.MutateHelper toRead = new DiskRangeList.MutateHelper(listToRead);
-    if (LOG.isInfoEnabled()) {
-      LOG.info("Resulting disk ranges to read (file " + fileKey + "): "
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Resulting disk ranges to read (file " + fileKey + "): "
           + RecordReaderUtils.stringifyDiskRanges(toRead.next));
     }
     BooleanRef isAllInCache = new BooleanRef();
     if (hasFileId) {
       cacheWrapper.getFileData(fileKey, toRead.next, stripeOffset, CC_FACTORY, isAllInCache);
-      if (LOG.isInfoEnabled()) {
-        LOG.info("Disk ranges after cache (found everything " + isAllInCache.value + "; file "
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Disk ranges after cache (found everything " + isAllInCache.value + "; file "
             + fileKey + ", base offset " + stripeOffset  + "): "
             + RecordReaderUtils.stringifyDiskRanges(toRead.next));
       }
@@ -2078,8 +2078,8 @@ class EncodedReaderImpl implements EncodedReader {
         releaseBuffers(toRelease.keySet(), true);
         toRelease.clear();
       }
-      if (LOG.isInfoEnabled()) {
-        LOG.info("Disk ranges after pre-read (file " + fileKey + ", base offset "
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Disk ranges after pre-read (file " + fileKey + ", base offset "
             + stripeOffset + "): " + RecordReaderUtils.stringifyDiskRanges(toRead.next));
       }
       iter = toRead.next; // Reset the iter to start.
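
The effect of the patch comes down to which log-level guard wraps the expensive RecordReaderUtils.stringifyDiskRanges() call: INFO is usually enabled in production, so the range string was built on every cache read in the hot path, whereas DEBUG is normally off, so the guard now short-circuits before any string concatenation happens. The following is a minimal, self-contained sketch of that pattern, assuming SLF4J (which the LOG calls in the diff use); the class and helper names below are illustrative, not Hive's.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Illustrative sketch only: DiskRangeLogger and describeRanges are made-up
// names standing in for the Hive code touched by this commit. Only the SLF4J
// guard/log calls mirror the pattern in the diff.
public class DiskRangeLogger {
  private static final Logger LOG = LoggerFactory.getLogger(DiskRangeLogger.class);

  // Stand-in for RecordReaderUtils.stringifyDiskRanges(): building the
  // message is the expensive part, not the log call itself.
  private static String describeRanges(long[] offsets) {
    StringBuilder sb = new StringBuilder();
    for (long offset : offsets) {
      sb.append('[').append(offset).append(", ").append(offset + 4096).append(") ");
    }
    return sb.toString();
  }

  public static void logRanges(long[] offsets) {
    // Before the patch this was guarded by isInfoEnabled(); INFO is on by
    // default, so describeRanges() ran on every read. With isDebugEnabled()
    // the guard is false unless DEBUG is explicitly turned on, and the
    // expensive stringification is skipped entirely.
    if (LOG.isDebugEnabled()) {
      LOG.debug("Buffers after cache " + describeRanges(offsets));
    }
  }

  public static void main(String[] args) {
    logRanges(new long[] {0L, 4096L, 8192L});
  }
}

Note that SLF4J's parameterized form (LOG.debug("Buffers after cache {}", ...)) would not help here on its own: the cost is in computing the argument, which would still be evaluated eagerly at the call site, so either the explicit isDebugEnabled() guard or the level downgrade in this commit is what keeps the work off the hot path.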