Posted to commits@hive.apache.org by se...@apache.org on 2015/12/10 21:28:36 UTC

[1/2] hive git commit: HIVE-12599 : Add logging to debug rare unexpected refCount error from the LLAP IO layer (Sergey Shelukhin, reviewed by Siddharth Seth)

Repository: hive
Updated Branches:
  refs/heads/branch-2.0 f6170a0e8 -> a49f5d898
  refs/heads/master e7abf72c7 -> 688a3e17f


HIVE-12599 : Add logging to debug rare unexpected refCount error from the LLAP IO layer (Sergey Shelukhin, reviewed by Siddharth Seth)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/a49f5d89
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/a49f5d89
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/a49f5d89

Branch: refs/heads/branch-2.0
Commit: a49f5d898ae79cf5002dcf92535119a33c133dbb
Parents: f6170a0
Author: Sergey Shelukhin <se...@apache.org>
Authored: Thu Dec 10 12:26:58 2015 -0800
Committer: Sergey Shelukhin <se...@apache.org>
Committed: Thu Dec 10 12:27:21 2015 -0800

----------------------------------------------------------------------
 .../ql/io/orc/encoded/EncodedReaderImpl.java    | 32 ++++++++++++++++++--
 1 file changed, 29 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/a49f5d89/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
index deeed52..ea7e0fa 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
@@ -386,12 +386,17 @@ class EncodedReaderImpl implements EncodedReader {
             cb = sctx.stripeLevelStream;
           } else {
             // This stream can be separated by RG using index. Let's do that.
+            // Offset to where this RG begins.
             long cOffset = sctx.offset + index.getPositions(sctx.streamIndexOffset);
+            // Offset relative to the beginning of the stream of where this RG ends.
             long nextCOffsetRel = isLastRg ? sctx.length
                 : nextIndex.getPositions(sctx.streamIndexOffset);
+            // Offset before which this RG is guaranteed to end. Can only be estimated.
             // We estimate the same way for compressed and uncompressed for now.
             long endCOffset = sctx.offset + RecordReaderUtils.estimateRgEndOffset(
                 isCompressed, isLastRg, nextCOffsetRel, sctx.length, bufferSize);
+            // As we read, we can unlock initial refcounts for the buffers that end before
+            // the data that we need for this RG.
             long unlockUntilCOffset = sctx.offset + nextCOffsetRel;
             cb = createRgColumnStreamData(
                 rgIx, isLastRg, ctx.colIx, sctx, cOffset, endCOffset, isCompressed);
@@ -543,6 +548,12 @@ class EncodedReaderImpl implements EncodedReader {
     }
 
     @Override
+    public String toString() {
+      return super.toString() + ", original is set " + (this.originalData != null)
+          + ", buffer was replaced " + (originalCbIndex == -1);
+    }
+
+    @Override
     public void handleCacheCollision(DataCache cache, MemoryBuffer replacementBuffer,
         List<MemoryBuffer> cacheBuffers) {
       assert originalCbIndex >= 0;
@@ -1020,17 +1031,32 @@ class EncodedReaderImpl implements EncodedReader {
 
   private void ponderReleaseInitialRefcount(
       long unlockUntilCOffset, long streamStartOffset, CacheChunk cc) {
+    // Don't release if the buffer contains any data beyond the acceptable boundary.
     if (cc.getEnd() > unlockUntilCOffset) return;
     assert cc.getBuffer() != null;
-    releaseInitialRefcount(cc, false);
-    // Release all the previous buffers that we may not have been able to release due to reuse.
+    try {
+      releaseInitialRefcount(cc, false);
+    } catch (AssertionError e) {
+      LOG.error("BUG: releasing initial refcount; stream start " + streamStartOffset + ", "
+          + "unlocking until " + unlockUntilCOffset + " from [" + cc + "]: " + e.getMessage());
+      throw e;
+    }
+    // Release all the previous buffers that we may not have been able to release due to reuse,
+    // as long as they are still in the same stream and are not already released.
     DiskRangeList prev = cc.prev;
     while (true) {
       if ((prev == null) || (prev.getEnd() <= streamStartOffset)
           || !(prev instanceof CacheChunk)) break;
       CacheChunk prevCc = (CacheChunk)prev;
       if (prevCc.buffer == null) break;
-      releaseInitialRefcount(prevCc, true);
+      try {
+        releaseInitialRefcount(prevCc, true);
+      } catch (AssertionError e) {
+        LOG.error("BUG: releasing initial refcount; stream start " + streamStartOffset + ", "
+            + "unlocking until " + unlockUntilCOffset + " from [" + cc + "] and backtracked to ["
+            + prevCc + "]: " + e.getMessage());
+        throw e;
+      }
       prev = prev.prev;
     }
   }
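
----------------------------------------------------------------------

To make the offset arithmetic in the first hunk concrete, here is a
minimal, self-contained sketch with made-up numbers. The variable names
mirror the patch, but the literal values, and the one-buffer padding
standing in for RecordReaderUtils.estimateRgEndOffset, are illustrative
assumptions rather than Hive behavior.

// Hypothetical illustration of the RG offset arithmetic from the first hunk.
// All values are invented; in the real code they come from sctx and the ORC
// row index.
public class RgOffsetExample {
  public static void main(String[] args) {
    long streamOffset = 1000;  // sctx.offset: where this stream starts in the file
    long streamLength = 800;   // sctx.length
    long bufferSize   = 256;   // compression buffer size
    boolean isLastRg  = false;
    long rgStartRel   = 200;   // index.getPositions(...): RG start, relative to the stream
    long nextRgRel    = 500;   // nextIndex.getPositions(...): next RG start = this RG's end

    // Absolute offset where this RG begins.
    long cOffset = streamOffset + rgStartRel;                   // 1200
    // RG end relative to the stream; the last RG ends where the stream does.
    long nextCOffsetRel = isLastRg ? streamLength : nextRgRel;  // 500
    // Upper bound on where this RG's bytes can end. A compressed stream may
    // have to read past the logical boundary to finish the enclosing
    // compression block, so the end can only be estimated; here we simply
    // pad by one buffer.
    long endCOffset = streamOffset
        + Math.min(streamLength, nextCOffsetRel + bufferSize);  // 1756
    // Buffers ending at or before this offset cannot be needed by later RGs
    // of this stream, so their initial refcounts can be released as we read.
    long unlockUntilCOffset = streamOffset + nextCOffsetRel;    // 1500

    System.out.printf("read [%d, %d), unlock until %d%n",
        cOffset, endCOffset, unlockUntilCOffset);
  }
}

Note that unlockUntilCOffset uses the exact RG boundary while endCOffset is
an over-estimate; the release logic in the last hunk compares buffer ends
against the exact boundary, so nothing still in use gets unlocked.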
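
----------------------------------------------------------------------

The substance of the patch is the catch-log-rethrow wrapper in
ponderReleaseInitialRefcount: when the rare refCount assertion fires, the
surrounding state is logged before the error propagates unchanged. Below is
a sketch of that pattern; Chunk, its fields, and the failure condition are
hypothetical stand-ins, not the actual Hive classes.

import java.util.logging.Logger;

public class RefcountLoggingSketch {
  private static final Logger LOG =
      Logger.getLogger(RefcountLoggingSketch.class.getName());

  // Hypothetical stand-in for a cached buffer chunk; the real CacheChunk
  // tracks a memory buffer plus the disk range it covers.
  static final class Chunk {
    final long start, end;
    int refCount;
    Chunk(long start, long end, int refCount) {
      this.start = start; this.end = end; this.refCount = refCount;
    }
    @Override
    public String toString() {
      // Mirrors the intent of the patch's new toString(): expose the state
      // needed to make sense of the error once it lands in the log.
      return "chunk [" + start + ", " + end + "), refCount " + refCount;
    }
  }

  // Stand-in for releaseInitialRefcount: an unexpected state surfaces as an
  // AssertionError, as the real code's asserts would when run with -ea.
  static void releaseInitialRefcount(Chunk c) {
    if (c.refCount <= 0) {
      throw new AssertionError("refCount already " + c.refCount);
    }
    c.refCount--;
  }

  static void ponderReleaseInitialRefcount(long unlockUntil, long streamStart, Chunk c) {
    if (c.end > unlockUntil) return; // chunk still carries data we need
    try {
      releaseInitialRefcount(c);
    } catch (AssertionError e) {
      // Log the context that would otherwise be lost, then rethrow unchanged:
      // the patch adds observability without altering failure behavior.
      LOG.severe("BUG: releasing initial refcount; stream start " + streamStart
          + ", unlocking until " + unlockUntil + " from [" + c + "]: " + e.getMessage());
      throw e;
    }
  }

  public static void main(String[] args) {
    // A chunk whose initial refcount was already released: logs, then rethrows.
    ponderReleaseInitialRefcount(1500, 1000, new Chunk(1200, 1450, 0));
  }
}

Rethrowing rather than swallowing keeps the assertion visible to callers and
tests; the diagnostic log line simply arrives first.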


[2/2] hive git commit: HIVE-12599 : Add logging to debug rare unexpected refCount error from the LLAP IO layer (Sergey Shelukhin, reviewed by Siddharth Seth)

Posted by se...@apache.org.
HIVE-12599 : Add logging to debug rare unexpected refCount error from the LLAP IO layer (Sergey Shelukhin, reviewed by Siddharth Seth)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/688a3e17
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/688a3e17
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/688a3e17

Branch: refs/heads/master
Commit: 688a3e17f3489f6246661c9ab52800b1f035a60e
Parents: e7abf72
Author: Sergey Shelukhin <se...@apache.org>
Authored: Thu Dec 10 12:26:58 2015 -0800
Committer: Sergey Shelukhin <se...@apache.org>
Committed: Thu Dec 10 12:27:51 2015 -0800

----------------------------------------------------------------------
 .../ql/io/orc/encoded/EncodedReaderImpl.java    | 32 ++++++++++++++++++--
 1 file changed, 29 insertions(+), 3 deletions(-)
----------------------------------------------------------------------

