You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hive.apache.org by om...@apache.org on 2017/02/16 18:37:16 UTC
[2/3] hive git commit: HIVE-15143 : add logging for HIVE-15024
ADDENDUM (Sergey Shelukhin, reviewed by Siddharth Seth)
HIVE-15143 : add logging for HIVE-15024 ADDENDUM (Sergey Shelukhin, reviewed by Siddharth Seth)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b7e12faf
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b7e12faf
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b7e12faf
Branch: refs/heads/storage-branch-2.2
Commit: b7e12faf35f3f27f24fc0ecd3f5875cb0c6315a1
Parents: 26b49a7
Author: Sergey Shelukhin <se...@apache.org>
Authored: Wed Jan 11 13:18:20 2017 -0800
Committer: Owen O'Malley <om...@apache.org>
Committed: Wed Feb 15 14:35:29 2017 -0800
----------------------------------------------------------------------
.../hive/llap/cache/LowLevelCacheImpl.java | 37 ++++++++++++++++++++
.../hive/llap/cache/TestLowLevelCacheImpl.java | 34 ++++++++++++++++++
.../ql/io/orc/encoded/EncodedReaderImpl.java | 13 +++----
.../hadoop/hive/common/io/DiskRangeList.java | 2 +-
4 files changed, 79 insertions(+), 7 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/b7e12faf/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheImpl.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheImpl.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheImpl.java
index ea458ca..9ad0b26 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheImpl.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheImpl.java
@@ -17,6 +17,8 @@
*/
package org.apache.hadoop.hive.llap.cache;
+import org.apache.orc.impl.RecordReaderUtils;
+
import java.nio.ByteBuffer;
import java.util.Iterator;
import java.util.List;
@@ -108,6 +110,7 @@ public class LowLevelCacheImpl implements LowLevelCache, BufferUsageManager, Lla
} finally {
subCache.decRef();
}
+ boolean isInvalid = false;
if (qfCounters != null) {
DiskRangeList current = prev.next;
long bytesHit = 0, bytesMissed = 0;
@@ -116,12 +119,46 @@ public class LowLevelCacheImpl implements LowLevelCache, BufferUsageManager, Lla
if (current.hasData()) {
bytesHit += current.getLength();
} else {
+ if (gotAllData.value) {
+ isInvalid = true;
+ }
bytesMissed += current.getLength();
}
current = current.next;
}
qfCounters.recordCacheHit(bytesHit);
qfCounters.recordCacheMiss(bytesMissed);
+ } else if (gotAllData.value) {
+ DiskRangeList current = prev.next;
+ while (current != null) {
+ if (!current.hasData()) {
+ isInvalid = true;
+ break;
+ }
+ current = current.next;
+ }
+ }
+ if (isInvalid) {
+ StringBuilder invalidMsg = new StringBuilder(
+ "Internal error - gotAllData=true but the resulting ranges are ").append(
+ RecordReaderUtils.stringifyDiskRanges(prev.next));
+ subCache = cache.get(fileKey);
+ if (subCache != null && subCache.incRef()) {
+ try {
+ invalidMsg.append("; cache ranges (not necessarily consistent) are ");
+ for (Map.Entry<Long, LlapDataBuffer> e : subCache.cache.entrySet()) {
+ long start = e.getKey(), end = start + e.getValue().declaredCachedLength;
+ invalidMsg.append("[").append(start).append(", ").append(end).append("), ");
+ }
+ } finally {
+ subCache.decRef();
+ }
+ } else {
+ invalidMsg.append("; cache ranges can no longer be determined");
+ }
+ String s = invalidMsg.toString();
+ LlapIoImpl.LOG.error(s);
+ throw new RuntimeException(s);
}
return prev.next;
}
http://git-wip-us.apache.org/repos/asf/hive/blob/b7e12faf/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestLowLevelCacheImpl.java
----------------------------------------------------------------------
diff --git a/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestLowLevelCacheImpl.java b/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestLowLevelCacheImpl.java
index 0ac0a0d..6c3ec03 100644
--- a/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestLowLevelCacheImpl.java
+++ b/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestLowLevelCacheImpl.java
@@ -121,6 +121,23 @@ public class TestLowLevelCacheImpl {
}
}
+/*
+Example code to test specific scenarios:
+ LowLevelCacheImpl cache = new LowLevelCacheImpl(
+ LlapDaemonCacheMetrics.create("test", "1"), new DummyCachePolicy(),
+ new DummyAllocator(), true, -1); // no cleanup thread
+ final int FILE = 1;
+ cache.putFileData(FILE, gaps(3756206, 4261729, 7294767, 7547564), fbs(3), 0, Priority.NORMAL, null);
+ cache.putFileData(FILE, gaps(7790545, 11051556), fbs(1), 0, Priority.NORMAL, null);
+ cache.putFileData(FILE, gaps(11864971, 11912961, 13350968, 13393630), fbs(3), 0, Priority.NORMAL, null);
+ DiskRangeList dr = dr(3756206, 7313562);
+ MutateHelper mh = new MutateHelper(dr);
+ dr = dr.insertAfter(dr(7790545, 11051556));
+ dr = dr.insertAfter(dr(11864971, 13393630));
+ BooleanRef g = new BooleanRef();
+ dr = cache.getFileData(FILE, mh.next, 0, testFactory, null, g);
+*/
+
@Test
public void testGetPut() {
LowLevelCacheImpl cache = new LowLevelCacheImpl(
@@ -488,6 +505,15 @@ public class TestLowLevelCacheImpl {
return rv;
}
+ private MemoryBuffer[] fbs(int count) {
+ MemoryBuffer[] rv = new MemoryBuffer[count];
+ for (int i = 0; i < count; ++i) {
+ rv[i] = fb();
+ }
+ return rv;
+ }
+
+
private LlapDataBuffer fb() {
LlapDataBuffer fake = LowLevelCacheImpl.allocateFake();
fake.incRef();
@@ -506,6 +532,14 @@ public class TestLowLevelCacheImpl {
return result;
}
+ private DiskRange[] gaps(int... offsets) {
+ DiskRange[] result = new DiskRange[offsets.length - 1];
+ for (int i = 0; i < result.length; ++i) {
+ result[i] = new DiskRange(offsets[i], offsets[i + 1]);
+ }
+ return result;
+ }
+
private int generateOffsets(int offsetsToUse, Random rdm, int[] offsets) {
for (int j = 0; j < offsets.length; ++j) {
offsets[j] = rdm.nextInt(offsetsToUse);
http://git-wip-us.apache.org/repos/asf/hive/blob/b7e12faf/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
index edf3218..0ac3ec5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
@@ -289,16 +289,17 @@ class EncodedReaderImpl implements EncodedReader {
// 2. Now, read all of the ranges from cache or disk.
DiskRangeList.MutateHelper toRead = new DiskRangeList.MutateHelper(listToRead.get());
- if (isTracingEnabled && LOG.isInfoEnabled()) {
- LOG.trace("Resulting disk ranges to read (file " + fileKey + "): "
+ if (/*isTracingEnabled && */LOG.isInfoEnabled()) {
+ LOG.info("Resulting disk ranges to read (file " + fileKey + "): "
+ RecordReaderUtils.stringifyDiskRanges(toRead.next));
}
BooleanRef isAllInCache = new BooleanRef();
if (hasFileId) {
cacheWrapper.getFileData(fileKey, toRead.next, stripeOffset, CC_FACTORY, isAllInCache);
- if (isTracingEnabled && LOG.isInfoEnabled()) {
- LOG.trace("Disk ranges after cache (file " + fileKey + ", base offset " + stripeOffset
- + "): " + RecordReaderUtils.stringifyDiskRanges(toRead.next));
+ if (/*isTracingEnabled && */LOG.isInfoEnabled()) {
+ LOG.info("Disk ranges after cache (found everything " + isAllInCache.value + "; file "
+ + fileKey + ", base offset " + stripeOffset + "): "
+ + RecordReaderUtils.stringifyDiskRanges(toRead.next));
}
}
@@ -640,7 +641,7 @@ class EncodedReaderImpl implements EncodedReader {
: prepareRangesForUncompressedRead(
cOffset, endCOffset, streamOffset, unlockUntilCOffset, current, csd);
} catch (Exception ex) {
- LOG.error("Failed " + (isCompressed ? "" : "un") + " compressed read; cOffset " + cOffset
+ LOG.error("Failed " + (isCompressed ? "" : "un") + "compressed read; cOffset " + cOffset
+ ", endCOffset " + endCOffset + ", streamOffset " + streamOffset
+ ", unlockUntilCOffset " + unlockUntilCOffset + "; ranges passed in "
+ RecordReaderUtils.stringifyDiskRanges(start) + "; ranges passed to prepare "
http://git-wip-us.apache.org/repos/asf/hive/blob/b7e12faf/storage-api/src/java/org/apache/hadoop/hive/common/io/DiskRangeList.java
----------------------------------------------------------------------
diff --git a/storage-api/src/java/org/apache/hadoop/hive/common/io/DiskRangeList.java b/storage-api/src/java/org/apache/hadoop/hive/common/io/DiskRangeList.java
index 62f9d8e..5de225e 100644
--- a/storage-api/src/java/org/apache/hadoop/hive/common/io/DiskRangeList.java
+++ b/storage-api/src/java/org/apache/hadoop/hive/common/io/DiskRangeList.java
@@ -169,7 +169,7 @@ public class DiskRangeList extends DiskRange {
public void addOrMerge(long offset, long end, boolean doMerge, boolean doLogNew) {
if (doMerge && tail != null && tail.merge(offset, end)) return;
if (doLogNew) {
- LOG.info("Creating new range; last range (which can include some previous adds) was "
+ LOG.debug("Creating new range; last range (which can include some previous adds) was "
+ tail);
}
DiskRangeList node = new DiskRangeList(offset, end);