You are viewing a plain text version of this content. The canonical links for it are the commit URLs listed below.
Posted to commits@hive.apache.org by se...@apache.org on 2015/12/03 20:45:43 UTC
[1/2] hive git commit: HIVE-12532 : LLAP Cache: Uncompressed data cache has NPE (Sergey Shelukhin, reviewed by Prasanth Jayachandran)
Repository: hive
Updated Branches:
refs/heads/branch-2.0 4c8e47eee -> 1946ccbcd
refs/heads/master a603ed8d7 -> 1d02ab578
HIVE-12532 : LLAP Cache: Uncompressed data cache has NPE (Sergey Shelukhin, reviewed by Prasanth Jayachandran)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/1d02ab57
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/1d02ab57
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/1d02ab57
Branch: refs/heads/master
Commit: 1d02ab578dbd47103a70710abd4d949ea8cea9d2
Parents: a603ed8
Author: Sergey Shelukhin <se...@apache.org>
Authored: Thu Dec 3 11:44:10 2015 -0800
Committer: Sergey Shelukhin <se...@apache.org>
Committed: Thu Dec 3 11:44:10 2015 -0800
----------------------------------------------------------------------
.../hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java | 9 ++++-----
1 file changed, 4 insertions(+), 5 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/1d02ab57/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
index f789a4f..a8b51b9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
@@ -778,19 +778,18 @@ class EncodedReaderImpl implements EncodedReader {
}
// Account for maximum cache buffer size.
long streamLen = streamEnd - streamOffset;
- int partSize = determineUncompressedPartSize(), //
+ int partSize = determineUncompressedPartSize(),
partCount = (int)(streamLen / partSize) + (((streamLen % partSize) != 0) ? 1 : 0);
CacheChunk lastUncompressed = null;
MemoryBuffer[] singleAlloc = new MemoryBuffer[1];
- /*
-Starting pre-read for [12187411,17107411) at start: 12187411 end: 12449555 cache buffer: 0x5f64a8f6(2)
-Processing uncompressed file data at [12187411, 12449555)
- */
for (int i = 0; i < partCount; ++i) {
long partOffset = streamOffset + (i * partSize),
partEnd = Math.min(partOffset + partSize, streamEnd);
long hasEntirePartTo = partOffset; // We have 0 bytes of data for this part, for now.
+ if (current == null) {
+ break; // We have no data from this point on (could be unneeded), skip.
+ }
assert partOffset <= current.getOffset();
if (partOffset == current.getOffset() && current instanceof CacheChunk) {
// We assume cache chunks would always match the way we read, so check and skip it.
[2/2] hive git commit: HIVE-12532 : LLAP Cache: Uncompressed data cache has NPE (Sergey Shelukhin, reviewed by Prasanth Jayachandran)
Posted by se...@apache.org.
HIVE-12532 : LLAP Cache: Uncompressed data cache has NPE (Sergey Shelukhin, reviewed by Prasanth Jayachandran)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/1946ccbc
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/1946ccbc
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/1946ccbc
Branch: refs/heads/branch-2.0
Commit: 1946ccbcde454d6c224da8a5e9587636e99abb43
Parents: 4c8e47e
Author: Sergey Shelukhin <se...@apache.org>
Authored: Thu Dec 3 11:44:10 2015 -0800
Committer: Sergey Shelukhin <se...@apache.org>
Committed: Thu Dec 3 11:45:13 2015 -0800
----------------------------------------------------------------------
.../hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java | 9 ++++-----
1 file changed, 4 insertions(+), 5 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/1946ccbc/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
index f789a4f..a8b51b9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
@@ -778,19 +778,18 @@ class EncodedReaderImpl implements EncodedReader {
}
// Account for maximum cache buffer size.
long streamLen = streamEnd - streamOffset;
- int partSize = determineUncompressedPartSize(), //
+ int partSize = determineUncompressedPartSize(),
partCount = (int)(streamLen / partSize) + (((streamLen % partSize) != 0) ? 1 : 0);
CacheChunk lastUncompressed = null;
MemoryBuffer[] singleAlloc = new MemoryBuffer[1];
- /*
-Starting pre-read for [12187411,17107411) at start: 12187411 end: 12449555 cache buffer: 0x5f64a8f6(2)
-Processing uncompressed file data at [12187411, 12449555)
- */
for (int i = 0; i < partCount; ++i) {
long partOffset = streamOffset + (i * partSize),
partEnd = Math.min(partOffset + partSize, streamEnd);
long hasEntirePartTo = partOffset; // We have 0 bytes of data for this part, for now.
+ if (current == null) {
+ break; // We have no data from this point on (could be unneeded), skip.
+ }
assert partOffset <= current.getOffset();
if (partOffset == current.getOffset() && current instanceof CacheChunk) {
// We assume cache chunks would always match the way we read, so check and skip it.