You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hive.apache.org by om...@apache.org on 2015/11/30 20:15:23 UTC
[12/27] hive git commit: HIVE-12501 : LLAP: don't use
read(ByteBuffer) in IO (Sergey Shelukhin, reviewed by Prasanth Jayachandran)
HIVE-12501 : LLAP: don't use read(ByteBuffer) in IO (Sergey Shelukhin, reviewed by Prasanth Jayachandran)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/18ca715e
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/18ca715e
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/18ca715e
Branch: refs/heads/master-fixed
Commit: 18ca715e88374ec11c98d7dba3be7cd0758964b0
Parents: 60cb16b
Author: Sergey Shelukhin <se...@apache.org>
Authored: Wed Nov 25 17:25:06 2015 -0800
Committer: Owen O'Malley <om...@apache.org>
Committed: Mon Nov 30 11:14:36 2015 -0800
----------------------------------------------------------------------
.../hive/ql/io/orc/RecordReaderUtils.java | 43 +++++---------------
1 file changed, 11 insertions(+), 32 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/18ca715e/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderUtils.java
index 6f3a3e9..0caeb1b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderUtils.java
@@ -327,47 +327,26 @@ public class RecordReaderUtils {
len -= read;
off += read;
}
- } else if (doForceDirect) {
- file.seek(base + off);
- ByteBuffer directBuf = ByteBuffer.allocateDirect(len);
- readDirect(file, len, directBuf);
- range = range.replaceSelfWith(new BufferChunk(directBuf, range.getOffset()));
} else {
+ // Don't use HDFS ByteBuffer API because it has no readFully, and is buggy and pointless.
byte[] buffer = new byte[len];
file.readFully((base + off), buffer, 0, buffer.length);
- range = range.replaceSelfWith(new BufferChunk(ByteBuffer.wrap(buffer), range.getOffset()));
+ ByteBuffer bb = null;
+ if (doForceDirect) {
+ bb = ByteBuffer.allocateDirect(len);
+ bb.put(buffer);
+ bb.position(0);
+ bb.limit(len);
+ } else {
+ bb = ByteBuffer.wrap(buffer);
+ }
+ range = range.replaceSelfWith(new BufferChunk(bb, range.getOffset()));
}
range = range.next;
}
return prev.next;
}
- public static void readDirect(FSDataInputStream file,
- int len, ByteBuffer directBuf) throws IOException {
- // TODO: HDFS API is a mess, so handle all kinds of cases.
- // Before 2.7, read() also doesn't adjust position correctly, so track it separately.
- int pos = directBuf.position(), startPos = pos, endPos = pos + len;
- try {
- while (pos < endPos) {
- int count = SHIMS.readByteBuffer(file, directBuf);
- if (count < 0) throw new EOFException();
- assert count != 0 : "0-length read: " + (endPos - pos) + "@" + (pos - startPos);
- pos += count;
- assert pos <= endPos : "Position " + pos + " > " + endPos + " after reading " + count;
- directBuf.position(pos);
- }
- } catch (UnsupportedOperationException ex) {
- assert pos == startPos;
- // Happens in q files and such.
- RecordReaderImpl.LOG.error("Stream does not support direct read; we will copy.");
- byte[] buffer = new byte[len];
- file.readFully(buffer, 0, buffer.length);
- directBuf.put(buffer);
- }
- directBuf.position(startPos);
- directBuf.limit(startPos + len);
- }
-
static List<DiskRange> getStreamBuffers(DiskRangeList range, long offset, long length) {
// This assumes sorted ranges (as do many other parts of ORC code).