Posted to commits@hive.apache.org by se...@apache.org on 2015/12/10 03:40:45 UTC
hive git commit: HIVE-12598 : LLAP: disable fileId when not supported (Sergey Shelukhin, reviewed by Prasanth Jayachandran, Lefty Leverenz)
Repository: hive
Updated Branches:
refs/heads/master 915587b8c -> 57f39a990
HIVE-12598 : LLAP: disable fileId when not supported (Sergey Shelukhin, reviewed by Prasanth Jayachandran, Lefty Leverenz)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/57f39a99
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/57f39a99
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/57f39a99
Branch: refs/heads/master
Commit: 57f39a9900001a626603fcd4f6878d570e642b5b
Parents: 915587b
Author: Sergey Shelukhin <se...@apache.org>
Authored: Wed Dec 9 18:37:41 2015 -0800
Committer: Sergey Shelukhin <se...@apache.org>
Committed: Wed Dec 9 18:37:41 2015 -0800
----------------------------------------------------------------------
.../org/apache/hadoop/hive/conf/HiveConf.java | 11 ++
data/conf/hive-site.xml | 5 +
data/conf/llap/hive-site.xml | 5 +
data/conf/llap/llap-daemon-site.xml | 5 +
.../llap/io/encoded/OrcEncodedDataReader.java | 54 +++++--
.../src/test/resources/llap-daemon-site.xml | 6 +
.../org/apache/hadoop/hive/ql/io/HdfsUtils.java | 15 +-
.../apache/hadoop/hive/ql/io/orc/InStream.java | 28 ++--
.../hive/ql/io/orc/MetadataReaderImpl.java | 6 +-
.../hadoop/hive/ql/io/orc/ReaderImpl.java | 4 +-
.../hadoop/hive/ql/io/orc/RecordReaderImpl.java | 2 +-
.../ql/io/orc/SettableUncompressedStream.java | 5 +-
.../ql/io/orc/encoded/EncodedReaderImpl.java | 26 +--
.../orc/encoded/EncodedTreeReaderFactory.java | 161 +++++--------------
.../hadoop/hive/ql/io/orc/encoded/Reader.java | 2 +-
.../hive/ql/io/orc/encoded/ReaderImpl.java | 2 +-
.../hive/ql/io/orc/encoded/StreamUtils.java | 4 +-
.../hive/ql/io/orc/TestBitFieldReader.java | 9 +-
.../hadoop/hive/ql/io/orc/TestBitPack.java | 2 +-
.../hadoop/hive/ql/io/orc/TestInStream.java | 20 +--
.../ql/io/orc/TestIntegerCompressionReader.java | 4 +-
.../hive/ql/io/orc/TestRunLengthByteReader.java | 6 +-
.../ql/io/orc/TestRunLengthIntegerReader.java | 4 +-
23 files changed, 180 insertions(+), 206 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/57f39a99/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 7e9bf61..ce6ad6b 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -2345,6 +2345,17 @@ public class HiveConf extends Configuration {
LLAP_LRFU_LAMBDA("hive.llap.io.lrfu.lambda", 0.01f,
"Lambda for ORC low-level cache LRFU cache policy. Must be in [0, 1]. 0 makes LRFU\n" +
"behave like LFU, 1 makes it behave like LRU, values in between balance accordingly."),
+ LLAP_CACHE_ALLOW_SYNTHETIC_FILEID("hive.llap.cache.allow.synthetic.fileid", false,
+ "Whether the LLAP cache should use a synthetic file ID if a real one is not available.\n" +
+ "Systems like HDFS and Isilon provide a unique file/inode ID. On other FSes (e.g. the\n" +
+ "local FS), the cache would not work by default because LLAP cannot track the files\n" +
+ "uniquely; enabling this setting lets LLAP generate a file ID from the path, size, and\n" +
+ "modification time, which is almost certain to identify the file uniquely. However, if\n" +
+ "you use a FS without file IDs and rewrite files a lot (or are paranoid), you might\n" +
+ "want to avoid this setting."),
+ LLAP_IO_USE_FILEID_PATH("hive.llap.io.use.fileid.path", true,
+ "Whether LLAP should use a fileId (inode)-based path to ensure better consistency in\n" +
+ "the case of file overwrites. This is supported on HDFS."),
LLAP_ORC_ENABLE_TIME_COUNTERS("hive.llap.io.orc.time.counters", true,
"Whether to enable time counters for LLAP IO layer (time spent in HDFS, etc.)"),
LLAP_AUTO_ALLOW_UBER("hive.llap.auto.allow.uber", true,
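Both new settings are read through HiveConf's standard accessors, as the OrcEncodedDataReader change below does. A minimal sketch of consulting them, assuming a default HiveConf (the FileIdFlags class is illustrative only, not part of this patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

    // Illustrative helper: checks whether a synthetic file ID may be generated
    // and whether inode-based paths should be used when opening files.
    public class FileIdFlags {
      public static void main(String[] args) {
        Configuration conf = new HiveConf();
        boolean allowSynthetic =
            HiveConf.getBoolVar(conf, ConfVars.LLAP_CACHE_ALLOW_SYNTHETIC_FILEID);
        boolean useFileIdPath =
            HiveConf.getBoolVar(conf, ConfVars.LLAP_IO_USE_FILEID_PATH);
        System.out.println("synthetic fileId allowed: " + allowSynthetic
            + "; fileId-based paths: " + useFileIdPath);
      }
    }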
http://git-wip-us.apache.org/repos/asf/hive/blob/57f39a99/data/conf/hive-site.xml
----------------------------------------------------------------------
diff --git a/data/conf/hive-site.xml b/data/conf/hive-site.xml
index 2ebb1c4..84005b4 100644
--- a/data/conf/hive-site.xml
+++ b/data/conf/hive-site.xml
@@ -275,6 +275,11 @@
</property>
<property>
+ <name>hive.llap.cache.allow.synthetic.fileid</name>
+ <value>true</value>
+</property>
+
+<property>
<name>hive.llap.io.use.lrfu</name>
<value>false</value>
</property>
http://git-wip-us.apache.org/repos/asf/hive/blob/57f39a99/data/conf/llap/hive-site.xml
----------------------------------------------------------------------
diff --git a/data/conf/llap/hive-site.xml b/data/conf/llap/hive-site.xml
index 9440611..e3425c8 100644
--- a/data/conf/llap/hive-site.xml
+++ b/data/conf/llap/hive-site.xml
@@ -269,6 +269,11 @@
</property>
<property>
+ <name>hive.llap.cache.allow.synthetic.fileid</name>
+ <value>true</value>
+</property>
+
+<property>
<name>hive.llap.io.cache.direct</name>
<value>false</value>
</property>
http://git-wip-us.apache.org/repos/asf/hive/blob/57f39a99/data/conf/llap/llap-daemon-site.xml
----------------------------------------------------------------------
diff --git a/data/conf/llap/llap-daemon-site.xml b/data/conf/llap/llap-daemon-site.xml
index cc3e438..98c0f2b 100644
--- a/data/conf/llap/llap-daemon-site.xml
+++ b/data/conf/llap/llap-daemon-site.xml
@@ -37,6 +37,11 @@
<value>4</value>
</property>
+<property>
+ <name>hive.llap.cache.allow.synthetic.fileid</name>
+ <value>true</value>
+</property>
+
<!-- hadoop IPC options -->
<property>
<name>ipc.client.low-latency</name>
http://git-wip-us.apache.org/repos/asf/hive/blob/57f39a99/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
index 885acc0..f9d06e9 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.hive.common.io.DiskRange;
import org.apache.hadoop.hive.common.io.DiskRangeList;
import org.apache.hadoop.hive.common.io.encoded.MemoryBuffer;
import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hadoop.hive.llap.ConsumerFeedback;
import org.apache.hadoop.hive.llap.DebugUtils;
import org.apache.hadoop.hive.llap.cache.Cache;
@@ -148,7 +149,7 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
private Reader orcReader;
private MetadataReader metadataReader;
private EncodedReader stripeReader;
- private long fileId;
+ private Long fileId;
private FileSystem fs;
/**
* readState[stripeIx'][colIx'] => boolean array (could be a bitmask) of rg-s that need to be
@@ -227,8 +228,10 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
// 1. Get file metadata from cache, or create the reader and read it.
// Don't cache the filesystem object for now; Tez closes it and FS cache will fix all that
fs = split.getPath().getFileSystem(conf);
- fileId = determineFileId(fs, split);
- counters.setDesc(QueryFragmentCounters.Desc.FILE, fileId);
+ fileId = determineFileId(fs, split,
+ HiveConf.getBoolVar(conf, ConfVars.LLAP_CACHE_ALLOW_SYNTHETIC_FILEID));
+ counters.setDesc(QueryFragmentCounters.Desc.FILE, split.getPath()
+ + (fileId == null ? "" : " (" + fileId + ")"));
try {
fileMetadata = getOrReadFileMetadata();
@@ -333,6 +336,8 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
// 6. Read data.
// TODO: I/O threadpool could be here - one thread per stripe; for now, linear.
+ boolean hasFileId = this.fileId != null;
+ long fileId = hasFileId ? this.fileId : 0;
OrcBatchKey stripeKey = new OrcBatchKey(fileId, -1, 0);
for (int stripeIxMod = 0; stripeIxMod < readState.length; ++stripeIxMod) {
if (processStop()) {
@@ -378,7 +383,9 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
stripeMetadata = stripeMetadatas.get(stripeIxMod);
} else {
stripeKey.stripeIx = stripeIx;
- stripeMetadata = metadataCache.getStripeMetadata(stripeKey);
+ if (hasFileId) {
+ stripeMetadata = metadataCache.getStripeMetadata(stripeKey);
+ }
isFoundInCache = (stripeMetadata != null);
if (!isFoundInCache) {
counters.incrCounter(Counter.METADATA_CACHE_MISS);
@@ -387,7 +394,9 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
stripeMetadata = new OrcStripeMetadata(
stripeKey, metadataReader, stripe, stripeIncludes, sargColumns);
counters.incrTimeCounter(Counter.HDFS_TIME_US, startTimeHdfs);
- stripeMetadata = metadataCache.putStripeMetadata(stripeMetadata);
+ if (hasFileId) {
+ stripeMetadata = metadataCache.putStripeMetadata(stripeMetadata);
+ }
if (DebugUtils.isTraceOrcEnabled()) {
LlapIoImpl.LOG.info("Caching stripe " + stripeKey.stripeIx
+ " metadata with includes: " + DebugUtils.toString(stripeIncludes));
@@ -515,7 +524,8 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
return true;
}
- private static long determineFileId(FileSystem fs, FileSplit split) throws IOException {
+ private static Long determineFileId(FileSystem fs, FileSplit split,
+ boolean allowSynthetic) throws IOException {
if (split instanceof OrcSplit) {
Long fileId = ((OrcSplit)split).getFileId();
if (fileId != null) {
@@ -523,7 +533,7 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
}
}
LOG.warn("Split for " + split.getPath() + " (" + split.getClass() + ") does not have file ID");
- return HdfsUtils.getFileId(fs, split.getPath());
+ return HdfsUtils.getFileId(fs, split.getPath(), allowSynthetic);
}
private boolean[][] genStripeColRgs(List<Integer> stripeCols, boolean[][] globalColRgs) {
@@ -588,7 +598,10 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
*/
private void ensureOrcReader() throws IOException {
if (orcReader != null) return;
- Path path = HdfsUtils.getFileIdPath(fs, split.getPath(), fileId);
+ Path path = split.getPath();
+ if (fileId != null && HiveConf.getBoolVar(conf, ConfVars.LLAP_IO_USE_FILEID_PATH)) {
+ path = HdfsUtils.getFileIdPath(fs, split.getPath(), fileId);
+ }
if (DebugUtils.isTraceOrcEnabled()) {
LOG.info("Creating reader for " + path + " (" + split.getPath() + ")");
}
@@ -602,16 +615,19 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
* Gets file metadata for the split from cache, or reads it from the file.
*/
private OrcFileMetadata getOrReadFileMetadata() throws IOException {
- OrcFileMetadata metadata = metadataCache.getFileMetadata(fileId);
- if (metadata != null) {
- counters.incrCounter(Counter.METADATA_CACHE_HIT);
- return metadata;
+ OrcFileMetadata metadata = null;
+ if (fileId != null) {
+ metadata = metadataCache.getFileMetadata(fileId);
+ if (metadata != null) {
+ counters.incrCounter(Counter.METADATA_CACHE_HIT);
+ return metadata;
+ }
+ counters.incrCounter(Counter.METADATA_CACHE_MISS);
}
- counters.incrCounter(Counter.METADATA_CACHE_MISS);
ensureOrcReader();
// We assume this call doesn't touch HDFS because everything is already read; don't add time.
- metadata = new OrcFileMetadata(fileId, orcReader);
- return metadataCache.putFileMetadata(metadata);
+ metadata = new OrcFileMetadata(fileId == null ? 0 : fileId, orcReader);
+ return (fileId == null) ? metadata : metadataCache.putFileMetadata(metadata);
}
/**
@@ -620,10 +636,12 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
private ArrayList<OrcStripeMetadata> readStripesMetadata(
boolean[] globalInc, boolean[] sargColumns) throws IOException {
ArrayList<OrcStripeMetadata> result = new ArrayList<OrcStripeMetadata>(readState.length);
+ boolean hasFileId = this.fileId != null;
+ long fileId = hasFileId ? this.fileId : 0;
OrcBatchKey stripeKey = new OrcBatchKey(fileId, 0, 0);
for (int stripeIxMod = 0; stripeIxMod < readState.length; ++stripeIxMod) {
stripeKey.stripeIx = stripeIxMod + stripeIxFrom;
- OrcStripeMetadata value = metadataCache.getStripeMetadata(stripeKey);
+ OrcStripeMetadata value = hasFileId ? metadataCache.getStripeMetadata(stripeKey) : null;
if (value == null || !value.hasAllIndexes(globalInc)) {
counters.incrCounter(Counter.METADATA_CACHE_MISS);
ensureMetadataReader();
@@ -632,7 +650,9 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
long startTime = counters.startTimeCounter();
value = new OrcStripeMetadata(stripeKey, metadataReader, si, globalInc, sargColumns);
counters.incrTimeCounter(Counter.HDFS_TIME_US, startTime);
- value = metadataCache.putStripeMetadata(value);
+ if (hasFileId) {
+ value = metadataCache.putStripeMetadata(value);
+ }
if (DebugUtils.isTraceOrcEnabled()) {
LlapIoImpl.LOG.info("Caching stripe " + stripeKey.stripeIx
+ " metadata with includes: " + DebugUtils.toString(globalInc));
http://git-wip-us.apache.org/repos/asf/hive/blob/57f39a99/llap-server/src/test/resources/llap-daemon-site.xml
----------------------------------------------------------------------
diff --git a/llap-server/src/test/resources/llap-daemon-site.xml b/llap-server/src/test/resources/llap-daemon-site.xml
index ee240a9..8f4a54c 100644
--- a/llap-server/src/test/resources/llap-daemon-site.xml
+++ b/llap-server/src/test/resources/llap-daemon-site.xml
@@ -54,6 +54,12 @@
<description>Comma separated list of nodes running daemons</description>
</property>
+ <property>
+ <name>hive.llap.cache.allow.synthetic.fileid</name>
+ <value>true</value>
+ </property>
+
+
<!-- hadoop IPC options -->
<property>
<name>ipc.client.low-latency</name>
http://git-wip-us.apache.org/repos/asf/hive/blob/57f39a99/ql/src/java/org/apache/hadoop/hive/ql/io/HdfsUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/HdfsUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/HdfsUtils.java
index 38c99fd..58bf9b6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/HdfsUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/HdfsUtils.java
@@ -22,6 +22,7 @@ import java.io.IOException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -32,16 +33,22 @@ public class HdfsUtils {
private static final HadoopShims SHIMS = ShimLoader.getHadoopShims();
private static final Logger LOG = LoggerFactory.getLogger(HdfsUtils.class);
- public static long getFileId(FileSystem fileSystem, Path path) throws IOException {
+ public static Long getFileId(
+ FileSystem fileSystem, Path path, boolean allowSynthetic) throws IOException {
String pathStr = path.toUri().getPath();
if (fileSystem instanceof DistributedFileSystem) {
return SHIMS.getFileId(fileSystem, pathStr);
}
+ if (!allowSynthetic) return null;
// If we are not on DFS, we just hash the file name + size and hope for the best.
// TODO: we assume it only happens in tests. Fix?
int nameHash = pathStr.hashCode();
- long fileSize = fileSystem.getFileStatus(path).getLen();
- long id = ((fileSize ^ (fileSize >>> 32)) << 32) | ((long)nameHash & 0xffffffffL);
+ FileStatus fs = fileSystem.getFileStatus(path);
+ long fileSize = fs.getLen(), modTime = fs.getModificationTime();
+ int fileSizeHash = (int)(fileSize ^ (fileSize >>> 32)),
+ modTimeHash = (int)(modTime ^ (modTime >>> 32)),
+ combinedHash = modTimeHash ^ fileSizeHash;
+ long id = (((long)nameHash & 0xffffffffL) << 32) | ((long)combinedHash & 0xffffffffL);
LOG.warn("Cannot get unique file ID from "
+ fileSystem.getClass().getSimpleName() + "; using " + id + "(" + pathStr
+ "," + nameHash + "," + fileSize + ")");
@@ -55,7 +62,7 @@ public class HdfsUtils {
public static Path getFileIdPath(
FileSystem fileSystem, Path path, long fileId) {
- return (fileSystem instanceof DistributedFileSystem)
+ return ((fileSystem instanceof DistributedFileSystem))
? new Path(HDFS_ID_PATH_PREFIX + fileId) : path;
}
}
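The synthetic ID packs the path-name hash into the high 32 bits and the XOR of the size and modification-time hashes into the low 32 bits, so an overwritten file at the same path collides only if both size and mtime hash alike. The same computation as a standalone sketch (plain Java, no Hadoop dependencies):

    // Standalone sketch of the synthetic file ID layout from getFileId() above:
    // high 32 bits = path-name hash, low 32 bits = sizeHash ^ modTimeHash.
    static long syntheticFileId(String pathStr, long fileSize, long modTime) {
      int nameHash = pathStr.hashCode();
      int fileSizeHash = (int) (fileSize ^ (fileSize >>> 32));
      int modTimeHash = (int) (modTime ^ (modTime >>> 32));
      int combinedHash = modTimeHash ^ fileSizeHash;
      return (((long) nameHash & 0xffffffffL) << 32)
          | ((long) combinedHash & 0xffffffffL);
    }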
http://git-wip-us.apache.org/repos/asf/hive/blob/57f39a99/ql/src/java/org/apache/hadoop/hive/ql/io/orc/InStream.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/InStream.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/InStream.java
index 2275188..227cfca 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/InStream.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/InStream.java
@@ -37,12 +37,10 @@ public abstract class InStream extends InputStream {
private static final Logger LOG = LoggerFactory.getLogger(InStream.class);
private static final int PROTOBUF_MESSAGE_MAX_LIMIT = 1024 << 20; // 1GB
- protected final Long fileId;
protected final String name;
protected long length;
- public InStream(Long fileId, String name, long length) {
- this.fileId = fileId;
+ public InStream(String name, long length) {
this.name = name;
this.length = length;
}
@@ -62,8 +60,8 @@ public abstract class InStream extends InputStream {
private ByteBuffer range;
private int currentRange;
- public UncompressedStream(Long fileId, String name, List<DiskRange> input, long length) {
- super(fileId, name, length);
+ public UncompressedStream(String name, List<DiskRange> input, long length) {
+ super(name, length);
reset(input, length);
}
@@ -188,9 +186,9 @@ public abstract class InStream extends InputStream {
private int currentRange;
private boolean isUncompressedOriginal;
- public CompressedStream(Long fileId, String name, List<DiskRange> input, long length,
+ public CompressedStream(String name, List<DiskRange> input, long length,
CompressionCodec codec, int bufferSize) {
- super(fileId, name, length);
+ super(name, length);
this.bytes = input;
this.codec = codec;
this.bufferSize = bufferSize;
@@ -438,8 +436,7 @@ public abstract class InStream extends InputStream {
*/
@VisibleForTesting
@Deprecated
- public static InStream create(Long fileId,
- String streamName,
+ public static InStream create(String streamName,
ByteBuffer[] buffers,
long[] offsets,
long length,
@@ -449,7 +446,7 @@ public abstract class InStream extends InputStream {
for (int i = 0; i < buffers.length; ++i) {
input.add(new BufferChunk(buffers[i], offsets[i]));
}
- return create(fileId, streamName, input, length, codec, bufferSize);
+ return create(streamName, input, length, codec, bufferSize);
}
/**
@@ -463,16 +460,15 @@ public abstract class InStream extends InputStream {
* @return an input stream
* @throws IOException
*/
- public static InStream create(Long fileId,
- String name,
+ public static InStream create(String name,
List<DiskRange> input,
long length,
CompressionCodec codec,
int bufferSize) throws IOException {
if (codec == null) {
- return new UncompressedStream(fileId, name, input, length);
+ return new UncompressedStream(name, input, length);
} else {
- return new CompressedStream(fileId, name, input, length, codec, bufferSize);
+ return new CompressedStream(name, input, length, codec, bufferSize);
}
}
@@ -487,13 +483,13 @@ public abstract class InStream extends InputStream {
* @return coded input stream
* @throws IOException
*/
- public static CodedInputStream createCodedInputStream(Long fileId,
+ public static CodedInputStream createCodedInputStream(
String name,
List<DiskRange> input,
long length,
CompressionCodec codec,
int bufferSize) throws IOException {
- InStream inStream = create(fileId, name, input, length, codec, bufferSize);
+ InStream inStream = create(name, input, length, codec, bufferSize);
CodedInputStream codedInputStream = CodedInputStream.newInstance(inStream);
codedInputStream.setSizeLimit(PROTOBUF_MESSAGE_MAX_LIMIT);
return codedInputStream;
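With fileId dropped from the factory methods, callers simply omit the first argument; the test updates at the end of this patch show the new shape. For example, mirroring TestInStream (inBuf is a prepared ByteBuffer; a null codec means uncompressed):

    InStream in = InStream.create("test", new ByteBuffer[]{inBuf},
        new long[]{0}, inBuf.remaining(), null, 100);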
http://git-wip-us.apache.org/repos/asf/hive/blob/57f39a99/ql/src/java/org/apache/hadoop/hive/ql/io/orc/MetadataReaderImpl.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/MetadataReaderImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/MetadataReaderImpl.java
index 5afba51..4624927 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/MetadataReaderImpl.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/MetadataReaderImpl.java
@@ -85,13 +85,13 @@ public class MetadataReaderImpl implements MetadataReader {
byte[] buffer = new byte[len];
file.readFully(offset, buffer, 0, buffer.length);
ByteBuffer bb = ByteBuffer.wrap(buffer);
- indexes[col] = OrcProto.RowIndex.parseFrom(InStream.create(null, "index",
+ indexes[col] = OrcProto.RowIndex.parseFrom(InStream.create("index",
Lists.<DiskRange>newArrayList(new BufferChunk(bb, 0)), stream.getLength(),
codec, bufferSize));
if (readBloomFilter) {
bb.position((int) stream.getLength());
bloomFilterIndices[col] = OrcProto.BloomFilterIndex.parseFrom(InStream.create(
- null, "bloom_filter", Lists.<DiskRange>newArrayList(new BufferChunk(bb, 0)),
+ "bloom_filter", Lists.<DiskRange>newArrayList(new BufferChunk(bb, 0)),
nextStream.getLength(), codec, bufferSize));
}
}
@@ -111,7 +111,7 @@ public class MetadataReaderImpl implements MetadataReader {
// read the footer
ByteBuffer tailBuf = ByteBuffer.allocate(tailLength);
file.readFully(offset, tailBuf.array(), tailBuf.arrayOffset(), tailLength);
- return OrcProto.StripeFooter.parseFrom(InStream.createCodedInputStream(null, "footer",
+ return OrcProto.StripeFooter.parseFrom(InStream.createCodedInputStream("footer",
Lists.<DiskRange>newArrayList(new BufferChunk(tailBuf, 0)),
tailLength, codec, bufferSize));
}
http://git-wip-us.apache.org/repos/asf/hive/blob/57f39a99/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java
index f6dea25..f2f5f49 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java
@@ -418,7 +418,7 @@ public class ReaderImpl implements Reader {
int footerSize, CompressionCodec codec, int bufferSize) throws IOException {
bb.position(footerAbsPos);
bb.limit(footerAbsPos + footerSize);
- return OrcProto.Footer.parseFrom(InStream.createCodedInputStream(null, "footer",
+ return OrcProto.Footer.parseFrom(InStream.createCodedInputStream("footer",
Lists.<DiskRange>newArrayList(new BufferChunk(bb, 0)), footerSize, codec, bufferSize));
}
@@ -426,7 +426,7 @@ public class ReaderImpl implements Reader {
int metadataSize, CompressionCodec codec, int bufferSize) throws IOException {
bb.position(metadataAbsPos);
bb.limit(metadataAbsPos + metadataSize);
- return OrcProto.Metadata.parseFrom(InStream.createCodedInputStream(null, "metadata",
+ return OrcProto.Metadata.parseFrom(InStream.createCodedInputStream("metadata",
Lists.<DiskRange>newArrayList(new BufferChunk(bb, 0)), metadataSize, codec, bufferSize));
}
http://git-wip-us.apache.org/repos/asf/hive/blob/57f39a99/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java
index 7f550a4..f36bceb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java
@@ -993,7 +993,7 @@ public class RecordReaderImpl implements RecordReader {
List<DiskRange> buffers = RecordReaderUtils.getStreamBuffers(
ranges, streamOffset, streamDesc.getLength());
StreamName name = new StreamName(column, streamDesc.getKind());
- streams.put(name, InStream.create(null, name.toString(), buffers,
+ streams.put(name, InStream.create(name.toString(), buffers,
streamDesc.getLength(), codec, bufferSize));
streamOffset += streamDesc.getLength();
}
http://git-wip-us.apache.org/repos/asf/hive/blob/57f39a99/ql/src/java/org/apache/hadoop/hive/ql/io/orc/SettableUncompressedStream.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/SettableUncompressedStream.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/SettableUncompressedStream.java
index eaf4fcc..3496de9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/SettableUncompressedStream.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/SettableUncompressedStream.java
@@ -27,9 +27,8 @@ import org.apache.hadoop.hive.common.io.DiskRange;
*/
public class SettableUncompressedStream extends InStream.UncompressedStream {
- public SettableUncompressedStream(Long fileId, String name,
- List<DiskRange> input, long length) {
- super(fileId, name, input, length);
+ public SettableUncompressedStream(String name, List<DiskRange> input, long length) {
+ super(name, input, length);
setOffset(input);
}
http://git-wip-us.apache.org/repos/asf/hive/blob/57f39a99/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
index a8b51b9..deeed52 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
@@ -101,7 +101,7 @@ class EncodedReaderImpl implements EncodedReader {
return tcc;
}
};
- private final long fileId;
+ private final Long fileId;
private final DataReader dataReader;
private boolean isDataReaderOpen = false;
private final CompressionCodec codec;
@@ -276,6 +276,8 @@ class EncodedReaderImpl implements EncodedReader {
offset += length;
}
+ boolean hasFileId = this.fileId != null;
+ long fileId = hasFileId ? this.fileId : 0;
if (listToRead.get() == null) {
// No data to read for this stripe. Check if we have some included index-only columns.
// TODO: there may be a bug here. Could there be partial RG filtering on index-only column?
@@ -296,10 +298,12 @@ class EncodedReaderImpl implements EncodedReader {
+ RecordReaderUtils.stringifyDiskRanges(toRead.next));
}
BooleanRef isAllInCache = new BooleanRef();
- cache.getFileData(fileId, toRead.next, stripeOffset, CC_FACTORY, isAllInCache);
- if (isDebugTracingEnabled && LOG.isInfoEnabled()) {
- LOG.info("Disk ranges after cache (file " + fileId + ", base offset " + stripeOffset
- + "): " + RecordReaderUtils.stringifyDiskRanges(toRead.next));
+ if (hasFileId) {
+ cache.getFileData(fileId, toRead.next, stripeOffset, CC_FACTORY, isAllInCache);
+ if (isDebugTracingEnabled && LOG.isInfoEnabled()) {
+ LOG.info("Disk ranges after cache (file " + fileId + ", base offset " + stripeOffset
+ + "): " + RecordReaderUtils.stringifyDiskRanges(toRead.next));
+ }
}
if (!isAllInCache.value) {
@@ -653,8 +657,10 @@ class EncodedReaderImpl implements EncodedReader {
}
// 6. Finally, put uncompressed data to cache.
- long[] collisionMask = cache.putFileData(fileId, cacheKeys, targetBuffers, baseOffset);
- processCacheCollisions(collisionMask, toDecompress, targetBuffers, csd.getCacheBuffers());
+ if (fileId != null) {
+ long[] collisionMask = cache.putFileData(fileId, cacheKeys, targetBuffers, baseOffset);
+ processCacheCollisions(collisionMask, toDecompress, targetBuffers, csd.getCacheBuffers());
+ }
// 7. It may happen that we know we won't use some compression buffers anymore.
// Release initial refcounts.
@@ -902,8 +908,10 @@ class EncodedReaderImpl implements EncodedReader {
}
// 6. Finally, put uncompressed data to cache.
- long[] collisionMask = cache.putFileData(fileId, cacheKeys, targetBuffers, baseOffset);
- processCacheCollisions(collisionMask, toCache, targetBuffers, null);
+ if (fileId != null) {
+ long[] collisionMask = cache.putFileData(fileId, cacheKeys, targetBuffers, baseOffset);
+ processCacheCollisions(collisionMask, toCache, targetBuffers, null);
+ }
return lastUncompressed;
}
http://git-wip-us.apache.org/repos/asf/hive/blob/57f39a99/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedTreeReaderFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedTreeReaderFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedTreeReaderFactory.java
index 567214f..dd6f64c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedTreeReaderFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedTreeReaderFactory.java
@@ -98,7 +98,6 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
}
public static class StreamReaderBuilder {
- private Long fileId;
private int columnIndex;
private ColumnStreamData presentStream;
private ColumnStreamData dataStream;
@@ -107,11 +106,6 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
private OrcProto.ColumnEncoding columnEncoding;
private boolean skipCorrupt;
- public StreamReaderBuilder setFileId(Long fileId) {
- this.fileId = fileId;
- return this;
- }
-
public StreamReaderBuilder setColumnIndex(int columnIndex) {
this.columnIndex = columnIndex;
return this;
@@ -150,15 +144,15 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
public TimestampStreamReader build() throws IOException {
SettableUncompressedStream present = StreamUtils
.createSettableUncompressedStream(OrcProto.Stream.Kind.PRESENT.name(),
- fileId, presentStream);
+ presentStream);
SettableUncompressedStream data = StreamUtils
- .createSettableUncompressedStream(OrcProto.Stream.Kind.DATA.name(), fileId,
+ .createSettableUncompressedStream(OrcProto.Stream.Kind.DATA.name(),
dataStream);
SettableUncompressedStream nanos = StreamUtils
.createSettableUncompressedStream(OrcProto.Stream.Kind.SECONDARY.name(),
- fileId, nanosStream);
+ nanosStream);
boolean isFileCompressed = compressionCodec != null;
return new TimestampStreamReader(columnIndex, present, data, nanos,
@@ -261,7 +255,6 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
}
public static class StreamReaderBuilder {
- private Long fileId;
private int columnIndex;
private ColumnStreamData presentStream;
private ColumnStreamData dataStream;
@@ -270,10 +263,6 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
private CompressionCodec compressionCodec;
private OrcProto.ColumnEncoding columnEncoding;
- public StreamReaderBuilder setFileId(Long fileId) {
- this.fileId = fileId;
- return this;
- }
public StreamReaderBuilder setColumnIndex(int columnIndex) {
this.columnIndex = columnIndex;
@@ -313,18 +302,18 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
public StringStreamReader build() throws IOException {
SettableUncompressedStream present = StreamUtils
.createSettableUncompressedStream(OrcProto.Stream.Kind.PRESENT.name(),
- fileId, presentStream);
+ presentStream);
SettableUncompressedStream data = StreamUtils
- .createSettableUncompressedStream(OrcProto.Stream.Kind.DATA.name(), fileId,
+ .createSettableUncompressedStream(OrcProto.Stream.Kind.DATA.name(),
dataStream);
SettableUncompressedStream length = StreamUtils
- .createSettableUncompressedStream(OrcProto.Stream.Kind.LENGTH.name(), fileId,
+ .createSettableUncompressedStream(OrcProto.Stream.Kind.LENGTH.name(),
lengthStream);
SettableUncompressedStream dictionary = StreamUtils.createSettableUncompressedStream(
- OrcProto.Stream.Kind.DICTIONARY_DATA.name(), fileId, dictionaryStream);
+ OrcProto.Stream.Kind.DICTIONARY_DATA.name(), dictionaryStream);
boolean isFileCompressed = compressionCodec != null;
return new StringStreamReader(columnIndex, present, data, length, dictionary,
@@ -383,17 +372,12 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
}
public static class StreamReaderBuilder {
- private Long fileId;
private int columnIndex;
private ColumnStreamData presentStream;
private ColumnStreamData dataStream;
private CompressionCodec compressionCodec;
private OrcProto.ColumnEncoding columnEncoding;
- public StreamReaderBuilder setFileId(Long fileId) {
- this.fileId = fileId;
- return this;
- }
public StreamReaderBuilder setColumnIndex(int columnIndex) {
this.columnIndex = columnIndex;
@@ -423,10 +407,10 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
public ShortStreamReader build() throws IOException {
SettableUncompressedStream present = StreamUtils
.createSettableUncompressedStream(OrcProto.Stream.Kind.PRESENT.name(),
- fileId, presentStream);
+ presentStream);
SettableUncompressedStream data = StreamUtils
- .createSettableUncompressedStream(OrcProto.Stream.Kind.DATA.name(), fileId,
+ .createSettableUncompressedStream(OrcProto.Stream.Kind.DATA.name(),
dataStream);
boolean isFileCompressed = compressionCodec != null;
@@ -485,7 +469,6 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
}
public static class StreamReaderBuilder {
- private Long fileId;
private int columnIndex;
private ColumnStreamData presentStream;
private ColumnStreamData dataStream;
@@ -493,10 +476,6 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
private OrcProto.ColumnEncoding columnEncoding;
private boolean skipCorrupt;
- public StreamReaderBuilder setFileId(Long fileId) {
- this.fileId = fileId;
- return this;
- }
public StreamReaderBuilder setColumnIndex(int columnIndex) {
this.columnIndex = columnIndex;
@@ -531,10 +510,10 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
public LongStreamReader build() throws IOException {
SettableUncompressedStream present = StreamUtils
.createSettableUncompressedStream(OrcProto.Stream.Kind.PRESENT.name(),
- fileId, presentStream);
+ presentStream);
SettableUncompressedStream data = StreamUtils
- .createSettableUncompressedStream(OrcProto.Stream.Kind.DATA.name(), fileId,
+ .createSettableUncompressedStream(OrcProto.Stream.Kind.DATA.name(),
dataStream);
boolean isFileCompressed = compressionCodec != null;
@@ -593,17 +572,12 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
}
public static class StreamReaderBuilder {
- private Long fileId;
private int columnIndex;
private ColumnStreamData presentStream;
private ColumnStreamData dataStream;
private CompressionCodec compressionCodec;
private OrcProto.ColumnEncoding columnEncoding;
- public StreamReaderBuilder setFileId(Long fileId) {
- this.fileId = fileId;
- return this;
- }
public StreamReaderBuilder setColumnIndex(int columnIndex) {
this.columnIndex = columnIndex;
@@ -633,10 +607,10 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
public IntStreamReader build() throws IOException {
SettableUncompressedStream present = StreamUtils
.createSettableUncompressedStream(OrcProto.Stream.Kind.PRESENT.name(),
- fileId, presentStream);
+ presentStream);
SettableUncompressedStream data = StreamUtils
- .createSettableUncompressedStream(OrcProto.Stream.Kind.DATA.name(), fileId,
+ .createSettableUncompressedStream(OrcProto.Stream.Kind.DATA.name(),
dataStream);
boolean isFileCompressed = compressionCodec != null;
@@ -695,16 +669,11 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
}
public static class StreamReaderBuilder {
- private Long fileId;
private int columnIndex;
private ColumnStreamData presentStream;
private ColumnStreamData dataStream;
private CompressionCodec compressionCodec;
- public StreamReaderBuilder setFileId(Long fileId) {
- this.fileId = fileId;
- return this;
- }
public StreamReaderBuilder setColumnIndex(int columnIndex) {
this.columnIndex = columnIndex;
@@ -729,10 +698,10 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
public FloatStreamReader build() throws IOException {
SettableUncompressedStream present = StreamUtils
.createSettableUncompressedStream(OrcProto.Stream.Kind.PRESENT.name(),
- fileId, presentStream);
+ presentStream);
SettableUncompressedStream data = StreamUtils
- .createSettableUncompressedStream(OrcProto.Stream.Kind.DATA.name(), fileId,
+ .createSettableUncompressedStream(OrcProto.Stream.Kind.DATA.name(),
dataStream);
boolean isFileCompressed = compressionCodec != null;
@@ -790,16 +759,11 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
}
public static class StreamReaderBuilder {
- private Long fileId;
private int columnIndex;
private ColumnStreamData presentStream;
private ColumnStreamData dataStream;
private CompressionCodec compressionCodec;
- public StreamReaderBuilder setFileId(Long fileId) {
- this.fileId = fileId;
- return this;
- }
public StreamReaderBuilder setColumnIndex(int columnIndex) {
this.columnIndex = columnIndex;
@@ -824,10 +788,10 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
public DoubleStreamReader build() throws IOException {
SettableUncompressedStream present = StreamUtils
.createSettableUncompressedStream(OrcProto.Stream.Kind.PRESENT.name(),
- fileId, presentStream);
+ presentStream);
SettableUncompressedStream data = StreamUtils
- .createSettableUncompressedStream(OrcProto.Stream.Kind.DATA.name(), fileId,
+ .createSettableUncompressedStream(OrcProto.Stream.Kind.DATA.name(),
dataStream);
boolean isFileCompressed = compressionCodec != null;
@@ -899,7 +863,6 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
}
public static class StreamReaderBuilder {
- private Long fileId;
private int columnIndex;
private ColumnStreamData presentStream;
private ColumnStreamData valueStream;
@@ -909,10 +872,6 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
private CompressionCodec compressionCodec;
private OrcProto.ColumnEncoding columnEncoding;
- public StreamReaderBuilder setFileId(Long fileId) {
- this.fileId = fileId;
- return this;
- }
public StreamReaderBuilder setColumnIndex(int columnIndex) {
this.columnIndex = columnIndex;
@@ -956,13 +915,13 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
public DecimalStreamReader build() throws IOException {
SettableUncompressedStream presentInStream = StreamUtils.createSettableUncompressedStream(
- OrcProto.Stream.Kind.PRESENT.name(), fileId, presentStream);
+ OrcProto.Stream.Kind.PRESENT.name(), presentStream);
SettableUncompressedStream valueInStream = StreamUtils.createSettableUncompressedStream(
- OrcProto.Stream.Kind.DATA.name(), fileId, valueStream);
+ OrcProto.Stream.Kind.DATA.name(), valueStream);
SettableUncompressedStream scaleInStream = StreamUtils.createSettableUncompressedStream(
- OrcProto.Stream.Kind.SECONDARY.name(), fileId, scaleStream);
+ OrcProto.Stream.Kind.SECONDARY.name(), scaleStream);
boolean isFileCompressed = compressionCodec != null;
return new DecimalStreamReader(columnIndex, precision, scale, presentInStream,
@@ -1021,18 +980,12 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
}
public static class StreamReaderBuilder {
- private Long fileId;
private int columnIndex;
private ColumnStreamData presentStream;
private ColumnStreamData dataStream;
private CompressionCodec compressionCodec;
private OrcProto.ColumnEncoding columnEncoding;
- public StreamReaderBuilder setFileId(Long fileId) {
- this.fileId = fileId;
- return this;
- }
-
public StreamReaderBuilder setColumnIndex(int columnIndex) {
this.columnIndex = columnIndex;
return this;
@@ -1061,11 +1014,11 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
public DateStreamReader build() throws IOException {
SettableUncompressedStream present = StreamUtils
.createSettableUncompressedStream(OrcProto.Stream.Kind.PRESENT.name(),
- fileId, presentStream);
+ presentStream);
SettableUncompressedStream data = StreamUtils
- .createSettableUncompressedStream(OrcProto.Stream.Kind.DATA.name(), fileId,
+ .createSettableUncompressedStream(OrcProto.Stream.Kind.DATA.name(),
dataStream);
boolean isFileCompressed = compressionCodec != null;
@@ -1169,7 +1122,6 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
}
public static class StreamReaderBuilder {
- private Long fileId;
private int columnIndex;
private int maxLength;
private ColumnStreamData presentStream;
@@ -1179,10 +1131,6 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
private CompressionCodec compressionCodec;
private OrcProto.ColumnEncoding columnEncoding;
- public StreamReaderBuilder setFileId(Long fileId) {
- this.fileId = fileId;
- return this;
- }
public StreamReaderBuilder setColumnIndex(int columnIndex) {
this.columnIndex = columnIndex;
@@ -1227,18 +1175,18 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
public CharStreamReader build() throws IOException {
SettableUncompressedStream present = StreamUtils
.createSettableUncompressedStream(OrcProto.Stream.Kind.PRESENT.name(),
- fileId, presentStream);
+ presentStream);
SettableUncompressedStream data = StreamUtils
- .createSettableUncompressedStream(OrcProto.Stream.Kind.DATA.name(), fileId,
+ .createSettableUncompressedStream(OrcProto.Stream.Kind.DATA.name(),
dataStream);
SettableUncompressedStream length = StreamUtils
- .createSettableUncompressedStream(OrcProto.Stream.Kind.LENGTH.name(), fileId,
+ .createSettableUncompressedStream(OrcProto.Stream.Kind.LENGTH.name(),
lengthStream);
SettableUncompressedStream dictionary = StreamUtils.createSettableUncompressedStream(
- OrcProto.Stream.Kind.DICTIONARY_DATA.name(), fileId, dictionaryStream);
+ OrcProto.Stream.Kind.DICTIONARY_DATA.name(), dictionaryStream);
boolean isFileCompressed = compressionCodec != null;
return new CharStreamReader(columnIndex, maxLength, present, data, length,
@@ -1342,7 +1290,6 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
}
public static class StreamReaderBuilder {
- private Long fileId;
private int columnIndex;
private int maxLength;
private ColumnStreamData presentStream;
@@ -1352,10 +1299,6 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
private CompressionCodec compressionCodec;
private OrcProto.ColumnEncoding columnEncoding;
- public StreamReaderBuilder setFileId(Long fileId) {
- this.fileId = fileId;
- return this;
- }
public StreamReaderBuilder setColumnIndex(int columnIndex) {
this.columnIndex = columnIndex;
@@ -1400,18 +1343,18 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
public VarcharStreamReader build() throws IOException {
SettableUncompressedStream present = StreamUtils
.createSettableUncompressedStream(OrcProto.Stream.Kind.PRESENT.name(),
- fileId, presentStream);
+ presentStream);
SettableUncompressedStream data = StreamUtils
- .createSettableUncompressedStream(OrcProto.Stream.Kind.DATA.name(), fileId,
+ .createSettableUncompressedStream(OrcProto.Stream.Kind.DATA.name(),
dataStream);
SettableUncompressedStream length = StreamUtils
- .createSettableUncompressedStream(OrcProto.Stream.Kind.LENGTH.name(), fileId,
+ .createSettableUncompressedStream(OrcProto.Stream.Kind.LENGTH.name(),
lengthStream);
SettableUncompressedStream dictionary = StreamUtils.createSettableUncompressedStream(
- OrcProto.Stream.Kind.DICTIONARY_DATA.name(), fileId, dictionaryStream);
+ OrcProto.Stream.Kind.DICTIONARY_DATA.name(), dictionaryStream);
boolean isFileCompressed = compressionCodec != null;
return new VarcharStreamReader(columnIndex, maxLength, present, data, length,
@@ -1469,16 +1412,11 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
}
public static class StreamReaderBuilder {
- private Long fileId;
private int columnIndex;
private ColumnStreamData presentStream;
private ColumnStreamData dataStream;
private CompressionCodec compressionCodec;
- public StreamReaderBuilder setFileId(Long fileId) {
- this.fileId = fileId;
- return this;
- }
public StreamReaderBuilder setColumnIndex(int columnIndex) {
this.columnIndex = columnIndex;
@@ -1503,10 +1441,10 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
public ByteStreamReader build() throws IOException {
SettableUncompressedStream present = StreamUtils
.createSettableUncompressedStream(OrcProto.Stream.Kind.PRESENT.name(),
- fileId, presentStream);
+ presentStream);
SettableUncompressedStream data = StreamUtils
- .createSettableUncompressedStream(OrcProto.Stream.Kind.DATA.name(), fileId,
+ .createSettableUncompressedStream(OrcProto.Stream.Kind.DATA.name(),
dataStream);
boolean isFileCompressed = compressionCodec != null;
@@ -1577,7 +1515,6 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
}
public static class StreamReaderBuilder {
- private Long fileId;
private int columnIndex;
private ColumnStreamData presentStream;
private ColumnStreamData dataStream;
@@ -1585,10 +1522,6 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
private CompressionCodec compressionCodec;
private OrcProto.ColumnEncoding columnEncoding;
- public StreamReaderBuilder setFileId(Long fileId) {
- this.fileId = fileId;
- return this;
- }
public StreamReaderBuilder setColumnIndex(int columnIndex) {
this.columnIndex = columnIndex;
@@ -1622,13 +1555,13 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
public BinaryStreamReader build() throws IOException {
SettableUncompressedStream present = StreamUtils.createSettableUncompressedStream(
- OrcProto.Stream.Kind.PRESENT.name(), fileId, presentStream);
+ OrcProto.Stream.Kind.PRESENT.name(), presentStream);
SettableUncompressedStream data = StreamUtils.createSettableUncompressedStream(
- OrcProto.Stream.Kind.DATA.name(), fileId, dataStream);
+ OrcProto.Stream.Kind.DATA.name(), dataStream);
SettableUncompressedStream length = StreamUtils.createSettableUncompressedStream(
- OrcProto.Stream.Kind.LENGTH.name(), fileId, lengthStream);
+ OrcProto.Stream.Kind.LENGTH.name(), lengthStream);
boolean isFileCompressed = compressionCodec != null;
return new BinaryStreamReader(columnIndex, present, data, length, isFileCompressed,
@@ -1685,16 +1618,11 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
}
public static class StreamReaderBuilder {
- private Long fileId;
private int columnIndex;
private ColumnStreamData presentStream;
private ColumnStreamData dataStream;
private CompressionCodec compressionCodec;
- public StreamReaderBuilder setFileId(Long fileId) {
- this.fileId = fileId;
- return this;
- }
public StreamReaderBuilder setColumnIndex(int columnIndex) {
this.columnIndex = columnIndex;
@@ -1719,10 +1647,10 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
public BooleanStreamReader build() throws IOException {
SettableUncompressedStream present = StreamUtils
.createSettableUncompressedStream(OrcProto.Stream.Kind.PRESENT.name(),
- fileId, presentStream);
+ presentStream);
SettableUncompressedStream data = StreamUtils
- .createSettableUncompressedStream(OrcProto.Stream.Kind.DATA.name(), fileId,
+ .createSettableUncompressedStream(OrcProto.Stream.Kind.DATA.name(),
dataStream);
boolean isFileCompressed = compressionCodec != null;
@@ -1740,7 +1668,6 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
List<OrcProto.ColumnEncoding> encodings,
EncodedColumnBatch<OrcBatchKey> batch,
CompressionCodec codec, boolean skipCorrupt) throws IOException {
- long file = batch.getBatchKey().file;
TreeReader[] treeReaders = new TreeReader[numCols];
for (int i = 0; i < numCols; i++) {
int columnIndex = batch.getColumnIxs()[i];
@@ -1765,7 +1692,6 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
switch (columnType.getKind()) {
case BINARY:
treeReaders[i] = BinaryStreamReader.builder()
- .setFileId(file)
.setColumnIndex(columnIndex)
.setPresentStream(present)
.setDataStream(data)
@@ -1776,7 +1702,6 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
break;
case BOOLEAN:
treeReaders[i] = BooleanStreamReader.builder()
- .setFileId(file)
.setColumnIndex(columnIndex)
.setPresentStream(present)
.setDataStream(data)
@@ -1785,7 +1710,6 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
break;
case BYTE:
treeReaders[i] = ByteStreamReader.builder()
- .setFileId(file)
.setColumnIndex(columnIndex)
.setPresentStream(present)
.setDataStream(data)
@@ -1794,7 +1718,6 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
break;
case SHORT:
treeReaders[i] = ShortStreamReader.builder()
- .setFileId(file)
.setColumnIndex(columnIndex)
.setPresentStream(present)
.setDataStream(data)
@@ -1804,7 +1727,6 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
break;
case INT:
treeReaders[i] = IntStreamReader.builder()
- .setFileId(file)
.setColumnIndex(columnIndex)
.setPresentStream(present)
.setDataStream(data)
@@ -1814,7 +1736,6 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
break;
case LONG:
treeReaders[i] = LongStreamReader.builder()
- .setFileId(file)
.setColumnIndex(columnIndex)
.setPresentStream(present)
.setDataStream(data)
@@ -1825,7 +1746,6 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
break;
case FLOAT:
treeReaders[i] = FloatStreamReader.builder()
- .setFileId(file)
.setColumnIndex(columnIndex)
.setPresentStream(present)
.setDataStream(data)
@@ -1834,7 +1754,6 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
break;
case DOUBLE:
treeReaders[i] = DoubleStreamReader.builder()
- .setFileId(file)
.setColumnIndex(columnIndex)
.setPresentStream(present)
.setDataStream(data)
@@ -1843,7 +1762,6 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
break;
case CHAR:
treeReaders[i] = CharStreamReader.builder()
- .setFileId(file)
.setColumnIndex(columnIndex)
.setMaxLength(columnType.getMaximumLength())
.setPresentStream(present)
@@ -1856,7 +1774,6 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
break;
case VARCHAR:
treeReaders[i] = VarcharStreamReader.builder()
- .setFileId(file)
.setColumnIndex(columnIndex)
.setMaxLength(columnType.getMaximumLength())
.setPresentStream(present)
@@ -1869,7 +1786,6 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
break;
case STRING:
treeReaders[i] = StringStreamReader.builder()
- .setFileId(file)
.setColumnIndex(columnIndex)
.setPresentStream(present)
.setDataStream(data)
@@ -1881,7 +1797,6 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
break;
case DECIMAL:
treeReaders[i] = DecimalStreamReader.builder()
- .setFileId(file)
.setColumnIndex(columnIndex)
.setPrecision(columnType.getPrecision())
.setScale(columnType.getScale())
@@ -1894,7 +1809,6 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
break;
case TIMESTAMP:
treeReaders[i] = TimestampStreamReader.builder()
- .setFileId(file)
.setColumnIndex(columnIndex)
.setPresentStream(present)
.setSecondsStream(data)
@@ -1906,7 +1820,6 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
break;
case DATE:
treeReaders[i] = DateStreamReader.builder()
- .setFileId(file)
.setColumnIndex(columnIndex)
.setPresentStream(present)
.setDataStream(data)
http://git-wip-us.apache.org/repos/asf/hive/blob/57f39a99/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/Reader.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/Reader.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/Reader.java
index 698632b..a27b35e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/Reader.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/Reader.java
@@ -68,5 +68,5 @@ public interface Reader extends org.apache.hadoop.hive.ql.io.orc.Reader {
* @return The reader.
*/
EncodedReader encodedReader(
- long fileId, DataCache dataCache, DataReader dataReader, PoolFactory pf) throws IOException;
+ Long fileId, DataCache dataCache, DataReader dataReader, PoolFactory pf) throws IOException;
}
http://git-wip-us.apache.org/repos/asf/hive/blob/57f39a99/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/ReaderImpl.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/ReaderImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/ReaderImpl.java
index 7b3c2a0..a4fa03b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/ReaderImpl.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/ReaderImpl.java
@@ -35,7 +35,7 @@ class ReaderImpl extends org.apache.hadoop.hive.ql.io.orc.ReaderImpl implements
@Override
public EncodedReader encodedReader(
- long fileId, DataCache dataCache, DataReader dataReader, PoolFactory pf) throws IOException {
+ Long fileId, DataCache dataCache, DataReader dataReader, PoolFactory pf) throws IOException {
return new EncodedReaderImpl(fileId, types,
codec, bufferSize, rowIndexStride, dataCache, dataReader, pf);
}
http://git-wip-us.apache.org/repos/asf/hive/blob/57f39a99/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/StreamUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/StreamUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/StreamUtils.java
index 814697d..2c9acfb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/StreamUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/StreamUtils.java
@@ -42,13 +42,13 @@ public class StreamUtils {
* @throws IOException
*/
public static SettableUncompressedStream createSettableUncompressedStream(String streamName,
- Long fileId, ColumnStreamData streamBuffer) throws IOException {
+ ColumnStreamData streamBuffer) throws IOException {
if (streamBuffer == null) {
return null;
}
DiskRangeInfo diskRangeInfo = createDiskRangeInfo(streamBuffer);
- return new SettableUncompressedStream(fileId, streamName, diskRangeInfo.getDiskRanges(),
+ return new SettableUncompressedStream(streamName, diskRangeInfo.getDiskRanges(),
diskRangeInfo.getTotalLength());
}
http://git-wip-us.apache.org/repos/asf/hive/blob/57f39a99/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestBitFieldReader.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestBitFieldReader.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestBitFieldReader.java
index 9e73700..b537765 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestBitFieldReader.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestBitFieldReader.java
@@ -46,7 +46,7 @@ public class TestBitFieldReader {
ByteBuffer inBuf = ByteBuffer.allocate(collect.buffer.size());
collect.buffer.setByteBuffer(inBuf, 0, collect.buffer.size());
inBuf.flip();
- BitFieldReader in = new BitFieldReader(InStream.create(null, "test",
+ BitFieldReader in = new BitFieldReader(InStream.create("test",
new ByteBuffer[]{inBuf}, new long[]{0}, inBuf.remaining(),
codec, 500), 1);
for(int i=0; i < COUNT; ++i) {
@@ -96,7 +96,7 @@ public class TestBitFieldReader {
ByteBuffer inBuf = ByteBuffer.allocate(collect.buffer.size());
collect.buffer.setByteBuffer(inBuf, 0, collect.buffer.size());
inBuf.flip();
- BitFieldReader in = new BitFieldReader(InStream.create(null, "test",
+ BitFieldReader in = new BitFieldReader(InStream.create("test",
new ByteBuffer[]{inBuf}, new long[]{0}, inBuf.remaining(),
null, 500), 3);
for(int i=0; i < COUNT; ++i) {
@@ -126,9 +126,8 @@ public class TestBitFieldReader {
ByteBuffer inBuf = ByteBuffer.allocate(collect.buffer.size());
collect.buffer.setByteBuffer(inBuf, 0, collect.buffer.size());
inBuf.flip();
- BitFieldReader in = new BitFieldReader(InStream.create
- (null, "test", new ByteBuffer[]{inBuf}, new long[]{0}, inBuf.remaining(),
- null, 100), 1);
+ BitFieldReader in = new BitFieldReader(InStream.create("test", new ByteBuffer[]{inBuf},
+ new long[]{0}, inBuf.remaining(), null, 100), 1);
for(int i=0; i < COUNT; i += 5) {
int x = (int) in.next();
if (i < COUNT/2) {
http://git-wip-us.apache.org/repos/asf/hive/blob/57f39a99/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestBitPack.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestBitPack.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestBitPack.java
index 5843a05..41a807b 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestBitPack.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestBitPack.java
@@ -113,7 +113,7 @@ public class TestBitPack {
collect.buffer.setByteBuffer(inBuf, 0, collect.buffer.size());
inBuf.flip();
long[] buff = new long[SIZE];
- utils.readInts(buff, 0, SIZE, fixedWidth, InStream.create(null, "test", new ByteBuffer[] { inBuf },
+ utils.readInts(buff, 0, SIZE, fixedWidth, InStream.create("test", new ByteBuffer[] { inBuf },
new long[] { 0 }, inBuf.remaining(), null, SIZE));
for (int i = 0; i < SIZE; i++) {
buff[i] = utils.zigzagDecode(buff[i]);
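The zigzagDecode call above undoes ORC's zigzag mapping, which folds signed longs onto unsigned values so that small magnitudes encode compactly regardless of sign. A standalone sketch of the standard transform, assuming Hive's utils implement the usual definition:

    // Zigzag encoding: 0, -1, 1, -2, 2, ... map to 0, 1, 2, 3, 4, ...
    static long zigzagEncode(long n) { return (n << 1) ^ (n >> 63); }
    static long zigzagDecode(long n) { return (n >>> 1) ^ -(n & 1); }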
http://git-wip-us.apache.org/repos/asf/hive/blob/57f39a99/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInStream.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInStream.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInStream.java
index ade959b..4c3ddfc 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInStream.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInStream.java
@@ -93,7 +93,7 @@ public class TestInStream {
ByteBuffer inBuf = ByteBuffer.allocate(collect.buffer.size());
collect.buffer.setByteBuffer(inBuf, 0, collect.buffer.size());
inBuf.flip();
- InStream in = InStream.create(null, "test", new ByteBuffer[]{inBuf},
+ InStream in = InStream.create("test", new ByteBuffer[]{inBuf},
new long[]{0}, inBuf.remaining(), null, 100);
assertEquals("uncompressed stream test position: 0 length: 1024" +
" range: 0 offset: 0 limit: 0",
@@ -125,7 +125,7 @@ public class TestInStream {
ByteBuffer inBuf = ByteBuffer.allocate(collect.buffer.size());
collect.buffer.setByteBuffer(inBuf, 0, collect.buffer.size());
inBuf.flip();
- InStream in = InStream.create(null, "test", new ByteBuffer[]{inBuf},
+ InStream in = InStream.create("test", new ByteBuffer[]{inBuf},
new long[]{0}, inBuf.remaining(), codec, 300);
assertEquals("compressed stream test position: 0 length: 961 range: 0" +
" offset: 0 limit: 0 range 0 = 0 to 961",
@@ -158,7 +158,7 @@ public class TestInStream {
ByteBuffer inBuf = ByteBuffer.allocate(collect.buffer.size());
collect.buffer.setByteBuffer(inBuf, 0, collect.buffer.size());
inBuf.flip();
- InStream in = InStream.create(null, "test", new ByteBuffer[]{inBuf},
+ InStream in = InStream.create("test", new ByteBuffer[]{inBuf},
new long[]{0}, inBuf.remaining(), codec, 100);
byte[] contents = new byte[1024];
try {
@@ -173,7 +173,7 @@ public class TestInStream {
inBuf.put((byte) 32);
inBuf.put((byte) 0);
inBuf.flip();
- in = InStream.create(null, "test2", new ByteBuffer[]{inBuf}, new long[]{0},
+ in = InStream.create("test2", new ByteBuffer[]{inBuf}, new long[]{0},
inBuf.remaining(), codec, 300);
try {
in.read();
@@ -209,7 +209,7 @@ public class TestInStream {
for(int i=0; i < inBuf.length; ++i) {
inBuf[i].flip();
}
- InStream in = InStream.create(null, "test", inBuf,
+ InStream in = InStream.create("test", inBuf,
new long[]{0,483, 1625}, 1674, codec, 400);
assertEquals("compressed stream test position: 0 length: 1674 range: 0" +
" offset: 0 limit: 0 range 0 = 0 to 483;" +
@@ -226,7 +226,7 @@ public class TestInStream {
assertEquals(i, inStream.readInt());
}
- in = InStream.create(null, "test", new ByteBuffer[]{inBuf[1], inBuf[2]},
+ in = InStream.create("test", new ByteBuffer[]{inBuf[1], inBuf[2]},
new long[]{483, 1625}, 1674, codec, 400);
inStream = new DataInputStream(in);
positions[303].reset();
@@ -235,7 +235,7 @@ public class TestInStream {
assertEquals(i, inStream.readInt());
}
- in = InStream.create(null, "test", new ByteBuffer[]{inBuf[0], inBuf[2]},
+ in = InStream.create("test", new ByteBuffer[]{inBuf[0], inBuf[2]},
new long[]{0, 1625}, 1674, codec, 400);
inStream = new DataInputStream(in);
positions[1001].reset();
@@ -273,7 +273,7 @@ public class TestInStream {
for(int i=0; i < inBuf.length; ++i) {
inBuf[i].flip();
}
- InStream in = InStream.create(null, "test", inBuf,
+ InStream in = InStream.create("test", inBuf,
new long[]{0, 1024, 3072}, 4096, null, 400);
assertEquals("uncompressed stream test position: 0 length: 4096" +
" range: 0 offset: 0 limit: 0",
@@ -289,7 +289,7 @@ public class TestInStream {
assertEquals(i, inStream.readInt());
}
- in = InStream.create(null, "test", new ByteBuffer[]{inBuf[1], inBuf[2]},
+ in = InStream.create("test", new ByteBuffer[]{inBuf[1], inBuf[2]},
new long[]{1024, 3072}, 4096, null, 400);
inStream = new DataInputStream(in);
positions[256].reset();
@@ -298,7 +298,7 @@ public class TestInStream {
assertEquals(i, inStream.readInt());
}
- in = InStream.create(null, "test", new ByteBuffer[]{inBuf[0], inBuf[2]},
+ in = InStream.create("test", new ByteBuffer[]{inBuf[0], inBuf[2]},
new long[]{0, 3072}, 4096, null, 400);
inStream = new DataInputStream(in);
positions[768].reset();
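The disjoint-buffer cases above also show how a single logical stream is assembled from multiple ranges: each ByteBuffer is paired with its starting offset in the stream, and the declared length is the stream's full logical size. A sketch of that shape under the new signature, with offsets and length taken from the uncompressed test above; buf0..buf2 are illustrative names:

    // Three buffers starting at logical offsets 0, 1024 and 3072 of a
    // 4096-byte stream; each buffer's extent comes from its remaining bytes.
    InStream in = InStream.create("test",
        new ByteBuffer[]{buf0, buf1, buf2},
        new long[]{0, 1024, 3072},  // per-buffer start offsets
        4096,                       // total logical stream length
        null,                       // null codec: uncompressed data
        400);                       // buffer size passed through to the stream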
http://git-wip-us.apache.org/repos/asf/hive/blob/57f39a99/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestIntegerCompressionReader.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestIntegerCompressionReader.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestIntegerCompressionReader.java
index 8e42a05..29dce30 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestIntegerCompressionReader.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestIntegerCompressionReader.java
@@ -55,7 +55,7 @@ public class TestIntegerCompressionReader {
inBuf.flip();
RunLengthIntegerReaderV2 in =
new RunLengthIntegerReaderV2(InStream.create
- (null, "test", new ByteBuffer[]{inBuf},
+ ("test", new ByteBuffer[]{inBuf},
new long[]{0}, inBuf.remaining(),
codec, 1000), true, false);
for(int i=0; i < 2048; ++i) {
@@ -108,7 +108,7 @@ public class TestIntegerCompressionReader {
collect.buffer.setByteBuffer(inBuf, 0, collect.buffer.size());
inBuf.flip();
RunLengthIntegerReaderV2 in =
- new RunLengthIntegerReaderV2(InStream.create(null, "test",
+ new RunLengthIntegerReaderV2(InStream.create("test",
new ByteBuffer[]{inBuf},
new long[]{0},
inBuf.remaining(),
http://git-wip-us.apache.org/repos/asf/hive/blob/57f39a99/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestRunLengthByteReader.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestRunLengthByteReader.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestRunLengthByteReader.java
index 9c8e5b5..90541f0 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestRunLengthByteReader.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestRunLengthByteReader.java
@@ -45,7 +45,7 @@ public class TestRunLengthByteReader {
ByteBuffer inBuf = ByteBuffer.allocate(collect.buffer.size());
collect.buffer.setByteBuffer(inBuf, 0, collect.buffer.size());
inBuf.flip();
- RunLengthByteReader in = new RunLengthByteReader(InStream.create(null, "test",
+ RunLengthByteReader in = new RunLengthByteReader(InStream.create("test",
new ByteBuffer[]{inBuf}, new long[]{0}, inBuf.remaining(), null, 100));
for(int i=0; i < 2048; ++i) {
int x = in.next() & 0xff;
@@ -87,7 +87,7 @@ public class TestRunLengthByteReader {
ByteBuffer inBuf = ByteBuffer.allocate(collect.buffer.size());
collect.buffer.setByteBuffer(inBuf, 0, collect.buffer.size());
inBuf.flip();
- RunLengthByteReader in = new RunLengthByteReader(InStream.create(null, "test",
+ RunLengthByteReader in = new RunLengthByteReader(InStream.create("test",
new ByteBuffer[]{inBuf}, new long[]{0}, inBuf.remaining(), codec, 500));
for(int i=0; i < 2048; ++i) {
int x = in.next() & 0xff;
@@ -124,7 +124,7 @@ public class TestRunLengthByteReader {
ByteBuffer inBuf = ByteBuffer.allocate(collect.buffer.size());
collect.buffer.setByteBuffer(inBuf, 0, collect.buffer.size());
inBuf.flip();
- RunLengthByteReader in = new RunLengthByteReader(InStream.create(null, "test",
+ RunLengthByteReader in = new RunLengthByteReader(InStream.create("test",
new ByteBuffer[]{inBuf}, new long[]{0}, inBuf.remaining(), null, 100));
for(int i=0; i < 2048; i += 10) {
int x = in.next() & 0xff;
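The & 0xff masking in these loops compensates for Java's signed byte type: widening a byte to int sign-extends it, and the mask recovers the unsigned 0..255 value the writer produced. A one-line illustration:

    byte b = (byte) 0xCA;    // stored as -54 in a signed Java byte
    int unsigned = b & 0xff; // masking restores the unsigned value, 202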
http://git-wip-us.apache.org/repos/asf/hive/blob/57f39a99/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestRunLengthIntegerReader.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestRunLengthIntegerReader.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestRunLengthIntegerReader.java
index 9254101..562b489 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestRunLengthIntegerReader.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestRunLengthIntegerReader.java
@@ -54,7 +54,7 @@ public class TestRunLengthIntegerReader {
collect.buffer.setByteBuffer(inBuf, 0, collect.buffer.size());
inBuf.flip();
RunLengthIntegerReader in = new RunLengthIntegerReader(InStream.create
- (null, "test", new ByteBuffer[]{inBuf}, new long[]{0}, inBuf.remaining(),
+ ("test", new ByteBuffer[]{inBuf}, new long[]{0}, inBuf.remaining(),
codec, 1000), true);
for(int i=0; i < 2048; ++i) {
int x = (int) in.next();
@@ -106,7 +106,7 @@ public class TestRunLengthIntegerReader {
collect.buffer.setByteBuffer(inBuf, 0, collect.buffer.size());
inBuf.flip();
RunLengthIntegerReader in = new RunLengthIntegerReader(InStream.create
- (null, "test", new ByteBuffer[]{inBuf}, new long[]{0}, inBuf.remaining(),
+ ("test", new ByteBuffer[]{inBuf}, new long[]{0}, inBuf.remaining(),
null, 100), true);
for(int i=0; i < 2048; i += 10) {
int x = (int) in.next();