Posted to commits@hive.apache.org by gu...@apache.org on 2014/08/28 05:15:19 UTC
svn commit: r1621031 [2/10] - in /hive/branches/cbo: ./
common/src/java/org/apache/hadoop/hive/conf/
contrib/src/test/results/clientnegative/
contrib/src/test/results/clientpositive/ data/files/
hbase-handler/src/test/results/negative/ hcatalog/core/sr...
Modified: hive/branches/cbo/ql/src/gen/protobuf/gen-java/org/apache/hadoop/hive/ql/io/orc/OrcProto.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/gen/protobuf/gen-java/org/apache/hadoop/hive/ql/io/orc/OrcProto.java?rev=1621031&r1=1621030&r2=1621031&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/gen/protobuf/gen-java/org/apache/hadoop/hive/ql/io/orc/OrcProto.java (original)
+++ hive/branches/cbo/ql/src/gen/protobuf/gen-java/org/apache/hadoop/hive/ql/io/orc/OrcProto.java Thu Aug 28 03:15:13 2014
@@ -3735,6 +3735,515 @@ public final class OrcProto {
// @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.ql.io.orc.DateStatistics)
}
+ public interface TimestampStatisticsOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // optional sint64 minimum = 1;
+ /**
+ * <code>optional sint64 minimum = 1;</code>
+ *
+ * <pre>
+ * min,max values saved as milliseconds since epoch
+ * </pre>
+ */
+ boolean hasMinimum();
+ /**
+ * <code>optional sint64 minimum = 1;</code>
+ *
+ * <pre>
+ * min,max values saved as milliseconds since epoch
+ * </pre>
+ */
+ long getMinimum();
+
+ // optional sint64 maximum = 2;
+ /**
+ * <code>optional sint64 maximum = 2;</code>
+ */
+ boolean hasMaximum();
+ /**
+ * <code>optional sint64 maximum = 2;</code>
+ */
+ long getMaximum();
+ }
+ /**
+ * Protobuf type {@code org.apache.hadoop.hive.ql.io.orc.TimestampStatistics}
+ */
+ public static final class TimestampStatistics extends
+ com.google.protobuf.GeneratedMessage
+ implements TimestampStatisticsOrBuilder {
+ // Use TimestampStatistics.newBuilder() to construct.
+ private TimestampStatistics(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private TimestampStatistics(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final TimestampStatistics defaultInstance;
+ public static TimestampStatistics getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public TimestampStatistics getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private TimestampStatistics(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 8: {
+ bitField0_ |= 0x00000001;
+ minimum_ = input.readSInt64();
+ break;
+ }
+ case 16: {
+ bitField0_ |= 0x00000002;
+ maximum_ = input.readSInt64();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.ql.io.orc.OrcProto.internal_static_org_apache_hadoop_hive_ql_io_orc_TimestampStatistics_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.ql.io.orc.OrcProto.internal_static_org_apache_hadoop_hive_ql_io_orc_TimestampStatistics_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.class, org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<TimestampStatistics> PARSER =
+ new com.google.protobuf.AbstractParser<TimestampStatistics>() {
+ public TimestampStatistics parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new TimestampStatistics(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<TimestampStatistics> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // optional sint64 minimum = 1;
+ public static final int MINIMUM_FIELD_NUMBER = 1;
+ private long minimum_;
+ /**
+ * <code>optional sint64 minimum = 1;</code>
+ *
+ * <pre>
+ * min,max values saved as milliseconds since epoch
+ * </pre>
+ */
+ public boolean hasMinimum() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional sint64 minimum = 1;</code>
+ *
+ * <pre>
+ * min,max values saved as milliseconds since epoch
+ * </pre>
+ */
+ public long getMinimum() {
+ return minimum_;
+ }
+
+ // optional sint64 maximum = 2;
+ public static final int MAXIMUM_FIELD_NUMBER = 2;
+ private long maximum_;
+ /**
+ * <code>optional sint64 maximum = 2;</code>
+ */
+ public boolean hasMaximum() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>optional sint64 maximum = 2;</code>
+ */
+ public long getMaximum() {
+ return maximum_;
+ }
+
+ private void initFields() {
+ minimum_ = 0L;
+ maximum_ = 0L;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeSInt64(1, minimum_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeSInt64(2, maximum_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeSInt64Size(1, minimum_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeSInt64Size(2, maximum_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ public static org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code org.apache.hadoop.hive.ql.io.orc.TimestampStatistics}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatisticsOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.ql.io.orc.OrcProto.internal_static_org_apache_hadoop_hive_ql_io_orc_TimestampStatistics_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.ql.io.orc.OrcProto.internal_static_org_apache_hadoop_hive_ql_io_orc_TimestampStatistics_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.class, org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ minimum_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ maximum_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hive.ql.io.orc.OrcProto.internal_static_org_apache_hadoop_hive_ql_io_orc_TimestampStatistics_descriptor;
+ }
+
+ public org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics getDefaultInstanceForType() {
+ return org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics build() {
+ org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics buildPartial() {
+ org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics result = new org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.minimum_ = minimum_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.maximum_ = maximum_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics) {
+ return mergeFrom((org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics other) {
+ if (other == org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.getDefaultInstance()) return this;
+ if (other.hasMinimum()) {
+ setMinimum(other.getMinimum());
+ }
+ if (other.hasMaximum()) {
+ setMaximum(other.getMaximum());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // optional sint64 minimum = 1;
+ private long minimum_ ;
+ /**
+ * <code>optional sint64 minimum = 1;</code>
+ *
+ * <pre>
+ * min,max values saved as milliseconds since epoch
+ * </pre>
+ */
+ public boolean hasMinimum() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional sint64 minimum = 1;</code>
+ *
+ * <pre>
+ * min,max values saved as milliseconds since epoch
+ * </pre>
+ */
+ public long getMinimum() {
+ return minimum_;
+ }
+ /**
+ * <code>optional sint64 minimum = 1;</code>
+ *
+ * <pre>
+ * min,max values saved as milliseconds since epoch
+ * </pre>
+ */
+ public Builder setMinimum(long value) {
+ bitField0_ |= 0x00000001;
+ minimum_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional sint64 minimum = 1;</code>
+ *
+ * <pre>
+ * min,max values saved as milliseconds since epoch
+ * </pre>
+ */
+ public Builder clearMinimum() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ minimum_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // optional sint64 maximum = 2;
+ private long maximum_ ;
+ /**
+ * <code>optional sint64 maximum = 2;</code>
+ */
+ public boolean hasMaximum() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>optional sint64 maximum = 2;</code>
+ */
+ public long getMaximum() {
+ return maximum_;
+ }
+ /**
+ * <code>optional sint64 maximum = 2;</code>
+ */
+ public Builder setMaximum(long value) {
+ bitField0_ |= 0x00000002;
+ maximum_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional sint64 maximum = 2;</code>
+ */
+ public Builder clearMaximum() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ maximum_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.ql.io.orc.TimestampStatistics)
+ }
+
+ static {
+ defaultInstance = new TimestampStatistics(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.ql.io.orc.TimestampStatistics)
+ }
+
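
For context, a minimal sketch of round-tripping the new TimestampStatistics message through the generated API above; the values are illustrative and the snippet assumes only the generated OrcProto class:

    // Build a TimestampStatistics with illustrative min/max (milliseconds since epoch).
    OrcProto.TimestampStatistics ts = OrcProto.TimestampStatistics.newBuilder()
        .setMinimum(0L)
        .setMaximum(1409195713000L)
        .build();
    // Serialize, then parse back through the generated parseFrom shown above.
    byte[] bytes = ts.toByteArray();
    OrcProto.TimestampStatistics parsed = OrcProto.TimestampStatistics.parseFrom(bytes);
    assert parsed.hasMinimum() && parsed.getMaximum() == 1409195713000L;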
public interface BinaryStatisticsOrBuilder
extends com.google.protobuf.MessageOrBuilder {
@@ -4273,6 +4782,20 @@ public final class OrcProto {
* <code>optional .org.apache.hadoop.hive.ql.io.orc.BinaryStatistics binaryStatistics = 8;</code>
*/
org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatisticsOrBuilder getBinaryStatisticsOrBuilder();
+
+ // optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;
+ /**
+ * <code>optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;</code>
+ */
+ boolean hasTimestampStatistics();
+ /**
+ * <code>optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;</code>
+ */
+ org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics getTimestampStatistics();
+ /**
+ * <code>optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;</code>
+ */
+ org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatisticsOrBuilder getTimestampStatisticsOrBuilder();
}
/**
* Protobuf type {@code org.apache.hadoop.hive.ql.io.orc.ColumnStatistics}
@@ -4421,6 +4944,19 @@ public final class OrcProto {
bitField0_ |= 0x00000080;
break;
}
+ case 74: {
+ org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000100) == 0x00000100)) {
+ subBuilder = timestampStatistics_.toBuilder();
+ }
+ timestampStatistics_ = input.readMessage(org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(timestampStatistics_);
+ timestampStatistics_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000100;
+ break;
+ }
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -4631,6 +5167,28 @@ public final class OrcProto {
return binaryStatistics_;
}
+ // optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;
+ public static final int TIMESTAMPSTATISTICS_FIELD_NUMBER = 9;
+ private org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics timestampStatistics_;
+ /**
+ * <code>optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;</code>
+ */
+ public boolean hasTimestampStatistics() {
+ return ((bitField0_ & 0x00000100) == 0x00000100);
+ }
+ /**
+ * <code>optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;</code>
+ */
+ public org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics getTimestampStatistics() {
+ return timestampStatistics_;
+ }
+ /**
+ * <code>optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;</code>
+ */
+ public org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatisticsOrBuilder getTimestampStatisticsOrBuilder() {
+ return timestampStatistics_;
+ }
+
private void initFields() {
numberOfValues_ = 0L;
intStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.IntegerStatistics.getDefaultInstance();
@@ -4640,6 +5198,7 @@ public final class OrcProto {
decimalStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.DecimalStatistics.getDefaultInstance();
dateStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.getDefaultInstance();
binaryStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics.getDefaultInstance();
+ timestampStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
@@ -4677,6 +5236,9 @@ public final class OrcProto {
if (((bitField0_ & 0x00000080) == 0x00000080)) {
output.writeMessage(8, binaryStatistics_);
}
+ if (((bitField0_ & 0x00000100) == 0x00000100)) {
+ output.writeMessage(9, timestampStatistics_);
+ }
getUnknownFields().writeTo(output);
}
@@ -4718,6 +5280,10 @@ public final class OrcProto {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(8, binaryStatistics_);
}
+ if (((bitField0_ & 0x00000100) == 0x00000100)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(9, timestampStatistics_);
+ }
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
@@ -4833,6 +5399,7 @@ public final class OrcProto {
getDecimalStatisticsFieldBuilder();
getDateStatisticsFieldBuilder();
getBinaryStatisticsFieldBuilder();
+ getTimestampStatisticsFieldBuilder();
}
}
private static Builder create() {
@@ -4885,6 +5452,12 @@ public final class OrcProto {
binaryStatisticsBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000080);
+ if (timestampStatisticsBuilder_ == null) {
+ timestampStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.getDefaultInstance();
+ } else {
+ timestampStatisticsBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000100);
return this;
}
@@ -4973,6 +5546,14 @@ public final class OrcProto {
} else {
result.binaryStatistics_ = binaryStatisticsBuilder_.build();
}
+ if (((from_bitField0_ & 0x00000100) == 0x00000100)) {
+ to_bitField0_ |= 0x00000100;
+ }
+ if (timestampStatisticsBuilder_ == null) {
+ result.timestampStatistics_ = timestampStatistics_;
+ } else {
+ result.timestampStatistics_ = timestampStatisticsBuilder_.build();
+ }
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
@@ -5013,6 +5594,9 @@ public final class OrcProto {
if (other.hasBinaryStatistics()) {
mergeBinaryStatistics(other.getBinaryStatistics());
}
+ if (other.hasTimestampStatistics()) {
+ mergeTimestampStatistics(other.getTimestampStatistics());
+ }
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
@@ -5892,6 +6476,123 @@ public final class OrcProto {
return binaryStatisticsBuilder_;
}
+ // optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;
+ private org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics timestampStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics, org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.Builder, org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatisticsOrBuilder> timestampStatisticsBuilder_;
+ /**
+ * <code>optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;</code>
+ */
+ public boolean hasTimestampStatistics() {
+ return ((bitField0_ & 0x00000100) == 0x00000100);
+ }
+ /**
+ * <code>optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;</code>
+ */
+ public org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics getTimestampStatistics() {
+ if (timestampStatisticsBuilder_ == null) {
+ return timestampStatistics_;
+ } else {
+ return timestampStatisticsBuilder_.getMessage();
+ }
+ }
+ /**
+ * <code>optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;</code>
+ */
+ public Builder setTimestampStatistics(org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics value) {
+ if (timestampStatisticsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ timestampStatistics_ = value;
+ onChanged();
+ } else {
+ timestampStatisticsBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000100;
+ return this;
+ }
+ /**
+ * <code>optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;</code>
+ */
+ public Builder setTimestampStatistics(
+ org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.Builder builderForValue) {
+ if (timestampStatisticsBuilder_ == null) {
+ timestampStatistics_ = builderForValue.build();
+ onChanged();
+ } else {
+ timestampStatisticsBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000100;
+ return this;
+ }
+ /**
+ * <code>optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;</code>
+ */
+ public Builder mergeTimestampStatistics(org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics value) {
+ if (timestampStatisticsBuilder_ == null) {
+ if (((bitField0_ & 0x00000100) == 0x00000100) &&
+ timestampStatistics_ != org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.getDefaultInstance()) {
+ timestampStatistics_ =
+ org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.newBuilder(timestampStatistics_).mergeFrom(value).buildPartial();
+ } else {
+ timestampStatistics_ = value;
+ }
+ onChanged();
+ } else {
+ timestampStatisticsBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000100;
+ return this;
+ }
+ /**
+ * <code>optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;</code>
+ */
+ public Builder clearTimestampStatistics() {
+ if (timestampStatisticsBuilder_ == null) {
+ timestampStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.getDefaultInstance();
+ onChanged();
+ } else {
+ timestampStatisticsBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000100);
+ return this;
+ }
+ /**
+ * <code>optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;</code>
+ */
+ public org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.Builder getTimestampStatisticsBuilder() {
+ bitField0_ |= 0x00000100;
+ onChanged();
+ return getTimestampStatisticsFieldBuilder().getBuilder();
+ }
+ /**
+ * <code>optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;</code>
+ */
+ public org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatisticsOrBuilder getTimestampStatisticsOrBuilder() {
+ if (timestampStatisticsBuilder_ != null) {
+ return timestampStatisticsBuilder_.getMessageOrBuilder();
+ } else {
+ return timestampStatistics_;
+ }
+ }
+ /**
+ * <code>optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;</code>
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics, org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.Builder, org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatisticsOrBuilder>
+ getTimestampStatisticsFieldBuilder() {
+ if (timestampStatisticsBuilder_ == null) {
+ timestampStatisticsBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics, org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.Builder, org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatisticsOrBuilder>(
+ timestampStatistics_,
+ getParentForChildren(),
+ isClean());
+ timestampStatistics_ = null;
+ }
+ return timestampStatisticsBuilder_;
+ }
+
// @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.ql.io.orc.ColumnStatistics)
}
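
A hedged sketch of how a reader might consume the new field 9 via the accessors added above; colStats stands in for any parsed ColumnStatistics message:

    // Only trust the timestamp range when the optional field is present.
    if (colStats.hasTimestampStatistics()) {
      OrcProto.TimestampStatistics ts = colStats.getTimestampStatistics();
      long minMillis = ts.getMinimum();  // milliseconds since epoch
      long maxMillis = ts.getMaximum();
      // e.g. a reader could skip a stripe whose [min, max] cannot satisfy a filter
    }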
@@ -16654,6 +17355,11 @@ public final class OrcProto {
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_org_apache_hadoop_hive_ql_io_orc_DateStatistics_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_apache_hadoop_hive_ql_io_orc_TimestampStatistics_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_apache_hadoop_hive_ql_io_orc_TimestampStatistics_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
internal_static_org_apache_hadoop_hive_ql_io_orc_BinaryStatistics_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
@@ -16742,74 +17448,78 @@ public final class OrcProto {
"nt\030\001 \003(\004B\002\020\001\"B\n\021DecimalStatistics\022\017\n\007min" +
"imum\030\001 \001(\t\022\017\n\007maximum\030\002 \001(\t\022\013\n\003sum\030\003 \001(\t" +
"\"2\n\016DateStatistics\022\017\n\007minimum\030\001 \001(\021\022\017\n\007m",
- "aximum\030\002 \001(\021\"\037\n\020BinaryStatistics\022\013\n\003sum\030" +
- "\001 \001(\022\"\310\004\n\020ColumnStatistics\022\026\n\016numberOfVa" +
- "lues\030\001 \001(\004\022J\n\rintStatistics\030\002 \001(\01323.org." +
- "apache.hadoop.hive.ql.io.orc.IntegerStat" +
- "istics\022L\n\020doubleStatistics\030\003 \001(\01322.org.a" +
- "pache.hadoop.hive.ql.io.orc.DoubleStatis" +
- "tics\022L\n\020stringStatistics\030\004 \001(\01322.org.apa" +
- "che.hadoop.hive.ql.io.orc.StringStatisti" +
- "cs\022L\n\020bucketStatistics\030\005 \001(\01322.org.apach" +
- "e.hadoop.hive.ql.io.orc.BucketStatistics",
- "\022N\n\021decimalStatistics\030\006 \001(\01323.org.apache" +
- ".hadoop.hive.ql.io.orc.DecimalStatistics" +
- "\022H\n\016dateStatistics\030\007 \001(\01320.org.apache.ha" +
- "doop.hive.ql.io.orc.DateStatistics\022L\n\020bi" +
- "naryStatistics\030\010 \001(\01322.org.apache.hadoop" +
- ".hive.ql.io.orc.BinaryStatistics\"n\n\rRowI" +
- "ndexEntry\022\025\n\tpositions\030\001 \003(\004B\002\020\001\022F\n\nstat" +
- "istics\030\002 \001(\01322.org.apache.hadoop.hive.ql" +
- ".io.orc.ColumnStatistics\"J\n\010RowIndex\022>\n\005" +
- "entry\030\001 \003(\0132/.org.apache.hadoop.hive.ql.",
- "io.orc.RowIndexEntry\"\331\001\n\006Stream\022;\n\004kind\030" +
- "\001 \002(\0162-.org.apache.hadoop.hive.ql.io.orc" +
- ".Stream.Kind\022\016\n\006column\030\002 \001(\r\022\016\n\006length\030\003" +
- " \001(\004\"r\n\004Kind\022\013\n\007PRESENT\020\000\022\010\n\004DATA\020\001\022\n\n\006L" +
- "ENGTH\020\002\022\023\n\017DICTIONARY_DATA\020\003\022\024\n\020DICTIONA" +
- "RY_COUNT\020\004\022\r\n\tSECONDARY\020\005\022\r\n\tROW_INDEX\020\006" +
- "\"\263\001\n\016ColumnEncoding\022C\n\004kind\030\001 \002(\01625.org." +
- "apache.hadoop.hive.ql.io.orc.ColumnEncod" +
- "ing.Kind\022\026\n\016dictionarySize\030\002 \001(\r\"D\n\004Kind" +
- "\022\n\n\006DIRECT\020\000\022\016\n\nDICTIONARY\020\001\022\r\n\tDIRECT_V",
- "2\020\002\022\021\n\rDICTIONARY_V2\020\003\"\214\001\n\014StripeFooter\022" +
- "9\n\007streams\030\001 \003(\0132(.org.apache.hadoop.hiv" +
- "e.ql.io.orc.Stream\022A\n\007columns\030\002 \003(\01320.or" +
- "g.apache.hadoop.hive.ql.io.orc.ColumnEnc" +
- "oding\"\370\002\n\004Type\0229\n\004kind\030\001 \002(\0162+.org.apach" +
- "e.hadoop.hive.ql.io.orc.Type.Kind\022\024\n\010sub" +
- "types\030\002 \003(\rB\002\020\001\022\022\n\nfieldNames\030\003 \003(\t\022\025\n\rm" +
- "aximumLength\030\004 \001(\r\022\021\n\tprecision\030\005 \001(\r\022\r\n" +
- "\005scale\030\006 \001(\r\"\321\001\n\004Kind\022\013\n\007BOOLEAN\020\000\022\010\n\004BY" +
- "TE\020\001\022\t\n\005SHORT\020\002\022\007\n\003INT\020\003\022\010\n\004LONG\020\004\022\t\n\005FL",
- "OAT\020\005\022\n\n\006DOUBLE\020\006\022\n\n\006STRING\020\007\022\n\n\006BINARY\020" +
- "\010\022\r\n\tTIMESTAMP\020\t\022\010\n\004LIST\020\n\022\007\n\003MAP\020\013\022\n\n\006S" +
- "TRUCT\020\014\022\t\n\005UNION\020\r\022\013\n\007DECIMAL\020\016\022\010\n\004DATE\020" +
- "\017\022\013\n\007VARCHAR\020\020\022\010\n\004CHAR\020\021\"x\n\021StripeInform" +
- "ation\022\016\n\006offset\030\001 \001(\004\022\023\n\013indexLength\030\002 \001" +
- "(\004\022\022\n\ndataLength\030\003 \001(\004\022\024\n\014footerLength\030\004" +
- " \001(\004\022\024\n\014numberOfRows\030\005 \001(\004\"/\n\020UserMetada" +
- "taItem\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \002(\014\"X\n\020S" +
- "tripeStatistics\022D\n\010colStats\030\001 \003(\01322.org." +
- "apache.hadoop.hive.ql.io.orc.ColumnStati",
- "stics\"S\n\010Metadata\022G\n\013stripeStats\030\001 \003(\01322" +
- ".org.apache.hadoop.hive.ql.io.orc.Stripe" +
- "Statistics\"\356\002\n\006Footer\022\024\n\014headerLength\030\001 " +
- "\001(\004\022\025\n\rcontentLength\030\002 \001(\004\022D\n\007stripes\030\003 " +
- "\003(\01323.org.apache.hadoop.hive.ql.io.orc.S" +
- "tripeInformation\0225\n\005types\030\004 \003(\0132&.org.ap" +
- "ache.hadoop.hive.ql.io.orc.Type\022D\n\010metad" +
- "ata\030\005 \003(\01322.org.apache.hadoop.hive.ql.io" +
- ".orc.UserMetadataItem\022\024\n\014numberOfRows\030\006 " +
- "\001(\004\022F\n\nstatistics\030\007 \003(\01322.org.apache.had",
- "oop.hive.ql.io.orc.ColumnStatistics\022\026\n\016r" +
- "owIndexStride\030\010 \001(\r\"\305\001\n\nPostScript\022\024\n\014fo" +
- "oterLength\030\001 \001(\004\022F\n\013compression\030\002 \001(\01621." +
- "org.apache.hadoop.hive.ql.io.orc.Compres" +
- "sionKind\022\034\n\024compressionBlockSize\030\003 \001(\004\022\023" +
- "\n\007version\030\004 \003(\rB\002\020\001\022\026\n\016metadataLength\030\005 " +
- "\001(\004\022\016\n\005magic\030\300> \001(\t*:\n\017CompressionKind\022\010" +
- "\n\004NONE\020\000\022\010\n\004ZLIB\020\001\022\n\n\006SNAPPY\020\002\022\007\n\003LZO\020\003"
+ "aximum\030\002 \001(\021\"7\n\023TimestampStatistics\022\017\n\007m" +
+ "inimum\030\001 \001(\022\022\017\n\007maximum\030\002 \001(\022\"\037\n\020BinaryS" +
+ "tatistics\022\013\n\003sum\030\001 \001(\022\"\234\005\n\020ColumnStatist" +
+ "ics\022\026\n\016numberOfValues\030\001 \001(\004\022J\n\rintStatis" +
+ "tics\030\002 \001(\01323.org.apache.hadoop.hive.ql.i" +
+ "o.orc.IntegerStatistics\022L\n\020doubleStatist" +
+ "ics\030\003 \001(\01322.org.apache.hadoop.hive.ql.io" +
+ ".orc.DoubleStatistics\022L\n\020stringStatistic" +
+ "s\030\004 \001(\01322.org.apache.hadoop.hive.ql.io.o" +
+ "rc.StringStatistics\022L\n\020bucketStatistics\030",
+ "\005 \001(\01322.org.apache.hadoop.hive.ql.io.orc" +
+ ".BucketStatistics\022N\n\021decimalStatistics\030\006" +
+ " \001(\01323.org.apache.hadoop.hive.ql.io.orc." +
+ "DecimalStatistics\022H\n\016dateStatistics\030\007 \001(" +
+ "\01320.org.apache.hadoop.hive.ql.io.orc.Dat" +
+ "eStatistics\022L\n\020binaryStatistics\030\010 \001(\01322." +
+ "org.apache.hadoop.hive.ql.io.orc.BinaryS" +
+ "tatistics\022R\n\023timestampStatistics\030\t \001(\01325" +
+ ".org.apache.hadoop.hive.ql.io.orc.Timest" +
+ "ampStatistics\"n\n\rRowIndexEntry\022\025\n\tpositi",
+ "ons\030\001 \003(\004B\002\020\001\022F\n\nstatistics\030\002 \001(\01322.org." +
+ "apache.hadoop.hive.ql.io.orc.ColumnStati" +
+ "stics\"J\n\010RowIndex\022>\n\005entry\030\001 \003(\0132/.org.a" +
+ "pache.hadoop.hive.ql.io.orc.RowIndexEntr" +
+ "y\"\331\001\n\006Stream\022;\n\004kind\030\001 \002(\0162-.org.apache." +
+ "hadoop.hive.ql.io.orc.Stream.Kind\022\016\n\006col" +
+ "umn\030\002 \001(\r\022\016\n\006length\030\003 \001(\004\"r\n\004Kind\022\013\n\007PRE" +
+ "SENT\020\000\022\010\n\004DATA\020\001\022\n\n\006LENGTH\020\002\022\023\n\017DICTIONA" +
+ "RY_DATA\020\003\022\024\n\020DICTIONARY_COUNT\020\004\022\r\n\tSECON" +
+ "DARY\020\005\022\r\n\tROW_INDEX\020\006\"\263\001\n\016ColumnEncoding",
+ "\022C\n\004kind\030\001 \002(\01625.org.apache.hadoop.hive." +
+ "ql.io.orc.ColumnEncoding.Kind\022\026\n\016diction" +
+ "arySize\030\002 \001(\r\"D\n\004Kind\022\n\n\006DIRECT\020\000\022\016\n\nDIC" +
+ "TIONARY\020\001\022\r\n\tDIRECT_V2\020\002\022\021\n\rDICTIONARY_V" +
+ "2\020\003\"\214\001\n\014StripeFooter\0229\n\007streams\030\001 \003(\0132(." +
+ "org.apache.hadoop.hive.ql.io.orc.Stream\022" +
+ "A\n\007columns\030\002 \003(\01320.org.apache.hadoop.hiv" +
+ "e.ql.io.orc.ColumnEncoding\"\370\002\n\004Type\0229\n\004k" +
+ "ind\030\001 \002(\0162+.org.apache.hadoop.hive.ql.io" +
+ ".orc.Type.Kind\022\024\n\010subtypes\030\002 \003(\rB\002\020\001\022\022\n\n",
+ "fieldNames\030\003 \003(\t\022\025\n\rmaximumLength\030\004 \001(\r\022" +
+ "\021\n\tprecision\030\005 \001(\r\022\r\n\005scale\030\006 \001(\r\"\321\001\n\004Ki" +
+ "nd\022\013\n\007BOOLEAN\020\000\022\010\n\004BYTE\020\001\022\t\n\005SHORT\020\002\022\007\n\003" +
+ "INT\020\003\022\010\n\004LONG\020\004\022\t\n\005FLOAT\020\005\022\n\n\006DOUBLE\020\006\022\n" +
+ "\n\006STRING\020\007\022\n\n\006BINARY\020\010\022\r\n\tTIMESTAMP\020\t\022\010\n" +
+ "\004LIST\020\n\022\007\n\003MAP\020\013\022\n\n\006STRUCT\020\014\022\t\n\005UNION\020\r\022" +
+ "\013\n\007DECIMAL\020\016\022\010\n\004DATE\020\017\022\013\n\007VARCHAR\020\020\022\010\n\004C" +
+ "HAR\020\021\"x\n\021StripeInformation\022\016\n\006offset\030\001 \001" +
+ "(\004\022\023\n\013indexLength\030\002 \001(\004\022\022\n\ndataLength\030\003 " +
+ "\001(\004\022\024\n\014footerLength\030\004 \001(\004\022\024\n\014numberOfRow",
+ "s\030\005 \001(\004\"/\n\020UserMetadataItem\022\014\n\004name\030\001 \002(" +
+ "\t\022\r\n\005value\030\002 \002(\014\"X\n\020StripeStatistics\022D\n\010" +
+ "colStats\030\001 \003(\01322.org.apache.hadoop.hive." +
+ "ql.io.orc.ColumnStatistics\"S\n\010Metadata\022G" +
+ "\n\013stripeStats\030\001 \003(\01322.org.apache.hadoop." +
+ "hive.ql.io.orc.StripeStatistics\"\356\002\n\006Foot" +
+ "er\022\024\n\014headerLength\030\001 \001(\004\022\025\n\rcontentLengt" +
+ "h\030\002 \001(\004\022D\n\007stripes\030\003 \003(\01323.org.apache.ha" +
+ "doop.hive.ql.io.orc.StripeInformation\0225\n" +
+ "\005types\030\004 \003(\0132&.org.apache.hadoop.hive.ql",
+ ".io.orc.Type\022D\n\010metadata\030\005 \003(\01322.org.apa" +
+ "che.hadoop.hive.ql.io.orc.UserMetadataIt" +
+ "em\022\024\n\014numberOfRows\030\006 \001(\004\022F\n\nstatistics\030\007" +
+ " \003(\01322.org.apache.hadoop.hive.ql.io.orc." +
+ "ColumnStatistics\022\026\n\016rowIndexStride\030\010 \001(\r" +
+ "\"\305\001\n\nPostScript\022\024\n\014footerLength\030\001 \001(\004\022F\n" +
+ "\013compression\030\002 \001(\01621.org.apache.hadoop.h" +
+ "ive.ql.io.orc.CompressionKind\022\034\n\024compres" +
+ "sionBlockSize\030\003 \001(\004\022\023\n\007version\030\004 \003(\rB\002\020\001" +
+ "\022\026\n\016metadataLength\030\005 \001(\004\022\016\n\005magic\030\300> \001(\t",
+ "*:\n\017CompressionKind\022\010\n\004NONE\020\000\022\010\n\004ZLIB\020\001\022" +
+ "\n\n\006SNAPPY\020\002\022\007\n\003LZO\020\003"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -16852,86 +17562,92 @@ public final class OrcProto {
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_hadoop_hive_ql_io_orc_DateStatistics_descriptor,
new java.lang.String[] { "Minimum", "Maximum", });
- internal_static_org_apache_hadoop_hive_ql_io_orc_BinaryStatistics_descriptor =
+ internal_static_org_apache_hadoop_hive_ql_io_orc_TimestampStatistics_descriptor =
getDescriptor().getMessageTypes().get(6);
+ internal_static_org_apache_hadoop_hive_ql_io_orc_TimestampStatistics_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_apache_hadoop_hive_ql_io_orc_TimestampStatistics_descriptor,
+ new java.lang.String[] { "Minimum", "Maximum", });
+ internal_static_org_apache_hadoop_hive_ql_io_orc_BinaryStatistics_descriptor =
+ getDescriptor().getMessageTypes().get(7);
internal_static_org_apache_hadoop_hive_ql_io_orc_BinaryStatistics_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_hadoop_hive_ql_io_orc_BinaryStatistics_descriptor,
new java.lang.String[] { "Sum", });
internal_static_org_apache_hadoop_hive_ql_io_orc_ColumnStatistics_descriptor =
- getDescriptor().getMessageTypes().get(7);
+ getDescriptor().getMessageTypes().get(8);
internal_static_org_apache_hadoop_hive_ql_io_orc_ColumnStatistics_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_hadoop_hive_ql_io_orc_ColumnStatistics_descriptor,
- new java.lang.String[] { "NumberOfValues", "IntStatistics", "DoubleStatistics", "StringStatistics", "BucketStatistics", "DecimalStatistics", "DateStatistics", "BinaryStatistics", });
+ new java.lang.String[] { "NumberOfValues", "IntStatistics", "DoubleStatistics", "StringStatistics", "BucketStatistics", "DecimalStatistics", "DateStatistics", "BinaryStatistics", "TimestampStatistics", });
internal_static_org_apache_hadoop_hive_ql_io_orc_RowIndexEntry_descriptor =
- getDescriptor().getMessageTypes().get(8);
+ getDescriptor().getMessageTypes().get(9);
internal_static_org_apache_hadoop_hive_ql_io_orc_RowIndexEntry_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_hadoop_hive_ql_io_orc_RowIndexEntry_descriptor,
new java.lang.String[] { "Positions", "Statistics", });
internal_static_org_apache_hadoop_hive_ql_io_orc_RowIndex_descriptor =
- getDescriptor().getMessageTypes().get(9);
+ getDescriptor().getMessageTypes().get(10);
internal_static_org_apache_hadoop_hive_ql_io_orc_RowIndex_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_hadoop_hive_ql_io_orc_RowIndex_descriptor,
new java.lang.String[] { "Entry", });
internal_static_org_apache_hadoop_hive_ql_io_orc_Stream_descriptor =
- getDescriptor().getMessageTypes().get(10);
+ getDescriptor().getMessageTypes().get(11);
internal_static_org_apache_hadoop_hive_ql_io_orc_Stream_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_hadoop_hive_ql_io_orc_Stream_descriptor,
new java.lang.String[] { "Kind", "Column", "Length", });
internal_static_org_apache_hadoop_hive_ql_io_orc_ColumnEncoding_descriptor =
- getDescriptor().getMessageTypes().get(11);
+ getDescriptor().getMessageTypes().get(12);
internal_static_org_apache_hadoop_hive_ql_io_orc_ColumnEncoding_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_hadoop_hive_ql_io_orc_ColumnEncoding_descriptor,
new java.lang.String[] { "Kind", "DictionarySize", });
internal_static_org_apache_hadoop_hive_ql_io_orc_StripeFooter_descriptor =
- getDescriptor().getMessageTypes().get(12);
+ getDescriptor().getMessageTypes().get(13);
internal_static_org_apache_hadoop_hive_ql_io_orc_StripeFooter_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_hadoop_hive_ql_io_orc_StripeFooter_descriptor,
new java.lang.String[] { "Streams", "Columns", });
internal_static_org_apache_hadoop_hive_ql_io_orc_Type_descriptor =
- getDescriptor().getMessageTypes().get(13);
+ getDescriptor().getMessageTypes().get(14);
internal_static_org_apache_hadoop_hive_ql_io_orc_Type_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_hadoop_hive_ql_io_orc_Type_descriptor,
new java.lang.String[] { "Kind", "Subtypes", "FieldNames", "MaximumLength", "Precision", "Scale", });
internal_static_org_apache_hadoop_hive_ql_io_orc_StripeInformation_descriptor =
- getDescriptor().getMessageTypes().get(14);
+ getDescriptor().getMessageTypes().get(15);
internal_static_org_apache_hadoop_hive_ql_io_orc_StripeInformation_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_hadoop_hive_ql_io_orc_StripeInformation_descriptor,
new java.lang.String[] { "Offset", "IndexLength", "DataLength", "FooterLength", "NumberOfRows", });
internal_static_org_apache_hadoop_hive_ql_io_orc_UserMetadataItem_descriptor =
- getDescriptor().getMessageTypes().get(15);
+ getDescriptor().getMessageTypes().get(16);
internal_static_org_apache_hadoop_hive_ql_io_orc_UserMetadataItem_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_hadoop_hive_ql_io_orc_UserMetadataItem_descriptor,
new java.lang.String[] { "Name", "Value", });
internal_static_org_apache_hadoop_hive_ql_io_orc_StripeStatistics_descriptor =
- getDescriptor().getMessageTypes().get(16);
+ getDescriptor().getMessageTypes().get(17);
internal_static_org_apache_hadoop_hive_ql_io_orc_StripeStatistics_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_hadoop_hive_ql_io_orc_StripeStatistics_descriptor,
new java.lang.String[] { "ColStats", });
internal_static_org_apache_hadoop_hive_ql_io_orc_Metadata_descriptor =
- getDescriptor().getMessageTypes().get(17);
+ getDescriptor().getMessageTypes().get(18);
internal_static_org_apache_hadoop_hive_ql_io_orc_Metadata_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_hadoop_hive_ql_io_orc_Metadata_descriptor,
new java.lang.String[] { "StripeStats", });
internal_static_org_apache_hadoop_hive_ql_io_orc_Footer_descriptor =
- getDescriptor().getMessageTypes().get(18);
+ getDescriptor().getMessageTypes().get(19);
internal_static_org_apache_hadoop_hive_ql_io_orc_Footer_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_hadoop_hive_ql_io_orc_Footer_descriptor,
new java.lang.String[] { "HeaderLength", "ContentLength", "Stripes", "Types", "Metadata", "NumberOfRows", "Statistics", "RowIndexStride", });
internal_static_org_apache_hadoop_hive_ql_io_orc_PostScript_descriptor =
- getDescriptor().getMessageTypes().get(19);
+ getDescriptor().getMessageTypes().get(20);
internal_static_org_apache_hadoop_hive_ql_io_orc_PostScript_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_hadoop_hive_ql_io_orc_PostScript_descriptor,
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java?rev=1621031&r1=1621030&r2=1621031&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java Thu Aug 28 03:15:13 2014
@@ -92,8 +92,8 @@ public class FileSinkOperator extends Te
protected transient ListBucketingCtx lbCtx;
protected transient boolean isSkewedStoredAsSubDirectories;
protected transient boolean statsCollectRawDataSize;
- private transient boolean[] statsFromRecordWriter;
- private transient boolean isCollectRWStats;
+ protected transient boolean[] statsFromRecordWriter;
+ protected transient boolean isCollectRWStats;
private transient FSPaths prevFsp;
private transient FSPaths fpaths;
private transient ObjectInspector keyOI;
@@ -626,7 +626,7 @@ public class FileSinkOperator extends Te
}
}
- private boolean areAllTrue(boolean[] statsFromRW) {
+ protected boolean areAllTrue(boolean[] statsFromRW) {
for(boolean b : statsFromRW) {
if (!b) {
return false;
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java?rev=1621031&r1=1621030&r2=1621031&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java Thu Aug 28 03:15:13 2014
@@ -111,6 +111,7 @@ import org.apache.hadoop.hive.ql.exec.mr
import org.apache.hadoop.hive.ql.exec.mr.ExecMapper;
import org.apache.hadoop.hive.ql.exec.mr.ExecReducer;
import org.apache.hadoop.hive.ql.exec.mr.MapRedTask;
+import org.apache.hadoop.hive.ql.exec.tez.DagUtils;
import org.apache.hadoop.hive.ql.exec.tez.TezTask;
import org.apache.hadoop.hive.ql.io.ContentSummaryInputFormat;
import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
@@ -1538,6 +1539,24 @@ public final class Utilities {
Pattern.compile("^.*?([0-9]+)(_[0-9]{1,6})?(\\..*)?$");
/**
+ * Some jobs like "INSERT INTO" jobs create copies of files like 0000001_0_copy_2.
+ * For such files,
+ * Group 1: 0000001 [taskId]
+ * Group 3: 0 [task attemptId]
+ * Group 4: _copy_2 [copy suffix]
+ * Group 6: copy [copy keyword]
+ * Group 8: 2 [copy file index]
+ */
+ private static final Pattern COPY_FILE_NAME_TO_TASK_ID_REGEX =
+ Pattern.compile("^.*?"+ // any prefix
+ "([0-9]+)"+ // taskId
+ "(_)"+ // separator
+ "([0-9]{1,6})?"+ // attemptId (limited to 6 digits)
+ "((_)(\\Bcopy\\B)(_)"+ // copy keyword
+ "([0-9]{1,6})$)?"+ // copy file index
+ "(\\..*)?$"); // any suffix/file extension
+
+ /**
* This returns the prefix part + taskID for bucket join for partitioned tables
*/
private static final Pattern FILE_NAME_PREFIXED_TASK_ID_REGEX =
@@ -1862,21 +1881,42 @@ public final class Utilities {
// speculative runs), but the largest should be the correct one since the result
// of a successful run should never be smaller than a failed/speculative run.
FileStatus toDelete = null;
- if (otherFile.getLen() >= one.getLen()) {
- toDelete = one;
- } else {
- toDelete = otherFile;
- taskIdToFile.put(taskId, one);
- }
- long len1 = toDelete.getLen();
- long len2 = taskIdToFile.get(taskId).getLen();
- if (!fs.delete(toDelete.getPath(), true)) {
- throw new IOException("Unable to delete duplicate file: " + toDelete.getPath()
- + ". Existing file: " + taskIdToFile.get(taskId).getPath());
+
+ // "LOAD .. INTO" and "INSERT INTO" commands will generate files with
+ // "_copy_x" suffix. These files are usually read by map tasks and the
+ // task output gets written to some tmp path. The output file names will
+ // be of format taskId_attemptId. The usual path for all these tasks is
+ // srcPath -> taskTmpPath -> tmpPath -> finalPath.
+ // But, MergeFileTask can move files directly from src path to final path
+ // without copying it to tmp path. In such cases, different files with
+ // "_copy_x" suffix will be identified as duplicates (change in value
+ // of x is wrongly identified as attempt id) and will be deleted.
+ // To avoid that we will ignore files with "_copy_x" suffix from duplicate
+ // elimination.
+ if (!isCopyFile(one.getPath().getName())) {
+ if (otherFile.getLen() >= one.getLen()) {
+ toDelete = one;
+ } else {
+ toDelete = otherFile;
+ taskIdToFile.put(taskId, one);
+ }
+ long len1 = toDelete.getLen();
+ long len2 = taskIdToFile.get(taskId).getLen();
+ if (!fs.delete(toDelete.getPath(), true)) {
+ throw new IOException(
+ "Unable to delete duplicate file: " + toDelete.getPath()
+ + ". Existing file: " +
+ taskIdToFile.get(taskId).getPath());
+ } else {
+ LOG.warn("Duplicate taskid file removed: " + toDelete.getPath() +
+ " with length "
+ + len1 + ". Existing file: " +
+ taskIdToFile.get(taskId).getPath() + " with length "
+ + len2);
+ }
} else {
- LOG.warn("Duplicate taskid file removed: " + toDelete.getPath() + " with length "
- + len1 + ". Existing file: " + taskIdToFile.get(taskId).getPath() + " with length "
- + len2);
+ LOG.info(one.getPath() + " file identified as duplicate. This file is" +
+ " not deleted as it has copySuffix.");
}
}
}
@@ -1884,6 +1924,29 @@ public final class Utilities {
return taskIdToFile;
}
+ public static boolean isCopyFile(String filename) {
+ String taskId = filename;
+ String copyFileSuffix = null;
+ int dirEnd = filename.lastIndexOf(Path.SEPARATOR);
+ if (dirEnd != -1) {
+ taskId = filename.substring(dirEnd + 1);
+ }
+ Matcher m = COPY_FILE_NAME_TO_TASK_ID_REGEX.matcher(taskId);
+ if (!m.matches()) {
+ LOG.warn("Unable to verify if file name " + filename + " has _copy_ suffix.");
+ } else {
+ taskId = m.group(1);
+ copyFileSuffix = m.group(4);
+ }
+
+ LOG.debug("Filename: " + filename + " TaskId: " + taskId + " CopySuffix: " + copyFileSuffix);
+ if (taskId != null && copyFileSuffix != null) {
+ return true;
+ }
+
+ return false;
+ }
+
public static String getNameMessage(Exception e) {
return e.getClass().getName() + "(" + e.getMessage() + ")";
}
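
A small self-contained sketch (plain java.util.regex; the sample file names are hypothetical) of how the copy-file pattern and isCopyFile above classify names, matching the group layout documented with the regex:

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class CopyFileNameDemo {
      // Same pattern as COPY_FILE_NAME_TO_TASK_ID_REGEX above.
      private static final Pattern P = Pattern.compile(
          "^.*?([0-9]+)(_)([0-9]{1,6})?((_)(\\Bcopy\\B)(_)([0-9]{1,6})$)?(\\..*)?$");

      public static void main(String[] args) {
        for (String name : new String[] { "0000001_0", "0000001_0_copy_2" }) {
          Matcher m = P.matcher(name);
          if (m.matches()) {
            // group(1) = taskId, group(4) = copy suffix (null when absent)
            System.out.println(name + " -> taskId=" + m.group(1)
                + " copySuffix=" + m.group(4));
          }
        }
      }
    }

Running it prints taskId=0000001 with copySuffix=null for the first name and copySuffix=_copy_2 for the second, which is exactly the distinction isCopyFile relies on.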
@@ -3012,7 +3075,7 @@ public final class Utilities {
* so we don't want to depend on scratch dir and context.
*/
public static List<Path> getInputPathsTez(JobConf job, MapWork work) throws Exception {
- String scratchDir = HiveConf.getVar(job, HiveConf.ConfVars.SCRATCHDIR);
+ String scratchDir = job.get(DagUtils.TEZ_TMP_DIR_KEY);
// we usually don't want to create dummy files for tez, however the metadata only
// optimization relies on it.
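
The scratch-dir lookup above now comes from a per-vertex key rather than HiveConf; a minimal sketch of the round trip, with baseConf and context as hypothetical stand-ins (the producing side is the DagUtils change below):

    // Producer (DagUtils.initializeVertexConf): stash the query's Tez tmp dir.
    JobConf conf = new JobConf(baseConf);
    conf.set(DagUtils.TEZ_TMP_DIR_KEY, context.getMRTmpPath().toUri().toString());

    // Consumer (Utilities.getInputPathsTez): read it back without a HiveConf lookup.
    String scratchDir = conf.get(DagUtils.TEZ_TMP_DIR_KEY);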
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java?rev=1621031&r1=1621030&r2=1621031&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java Thu Aug 28 03:15:13 2014
@@ -124,6 +124,7 @@ import com.google.protobuf.ByteString;
*/
public class DagUtils {
+ public static final String TEZ_TMP_DIR_KEY = "_hive_tez_tmp_dir";
private static final Log LOG = LogFactory.getLog(DagUtils.class.getName());
private static final String TEZ_DIR = "_tez_scratch_dir";
private static DagUtils instance;
@@ -158,7 +159,7 @@ public class DagUtils {
* Creates the configuration object necessary to run a specific vertex from
* map work. This includes input formats, input processor, etc.
*/
- private JobConf initializeVertexConf(JobConf baseConf, MapWork mapWork) {
+ private JobConf initializeVertexConf(JobConf baseConf, Context context, MapWork mapWork) {
JobConf conf = new JobConf(baseConf);
if (mapWork.getNumMapTasks() != null) {
@@ -200,6 +201,7 @@ public class DagUtils {
inpFormat = CombineHiveInputFormat.class.getName();
}
+ conf.set(TEZ_TMP_DIR_KEY, context.getMRTmpPath().toUri().toString());
conf.set("mapred.mapper.class", ExecMapper.class.getName());
conf.set("mapred.input.format.class", inpFormat);
@@ -524,7 +526,7 @@ public class DagUtils {
/*
* Helper function to create JobConf for specific ReduceWork.
*/
- private JobConf initializeVertexConf(JobConf baseConf, ReduceWork reduceWork) {
+ private JobConf initializeVertexConf(JobConf baseConf, Context context, ReduceWork reduceWork) {
JobConf conf = new JobConf(baseConf);
conf.set("mapred.reducer.class", ExecReducer.class.getName());
@@ -896,14 +898,14 @@ public class DagUtils {
* @param work BaseWork will be used to populate the configuration object.
* @return JobConf new configuration object
*/
- public JobConf initializeVertexConf(JobConf conf, BaseWork work) {
+ public JobConf initializeVertexConf(JobConf conf, Context context, BaseWork work) {
// simply dispatch the call to the right method for the actual (sub-) type of
// BaseWork.
if (work instanceof MapWork) {
- return initializeVertexConf(conf, (MapWork)work);
+ return initializeVertexConf(conf, context, (MapWork)work);
} else if (work instanceof ReduceWork) {
- return initializeVertexConf(conf, (ReduceWork)work);
+ return initializeVertexConf(conf, context, (ReduceWork)work);
} else {
assert false;
return null;
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java?rev=1621031&r1=1621030&r2=1621031&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java Thu Aug 28 03:15:13 2014
@@ -263,7 +263,7 @@ public class TezTask extends Task<TezWor
}
} else {
// Regular vertices
- JobConf wxConf = utils.initializeVertexConf(conf, w);
+ JobConf wxConf = utils.initializeVertexConf(conf, ctx, w);
Vertex wx = utils.createVertex(wxConf, w, scratchDir, appJarLr,
additionalLr, fs, ctx, !isFinal, work);
dag.addVertex(wx);
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFileSinkOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFileSinkOperator.java?rev=1621031&r1=1621030&r2=1621031&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFileSinkOperator.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFileSinkOperator.java Thu Aug 28 03:15:13 2014
@@ -145,7 +145,11 @@ public class VectorFileSinkOperator exte
}
rowOutWriters = fpaths.getOutWriters();
- if (conf.isGatherStats()) {
+ // check if all record writers implement statistics. if at least one RW
+ // doesn't implement the stats interface we will fall back to the conventional
+ // way of gathering stats
+ isCollectRWStats = areAllTrue(statsFromRecordWriter);
+ if (conf.isGatherStats() && !isCollectRWStats) {
if (statsCollectRawDataSize) {
SerDeStats stats = serializer.getSerDeStats();
if (stats != null) {
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/hooks/Entity.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/hooks/Entity.java?rev=1621031&r1=1621030&r2=1621031&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/hooks/Entity.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/hooks/Entity.java Thu Aug 28 03:15:13 2014
@@ -22,6 +22,7 @@ import java.io.Serializable;
import java.net.URI;
import java.util.Map;
+import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.ql.metadata.DummyPartition;
import org.apache.hadoop.hive.ql.metadata.Partition;
@@ -276,11 +277,13 @@ public class Entity implements Serializa
}
if (typ == Type.TABLE) {
- return t.getDataLocation().toUri();
+ Path path = t.getDataLocation();
+ return path == null ? null : path.toUri();
}
if (typ == Type.PARTITION) {
- return p.getDataLocation().toUri();
+ Path path = p.getDataLocation();
+ return path == null ? null : path.toUri();
}
if (typ == Type.DFS_DIR || typ == Type.LOCAL_DIR) {
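
With this guard, toUri() now returns null instead of throwing a
NullPointerException for entities that have no data location. Callers are
expected to null-check; a minimal usage sketch (entity and process() are
hypothetical):

    URI uri = entity.toUri();
    if (uri != null) {
      // only entities backed by a physical location reach here
      process(uri);
    }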
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/hooks/UpdateInputAccessTimeHook.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/hooks/UpdateInputAccessTimeHook.java?rev=1621031&r1=1621030&r2=1621031&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/hooks/UpdateInputAccessTimeHook.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/hooks/UpdateInputAccessTimeHook.java Thu Aug 28 03:15:13 2014
@@ -66,7 +66,7 @@ public class UpdateInputAccessTimeHook {
case TABLE: {
Table t = db.getTable(re.getTable().getTableName());
t.setLastAccessTime(lastAccessTime);
- db.alterTable(t.getTableName(), t);
+ db.alterTable(t.getDbName() + "." + t.getTableName(), t);
break;
}
case PARTITION: {
@@ -76,7 +76,7 @@ public class UpdateInputAccessTimeHook {
p.setLastAccessTime(lastAccessTime);
db.alterPartition(t.getTableName(), p);
t.setLastAccessTime(lastAccessTime);
- db.alterTable(t.getTableName(), t);
+ db.alterTable(t.getDbName() + "." + t.getTableName(), t);
break;
}
default:
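
Qualifying the name as "db.table" keeps alterTable from resolving against
the session's current database when the accessed table lives elsewhere; the
same qualification is applied in IndexMetadataChangeTask below. For
illustration, with a hypothetical table "orders" in database "sales":

    // resolves to sales.orders regardless of the session's current database
    db.alterTable(t.getDbName() + "." + t.getTableName(), t);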
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/index/IndexMetadataChangeTask.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/index/IndexMetadataChangeTask.java?rev=1621031&r1=1621030&r2=1621031&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/index/IndexMetadataChangeTask.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/index/IndexMetadataChangeTask.java Thu Aug 28 03:15:13 2014
@@ -77,7 +77,7 @@ public class IndexMetadataChangeTask ext
FileSystem fs = url.getFileSystem(conf);
FileStatus fstat = fs.getFileStatus(url);
tbl.getParameters().put(HiveIndex.INDEX_TABLE_CREATETIME, Long.toString(fstat.getModificationTime()));
- db.alterTable(tbl.getTableName(), tbl);
+ db.alterTable(tbl.getDbName() + "." + tbl.getTableName(), tbl);
}
} catch (Exception e) {
e.printStackTrace();
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapIndexHandler.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapIndexHandler.java?rev=1621031&r1=1621030&r2=1621031&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapIndexHandler.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapIndexHandler.java Thu Aug 28 03:15:13 2014
@@ -233,7 +233,8 @@ public class BitmapIndexHandler extends
StringBuilder command= new StringBuilder();
LinkedHashMap<String, String> partSpec = indexTblPartDesc.getPartSpec();
- command.append("INSERT OVERWRITE TABLE " + HiveUtils.unparseIdentifier(indexTableName ));
+ command.append("INSERT OVERWRITE TABLE " +
+ HiveUtils.unparseIdentifier(dbName) + "." + HiveUtils.unparseIdentifier(indexTableName));
if (partitioned && indexTblPartDesc != null) {
command.append(" PARTITION ( ");
List<String> ret = getPartKVPairStringArray(partSpec);
@@ -257,7 +258,8 @@ public class BitmapIndexHandler extends
command.append("EWAH_BITMAP(");
command.append(VirtualColumn.ROWOFFSET.getName());
command.append(")");
- command.append(" FROM " + HiveUtils.unparseIdentifier(baseTableName) );
+ command.append(" FROM " +
+ HiveUtils.unparseIdentifier(dbName) + "." + HiveUtils.unparseIdentifier(baseTableName));
LinkedHashMap<String, String> basePartSpec = baseTablePartDesc.getPartSpec();
if(basePartSpec != null) {
command.append(" WHERE ");
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/index/compact/CompactIndexHandler.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/index/compact/CompactIndexHandler.java?rev=1621031&r1=1621030&r2=1621031&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/index/compact/CompactIndexHandler.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/index/compact/CompactIndexHandler.java Thu Aug 28 03:15:13 2014
@@ -103,7 +103,8 @@ public class CompactIndexHandler extends
StringBuilder command= new StringBuilder();
LinkedHashMap<String, String> partSpec = indexTblPartDesc.getPartSpec();
- command.append("INSERT OVERWRITE TABLE " + HiveUtils.unparseIdentifier(indexTableName ));
+ command.append("INSERT OVERWRITE TABLE " +
+ HiveUtils.unparseIdentifier(dbName) + "." + HiveUtils.unparseIdentifier(indexTableName));
if (partitioned && indexTblPartDesc != null) {
command.append(" PARTITION ( ");
List<String> ret = getPartKVPairStringArray(partSpec);
@@ -126,7 +127,8 @@ public class CompactIndexHandler extends
command.append(" collect_set (");
command.append(VirtualColumn.BLOCKOFFSET.getName());
command.append(") ");
- command.append(" FROM " + HiveUtils.unparseIdentifier(baseTableName) );
+ command.append(" FROM " +
+ HiveUtils.unparseIdentifier(dbName) + "." + HiveUtils.unparseIdentifier(baseTableName));
LinkedHashMap<String, String> basePartSpec = baseTablePartDesc.getPartSpec();
if(basePartSpec != null) {
command.append(" WHERE ");
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeMapper.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeMapper.java?rev=1621031&r1=1621030&r2=1621031&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeMapper.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeMapper.java Thu Aug 28 03:15:13 2014
@@ -18,8 +18,6 @@
package org.apache.hadoop.hive.ql.io.merge;
-import java.io.IOException;
-
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileStatus;
@@ -31,6 +29,10 @@ import org.apache.hadoop.hive.ql.metadat
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Set;
+
public class MergeMapper extends MapReduceBase {
protected JobConf jc;
protected Path finalPath;
@@ -48,6 +50,7 @@ public class MergeMapper extends MapRedu
protected Path tmpPath;
protected Path taskTmpPath;
protected Path dpPath;
+ protected Set<Path> incompatFileSet;
public final static Log LOG = LogFactory.getLog("MergeMapper");
@@ -62,6 +65,7 @@ public class MergeMapper extends MapRedu
HiveConf.ConfVars.HIVEMERGECURRENTJOBCONCATENATELISTBUCKETINGDEPTH);
Path specPath = MergeOutputFormat.getMergeOutputPath(job);
+ incompatFileSet = new HashSet<Path>();
Path tmpPath = Utilities.toTempPath(specPath);
Path taskTmpPath = Utilities.toTaskTempPath(specPath);
updatePaths(tmpPath, taskTmpPath);
@@ -176,6 +180,23 @@ public class MergeMapper extends MapRedu
if (!fs.rename(outPath, finalPath)) {
throw new IOException("Unable to rename output to " + finalPath);
}
+
+ // move any incompatible files to final path
+ if (!incompatFileSet.isEmpty()) {
+ for (Path incompatFile : incompatFileSet) {
+ String fileName = incompatFile.getName();
+ Path destFile = new Path(finalPath.getParent(), fileName);
+ try {
+ Utilities.renameOrMoveFiles(fs, incompatFile, destFile);
+ LOG.info("Moved incompatible file " + incompatFile + " to "
+ + destFile);
+ } catch (HiveException e) {
+ LOG.error("Unable to move " + incompatFile + " to " + destFile);
+ throw new IOException(e);
+ }
+ }
+ }
+
} else {
if (!autoDelete) {
fs.delete(outPath, true);
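
Rather than failing the whole merge task, inputs that could not be merged
are now relocated unchanged into the directory holding the merged output
(finalPath.getParent()). A hypothetical layout after close(), assuming one
incompatible input (paths invented):

    /warehouse/t/_tmp.-ext-10000/000000_0    merged output (finalPath)
    /warehouse/t/_tmp.-ext-10000/000001_0    incompatible input, moved verbatim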
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ColumnStatisticsImpl.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ColumnStatisticsImpl.java?rev=1621031&r1=1621030&r2=1621031&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ColumnStatisticsImpl.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ColumnStatisticsImpl.java Thu Aug 28 03:15:13 2014
@@ -17,6 +17,8 @@
*/
package org.apache.hadoop.hive.ql.io.orc;
+import java.sql.Timestamp;
+
import org.apache.hadoop.hive.common.type.HiveDecimal;
import org.apache.hadoop.hive.serde2.io.DateWritable;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
@@ -716,6 +718,99 @@ class ColumnStatisticsImpl implements Co
}
}
+ private static final class TimestampStatisticsImpl extends ColumnStatisticsImpl
+ implements TimestampColumnStatistics {
+ private Long minimum = null;
+ private Long maximum = null;
+
+ TimestampStatisticsImpl() {
+ }
+
+ TimestampStatisticsImpl(OrcProto.ColumnStatistics stats) {
+ super(stats);
+ OrcProto.TimestampStatistics timestampStats = stats.getTimestampStatistics();
+ // min,max values serialized/deserialized as sint64 longs (milliseconds since epoch)
+ if (timestampStats.hasMaximum()) {
+ maximum = timestampStats.getMaximum();
+ }
+ if (timestampStats.hasMinimum()) {
+ minimum = timestampStats.getMinimum();
+ }
+ }
+
+ @Override
+ void reset() {
+ super.reset();
+ minimum = null;
+ maximum = null;
+ }
+
+ @Override
+ void updateTimestamp(Timestamp value) {
+ if (minimum == null) {
+ minimum = value.getTime();
+ maximum = value.getTime();
+ } else if (minimum > value.getTime()) {
+ minimum = value.getTime();
+ } else if (maximum < value.getTime()) {
+ maximum = value.getTime();
+ }
+ }
+
+ @Override
+ void merge(ColumnStatisticsImpl other) {
+ super.merge(other);
+ TimestampStatisticsImpl timestampStats = (TimestampStatisticsImpl) other;
+ if (minimum == null) {
+ minimum = timestampStats.minimum;
+ maximum = timestampStats.maximum;
+ } else if (timestampStats.minimum != null) {
+ // min and max must be widened independently: the other statistics
+ // may extend the range at both ends
+ if (minimum > timestampStats.minimum) {
+ minimum = timestampStats.minimum;
+ }
+ if (maximum < timestampStats.maximum) {
+ maximum = timestampStats.maximum;
+ }
+ }
+ }
+
+ @Override
+ OrcProto.ColumnStatistics.Builder serialize() {
+ OrcProto.ColumnStatistics.Builder result = super.serialize();
+ OrcProto.TimestampStatistics.Builder timestampStats = OrcProto.TimestampStatistics
+ .newBuilder();
+ if (getNumberOfValues() != 0) {
+ timestampStats.setMinimum(minimum);
+ timestampStats.setMaximum(maximum);
+ }
+ result.setTimestampStatistics(timestampStats);
+ return result;
+ }
+
+ @Override
+ public Timestamp getMinimum() {
+ // guard against empty column statistics, where no value was recorded
+ return minimum == null ? null : new Timestamp(minimum);
+ }
+
+ @Override
+ public Timestamp getMaximum() {
+ return maximum == null ? null : new Timestamp(maximum);
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder buf = new StringBuilder(super.toString());
+ if (getNumberOfValues() != 0) {
+ buf.append(" min: ");
+ buf.append(minimum);
+ buf.append(" max: ");
+ buf.append(maximum);
+ }
+ return buf.toString();
+ }
+ }
+
private long count = 0;
ColumnStatisticsImpl(OrcProto.ColumnStatistics stats) {
@@ -759,6 +854,10 @@ class ColumnStatisticsImpl implements Co
throw new UnsupportedOperationException("Can't update date");
}
+ void updateTimestamp(Timestamp value) {
+ throw new UnsupportedOperationException("Can't update timestamp");
+ }
+
void merge(ColumnStatisticsImpl stats) {
count += stats.count;
}
@@ -806,6 +905,8 @@ class ColumnStatisticsImpl implements Co
return new DecimalStatisticsImpl();
case DATE:
return new DateStatisticsImpl();
+ case TIMESTAMP:
+ return new TimestampStatisticsImpl();
case BINARY:
return new BinaryStatisticsImpl();
default:
@@ -829,6 +930,8 @@ class ColumnStatisticsImpl implements Co
return new DecimalStatisticsImpl(stats);
} else if (stats.hasDateStatistics()) {
return new DateStatisticsImpl(stats);
+ } else if (stats.hasTimestampStatistics()) {
+ return new TimestampStatisticsImpl(stats);
} else if(stats.hasBinaryStatistics()) {
return new BinaryStatisticsImpl(stats);
} else {
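
A minimal sketch, not part of the patch, showing how the new statistics
surface through the public Reader API; fs and the file path are assumed,
and column 1 is the first field under the root struct (column 0):

    Reader reader = OrcFile.createReader(fs, new Path("/tmp/ts_table/000000_0"));
    ColumnStatistics[] stats = reader.getStatistics();
    if (stats[1] instanceof TimestampColumnStatistics) {
      TimestampColumnStatistics ts = (TimestampColumnStatistics) stats[1];
      System.out.println("min: " + ts.getMinimum() + ", max: " + ts.getMaximum());
    }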
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/FileDump.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/FileDump.java?rev=1621031&r1=1621030&r2=1621031&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/FileDump.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/FileDump.java Thu Aug 28 03:15:13 2014
@@ -120,14 +120,14 @@ public final class FileDump {
RowIndex[] indices = rows.readRowIndex(stripeIx);
for (int col : rowIndexCols) {
StringBuilder buf = new StringBuilder();
- buf.append(" Column ").append(col).append(": row index");
+ buf.append(" Row group index column ").append(col).append(":");
RowIndex index = null;
if ((col >= indices.length) || ((index = indices[col]) == null)) {
buf.append(" not found\n");
continue;
}
for (int entryIx = 0; entryIx < index.getEntryCount(); ++entryIx) {
- buf.append(" RG ").append(entryIx).append(": ");
+ buf.append("\n Entry ").append(entryIx).append(":");
RowIndexEntry entry = index.getEntry(entryIx);
if (entry == null) {
buf.append("unknown\n");
@@ -139,15 +139,17 @@ public final class FileDump {
} else {
ColumnStatistics cs = ColumnStatisticsImpl.deserialize(colStats);
Object min = RecordReaderImpl.getMin(cs), max = RecordReaderImpl.getMax(cs);
- buf.append("[").append(min).append(", ").append(max).append(") at ");
+ buf.append(" count: ").append(cs.getNumberOfValues());
+ buf.append(" min: ").append(min);
+ buf.append(" max: ").append(max);
}
+ buf.append(" positions: ");
for (int posIx = 0; posIx < entry.getPositionsCount(); ++posIx) {
if (posIx != 0) {
buf.append(",");
}
buf.append(entry.getPositions(posIx));
}
- buf.append("\n");
}
System.out.println(buf);
}
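
With the reworked format, each indexed column prints one block per row
group entry, e.g. (values hypothetical):

      Row group index column 3:
        Entry 0: count: 10000 min: 1 max: 9999 positions: 0,0,0
        Entry 1: count: 10000 min: 10000 max: 19999 positions: 0,1024,208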
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFileMergeMapper.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFileMergeMapper.java?rev=1621031&r1=1621030&r2=1621031&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFileMergeMapper.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFileMergeMapper.java Thu Aug 28 03:15:13 2014
@@ -18,20 +18,24 @@
package org.apache.hadoop.hive.ql.io.orc;
-import java.io.IOException;
-import java.util.List;
-
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.io.merge.MergeMapper;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.shims.CombineHiveKey;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
/**
* Map task fast merging of ORC files.
*/
@@ -96,31 +100,9 @@ public class OrcFileMergeMapper extends
.inspector(reader.getObjectInspector()));
}
- // check compatibility with subsequent files
- if ((k.getTypes().get(0).getSubtypesCount() != columnCount)) {
- throw new IOException("ORCFileMerge failed because the input files are not compatible."
- + " Column counts does not match.");
- }
-
- if (!k.compression.equals(compression)) {
- throw new IOException("ORCFileMerge failed because the input files are not compatible."
- + " Compression codec does not match.");
- }
-
- if (k.compressBufferSize != compressBuffSize) {
- throw new IOException("ORCFileMerge failed because the input files are not compatible."
- + " Compression buffer size does not match.");
-
- }
-
- if (!k.versionList.equals(version)) {
- throw new IOException("ORCFileMerge failed because the input files are not compatible."
- + " Version does not match.");
- }
-
- if (k.rowIndexStride != rowIndexStride) {
- throw new IOException("ORCFileMerge failed because the input files are not compatible."
- + " Row index stride does not match.");
+ if (!checkCompatibility(k, value)) {
+ incompatFileSet.add(k.getInputPath());
+ return;
}
// next file in the path
@@ -153,6 +135,43 @@ public class OrcFileMergeMapper extends
}
}
+ private boolean checkCompatibility(OrcFileKeyWrapper k,
+ OrcFileValueWrapper value) {
+ // check compatibility with subsequent files
+ if ((k.getTypes().get(0).getSubtypesCount() != columnCount)) {
+ LOG.info("Incompatible ORC file merge! Column counts does not match for "
+ + k.getInputPath());
+ return false;
+ }
+
+ if (!k.compression.equals(compression)) {
+ LOG.info("Incompatible ORC file merge! Compression codec does not match" +
+ " for " + k.getInputPath());
+ return false;
+ }
+
+ if (k.compressBufferSize != compressBuffSize) {
+ LOG.info("Incompatible ORC file merge! Compression buffer size does not" +
+ " match for " + k.getInputPath());
+ return false;
+ }
+
+ if (!k.versionList.equals(version)) {
+ LOG.info("Incompatible ORC file merge! Version does not match for "
+ + k.getInputPath());
+ return false;
+ }
+
+ if (k.rowIndexStride != rowIndexStride) {
+ LOG.info("Incompatible ORC file merge! Row index stride does not match" +
+ " for " + k.getInputPath());
+ return false;
+ }
+
+ return true;
+ }
+
@Override
public void close() throws IOException {
// close writer
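
Compatibility failures are thus downgraded from hard IOExceptions to logged
skips; the skipped paths accumulate in incompatFileSet (inherited from
MergeMapper) and are moved next to the merged output when the task closes.
A hypothetical log line for a skipped file (path invented):

    Incompatible ORC file merge! Compression codec does not match for
    hdfs://nn:8020/warehouse/t/000001_0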
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java?rev=1621031&r1=1621030&r2=1621031&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java Thu Aug 28 03:15:13 2014
@@ -2251,6 +2251,8 @@ class RecordReaderImpl implements Record
return ((DateColumnStatistics) index).getMaximum();
} else if (index instanceof DecimalColumnStatistics) {
return ((DecimalColumnStatistics) index).getMaximum();
+ } else if (index instanceof TimestampColumnStatistics) {
+ return ((TimestampColumnStatistics) index).getMaximum();
} else {
return null;
}
@@ -2273,6 +2275,8 @@ class RecordReaderImpl implements Record
return ((DateColumnStatistics) index).getMinimum();
} else if (index instanceof DecimalColumnStatistics) {
return ((DecimalColumnStatistics) index).getMinimum();
+ } else if (index instanceof TimestampColumnStatistics) {
+ return ((TimestampColumnStatistics) index).getMinimum();
} else {
return null;
}
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/WriterImpl.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/WriterImpl.java?rev=1621031&r1=1621030&r2=1621031&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/WriterImpl.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/WriterImpl.java Thu Aug 28 03:15:13 2014
@@ -1317,6 +1317,7 @@ class WriterImpl implements Writer, Memo
Timestamp val =
((TimestampObjectInspector) inspector).
getPrimitiveJavaObject(obj);
+ indexStatistics.updateTimestamp(val);
seconds.write((val.getTime() / MILLIS_PER_SECOND) - BASE_TIMESTAMP);
nanos.write(formatNanos(val.getNanos()));
}
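
A minimal end-to-end sketch, not from the patch: every timestamp written
through this tree writer now also feeds updateTimestamp(), so the file
footer carries min/max for the column. The path and conf are assumed:

    Writer writer = OrcFile.createWriter(new Path("/tmp/ts.orc"),
        OrcFile.writerOptions(conf).inspector(
            PrimitiveObjectInspectorFactory.javaTimestampObjectInspector));
    writer.addRow(Timestamp.valueOf("2014-08-28 03:15:13"));
    writer.close();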
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/sarg/PredicateLeaf.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/sarg/PredicateLeaf.java?rev=1621031&r1=1621030&r2=1621031&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/sarg/PredicateLeaf.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/sarg/PredicateLeaf.java Thu Aug 28 03:15:13 2014
@@ -47,7 +47,8 @@ public interface PredicateLeaf {
FLOAT, // float and double
STRING, // string, char, varchar
DATE,
- DECIMAL
+ DECIMAL,
+ TIMESTAMP
}
/**
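
Adding TIMESTAMP to the leaf types lets a SearchArgument carry timestamp
literals, which RecordReaderImpl (see the getMin/getMax changes above) can
compare against the new column statistics to skip row groups. A minimal
sketch, assuming the builder accepts a java.sql.Timestamp literal for a
column named "ts":

    SearchArgument sarg = SearchArgumentFactory.newBuilder()
        .startAnd()
        .lessThan("ts", Timestamp.valueOf("2014-01-01 00:00:00"))
        .end()
        .build();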