Posted to commits@hive.apache.org by ha...@apache.org on 2013/07/31 00:22:46 UTC

svn commit: r1508669 [3/39] - in /hive/branches/vectorization: ./ common/src/java/org/apache/hadoop/hive/conf/ conf/ contrib/src/test/results/clientpositive/ data/files/ eclipse-templates/ hcatalog/build-support/ant/ hcatalog/core/src/main/java/org/apa...

Modified: hive/branches/vectorization/ql/src/gen/protobuf/gen-java/org/apache/hadoop/hive/ql/io/orc/OrcProto.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/gen/protobuf/gen-java/org/apache/hadoop/hive/ql/io/orc/OrcProto.java?rev=1508669&r1=1508668&r2=1508669&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/gen/protobuf/gen-java/org/apache/hadoop/hive/ql/io/orc/OrcProto.java (original)
+++ hive/branches/vectorization/ql/src/gen/protobuf/gen-java/org/apache/hadoop/hive/ql/io/orc/OrcProto.java Tue Jul 30 22:22:35 2013
@@ -2415,6 +2415,401 @@ public final class OrcProto {
     // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.ql.io.orc.DecimalStatistics)
   }
   
+  public interface DateStatisticsOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+    
+    // optional sint32 minimum = 1;
+    boolean hasMinimum();
+    int getMinimum();
+    
+    // optional sint32 maximum = 2;
+    boolean hasMaximum();
+    int getMaximum();
+  }
+  public static final class DateStatistics extends
+      com.google.protobuf.GeneratedMessage
+      implements DateStatisticsOrBuilder {
+    // Use DateStatistics.newBuilder() to construct.
+    private DateStatistics(Builder builder) {
+      super(builder);
+    }
+    private DateStatistics(boolean noInit) {}
+    
+    private static final DateStatistics defaultInstance;
+    public static DateStatistics getDefaultInstance() {
+      return defaultInstance;
+    }
+    
+    public DateStatistics getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+    
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hive.ql.io.orc.OrcProto.internal_static_org_apache_hadoop_hive_ql_io_orc_DateStatistics_descriptor;
+    }
+    
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hive.ql.io.orc.OrcProto.internal_static_org_apache_hadoop_hive_ql_io_orc_DateStatistics_fieldAccessorTable;
+    }
+    
+    private int bitField0_;
+    // optional sint32 minimum = 1;
+    public static final int MINIMUM_FIELD_NUMBER = 1;
+    private int minimum_;
+    public boolean hasMinimum() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    public int getMinimum() {
+      return minimum_;
+    }
+    
+    // optional sint32 maximum = 2;
+    public static final int MAXIMUM_FIELD_NUMBER = 2;
+    private int maximum_;
+    public boolean hasMaximum() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    public int getMaximum() {
+      return maximum_;
+    }
+    
+    private void initFields() {
+      minimum_ = 0;
+      maximum_ = 0;
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+      
+      memoizedIsInitialized = 1;
+      return true;
+    }
+    
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeSInt32(1, minimum_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeSInt32(2, maximum_);
+      }
+      getUnknownFields().writeTo(output);
+    }
+    
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+    
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeSInt32Size(1, minimum_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeSInt32Size(2, maximum_);
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+    
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+    
+    public static org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data).buildParsed();
+    }
+    public static org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data, extensionRegistry)
+               .buildParsed();
+    }
+    public static org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data).buildParsed();
+    }
+    public static org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data, extensionRegistry)
+               .buildParsed();
+    }
+    public static org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input).buildParsed();
+    }
+    public static org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input, extensionRegistry)
+               .buildParsed();
+    }
+    public static org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      Builder builder = newBuilder();
+      if (builder.mergeDelimitedFrom(input)) {
+        return builder.buildParsed();
+      } else {
+        return null;
+      }
+    }
+    public static org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      Builder builder = newBuilder();
+      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+        return builder.buildParsed();
+      } else {
+        return null;
+      }
+    }
+    public static org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input).buildParsed();
+    }
+    public static org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input, extensionRegistry)
+               .buildParsed();
+    }
+    
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+    
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatisticsOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hive.ql.io.orc.OrcProto.internal_static_org_apache_hadoop_hive_ql_io_orc_DateStatistics_descriptor;
+      }
+      
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hive.ql.io.orc.OrcProto.internal_static_org_apache_hadoop_hive_ql_io_orc_DateStatistics_fieldAccessorTable;
+      }
+      
+      // Construct using org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+      
+      private Builder(BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+      
+      public Builder clear() {
+        super.clear();
+        minimum_ = 0;
+        bitField0_ = (bitField0_ & ~0x00000001);
+        maximum_ = 0;
+        bitField0_ = (bitField0_ & ~0x00000002);
+        return this;
+      }
+      
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+      
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.getDescriptor();
+      }
+      
+      public org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics getDefaultInstanceForType() {
+        return org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.getDefaultInstance();
+      }
+      
+      public org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics build() {
+        org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+      
+      private org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics buildParsed()
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(
+            result).asInvalidProtocolBufferException();
+        }
+        return result;
+      }
+      
+      public org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics buildPartial() {
+        org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics result = new org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.minimum_ = minimum_;
+        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+          to_bitField0_ |= 0x00000002;
+        }
+        result.maximum_ = maximum_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+      
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics) {
+          return mergeFrom((org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+      
+      public Builder mergeFrom(org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics other) {
+        if (other == org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.getDefaultInstance()) return this;
+        if (other.hasMinimum()) {
+          setMinimum(other.getMinimum());
+        }
+        if (other.hasMaximum()) {
+          setMaximum(other.getMaximum());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+      
+      public final boolean isInitialized() {
+        return true;
+      }
+      
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder(
+            this.getUnknownFields());
+        while (true) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              this.setUnknownFields(unknownFields.build());
+              onChanged();
+              return this;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                this.setUnknownFields(unknownFields.build());
+                onChanged();
+                return this;
+              }
+              break;
+            }
+            case 8: {
+              bitField0_ |= 0x00000001;
+              minimum_ = input.readSInt32();
+              break;
+            }
+            case 16: {
+              bitField0_ |= 0x00000002;
+              maximum_ = input.readSInt32();
+              break;
+            }
+          }
+        }
+      }
+      
+      private int bitField0_;
+      
+      // optional sint32 minimum = 1;
+      private int minimum_ ;
+      public boolean hasMinimum() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      public int getMinimum() {
+        return minimum_;
+      }
+      public Builder setMinimum(int value) {
+        bitField0_ |= 0x00000001;
+        minimum_ = value;
+        onChanged();
+        return this;
+      }
+      public Builder clearMinimum() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        minimum_ = 0;
+        onChanged();
+        return this;
+      }
+      
+      // optional sint32 maximum = 2;
+      private int maximum_ ;
+      public boolean hasMaximum() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      public int getMaximum() {
+        return maximum_;
+      }
+      public Builder setMaximum(int value) {
+        bitField0_ |= 0x00000002;
+        maximum_ = value;
+        onChanged();
+        return this;
+      }
+      public Builder clearMaximum() {
+        bitField0_ = (bitField0_ & ~0x00000002);
+        maximum_ = 0;
+        onChanged();
+        return this;
+      }
+      
+      // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.ql.io.orc.DateStatistics)
+    }
+    
+    static {
+      defaultInstance = new DateStatistics(true);
+      defaultInstance.initFields();
+    }
+    
+    // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.ql.io.orc.DateStatistics)
+  }
+  
   public interface ColumnStatisticsOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
     
@@ -2446,6 +2841,11 @@ public final class OrcProto {
     boolean hasDecimalStatistics();
     org.apache.hadoop.hive.ql.io.orc.OrcProto.DecimalStatistics getDecimalStatistics();
     org.apache.hadoop.hive.ql.io.orc.OrcProto.DecimalStatisticsOrBuilder getDecimalStatisticsOrBuilder();
+    
+    // optional .org.apache.hadoop.hive.ql.io.orc.DateStatistics dateStatistics = 7;
+    boolean hasDateStatistics();
+    org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics getDateStatistics();
+    org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatisticsOrBuilder getDateStatisticsOrBuilder();
   }
   public static final class ColumnStatistics extends
       com.google.protobuf.GeneratedMessage
@@ -2551,6 +2951,19 @@ public final class OrcProto {
       return decimalStatistics_;
     }
     
+    // optional .org.apache.hadoop.hive.ql.io.orc.DateStatistics dateStatistics = 7;
+    public static final int DATESTATISTICS_FIELD_NUMBER = 7;
+    private org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics dateStatistics_;
+    public boolean hasDateStatistics() {
+      return ((bitField0_ & 0x00000040) == 0x00000040);
+    }
+    public org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics getDateStatistics() {
+      return dateStatistics_;
+    }
+    public org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatisticsOrBuilder getDateStatisticsOrBuilder() {
+      return dateStatistics_;
+    }
+    
     private void initFields() {
       numberOfValues_ = 0L;
       intStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.IntegerStatistics.getDefaultInstance();
@@ -2558,6 +2971,7 @@ public final class OrcProto {
       stringStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.StringStatistics.getDefaultInstance();
       bucketStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.BucketStatistics.getDefaultInstance();
       decimalStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.DecimalStatistics.getDefaultInstance();
+      dateStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.getDefaultInstance();
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -2589,6 +3003,9 @@ public final class OrcProto {
       if (((bitField0_ & 0x00000020) == 0x00000020)) {
         output.writeMessage(6, decimalStatistics_);
       }
+      if (((bitField0_ & 0x00000040) == 0x00000040)) {
+        output.writeMessage(7, dateStatistics_);
+      }
       getUnknownFields().writeTo(output);
     }
     
@@ -2622,6 +3039,10 @@ public final class OrcProto {
         size += com.google.protobuf.CodedOutputStream
           .computeMessageSize(6, decimalStatistics_);
       }
+      if (((bitField0_ & 0x00000040) == 0x00000040)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(7, dateStatistics_);
+      }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
       return size;
@@ -2743,6 +3164,7 @@ public final class OrcProto {
           getStringStatisticsFieldBuilder();
           getBucketStatisticsFieldBuilder();
           getDecimalStatisticsFieldBuilder();
+          getDateStatisticsFieldBuilder();
         }
       }
       private static Builder create() {
@@ -2783,6 +3205,12 @@ public final class OrcProto {
           decimalStatisticsBuilder_.clear();
         }
         bitField0_ = (bitField0_ & ~0x00000020);
+        if (dateStatisticsBuilder_ == null) {
+          dateStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.getDefaultInstance();
+        } else {
+          dateStatisticsBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000040);
         return this;
       }
       
@@ -2865,6 +3293,14 @@ public final class OrcProto {
         } else {
           result.decimalStatistics_ = decimalStatisticsBuilder_.build();
         }
+        if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
+          to_bitField0_ |= 0x00000040;
+        }
+        if (dateStatisticsBuilder_ == null) {
+          result.dateStatistics_ = dateStatistics_;
+        } else {
+          result.dateStatistics_ = dateStatisticsBuilder_.build();
+        }
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
@@ -2899,6 +3335,9 @@ public final class OrcProto {
         if (other.hasDecimalStatistics()) {
           mergeDecimalStatistics(other.getDecimalStatistics());
         }
+        if (other.hasDateStatistics()) {
+          mergeDateStatistics(other.getDateStatistics());
+        }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }
@@ -2980,6 +3419,15 @@ public final class OrcProto {
               setDecimalStatistics(subBuilder.buildPartial());
               break;
             }
+            case 58: {
+              org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.Builder subBuilder = org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.newBuilder();
+              if (hasDateStatistics()) {
+                subBuilder.mergeFrom(getDateStatistics());
+              }
+              input.readMessage(subBuilder, extensionRegistry);
+              setDateStatistics(subBuilder.buildPartial());
+              break;
+            }
           }
         }
       }
@@ -3457,6 +3905,96 @@ public final class OrcProto {
         return decimalStatisticsBuilder_;
       }
       
+      // optional .org.apache.hadoop.hive.ql.io.orc.DateStatistics dateStatistics = 7;
+      private org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics dateStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.getDefaultInstance();
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics, org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.Builder, org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatisticsOrBuilder> dateStatisticsBuilder_;
+      public boolean hasDateStatistics() {
+        return ((bitField0_ & 0x00000040) == 0x00000040);
+      }
+      public org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics getDateStatistics() {
+        if (dateStatisticsBuilder_ == null) {
+          return dateStatistics_;
+        } else {
+          return dateStatisticsBuilder_.getMessage();
+        }
+      }
+      public Builder setDateStatistics(org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics value) {
+        if (dateStatisticsBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          dateStatistics_ = value;
+          onChanged();
+        } else {
+          dateStatisticsBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000040;
+        return this;
+      }
+      public Builder setDateStatistics(
+          org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.Builder builderForValue) {
+        if (dateStatisticsBuilder_ == null) {
+          dateStatistics_ = builderForValue.build();
+          onChanged();
+        } else {
+          dateStatisticsBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000040;
+        return this;
+      }
+      public Builder mergeDateStatistics(org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics value) {
+        if (dateStatisticsBuilder_ == null) {
+          if (((bitField0_ & 0x00000040) == 0x00000040) &&
+              dateStatistics_ != org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.getDefaultInstance()) {
+            dateStatistics_ =
+              org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.newBuilder(dateStatistics_).mergeFrom(value).buildPartial();
+          } else {
+            dateStatistics_ = value;
+          }
+          onChanged();
+        } else {
+          dateStatisticsBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000040;
+        return this;
+      }
+      public Builder clearDateStatistics() {
+        if (dateStatisticsBuilder_ == null) {
+          dateStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.getDefaultInstance();
+          onChanged();
+        } else {
+          dateStatisticsBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000040);
+        return this;
+      }
+      public org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.Builder getDateStatisticsBuilder() {
+        bitField0_ |= 0x00000040;
+        onChanged();
+        return getDateStatisticsFieldBuilder().getBuilder();
+      }
+      public org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatisticsOrBuilder getDateStatisticsOrBuilder() {
+        if (dateStatisticsBuilder_ != null) {
+          return dateStatisticsBuilder_.getMessageOrBuilder();
+        } else {
+          return dateStatistics_;
+        }
+      }
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics, org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.Builder, org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatisticsOrBuilder> 
+          getDateStatisticsFieldBuilder() {
+        if (dateStatisticsBuilder_ == null) {
+          dateStatisticsBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+              org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics, org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.Builder, org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatisticsOrBuilder>(
+                  dateStatistics_,
+                  getParentForChildren(),
+                  isClean());
+          dateStatistics_ = null;
+        }
+        return dateStatisticsBuilder_;
+      }
+      
       // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.ql.io.orc.ColumnStatistics)
     }
     
@@ -6505,6 +7043,7 @@ public final class OrcProto {
       STRUCT(12, 12),
       UNION(13, 13),
       DECIMAL(14, 14),
+      DATE(15, 15),
       ;
       
       public static final int BOOLEAN_VALUE = 0;
@@ -6522,6 +7061,7 @@ public final class OrcProto {
       public static final int STRUCT_VALUE = 12;
       public static final int UNION_VALUE = 13;
       public static final int DECIMAL_VALUE = 14;
+      public static final int DATE_VALUE = 15;
       
       
       public final int getNumber() { return value; }
@@ -6543,6 +7083,7 @@ public final class OrcProto {
           case 12: return STRUCT;
           case 13: return UNION;
           case 14: return DECIMAL;
+          case 15: return DATE;
           default: return null;
         }
       }
@@ -6573,7 +7114,7 @@ public final class OrcProto {
       }
       
       private static final Kind[] VALUES = {
-        BOOLEAN, BYTE, SHORT, INT, LONG, FLOAT, DOUBLE, STRING, BINARY, TIMESTAMP, LIST, MAP, STRUCT, UNION, DECIMAL, 
+        BOOLEAN, BYTE, SHORT, INT, LONG, FLOAT, DOUBLE, STRING, BINARY, TIMESTAMP, LIST, MAP, STRUCT, UNION, DECIMAL, DATE, 
       };
       
       public static Kind valueOf(
@@ -10476,6 +11017,11 @@ public final class OrcProto {
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
       internal_static_org_apache_hadoop_hive_ql_io_orc_DecimalStatistics_fieldAccessorTable;
   private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_org_apache_hadoop_hive_ql_io_orc_DateStatistics_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_org_apache_hadoop_hive_ql_io_orc_DateStatistics_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
     internal_static_org_apache_hadoop_hive_ql_io_orc_ColumnStatistics_descriptor;
   private static
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
@@ -10547,62 +11093,66 @@ public final class OrcProto {
       "istics\022\017\n\007minimum\030\001 \001(\t\022\017\n\007maximum\030\002 \001(\t" +
       "\"%\n\020BucketStatistics\022\021\n\005count\030\001 \003(\004B\002\020\001\"" +
       "B\n\021DecimalStatistics\022\017\n\007minimum\030\001 \001(\t\022\017\n" +
-      "\007maximum\030\002 \001(\t\022\013\n\003sum\030\003 \001(\t\"\260\003\n\020ColumnSt" +
-      "atistics\022\026\n\016numberOfValues\030\001 \001(\004\022J\n\rintS",
-      "tatistics\030\002 \001(\01323.org.apache.hadoop.hive" +
-      ".ql.io.orc.IntegerStatistics\022L\n\020doubleSt" +
-      "atistics\030\003 \001(\01322.org.apache.hadoop.hive." +
-      "ql.io.orc.DoubleStatistics\022L\n\020stringStat" +
-      "istics\030\004 \001(\01322.org.apache.hadoop.hive.ql" +
-      ".io.orc.StringStatistics\022L\n\020bucketStatis" +
-      "tics\030\005 \001(\01322.org.apache.hadoop.hive.ql.i" +
-      "o.orc.BucketStatistics\022N\n\021decimalStatist" +
-      "ics\030\006 \001(\01323.org.apache.hadoop.hive.ql.io" +
-      ".orc.DecimalStatistics\"n\n\rRowIndexEntry\022",
-      "\025\n\tpositions\030\001 \003(\004B\002\020\001\022F\n\nstatistics\030\002 \001" +
-      "(\01322.org.apache.hadoop.hive.ql.io.orc.Co" +
-      "lumnStatistics\"J\n\010RowIndex\022>\n\005entry\030\001 \003(" +
-      "\0132/.org.apache.hadoop.hive.ql.io.orc.Row" +
-      "IndexEntry\"\331\001\n\006Stream\022;\n\004kind\030\001 \002(\0162-.or" +
-      "g.apache.hadoop.hive.ql.io.orc.Stream.Ki" +
-      "nd\022\016\n\006column\030\002 \001(\r\022\016\n\006length\030\003 \001(\004\"r\n\004Ki" +
-      "nd\022\013\n\007PRESENT\020\000\022\010\n\004DATA\020\001\022\n\n\006LENGTH\020\002\022\023\n" +
-      "\017DICTIONARY_DATA\020\003\022\024\n\020DICTIONARY_COUNT\020\004" +
-      "\022\r\n\tSECONDARY\020\005\022\r\n\tROW_INDEX\020\006\"\221\001\n\016Colum",
-      "nEncoding\022C\n\004kind\030\001 \002(\01625.org.apache.had" +
-      "oop.hive.ql.io.orc.ColumnEncoding.Kind\022\026" +
-      "\n\016dictionarySize\030\002 \001(\r\"\"\n\004Kind\022\n\n\006DIRECT" +
-      "\020\000\022\016\n\nDICTIONARY\020\001\"\214\001\n\014StripeFooter\0229\n\007s" +
-      "treams\030\001 \003(\0132(.org.apache.hadoop.hive.ql" +
-      ".io.orc.Stream\022A\n\007columns\030\002 \003(\01320.org.ap" +
-      "ache.hadoop.hive.ql.io.orc.ColumnEncodin" +
-      "g\"\236\002\n\004Type\0229\n\004kind\030\001 \002(\0162+.org.apache.ha" +
-      "doop.hive.ql.io.orc.Type.Kind\022\024\n\010subtype" +
-      "s\030\002 \003(\rB\002\020\001\022\022\n\nfieldNames\030\003 \003(\t\"\260\001\n\004Kind",
-      "\022\013\n\007BOOLEAN\020\000\022\010\n\004BYTE\020\001\022\t\n\005SHORT\020\002\022\007\n\003IN" +
-      "T\020\003\022\010\n\004LONG\020\004\022\t\n\005FLOAT\020\005\022\n\n\006DOUBLE\020\006\022\n\n\006" +
-      "STRING\020\007\022\n\n\006BINARY\020\010\022\r\n\tTIMESTAMP\020\t\022\010\n\004L" +
-      "IST\020\n\022\007\n\003MAP\020\013\022\n\n\006STRUCT\020\014\022\t\n\005UNION\020\r\022\013\n" +
-      "\007DECIMAL\020\016\"x\n\021StripeInformation\022\016\n\006offse" +
-      "t\030\001 \001(\004\022\023\n\013indexLength\030\002 \001(\004\022\022\n\ndataLeng" +
-      "th\030\003 \001(\004\022\024\n\014footerLength\030\004 \001(\004\022\024\n\014number" +
-      "OfRows\030\005 \001(\004\"/\n\020UserMetadataItem\022\014\n\004name" +
-      "\030\001 \002(\t\022\r\n\005value\030\002 \002(\014\"\356\002\n\006Footer\022\024\n\014head" +
-      "erLength\030\001 \001(\004\022\025\n\rcontentLength\030\002 \001(\004\022D\n",
-      "\007stripes\030\003 \003(\01323.org.apache.hadoop.hive." +
-      "ql.io.orc.StripeInformation\0225\n\005types\030\004 \003" +
-      "(\0132&.org.apache.hadoop.hive.ql.io.orc.Ty" +
-      "pe\022D\n\010metadata\030\005 \003(\01322.org.apache.hadoop" +
-      ".hive.ql.io.orc.UserMetadataItem\022\024\n\014numb" +
-      "erOfRows\030\006 \001(\004\022F\n\nstatistics\030\007 \003(\01322.org" +
-      ".apache.hadoop.hive.ql.io.orc.ColumnStat" +
-      "istics\022\026\n\016rowIndexStride\030\010 \001(\r\"\255\001\n\nPostS" +
-      "cript\022\024\n\014footerLength\030\001 \001(\004\022F\n\013compressi" +
-      "on\030\002 \001(\01621.org.apache.hadoop.hive.ql.io.",
-      "orc.CompressionKind\022\034\n\024compressionBlockS" +
-      "ize\030\003 \001(\004\022\023\n\007version\030\004 \003(\rB\002\020\001\022\016\n\005magic\030" +
-      "\300> \001(\t*:\n\017CompressionKind\022\010\n\004NONE\020\000\022\010\n\004Z" +
-      "LIB\020\001\022\n\n\006SNAPPY\020\002\022\007\n\003LZO\020\003"
+      "\007maximum\030\002 \001(\t\022\013\n\003sum\030\003 \001(\t\"2\n\016DateStati" +
+      "stics\022\017\n\007minimum\030\001 \001(\021\022\017\n\007maximum\030\002 \001(\021\"",
+      "\372\003\n\020ColumnStatistics\022\026\n\016numberOfValues\030\001" +
+      " \001(\004\022J\n\rintStatistics\030\002 \001(\01323.org.apache" +
+      ".hadoop.hive.ql.io.orc.IntegerStatistics" +
+      "\022L\n\020doubleStatistics\030\003 \001(\01322.org.apache." +
+      "hadoop.hive.ql.io.orc.DoubleStatistics\022L" +
+      "\n\020stringStatistics\030\004 \001(\01322.org.apache.ha" +
+      "doop.hive.ql.io.orc.StringStatistics\022L\n\020" +
+      "bucketStatistics\030\005 \001(\01322.org.apache.hado" +
+      "op.hive.ql.io.orc.BucketStatistics\022N\n\021de" +
+      "cimalStatistics\030\006 \001(\01323.org.apache.hadoo",
+      "p.hive.ql.io.orc.DecimalStatistics\022H\n\016da" +
+      "teStatistics\030\007 \001(\01320.org.apache.hadoop.h" +
+      "ive.ql.io.orc.DateStatistics\"n\n\rRowIndex" +
+      "Entry\022\025\n\tpositions\030\001 \003(\004B\002\020\001\022F\n\nstatisti" +
+      "cs\030\002 \001(\01322.org.apache.hadoop.hive.ql.io." +
+      "orc.ColumnStatistics\"J\n\010RowIndex\022>\n\005entr" +
+      "y\030\001 \003(\0132/.org.apache.hadoop.hive.ql.io.o" +
+      "rc.RowIndexEntry\"\331\001\n\006Stream\022;\n\004kind\030\001 \002(" +
+      "\0162-.org.apache.hadoop.hive.ql.io.orc.Str" +
+      "eam.Kind\022\016\n\006column\030\002 \001(\r\022\016\n\006length\030\003 \001(\004",
+      "\"r\n\004Kind\022\013\n\007PRESENT\020\000\022\010\n\004DATA\020\001\022\n\n\006LENGT" +
+      "H\020\002\022\023\n\017DICTIONARY_DATA\020\003\022\024\n\020DICTIONARY_C" +
+      "OUNT\020\004\022\r\n\tSECONDARY\020\005\022\r\n\tROW_INDEX\020\006\"\221\001\n" +
+      "\016ColumnEncoding\022C\n\004kind\030\001 \002(\01625.org.apac" +
+      "he.hadoop.hive.ql.io.orc.ColumnEncoding." +
+      "Kind\022\026\n\016dictionarySize\030\002 \001(\r\"\"\n\004Kind\022\n\n\006" +
+      "DIRECT\020\000\022\016\n\nDICTIONARY\020\001\"\214\001\n\014StripeFoote" +
+      "r\0229\n\007streams\030\001 \003(\0132(.org.apache.hadoop.h" +
+      "ive.ql.io.orc.Stream\022A\n\007columns\030\002 \003(\01320." +
+      "org.apache.hadoop.hive.ql.io.orc.ColumnE",
+      "ncoding\"\250\002\n\004Type\0229\n\004kind\030\001 \002(\0162+.org.apa" +
+      "che.hadoop.hive.ql.io.orc.Type.Kind\022\024\n\010s" +
+      "ubtypes\030\002 \003(\rB\002\020\001\022\022\n\nfieldNames\030\003 \003(\t\"\272\001" +
+      "\n\004Kind\022\013\n\007BOOLEAN\020\000\022\010\n\004BYTE\020\001\022\t\n\005SHORT\020\002" +
+      "\022\007\n\003INT\020\003\022\010\n\004LONG\020\004\022\t\n\005FLOAT\020\005\022\n\n\006DOUBLE" +
+      "\020\006\022\n\n\006STRING\020\007\022\n\n\006BINARY\020\010\022\r\n\tTIMESTAMP\020" +
+      "\t\022\010\n\004LIST\020\n\022\007\n\003MAP\020\013\022\n\n\006STRUCT\020\014\022\t\n\005UNIO" +
+      "N\020\r\022\013\n\007DECIMAL\020\016\022\010\n\004DATE\020\017\"x\n\021StripeInfo" +
+      "rmation\022\016\n\006offset\030\001 \001(\004\022\023\n\013indexLength\030\002" +
+      " \001(\004\022\022\n\ndataLength\030\003 \001(\004\022\024\n\014footerLength",
+      "\030\004 \001(\004\022\024\n\014numberOfRows\030\005 \001(\004\"/\n\020UserMeta" +
+      "dataItem\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \002(\014\"\356\002" +
+      "\n\006Footer\022\024\n\014headerLength\030\001 \001(\004\022\025\n\rconten" +
+      "tLength\030\002 \001(\004\022D\n\007stripes\030\003 \003(\01323.org.apa" +
+      "che.hadoop.hive.ql.io.orc.StripeInformat" +
+      "ion\0225\n\005types\030\004 \003(\0132&.org.apache.hadoop.h" +
+      "ive.ql.io.orc.Type\022D\n\010metadata\030\005 \003(\01322.o" +
+      "rg.apache.hadoop.hive.ql.io.orc.UserMeta" +
+      "dataItem\022\024\n\014numberOfRows\030\006 \001(\004\022F\n\nstatis" +
+      "tics\030\007 \003(\01322.org.apache.hadoop.hive.ql.i",
+      "o.orc.ColumnStatistics\022\026\n\016rowIndexStride" +
+      "\030\010 \001(\r\"\255\001\n\nPostScript\022\024\n\014footerLength\030\001 " +
+      "\001(\004\022F\n\013compression\030\002 \001(\01621.org.apache.ha" +
+      "doop.hive.ql.io.orc.CompressionKind\022\034\n\024c" +
+      "ompressionBlockSize\030\003 \001(\004\022\023\n\007version\030\004 \003" +
+      "(\rB\002\020\001\022\016\n\005magic\030\300> \001(\t*:\n\017CompressionKin" +
+      "d\022\010\n\004NONE\020\000\022\010\n\004ZLIB\020\001\022\n\n\006SNAPPY\020\002\022\007\n\003LZO" +
+      "\020\003"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -10649,16 +11199,24 @@ public final class OrcProto {
               new java.lang.String[] { "Minimum", "Maximum", "Sum", },
               org.apache.hadoop.hive.ql.io.orc.OrcProto.DecimalStatistics.class,
               org.apache.hadoop.hive.ql.io.orc.OrcProto.DecimalStatistics.Builder.class);
-          internal_static_org_apache_hadoop_hive_ql_io_orc_ColumnStatistics_descriptor =
+          internal_static_org_apache_hadoop_hive_ql_io_orc_DateStatistics_descriptor =
             getDescriptor().getMessageTypes().get(5);
+          internal_static_org_apache_hadoop_hive_ql_io_orc_DateStatistics_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_org_apache_hadoop_hive_ql_io_orc_DateStatistics_descriptor,
+              new java.lang.String[] { "Minimum", "Maximum", },
+              org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.class,
+              org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.Builder.class);
+          internal_static_org_apache_hadoop_hive_ql_io_orc_ColumnStatistics_descriptor =
+            getDescriptor().getMessageTypes().get(6);
           internal_static_org_apache_hadoop_hive_ql_io_orc_ColumnStatistics_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_org_apache_hadoop_hive_ql_io_orc_ColumnStatistics_descriptor,
-              new java.lang.String[] { "NumberOfValues", "IntStatistics", "DoubleStatistics", "StringStatistics", "BucketStatistics", "DecimalStatistics", },
+              new java.lang.String[] { "NumberOfValues", "IntStatistics", "DoubleStatistics", "StringStatistics", "BucketStatistics", "DecimalStatistics", "DateStatistics", },
               org.apache.hadoop.hive.ql.io.orc.OrcProto.ColumnStatistics.class,
               org.apache.hadoop.hive.ql.io.orc.OrcProto.ColumnStatistics.Builder.class);
           internal_static_org_apache_hadoop_hive_ql_io_orc_RowIndexEntry_descriptor =
-            getDescriptor().getMessageTypes().get(6);
+            getDescriptor().getMessageTypes().get(7);
           internal_static_org_apache_hadoop_hive_ql_io_orc_RowIndexEntry_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_org_apache_hadoop_hive_ql_io_orc_RowIndexEntry_descriptor,
@@ -10666,7 +11224,7 @@ public final class OrcProto {
               org.apache.hadoop.hive.ql.io.orc.OrcProto.RowIndexEntry.class,
               org.apache.hadoop.hive.ql.io.orc.OrcProto.RowIndexEntry.Builder.class);
           internal_static_org_apache_hadoop_hive_ql_io_orc_RowIndex_descriptor =
-            getDescriptor().getMessageTypes().get(7);
+            getDescriptor().getMessageTypes().get(8);
           internal_static_org_apache_hadoop_hive_ql_io_orc_RowIndex_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_org_apache_hadoop_hive_ql_io_orc_RowIndex_descriptor,
@@ -10674,7 +11232,7 @@ public final class OrcProto {
               org.apache.hadoop.hive.ql.io.orc.OrcProto.RowIndex.class,
               org.apache.hadoop.hive.ql.io.orc.OrcProto.RowIndex.Builder.class);
           internal_static_org_apache_hadoop_hive_ql_io_orc_Stream_descriptor =
-            getDescriptor().getMessageTypes().get(8);
+            getDescriptor().getMessageTypes().get(9);
           internal_static_org_apache_hadoop_hive_ql_io_orc_Stream_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_org_apache_hadoop_hive_ql_io_orc_Stream_descriptor,
@@ -10682,7 +11240,7 @@ public final class OrcProto {
               org.apache.hadoop.hive.ql.io.orc.OrcProto.Stream.class,
               org.apache.hadoop.hive.ql.io.orc.OrcProto.Stream.Builder.class);
           internal_static_org_apache_hadoop_hive_ql_io_orc_ColumnEncoding_descriptor =
-            getDescriptor().getMessageTypes().get(9);
+            getDescriptor().getMessageTypes().get(10);
           internal_static_org_apache_hadoop_hive_ql_io_orc_ColumnEncoding_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_org_apache_hadoop_hive_ql_io_orc_ColumnEncoding_descriptor,
@@ -10690,7 +11248,7 @@ public final class OrcProto {
               org.apache.hadoop.hive.ql.io.orc.OrcProto.ColumnEncoding.class,
               org.apache.hadoop.hive.ql.io.orc.OrcProto.ColumnEncoding.Builder.class);
           internal_static_org_apache_hadoop_hive_ql_io_orc_StripeFooter_descriptor =
-            getDescriptor().getMessageTypes().get(10);
+            getDescriptor().getMessageTypes().get(11);
           internal_static_org_apache_hadoop_hive_ql_io_orc_StripeFooter_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_org_apache_hadoop_hive_ql_io_orc_StripeFooter_descriptor,
@@ -10698,7 +11256,7 @@ public final class OrcProto {
               org.apache.hadoop.hive.ql.io.orc.OrcProto.StripeFooter.class,
               org.apache.hadoop.hive.ql.io.orc.OrcProto.StripeFooter.Builder.class);
           internal_static_org_apache_hadoop_hive_ql_io_orc_Type_descriptor =
-            getDescriptor().getMessageTypes().get(11);
+            getDescriptor().getMessageTypes().get(12);
           internal_static_org_apache_hadoop_hive_ql_io_orc_Type_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_org_apache_hadoop_hive_ql_io_orc_Type_descriptor,
@@ -10706,7 +11264,7 @@ public final class OrcProto {
               org.apache.hadoop.hive.ql.io.orc.OrcProto.Type.class,
               org.apache.hadoop.hive.ql.io.orc.OrcProto.Type.Builder.class);
           internal_static_org_apache_hadoop_hive_ql_io_orc_StripeInformation_descriptor =
-            getDescriptor().getMessageTypes().get(12);
+            getDescriptor().getMessageTypes().get(13);
           internal_static_org_apache_hadoop_hive_ql_io_orc_StripeInformation_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_org_apache_hadoop_hive_ql_io_orc_StripeInformation_descriptor,
@@ -10714,7 +11272,7 @@ public final class OrcProto {
               org.apache.hadoop.hive.ql.io.orc.OrcProto.StripeInformation.class,
               org.apache.hadoop.hive.ql.io.orc.OrcProto.StripeInformation.Builder.class);
           internal_static_org_apache_hadoop_hive_ql_io_orc_UserMetadataItem_descriptor =
-            getDescriptor().getMessageTypes().get(13);
+            getDescriptor().getMessageTypes().get(14);
           internal_static_org_apache_hadoop_hive_ql_io_orc_UserMetadataItem_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_org_apache_hadoop_hive_ql_io_orc_UserMetadataItem_descriptor,
@@ -10722,7 +11280,7 @@ public final class OrcProto {
               org.apache.hadoop.hive.ql.io.orc.OrcProto.UserMetadataItem.class,
               org.apache.hadoop.hive.ql.io.orc.OrcProto.UserMetadataItem.Builder.class);
           internal_static_org_apache_hadoop_hive_ql_io_orc_Footer_descriptor =
-            getDescriptor().getMessageTypes().get(14);
+            getDescriptor().getMessageTypes().get(15);
           internal_static_org_apache_hadoop_hive_ql_io_orc_Footer_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_org_apache_hadoop_hive_ql_io_orc_Footer_descriptor,
@@ -10730,7 +11288,7 @@ public final class OrcProto {
               org.apache.hadoop.hive.ql.io.orc.OrcProto.Footer.class,
               org.apache.hadoop.hive.ql.io.orc.OrcProto.Footer.Builder.class);
           internal_static_org_apache_hadoop_hive_ql_io_orc_PostScript_descriptor =
-            getDescriptor().getMessageTypes().get(15);
+            getDescriptor().getMessageTypes().get(16);
           internal_static_org_apache_hadoop_hive_ql_io_orc_PostScript_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_org_apache_hadoop_hive_ql_io_orc_PostScript_descriptor,

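The OrcProto.java hunks above add a generated DateStatistics message (two optional sint32 fields, minimum and maximum) and wire it into ColumnStatistics as field 7 and into Type.Kind as DATE(15, 15). A minimal sketch (not part of this commit) of driving the generated API directly; it assumes the sint32 bounds hold days since the UNIX epoch, which is how ORC represents DATE values, and the class name and literal values are illustrative only:

    import org.apache.hadoop.hive.ql.io.orc.OrcProto;

    public class DateStatisticsSketch {
      public static void main(String[] args) throws Exception {
        // Build a DateStatistics message with both optional fields set.
        OrcProto.DateStatistics stats = OrcProto.DateStatistics.newBuilder()
            .setMinimum(15000)   // assumed semantics: days since 1970-01-01
            .setMaximum(15890)
            .build();
        // Round-trip through the wire format and read the bounds back.
        OrcProto.DateStatistics parsed =
            OrcProto.DateStatistics.parseFrom(stats.toByteArray());
        System.out.println(parsed.hasMinimum() + " " + parsed.getMinimum());
        System.out.println(parsed.hasMaximum() + " " + parsed.getMaximum());
      }
    }

Because both fields are optional, readers should check hasMinimum()/hasMaximum() before trusting the getters, which return 0 when the field is unset.
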
Modified: hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/Driver.java?rev=1508669&r1=1508668&r2=1508669&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/Driver.java (original)
+++ hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/Driver.java Tue Jul 30 22:22:35 2013
@@ -460,12 +460,12 @@ public class Driver implements CommandPr
 
         // serialize the queryPlan
         FileOutputStream fos = new FileOutputStream(queryPlanFileName);
-        Utilities.serializeQueryPlan(plan, fos);
+        Utilities.serializeObject(plan, fos);
         fos.close();
 
         // deserialize the queryPlan
         FileInputStream fis = new FileInputStream(queryPlanFileName);
-        QueryPlan newPlan = Utilities.deserializeQueryPlan(fis, conf);
+        QueryPlan newPlan = Utilities.deserializeObject(fis);
         fis.close();
 
         // Use the deserialized plan
@@ -878,14 +878,17 @@ public class Driver implements CommandPr
 
   public CommandProcessorResponse run(String command) throws CommandNeedRetryException {
     CommandProcessorResponse cpr = runInternal(command);
-    if(cpr.getResponseCode() == 0) 
+    if(cpr.getResponseCode() == 0) {
       return cpr;
+    }
     SessionState ss = SessionState.get();
-    if(ss == null) 
+    if(ss == null) {
       return cpr;
+    }
     MetaDataFormatter mdf = MetaDataFormatUtils.getFormatter(ss.getConf());
-    if(!(mdf instanceof JsonMetaDataFormatter)) 
+    if(!(mdf instanceof JsonMetaDataFormatter)) {
       return cpr;
+    }
     /*Here we want to encode the error in machine readable way (e.g. JSON)
      * Ideally, errorCode would always be set to a canonical error defined in ErrorMsg.
      * In practice that is rarely the case, so the messy logic below tries to tease

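The first Driver.java hunk swaps the plan-specific serialization helpers for generic ones. A hedged sketch of the same round trip with tightened stream handling; it assumes only the two Utilities calls shown in the hunk (with deserializeObject generic, as the uncast call site implies), and the file name is illustrative:

    import java.io.FileInputStream;
    import java.io.FileOutputStream;
    import java.io.IOException;
    import org.apache.hadoop.hive.ql.QueryPlan;
    import org.apache.hadoop.hive.ql.exec.Utilities;

    // Round-trips a QueryPlan through the generic helpers used above,
    // closing streams in finally blocks. Sketch only.
    static QueryPlan roundTrip(QueryPlan plan, String fileName)
        throws IOException {
      FileOutputStream fos = new FileOutputStream(fileName);
      try {
        Utilities.serializeObject(plan, fos);
      } finally {
        fos.close();
      }
      FileInputStream fis = new FileInputStream(fileName);
      try {
        QueryPlan newPlan = Utilities.deserializeObject(fis);
        return newPlan;
      } finally {
        fis.close();
      }
    }
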
Modified: hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java?rev=1508669&r1=1508668&r2=1508669&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java (original)
+++ hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java Tue Jul 30 22:22:35 2013
@@ -172,8 +172,8 @@ public enum ErrorMsg {
   DYNAMIC_PARTITION_STRICT_MODE(10096, "Dynamic partition strict mode requires at least one "
       + "static partition column. To turn this off set hive.exec.dynamic.partition.mode=nonstrict"),
   NONEXISTPARTCOL(10098, "Non-Partition column appears in the partition specification: "),
-  UNSUPPORTED_TYPE(10099, "DATE and DATETIME types aren't supported yet. Please use "
-      + "TIMESTAMP instead"),
+  UNSUPPORTED_TYPE(10099, "DATETIME type isn't supported yet. Please use "
+      + "DATE or TIMESTAMP instead"),
   CREATE_NON_NATIVE_AS(10100, "CREATE TABLE AS SELECT cannot be used for a non-native table"),
   LOAD_INTO_NON_NATIVE(10101, "A non-native table cannot be used as target for LOAD"),
   LOCKMGR_NOT_SPECIFIED(10102, "Lock manager not specified correctly, set hive.lock.manager"),

Modified: hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java?rev=1508669&r1=1508668&r2=1508669&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java (original)
+++ hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java Tue Jul 30 22:22:35 2013
@@ -233,7 +233,7 @@ public class QueryPlan implements Serial
         mapTask.setTaskId(stage.getStageId() + "_MAP");
         mapTask.setTaskType(TaskType.MAP);
         stage.addToTaskList(mapTask);
-        populateOperatorGraph(mapTask, mrTask.getWork().getAliasToWork()
+        populateOperatorGraph(mapTask, mrTask.getWork().getMapWork().getAliasToWork()
             .values());
 
         // populate reduce task
@@ -245,7 +245,7 @@ public class QueryPlan implements Serial
           stage.addToTaskList(reduceTask);
           Collection<Operator<? extends OperatorDesc>> reducerTopOps =
             new ArrayList<Operator<? extends OperatorDesc>>();
-          reducerTopOps.add(mrTask.getWork().getReducer());
+          reducerTopOps.add(mrTask.getWork().getReduceWork().getReducer());
           populateOperatorGraph(reduceTask, reducerTopOps);
         }
       } else {
@@ -382,7 +382,7 @@ public class QueryPlan implements Serial
       }
       if (task instanceof ExecDriver) {
         ExecDriver mrTask = (ExecDriver) task;
-        extractOperatorCounters(mrTask.getWork().getAliasToWork().values(),
+        extractOperatorCounters(mrTask.getWork().getMapWork().getAliasToWork().values(),
             task.getId() + "_MAP");
         if (mrTask.mapStarted()) {
           started.add(task.getId() + "_MAP");
@@ -393,7 +393,7 @@ public class QueryPlan implements Serial
         if (mrTask.hasReduce()) {
           Collection<Operator<? extends OperatorDesc>> reducerTopOps =
             new ArrayList<Operator<? extends OperatorDesc>>();
-          reducerTopOps.add(mrTask.getWork().getReducer());
+          reducerTopOps.add(mrTask.getWork().getReduceWork().getReducer());
           extractOperatorCounters(reducerTopOps, task.getId() + "_REDUCE");
           if (mrTask.reduceStarted()) {
             started.add(task.getId() + "_REDUCE");

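The QueryPlan.java hunks reflect MapredWork being split into per-phase objects: the alias-to-operator map now hangs off getWork().getMapWork(), and the reducer root off getWork().getReduceWork(). A hedged sketch of the new traversal pattern; walk() is a hypothetical visitor, and ReduceWork as the return type of getReduceWork() is an assumption:

    // Sketch: visit all root operators of an ExecDriver task after the
    // MapWork/ReduceWork split.
    static void visitRoots(ExecDriver mrTask) {
      MapredWork work = mrTask.getWork();
      // Map side: per-alias operator trees moved onto MapWork.
      for (Operator<? extends OperatorDesc> op :
          work.getMapWork().getAliasToWork().values()) {
        walk(op);  // walk() is a hypothetical visitor
      }
      // Reduce side: the reducer root moved onto ReduceWork.
      if (mrTask.hasReduce()) {
        walk(work.getReduceWork().getReducer());
      }
    }
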
Modified: hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java?rev=1508669&r1=1508668&r2=1508669&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java (original)
+++ hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java Tue Jul 30 22:22:35 2013
@@ -121,8 +121,9 @@ public class ExplainTask extends Task<Ex
     }
 
     if (work.getParseContext() != null) {
-      JSONObject jsonPlan = outputMap(work.getParseContext().getTopOps(),
-          "LOGICAL PLAN", out, jsonOutput, work.getExtended(), 0);
+      out.print("LOGICAL PLAN");
+      JSONObject jsonPlan = outputMap(work.getParseContext().getTopOps(), true,
+                                      out, jsonOutput, work.getExtended(), 0);
       if (out != null) {
         out.println();
       }
@@ -228,19 +229,16 @@ public class ExplainTask extends Task<Ex
     return sb.toString();
   }
 
-  private JSONObject outputMap(Map<?, ?> mp, String header, PrintStream out,
+  private JSONObject outputMap(Map<?, ?> mp, boolean hasHeader, PrintStream out,
       boolean extended, boolean jsonOutput, int indent) throws Exception {
 
-    boolean first_el = true;
     TreeMap<Object, Object> tree = new TreeMap<Object, Object>();
     tree.putAll(mp);
     JSONObject json = jsonOutput ? new JSONObject() : null;
+    if (out != null && hasHeader && !mp.isEmpty()) {
+      out.println();
+    }
     for (Entry<?, ?> ent : tree.entrySet()) {
-      if (first_el && (out != null)) {
-        out.println(header);
-      }
-      first_el = false;
-
       // Print the key
       if (out != null) {
         out.print(indentString(indent));
@@ -286,7 +284,7 @@ public class ExplainTask extends Task<Ex
     return jsonOutput ? json : null;
   }
 
-  private JSONArray outputList(List<?> l, String header, PrintStream out,
+  private JSONArray outputList(List<?> l, PrintStream out, boolean hasHeader,
       boolean extended, boolean jsonOutput, int indent) throws Exception {
 
     boolean first_el = true;
@@ -294,10 +292,6 @@ public class ExplainTask extends Task<Ex
     JSONArray outputArray = new JSONArray();
 
     for (Object o : l) {
-      if (first_el && (out != null)) {
-        out.print(header);
-      }
-
       if (isPrintable(o)) {
         String delim = first_el ? " " : ", ";
         if (out != null) {
@@ -311,11 +305,11 @@ public class ExplainTask extends Task<Ex
         nl = true;
       }
       else if (o instanceof Serializable) {
-        if (first_el && (out != null)) {
+        if (first_el && (out != null) && hasHeader) {
           out.println();
         }
         JSONObject jsonOut = outputPlan((Serializable) o, out, extended,
-            jsonOutput, jsonOutput ? 0 : indent + 2);
+            jsonOutput, jsonOutput ? 0 : (hasHeader ? indent + 2 : indent));
         if (jsonOutput) {
           outputArray.put(jsonOut);
         }
@@ -439,10 +433,14 @@ public class ExplainTask extends Task<Ex
           }
 
           String header = null;
+          boolean skipHeader = xpl_note.skipHeader();
+          boolean emptyHeader = false;
+
           if (!xpl_note.displayName().equals("")) {
             header = indentString(prop_indents) + xpl_note.displayName() + ":";
           }
           else {
+            emptyHeader = true;
             prop_indents = indent;
             header = indentString(prop_indents);
           }
@@ -450,7 +448,9 @@ public class ExplainTask extends Task<Ex
           // Try the output as a primitive object
           if (isPrintable(val)) {
             if (out != null && shouldPrint(xpl_note, val)) {
-              out.printf("%s ", header);
+              if (!skipHeader) {
+                out.printf("%s ", header);
+              }
               out.println(val);
             }
             if (jsonOutput) {
@@ -458,12 +458,26 @@ public class ExplainTask extends Task<Ex
             }
             continue;
           }
+
+          int ind = 0;
+          if (!jsonOutput) {
+            if (!skipHeader) {
+              ind = prop_indents + 2;
+            } else {
+              ind = indent;
+            }
+          }
+
           // Try this as a map
           try {
             // Go through the map and print out the stuff
             Map<?, ?> mp = (Map<?, ?>) val;
-            JSONObject jsonOut = outputMap(mp, header, out, extended, jsonOutput,
-                jsonOutput ? 0 : prop_indents + 2);
+
+            if (out != null && !skipHeader && mp != null && !mp.isEmpty()) {
+              out.print(header);
+            }
+
+            JSONObject jsonOut = outputMap(mp, !skipHeader && !emptyHeader, out, extended, jsonOutput, ind);
             if (jsonOutput) {
               json.put(header, jsonOut);
             }
@@ -476,8 +490,12 @@ public class ExplainTask extends Task<Ex
           // Try this as a list
           try {
             List<?> l = (List<?>) val;
-            JSONArray jsonOut = outputList(l, header, out, extended, jsonOutput,
-                jsonOutput ? 0 : prop_indents + 2);
+
+            if (out != null && !skipHeader && l != null && !l.isEmpty()) {
+              out.print(header);
+            }
+
+            JSONArray jsonOut = outputList(l, out, !skipHeader && !emptyHeader, extended, jsonOutput, ind);
 
             if (jsonOutput) {
               json.put(header, jsonOut);
@@ -492,11 +510,11 @@ public class ExplainTask extends Task<Ex
           // Finally check if it is serializable
           try {
             Serializable s = (Serializable) val;
-            if (out != null) {
+
+            if (!skipHeader && out != null) {
               out.println(header);
             }
-            JSONObject jsonOut = outputPlan(s, out, extended, jsonOutput,
-                jsonOutput ? 0 : prop_indents + 2);
+            JSONObject jsonOut = outputPlan(s, out, extended, jsonOutput, ind);
             if (jsonOutput) {
               json.put(header, jsonOut);
             }

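A minimal sketch of the indent rule these ExplainTask hunks introduce (names simplified and hypothetical, not the actual ExplainTask API): when an annotation asks for its header to be skipped, nested output stays at the caller's indent instead of stepping two spaces right under a header line, and JSON mode ignores text indentation entirely.

    // Hypothetical illustration of the new indent selection; not the real class.
    public class IndentRuleSketch {
      static int childIndent(boolean jsonOutput, boolean skipHeader,
                             int propIndents, int indent) {
        if (jsonOutput) {
          return 0;                                  // JSON carries its own structure
        }
        return skipHeader ? indent : propIndents + 2;
      }

      public static void main(String[] args) {
        System.out.println(childIndent(false, false, 4, 4)); // 6: nested under a header
        System.out.println(childIndent(false, true, 4, 4));  // 4: header suppressed
      }
    }
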
Modified: hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java?rev=1508669&r1=1508668&r2=1508669&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java (original)
+++ hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java Tue Jul 30 22:22:35 2013
@@ -343,6 +343,8 @@ public final class FunctionRegistry {
     registerUDF(serdeConstants.STRING_TYPE_NAME, UDFToString.class, false,
         UDFToString.class.getSimpleName());
 
+    registerGenericUDF(serdeConstants.DATE_TYPE_NAME,
+        GenericUDFToDate.class);
     registerGenericUDF(serdeConstants.TIMESTAMP_TYPE_NAME,
         GenericUDFTimestamp.class);
     registerGenericUDF(serdeConstants.BINARY_TYPE_NAME,
@@ -707,6 +709,11 @@ public final class FunctionRegistry {
     if (from.equals(TypeInfoFactory.voidTypeInfo)) {
       return true;
     }
+    // Allow implicit Date to String conversion
+    if (from.equals(TypeInfoFactory.dateTypeInfo)
+        && to.equals(TypeInfoFactory.stringTypeInfo)) {
+      return true;
+    }
 
     if (from.equals(TypeInfoFactory.timestampTypeInfo)
         && to.equals(TypeInfoFactory.stringTypeInfo)) {
@@ -1267,7 +1274,8 @@ public final class FunctionRegistry {
         udfClass == UDFToDouble.class || udfClass == UDFToFloat.class ||
         udfClass == UDFToInteger.class || udfClass == UDFToLong.class ||
         udfClass == UDFToShort.class || udfClass == UDFToString.class ||
-        udfClass == GenericUDFTimestamp.class || udfClass == GenericUDFToBinary.class;
+        udfClass == GenericUDFTimestamp.class || udfClass == GenericUDFToBinary.class ||
+        udfClass == GenericUDFToDate.class;
   }
 
   /**

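The FunctionRegistry hunks register GenericUDFToDate under the "date" type name and let DATE values convert implicitly to STRING, mirroring the existing TIMESTAMP-to-STRING rule; the reverse direction still requires an explicit CAST. A rough sketch of that check, using plain type-name strings in place of Hive's TypeInfo objects:

    // Simplified stand-in for the implicit-conversion test; the real method
    // compares TypeInfo instances, not strings.
    public class ImplicitCastSketch {
      static boolean implicitConvertible(String from, String to) {
        if ("void".equals(from)) {
          return true;                       // NULL converts to any type
        }
        // DATE and TIMESTAMP both widen implicitly to STRING
        return ("date".equals(from) || "timestamp".equals(from))
            && "string".equals(to);
      }

      public static void main(String[] args) {
        System.out.println(implicitConvertible("date", "string"));  // true
        System.out.println(implicitConvertible("string", "date"));  // false: CAST needed
      }
    }
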
Modified: hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java?rev=1508669&r1=1508668&r2=1508669&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java (original)
+++ hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java Tue Jul 30 22:22:35 2013
@@ -36,7 +36,7 @@ import org.apache.hadoop.hive.conf.HiveC
 import org.apache.hadoop.hive.ql.exec.mr.ExecMapperContext;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
-import org.apache.hadoop.hive.ql.plan.MapredWork;
+import org.apache.hadoop.hive.ql.plan.MapWork;
 import org.apache.hadoop.hive.ql.plan.OperatorDesc;
 import org.apache.hadoop.hive.ql.plan.PartitionDesc;
 import org.apache.hadoop.hive.ql.plan.TableDesc;
@@ -63,7 +63,7 @@ import org.apache.hadoop.util.StringUtil
  * different from regular operators in that it starts off by processing a
  * Writable data structure from a Table (instead of a Hive Object).
  **/
-public class MapOperator extends Operator<MapredWork> implements Serializable, Cloneable {
+public class MapOperator extends Operator<MapWork> implements Serializable, Cloneable {
 
   private static final long serialVersionUID = 1L;
 
@@ -229,14 +229,14 @@ public class MapOperator extends Operato
    * @param mrwork
    * @throws HiveException
    */
-  public void initializeAsRoot(Configuration hconf, MapredWork mrwork)
+  public void initializeAsRoot(Configuration hconf, MapWork mapWork)
       throws HiveException {
-    setConf(mrwork);
+    setConf(mapWork);
     setChildren(hconf);
     initialize(hconf, null);
   }
 
-  private MapOpCtx initObjectInspector(MapredWork conf,
+  private MapOpCtx initObjectInspector(MapWork conf,
       Configuration hconf, String onefile, Map<TableDesc, StructObjectInspector> convertedOI)
           throws HiveException,
       ClassNotFoundException, InstantiationException, IllegalAccessException,

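MapOperator is re-parented from Operator<MapredWork> to Operator<MapWork>, reflecting the split of MapredWork into separate map-side and reduce-side plan halves. A hedged sketch of how a caller now feeds it the map-side half (the wiring shown is illustrative only; in Hive the initialization happens inside the execution framework):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.ql.exec.MapOperator;
    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hadoop.hive.ql.plan.MapWork;
    import org.apache.hadoop.hive.ql.plan.MapredWork;

    // Illustrative caller only; not taken from the patch.
    public class MapOpWiringSketch {
      static void initRoot(Configuration hconf, MapredWork mrWork, MapOperator mapOp)
          throws HiveException {
        MapWork mapWork = mrWork.getMapWork();  // map-side half of the old MapredWork
        mapOp.initializeAsRoot(hconf, mapWork); // setConf + setChildren + initialize
      }
    }
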
Modified: hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java?rev=1508669&r1=1508668&r2=1508669&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java (original)
+++ hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java Tue Jul 30 22:22:35 2013
@@ -59,6 +59,7 @@ import org.apache.hadoop.hive.ql.plan.Dy
 import org.apache.hadoop.hive.ql.plan.LoadFileDesc;
 import org.apache.hadoop.hive.ql.plan.LoadMultiFilesDesc;
 import org.apache.hadoop.hive.ql.plan.LoadTableDesc;
+import org.apache.hadoop.hive.ql.plan.MapWork;
 import org.apache.hadoop.hive.ql.plan.MapredWork;
 import org.apache.hadoop.hive.ql.plan.MoveWork;
 import org.apache.hadoop.hive.ql.plan.api.StageType;
@@ -306,9 +307,13 @@ public class MoveTask extends Task<MoveW
             // the directory this move task is moving
             if (task instanceof MapRedTask) {
               MapredWork work = (MapredWork)task.getWork();
-              bucketCols = work.getBucketedColsByDirectory().get(path);
-              sortCols = work.getSortedColsByDirectory().get(path);
-              numBuckets = work.getNumReduceTasks();
+              MapWork mapWork = work.getMapWork();
+              bucketCols = mapWork.getBucketedColsByDirectory().get(path);
+              sortCols = mapWork.getSortedColsByDirectory().get(path);
+              if (work.getReduceWork() != null) {
+                numBuckets = work.getReduceWork().getNumReduceTasks();
+              }
+
               if (bucketCols != null || sortCols != null) {
                 // This must be a final map reduce task (the task containing the file sink
                 // operator that writes the final output)

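After the split, bucket and sort metadata hang off the MapWork half while the reducer count lives on an optional ReduceWork, so MoveTask now has to tolerate map-only jobs. A condensed, hypothetical version of the guarded lookup:

    import org.apache.hadoop.hive.ql.plan.MapredWork;

    // Condensed from the hunk above; bucket/sort-column handling omitted.
    public class ReduceGuardSketch {
      // Map-only jobs carry no ReduceWork, hence the null return.
      static Integer numReducersFor(MapredWork work) {
        return work.getReduceWork() != null
            ? work.getReduceWork().getNumReduceTasks()
            : null;
      }
    }
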
Modified: hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/PTFPersistence.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/PTFPersistence.java?rev=1508669&r1=1508668&r2=1508669&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/PTFPersistence.java (original)
+++ hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/PTFPersistence.java Tue Jul 30 22:22:35 2013
@@ -442,16 +442,20 @@ public class PTFPersistence {
     @Override
     protected void reset(int startOffset) throws HiveException {
       PTFPersistence.lock(lock.writeLock());
+      currentSize = 0;
       try {
-        currentSize = 0;
-        for(int i=0; i < partitions.size() - 1; i++) {
-          PersistentByteBasedList p = (PersistentByteBasedList)
-                          partitions.remove(0);
-          reusableFiles.add(p.getFile());
-          partitionOffsets.remove(0);
+        for (int i = 0; i < partitions.size() - 1; i++) {
+          ByteBasedList p = partitions.get(i);
+          reusableFiles.add(((PersistentByteBasedList)p).getFile());
         }
-        partitions.get(0).reset(0);
-        partitionOffsets.set(0, currentSize);
+        ByteBasedList memstore = partitions.get(partitions.size() - 1);
+        memstore.reset(0);
+
+        partitions.clear();
+        partitionOffsets.clear();
+
+        partitions.add(memstore);
+        partitionOffsets.add(0);
       }
       finally {
         lock.writeLock().unlock();

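The PTFPersistence change also repairs a subtle loop bug: the old reset removed elements from partitions while its bound re-read partitions.size(), so it could terminate early, recycling only some of the spilled segments and then resetting a spilled partition rather than the in-memory store. The rewrite iterates without mutating and rebuilds both lists around the single memstore. A self-contained demonstration of the shrinking-bound pitfall:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    // Removing from a list while the loop bound re-reads size() skips
    // roughly half the elements; this is the pattern the hunk eliminates.
    public class ShrinkingLoopSketch {
      public static void main(String[] args) {
        List<Integer> xs = new ArrayList<>(Arrays.asList(1, 2, 3, 4, 5, 6));
        for (int i = 0; i < xs.size() - 1; i++) {
          xs.remove(0);            // size() shrinks as i grows
        }
        System.out.println(xs);    // [4, 5, 6], not the intended single survivor
      }
    }
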
Modified: hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java?rev=1508669&r1=1508668&r2=1508669&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java (original)
+++ hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java Tue Jul 30 22:22:35 2013
@@ -46,6 +46,7 @@ import org.apache.hadoop.io.BytesWritabl
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.mapred.Reporter;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.StringUtils;
 
@@ -532,22 +533,68 @@ public class ScriptOperator extends Oper
     }
   }
 
+  class CounterStatusProcessor {
+
+    private final String reporterPrefix;
+    private final String counterPrefix;
+    private final String statusPrefix;
+    private final Reporter reporter;
+
+    CounterStatusProcessor(Configuration hconf, Reporter reporter) {
+      this.reporterPrefix = HiveConf.getVar(hconf, HiveConf.ConfVars.STREAMREPORTERPERFIX);
+      this.counterPrefix = reporterPrefix + "counter:";
+      this.statusPrefix = reporterPrefix + "status:";
+      this.reporter = reporter;
+    }
+
+    private boolean process(String line) {
+      if (line.startsWith(reporterPrefix)) {
+        if (line.startsWith(counterPrefix)) {
+          incrCounter(line);
+        }
+        if (line.startsWith(statusPrefix)) {
+          setStatus(line);
+        }
+        return true;
+      } else {
+        return false;
+      }
+    }
+
+    private void incrCounter(String line) {
+      String trimmedLine = line.substring(counterPrefix.length()).trim();
+      String[] columns = trimmedLine.split(",");
+      if (columns.length == 3) {
+        try {
+          reporter.incrCounter(columns[0], columns[1], Long.parseLong(columns[2]));
+        } catch (NumberFormatException e) {
+          LOG.warn("Cannot parse counter increment '" + columns[2] +
+              "' from line " + line);
+        }
+      } else {
+        LOG.warn("Cannot parse counter line: " + line);
+      }
+    }
+
+    private void setStatus(String line) {
+      reporter.setStatus(line.substring(statusPrefix.length()).trim());
+    }
+  }
   /**
    * The processor for stderr stream.
-   *
-   * TODO: In the future when we move to hadoop 0.18 and above, we should borrow
-   * the logic from HadoopStreaming: PipeMapRed.java MRErrorThread to support
-   * counters and status updates.
    */
   class ErrorStreamProcessor implements StreamProcessor {
     private long bytesCopied = 0;
     private final long maxBytes;
-
     private long lastReportTime;
+    private CounterStatusProcessor counterStatus;
 
     public ErrorStreamProcessor(int maxBytes) {
       this.maxBytes = maxBytes;
       lastReportTime = 0;
+      if (HiveConf.getBoolVar(hconf, HiveConf.ConfVars.STREAMREPORTERENABLED)) {
+        counterStatus = new CounterStatusProcessor(hconf, reporter);
+      }
     }
 
     public void processLine(Writable line) throws HiveException {
@@ -571,6 +618,14 @@ public class ScriptOperator extends Oper
         reporter.progress();
       }
 
+      if (reporter != null) {
+        if (counterStatus != null) {
+          if (counterStatus.process(stringLine)) {
+            return;
+          }
+        }
+      }
+
       if ((maxBytes < 0) || (bytesCopied < maxBytes)) {
         System.err.println(stringLine);
       }
@@ -659,7 +714,7 @@ public class ScriptOperator extends Oper
     for (int i = 0; i < inArgs.length; i++) {
       finalArgv[wrapComponents.length + i] = inArgs[i];
     }
-    return (finalArgv);
+    return finalArgv;
   }
 
  // Code below shamelessly borrowed from Hadoop Streaming
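
The new CounterStatusProcessor lets a streaming script drive job counters and status text from its stderr. A line of the form <prefix>counter:<group>,<counter>,<amount> increments a counter, and <prefix>status:<message> sets the task status; <prefix> comes from the stream-reporter prefix setting read above (conventionally "reporter:", as in Hadoop Streaming; the exact default lives in HiveConf). Malformed counter lines are logged and swallowed rather than echoed to stderr. A standalone sketch of the parsing, with the prefix value assumed:

    // Demo of the stderr protocol handled above; the "reporter:" prefix is an
    // assumption here, configured via HiveConf in the real operator.
    public class StderrProtocolSketch {
      public static void main(String[] args) {
        String counterLine = "reporter:counter:MyGroup,rows_skipped,42";
        String statusLine  = "reporter:status:phase two of three";

        // counter lines carry exactly three comma-separated columns
        String body = counterLine.substring("reporter:counter:".length()).trim();
        String[] cols = body.split(",");
        System.out.println(cols[0] + "/" + cols[1] + " += " + Long.parseLong(cols[2]));

        System.out.println("status = "
            + statusLine.substring("reporter:status:".length()).trim());
      }
    }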