Posted to commits@hive.apache.org by br...@apache.org on 2013/10/04 23:30:46 UTC

svn commit: r1529308 [4/10] - in /hive/branches/maven: ./ ant/src/org/apache/hadoop/hive/ant/ beeline/src/java/org/apache/hive/beeline/ beeline/src/test/org/apache/hive/beeline/src/test/ bin/ common/ common/src/java/org/apache/hadoop/hive/conf/ common/...
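
This revision extends the ORC column-statistics protobufs in three ways: StringStatistics gains an optional sint64 sum = 3 recording the total length of all strings in a stripe; a new BinaryStatistics message carries an optional sint64 sum = 1 for the total binary blob length in a stripe; and ColumnStatistics gains an optional BinaryStatistics binaryStatistics = 8. A minimal sketch of how a writer might populate the new fields through the generated builders follows; it is not part of this commit, and the class, method, and parameter names (OrcStatsSketch, example, rowCount, totalStringBytes, totalBlobBytes) are illustrative only:

  import org.apache.hadoop.hive.ql.io.orc.OrcProto;

  public class OrcStatsSketch {
    // Hypothetical helper (not in this commit): builds a ColumnStatistics
    // that exercises the fields this revision adds.
    static OrcProto.ColumnStatistics example(long rowCount,
                                             long totalStringBytes,
                                             long totalBlobBytes) {
      OrcProto.StringStatistics strStats = OrcProto.StringStatistics.newBuilder()
          .setMinimum("aardvark")
          .setMaximum("zebra")
          .setSum(totalStringBytes)        // new: sint64 sum = 3
          .build();
      OrcProto.BinaryStatistics binStats = OrcProto.BinaryStatistics.newBuilder()
          .setSum(totalBlobBytes)          // new message: sint64 sum = 1
          .build();
      return OrcProto.ColumnStatistics.newBuilder()
          .setNumberOfValues(rowCount)
          .setStringStatistics(strStats)
          .setBinaryStatistics(binStats)   // new: field 8 on ColumnStatistics
          .build();
    }
  }

Since every new field is optional, readers of older files see hasSum() and hasBinaryStatistics() return false and existing data remains readable.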

Modified: hive/branches/maven/ql/src/gen/protobuf/gen-java/org/apache/hadoop/hive/ql/io/orc/OrcProto.java
URL: http://svn.apache.org/viewvc/hive/branches/maven/ql/src/gen/protobuf/gen-java/org/apache/hadoop/hive/ql/io/orc/OrcProto.java?rev=1529308&r1=1529307&r2=1529308&view=diff
==============================================================================
--- hive/branches/maven/ql/src/gen/protobuf/gen-java/org/apache/hadoop/hive/ql/io/orc/OrcProto.java (original)
+++ hive/branches/maven/ql/src/gen/protobuf/gen-java/org/apache/hadoop/hive/ql/io/orc/OrcProto.java Fri Oct  4 21:30:38 2013
@@ -1256,6 +1256,24 @@ public final class OrcProto {
      */
     com.google.protobuf.ByteString
         getMaximumBytes();
+
+    // optional sint64 sum = 3;
+    /**
+     * <code>optional sint64 sum = 3;</code>
+     *
+     * <pre>
+     * sum will store the total length of all strings in a stripe
+     * </pre>
+     */
+    boolean hasSum();
+    /**
+     * <code>optional sint64 sum = 3;</code>
+     *
+     * <pre>
+     * sum will store the total length of all strings in a stripe
+     * </pre>
+     */
+    long getSum();
   }
   /**
    * Protobuf type {@code org.apache.hadoop.hive.ql.io.orc.StringStatistics}
@@ -1318,6 +1336,11 @@ public final class OrcProto {
               maximum_ = input.readBytes();
               break;
             }
+            case 24: {
+              bitField0_ |= 0x00000004;
+              sum_ = input.readSInt64();
+              break;
+            }
           }
         }
       } catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -1444,9 +1467,34 @@ public final class OrcProto {
       }
     }
 
+    // optional sint64 sum = 3;
+    public static final int SUM_FIELD_NUMBER = 3;
+    private long sum_;
+    /**
+     * <code>optional sint64 sum = 3;</code>
+     *
+     * <pre>
+     * sum will store the total length of all strings in a stripe
+     * </pre>
+     */
+    public boolean hasSum() {
+      return ((bitField0_ & 0x00000004) == 0x00000004);
+    }
+    /**
+     * <code>optional sint64 sum = 3;</code>
+     *
+     * <pre>
+     * sum will store the total length of all strings in a stripe
+     * </pre>
+     */
+    public long getSum() {
+      return sum_;
+    }
+
     private void initFields() {
       minimum_ = "";
       maximum_ = "";
+      sum_ = 0L;
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -1466,6 +1514,9 @@ public final class OrcProto {
       if (((bitField0_ & 0x00000002) == 0x00000002)) {
         output.writeBytes(2, getMaximumBytes());
       }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        output.writeSInt64(3, sum_);
+      }
       getUnknownFields().writeTo(output);
     }
 
@@ -1483,6 +1534,10 @@ public final class OrcProto {
         size += com.google.protobuf.CodedOutputStream
           .computeBytesSize(2, getMaximumBytes());
       }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeSInt64Size(3, sum_);
+      }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
       return size;
@@ -1603,6 +1658,8 @@ public final class OrcProto {
         bitField0_ = (bitField0_ & ~0x00000001);
         maximum_ = "";
         bitField0_ = (bitField0_ & ~0x00000002);
+        sum_ = 0L;
+        bitField0_ = (bitField0_ & ~0x00000004);
         return this;
       }
 
@@ -1639,6 +1696,10 @@ public final class OrcProto {
           to_bitField0_ |= 0x00000002;
         }
         result.maximum_ = maximum_;
+        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+          to_bitField0_ |= 0x00000004;
+        }
+        result.sum_ = sum_;
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
@@ -1665,6 +1726,9 @@ public final class OrcProto {
           maximum_ = other.maximum_;
           onChanged();
         }
+        if (other.hasSum()) {
+          setSum(other.getSum());
+        }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }
@@ -1840,6 +1904,55 @@ public final class OrcProto {
         return this;
       }
 
+      // optional sint64 sum = 3;
+      private long sum_ ;
+      /**
+       * <code>optional sint64 sum = 3;</code>
+       *
+       * <pre>
+       * sum will store the total length of all strings in a stripe
+       * </pre>
+       */
+      public boolean hasSum() {
+        return ((bitField0_ & 0x00000004) == 0x00000004);
+      }
+      /**
+       * <code>optional sint64 sum = 3;</code>
+       *
+       * <pre>
+       * sum will store the total length of all strings in a stripe
+       * </pre>
+       */
+      public long getSum() {
+        return sum_;
+      }
+      /**
+       * <code>optional sint64 sum = 3;</code>
+       *
+       * <pre>
+       * sum will store the total length of all strings in a stripe
+       * </pre>
+       */
+      public Builder setSum(long value) {
+        bitField0_ |= 0x00000004;
+        sum_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional sint64 sum = 3;</code>
+       *
+       * <pre>
+       * sum will store the total length of all strings in a stripe
+       * </pre>
+       */
+      public Builder clearSum() {
+        bitField0_ = (bitField0_ & ~0x00000004);
+        sum_ = 0L;
+        onChanged();
+        return this;
+      }
+
       // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.ql.io.orc.StringStatistics)
     }
 
@@ -3269,25 +3382,502 @@ public final class OrcProto {
       return minimum_;
     }
 
-    // optional sint32 maximum = 2;
-    public static final int MAXIMUM_FIELD_NUMBER = 2;
-    private int maximum_;
+    // optional sint32 maximum = 2;
+    public static final int MAXIMUM_FIELD_NUMBER = 2;
+    private int maximum_;
+    /**
+     * <code>optional sint32 maximum = 2;</code>
+     */
+    public boolean hasMaximum() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    /**
+     * <code>optional sint32 maximum = 2;</code>
+     */
+    public int getMaximum() {
+      return maximum_;
+    }
+
+    private void initFields() {
+      minimum_ = 0;
+      maximum_ = 0;
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeSInt32(1, minimum_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeSInt32(2, maximum_);
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeSInt32Size(1, minimum_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeSInt32Size(2, maximum_);
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    public static org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code org.apache.hadoop.hive.ql.io.orc.DateStatistics}
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatisticsOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hive.ql.io.orc.OrcProto.internal_static_org_apache_hadoop_hive_ql_io_orc_DateStatistics_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hive.ql.io.orc.OrcProto.internal_static_org_apache_hadoop_hive_ql_io_orc_DateStatistics_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.class, org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        minimum_ = 0;
+        bitField0_ = (bitField0_ & ~0x00000001);
+        maximum_ = 0;
+        bitField0_ = (bitField0_ & ~0x00000002);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hive.ql.io.orc.OrcProto.internal_static_org_apache_hadoop_hive_ql_io_orc_DateStatistics_descriptor;
+      }
+
+      public org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics getDefaultInstanceForType() {
+        return org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics build() {
+        org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics buildPartial() {
+        org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics result = new org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.minimum_ = minimum_;
+        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+          to_bitField0_ |= 0x00000002;
+        }
+        result.maximum_ = maximum_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics) {
+          return mergeFrom((org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics other) {
+        if (other == org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.getDefaultInstance()) return this;
+        if (other.hasMinimum()) {
+          setMinimum(other.getMinimum());
+        }
+        if (other.hasMaximum()) {
+          setMaximum(other.getMaximum());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // optional sint32 minimum = 1;
+      private int minimum_ ;
+      /**
+       * <code>optional sint32 minimum = 1;</code>
+       *
+       * <pre>
+       * min,max values saved as days since epoch
+       * </pre>
+       */
+      public boolean hasMinimum() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>optional sint32 minimum = 1;</code>
+       *
+       * <pre>
+       * min,max values saved as days since epoch
+       * </pre>
+       */
+      public int getMinimum() {
+        return minimum_;
+      }
+      /**
+       * <code>optional sint32 minimum = 1;</code>
+       *
+       * <pre>
+       * min,max values saved as days since epoch
+       * </pre>
+       */
+      public Builder setMinimum(int value) {
+        bitField0_ |= 0x00000001;
+        minimum_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional sint32 minimum = 1;</code>
+       *
+       * <pre>
+       * min,max values saved as days since epoch
+       * </pre>
+       */
+      public Builder clearMinimum() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        minimum_ = 0;
+        onChanged();
+        return this;
+      }
+
+      // optional sint32 maximum = 2;
+      private int maximum_ ;
+      /**
+       * <code>optional sint32 maximum = 2;</code>
+       */
+      public boolean hasMaximum() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      /**
+       * <code>optional sint32 maximum = 2;</code>
+       */
+      public int getMaximum() {
+        return maximum_;
+      }
+      /**
+       * <code>optional sint32 maximum = 2;</code>
+       */
+      public Builder setMaximum(int value) {
+        bitField0_ |= 0x00000002;
+        maximum_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional sint32 maximum = 2;</code>
+       */
+      public Builder clearMaximum() {
+        bitField0_ = (bitField0_ & ~0x00000002);
+        maximum_ = 0;
+        onChanged();
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.ql.io.orc.DateStatistics)
+    }
+
+    static {
+      defaultInstance = new DateStatistics(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.ql.io.orc.DateStatistics)
+  }
+
+  public interface BinaryStatisticsOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // optional sint64 sum = 1;
+    /**
+     * <code>optional sint64 sum = 1;</code>
+     *
+     * <pre>
+     * sum will store the total binary blob length in a stripe
+     * </pre>
+     */
+    boolean hasSum();
+    /**
+     * <code>optional sint64 sum = 1;</code>
+     *
+     * <pre>
+     * sum will store the total binary blob length in a stripe
+     * </pre>
+     */
+    long getSum();
+  }
+  /**
+   * Protobuf type {@code org.apache.hadoop.hive.ql.io.orc.BinaryStatistics}
+   */
+  public static final class BinaryStatistics extends
+      com.google.protobuf.GeneratedMessage
+      implements BinaryStatisticsOrBuilder {
+    // Use BinaryStatistics.newBuilder() to construct.
+    private BinaryStatistics(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private BinaryStatistics(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final BinaryStatistics defaultInstance;
+    public static BinaryStatistics getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public BinaryStatistics getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private BinaryStatistics(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 8: {
+              bitField0_ |= 0x00000001;
+              sum_ = input.readSInt64();
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hive.ql.io.orc.OrcProto.internal_static_org_apache_hadoop_hive_ql_io_orc_BinaryStatistics_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hive.ql.io.orc.OrcProto.internal_static_org_apache_hadoop_hive_ql_io_orc_BinaryStatistics_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics.class, org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<BinaryStatistics> PARSER =
+        new com.google.protobuf.AbstractParser<BinaryStatistics>() {
+      public BinaryStatistics parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new BinaryStatistics(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<BinaryStatistics> getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // optional sint64 sum = 1;
+    public static final int SUM_FIELD_NUMBER = 1;
+    private long sum_;
     /**
-     * <code>optional sint32 maximum = 2;</code>
+     * <code>optional sint64 sum = 1;</code>
+     *
+     * <pre>
+     * sum will store the total binary blob length in a stripe
+     * </pre>
      */
-    public boolean hasMaximum() {
-      return ((bitField0_ & 0x00000002) == 0x00000002);
+    public boolean hasSum() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
     }
     /**
-     * <code>optional sint32 maximum = 2;</code>
+     * <code>optional sint64 sum = 1;</code>
+     *
+     * <pre>
+     * sum will store the total binary blob length in a stripe
+     * </pre>
      */
-    public int getMaximum() {
-      return maximum_;
+    public long getSum() {
+      return sum_;
     }
 
     private void initFields() {
-      minimum_ = 0;
-      maximum_ = 0;
+      sum_ = 0L;
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -3302,10 +3892,7 @@ public final class OrcProto {
                         throws java.io.IOException {
       getSerializedSize();
       if (((bitField0_ & 0x00000001) == 0x00000001)) {
-        output.writeSInt32(1, minimum_);
-      }
-      if (((bitField0_ & 0x00000002) == 0x00000002)) {
-        output.writeSInt32(2, maximum_);
+        output.writeSInt64(1, sum_);
       }
       getUnknownFields().writeTo(output);
     }
@@ -3318,11 +3905,7 @@ public final class OrcProto {
       size = 0;
       if (((bitField0_ & 0x00000001) == 0x00000001)) {
         size += com.google.protobuf.CodedOutputStream
-          .computeSInt32Size(1, minimum_);
-      }
-      if (((bitField0_ & 0x00000002) == 0x00000002)) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeSInt32Size(2, maximum_);
+          .computeSInt64Size(1, sum_);
       }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
@@ -3336,53 +3919,53 @@ public final class OrcProto {
       return super.writeReplace();
     }
 
-    public static org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics parseFrom(
+    public static org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics parseFrom(
         com.google.protobuf.ByteString data)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data);
     }
-    public static org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics parseFrom(
+    public static org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics parseFrom(
         com.google.protobuf.ByteString data,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data, extensionRegistry);
     }
-    public static org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics parseFrom(byte[] data)
+    public static org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics parseFrom(byte[] data)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data);
     }
-    public static org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics parseFrom(
+    public static org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics parseFrom(
         byte[] data,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data, extensionRegistry);
     }
-    public static org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics parseFrom(java.io.InputStream input)
+    public static org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics parseFrom(java.io.InputStream input)
         throws java.io.IOException {
       return PARSER.parseFrom(input);
     }
-    public static org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics parseFrom(
+    public static org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics parseFrom(
         java.io.InputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
       return PARSER.parseFrom(input, extensionRegistry);
     }
-    public static org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics parseDelimitedFrom(java.io.InputStream input)
+    public static org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics parseDelimitedFrom(java.io.InputStream input)
         throws java.io.IOException {
       return PARSER.parseDelimitedFrom(input);
     }
-    public static org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics parseDelimitedFrom(
+    public static org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics parseDelimitedFrom(
         java.io.InputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
       return PARSER.parseDelimitedFrom(input, extensionRegistry);
     }
-    public static org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics parseFrom(
+    public static org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics parseFrom(
         com.google.protobuf.CodedInputStream input)
         throws java.io.IOException {
       return PARSER.parseFrom(input);
     }
-    public static org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics parseFrom(
+    public static org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics parseFrom(
         com.google.protobuf.CodedInputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
@@ -3391,7 +3974,7 @@ public final class OrcProto {
 
     public static Builder newBuilder() { return Builder.create(); }
     public Builder newBuilderForType() { return newBuilder(); }
-    public static Builder newBuilder(org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics prototype) {
+    public static Builder newBuilder(org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics prototype) {
       return newBuilder().mergeFrom(prototype);
     }
     public Builder toBuilder() { return newBuilder(this); }
@@ -3403,24 +3986,24 @@ public final class OrcProto {
       return builder;
     }
     /**
-     * Protobuf type {@code org.apache.hadoop.hive.ql.io.orc.DateStatistics}
+     * Protobuf type {@code org.apache.hadoop.hive.ql.io.orc.BinaryStatistics}
      */
     public static final class Builder extends
         com.google.protobuf.GeneratedMessage.Builder<Builder>
-       implements org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatisticsOrBuilder {
+       implements org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatisticsOrBuilder {
       public static final com.google.protobuf.Descriptors.Descriptor
           getDescriptor() {
-        return org.apache.hadoop.hive.ql.io.orc.OrcProto.internal_static_org_apache_hadoop_hive_ql_io_orc_DateStatistics_descriptor;
+        return org.apache.hadoop.hive.ql.io.orc.OrcProto.internal_static_org_apache_hadoop_hive_ql_io_orc_BinaryStatistics_descriptor;
       }
 
       protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
           internalGetFieldAccessorTable() {
-        return org.apache.hadoop.hive.ql.io.orc.OrcProto.internal_static_org_apache_hadoop_hive_ql_io_orc_DateStatistics_fieldAccessorTable
+        return org.apache.hadoop.hive.ql.io.orc.OrcProto.internal_static_org_apache_hadoop_hive_ql_io_orc_BinaryStatistics_fieldAccessorTable
             .ensureFieldAccessorsInitialized(
-                org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.class, org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.Builder.class);
+                org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics.class, org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics.Builder.class);
       }
 
-      // Construct using org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.newBuilder()
+      // Construct using org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics.newBuilder()
       private Builder() {
         maybeForceBuilderInitialization();
       }
@@ -3440,10 +4023,8 @@ public final class OrcProto {
 
       public Builder clear() {
         super.clear();
-        minimum_ = 0;
+        sum_ = 0L;
         bitField0_ = (bitField0_ & ~0x00000001);
-        maximum_ = 0;
-        bitField0_ = (bitField0_ & ~0x00000002);
         return this;
       }
 
@@ -3453,54 +4034,47 @@ public final class OrcProto {
 
       public com.google.protobuf.Descriptors.Descriptor
           getDescriptorForType() {
-        return org.apache.hadoop.hive.ql.io.orc.OrcProto.internal_static_org_apache_hadoop_hive_ql_io_orc_DateStatistics_descriptor;
+        return org.apache.hadoop.hive.ql.io.orc.OrcProto.internal_static_org_apache_hadoop_hive_ql_io_orc_BinaryStatistics_descriptor;
       }
 
-      public org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics getDefaultInstanceForType() {
-        return org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.getDefaultInstance();
+      public org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics getDefaultInstanceForType() {
+        return org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics.getDefaultInstance();
       }
 
-      public org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics build() {
-        org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics result = buildPartial();
+      public org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics build() {
+        org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics result = buildPartial();
         if (!result.isInitialized()) {
           throw newUninitializedMessageException(result);
         }
         return result;
       }
 
-      public org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics buildPartial() {
-        org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics result = new org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics(this);
+      public org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics buildPartial() {
+        org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics result = new org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics(this);
         int from_bitField0_ = bitField0_;
         int to_bitField0_ = 0;
         if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
           to_bitField0_ |= 0x00000001;
         }
-        result.minimum_ = minimum_;
-        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
-          to_bitField0_ |= 0x00000002;
-        }
-        result.maximum_ = maximum_;
+        result.sum_ = sum_;
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
       }
 
       public Builder mergeFrom(com.google.protobuf.Message other) {
-        if (other instanceof org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics) {
-          return mergeFrom((org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics)other);
+        if (other instanceof org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics) {
+          return mergeFrom((org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics)other);
         } else {
           super.mergeFrom(other);
           return this;
         }
       }
 
-      public Builder mergeFrom(org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics other) {
-        if (other == org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.getDefaultInstance()) return this;
-        if (other.hasMinimum()) {
-          setMinimum(other.getMinimum());
-        }
-        if (other.hasMaximum()) {
-          setMaximum(other.getMaximum());
+      public Builder mergeFrom(org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics other) {
+        if (other == org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics.getDefaultInstance()) return this;
+        if (other.hasSum()) {
+          setSum(other.getSum());
         }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
@@ -3514,11 +4088,11 @@ public final class OrcProto {
           com.google.protobuf.CodedInputStream input,
           com.google.protobuf.ExtensionRegistryLite extensionRegistry)
           throws java.io.IOException {
-        org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics parsedMessage = null;
+        org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics parsedMessage = null;
         try {
           parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
         } catch (com.google.protobuf.InvalidProtocolBufferException e) {
-          parsedMessage = (org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics) e.getUnfinishedMessage();
+          parsedMessage = (org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics) e.getUnfinishedMessage();
           throw e;
         } finally {
           if (parsedMessage != null) {
@@ -3529,97 +4103,64 @@ public final class OrcProto {
       }
       private int bitField0_;
 
-      // optional sint32 minimum = 1;
-      private int minimum_ ;
+      // optional sint64 sum = 1;
+      private long sum_ ;
       /**
-       * <code>optional sint32 minimum = 1;</code>
+       * <code>optional sint64 sum = 1;</code>
        *
        * <pre>
-       * min,max values saved as days since epoch
+       * sum will store the total binary blob length in a stripe
        * </pre>
        */
-      public boolean hasMinimum() {
+      public boolean hasSum() {
         return ((bitField0_ & 0x00000001) == 0x00000001);
       }
       /**
-       * <code>optional sint32 minimum = 1;</code>
+       * <code>optional sint64 sum = 1;</code>
        *
        * <pre>
-       * min,max values saved as days since epoch
+       * sum will store the total binary blob length in a stripe
        * </pre>
        */
-      public int getMinimum() {
-        return minimum_;
+      public long getSum() {
+        return sum_;
       }
       /**
-       * <code>optional sint32 minimum = 1;</code>
+       * <code>optional sint64 sum = 1;</code>
        *
        * <pre>
-       * min,max values saved as days since epoch
+       * sum will store the total binary blob length in a stripe
        * </pre>
        */
-      public Builder setMinimum(int value) {
+      public Builder setSum(long value) {
         bitField0_ |= 0x00000001;
-        minimum_ = value;
+        sum_ = value;
         onChanged();
         return this;
       }
       /**
-       * <code>optional sint32 minimum = 1;</code>
+       * <code>optional sint64 sum = 1;</code>
        *
        * <pre>
-       * min,max values saved as days since epoch
+       * sum will store the total binary blob length in a stripe
        * </pre>
        */
-      public Builder clearMinimum() {
+      public Builder clearSum() {
         bitField0_ = (bitField0_ & ~0x00000001);
-        minimum_ = 0;
-        onChanged();
-        return this;
-      }
-
-      // optional sint32 maximum = 2;
-      private int maximum_ ;
-      /**
-       * <code>optional sint32 maximum = 2;</code>
-       */
-      public boolean hasMaximum() {
-        return ((bitField0_ & 0x00000002) == 0x00000002);
-      }
-      /**
-       * <code>optional sint32 maximum = 2;</code>
-       */
-      public int getMaximum() {
-        return maximum_;
-      }
-      /**
-       * <code>optional sint32 maximum = 2;</code>
-       */
-      public Builder setMaximum(int value) {
-        bitField0_ |= 0x00000002;
-        maximum_ = value;
-        onChanged();
-        return this;
-      }
-      /**
-       * <code>optional sint32 maximum = 2;</code>
-       */
-      public Builder clearMaximum() {
-        bitField0_ = (bitField0_ & ~0x00000002);
-        maximum_ = 0;
+        sum_ = 0L;
         onChanged();
         return this;
       }
 
-      // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.ql.io.orc.DateStatistics)
+      // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.ql.io.orc.BinaryStatistics)
     }
 
     static {
-      defaultInstance = new DateStatistics(true);
+      defaultInstance = new BinaryStatistics(true);
       defaultInstance.initFields();
     }
 
-    // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.ql.io.orc.DateStatistics)
+    // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.ql.io.orc.BinaryStatistics)
   }
 
   public interface ColumnStatisticsOrBuilder
@@ -3718,6 +4259,20 @@ public final class OrcProto {
      * <code>optional .org.apache.hadoop.hive.ql.io.orc.DateStatistics dateStatistics = 7;</code>
      */
     org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatisticsOrBuilder getDateStatisticsOrBuilder();
+
+    // optional .org.apache.hadoop.hive.ql.io.orc.BinaryStatistics binaryStatistics = 8;
+    /**
+     * <code>optional .org.apache.hadoop.hive.ql.io.orc.BinaryStatistics binaryStatistics = 8;</code>
+     */
+    boolean hasBinaryStatistics();
+    /**
+     * <code>optional .org.apache.hadoop.hive.ql.io.orc.BinaryStatistics binaryStatistics = 8;</code>
+     */
+    org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics getBinaryStatistics();
+    /**
+     * <code>optional .org.apache.hadoop.hive.ql.io.orc.BinaryStatistics binaryStatistics = 8;</code>
+     */
+    org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatisticsOrBuilder getBinaryStatisticsOrBuilder();
   }
   /**
    * Protobuf type {@code org.apache.hadoop.hive.ql.io.orc.ColumnStatistics}
@@ -3853,6 +4408,19 @@ public final class OrcProto {
               bitField0_ |= 0x00000040;
               break;
             }
+            case 66: {
+              org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics.Builder subBuilder = null;
+              if (((bitField0_ & 0x00000080) == 0x00000080)) {
+                subBuilder = binaryStatistics_.toBuilder();
+              }
+              binaryStatistics_ = input.readMessage(org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics.PARSER, extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(binaryStatistics_);
+                binaryStatistics_ = subBuilder.buildPartial();
+              }
+              bitField0_ |= 0x00000080;
+              break;
+            }
           }
         }
       } catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -4041,6 +4609,28 @@ public final class OrcProto {
       return dateStatistics_;
     }
 
+    // optional .org.apache.hadoop.hive.ql.io.orc.BinaryStatistics binaryStatistics = 8;
+    public static final int BINARYSTATISTICS_FIELD_NUMBER = 8;
+    private org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics binaryStatistics_;
+    /**
+     * <code>optional .org.apache.hadoop.hive.ql.io.orc.BinaryStatistics binaryStatistics = 8;</code>
+     */
+    public boolean hasBinaryStatistics() {
+      return ((bitField0_ & 0x00000080) == 0x00000080);
+    }
+    /**
+     * <code>optional .org.apache.hadoop.hive.ql.io.orc.BinaryStatistics binaryStatistics = 8;</code>
+     */
+    public org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics getBinaryStatistics() {
+      return binaryStatistics_;
+    }
+    /**
+     * <code>optional .org.apache.hadoop.hive.ql.io.orc.BinaryStatistics binaryStatistics = 8;</code>
+     */
+    public org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatisticsOrBuilder getBinaryStatisticsOrBuilder() {
+      return binaryStatistics_;
+    }
+
     private void initFields() {
       numberOfValues_ = 0L;
       intStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.IntegerStatistics.getDefaultInstance();
@@ -4049,6 +4639,7 @@ public final class OrcProto {
       bucketStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.BucketStatistics.getDefaultInstance();
       decimalStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.DecimalStatistics.getDefaultInstance();
       dateStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.getDefaultInstance();
+      binaryStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics.getDefaultInstance();
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -4083,6 +4674,9 @@ public final class OrcProto {
       if (((bitField0_ & 0x00000040) == 0x00000040)) {
         output.writeMessage(7, dateStatistics_);
       }
+      if (((bitField0_ & 0x00000080) == 0x00000080)) {
+        output.writeMessage(8, binaryStatistics_);
+      }
       getUnknownFields().writeTo(output);
     }
 
@@ -4120,6 +4714,10 @@ public final class OrcProto {
         size += com.google.protobuf.CodedOutputStream
           .computeMessageSize(7, dateStatistics_);
       }
+      if (((bitField0_ & 0x00000080) == 0x00000080)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(8, binaryStatistics_);
+      }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
       return size;
@@ -4234,6 +4832,7 @@ public final class OrcProto {
           getBucketStatisticsFieldBuilder();
           getDecimalStatisticsFieldBuilder();
           getDateStatisticsFieldBuilder();
+          getBinaryStatisticsFieldBuilder();
         }
       }
       private static Builder create() {
@@ -4280,6 +4879,12 @@ public final class OrcProto {
           dateStatisticsBuilder_.clear();
         }
         bitField0_ = (bitField0_ & ~0x00000040);
+        if (binaryStatisticsBuilder_ == null) {
+          binaryStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics.getDefaultInstance();
+        } else {
+          binaryStatisticsBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000080);
         return this;
       }
 
@@ -4360,6 +4965,14 @@ public final class OrcProto {
         } else {
           result.dateStatistics_ = dateStatisticsBuilder_.build();
         }
+        if (((from_bitField0_ & 0x00000080) == 0x00000080)) {
+          to_bitField0_ |= 0x00000080;
+        }
+        if (binaryStatisticsBuilder_ == null) {
+          result.binaryStatistics_ = binaryStatistics_;
+        } else {
+          result.binaryStatistics_ = binaryStatisticsBuilder_.build();
+        }
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
@@ -4397,6 +5010,9 @@ public final class OrcProto {
         if (other.hasDateStatistics()) {
           mergeDateStatistics(other.getDateStatistics());
         }
+        if (other.hasBinaryStatistics()) {
+          mergeBinaryStatistics(other.getBinaryStatistics());
+        }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }
@@ -5159,6 +5775,123 @@ public final class OrcProto {
         return dateStatisticsBuilder_;
       }
 
+      // optional .org.apache.hadoop.hive.ql.io.orc.BinaryStatistics binaryStatistics = 8;
+      private org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics binaryStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics.getDefaultInstance();
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics, org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics.Builder, org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatisticsOrBuilder> binaryStatisticsBuilder_;
+      /**
+       * <code>optional .org.apache.hadoop.hive.ql.io.orc.BinaryStatistics binaryStatistics = 8;</code>
+       */
+      public boolean hasBinaryStatistics() {
+        return ((bitField0_ & 0x00000080) == 0x00000080);
+      }
+      /**
+       * <code>optional .org.apache.hadoop.hive.ql.io.orc.BinaryStatistics binaryStatistics = 8;</code>
+       */
+      public org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics getBinaryStatistics() {
+        if (binaryStatisticsBuilder_ == null) {
+          return binaryStatistics_;
+        } else {
+          return binaryStatisticsBuilder_.getMessage();
+        }
+      }
+      /**
+       * <code>optional .org.apache.hadoop.hive.ql.io.orc.BinaryStatistics binaryStatistics = 8;</code>
+       */
+      public Builder setBinaryStatistics(org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics value) {
+        if (binaryStatisticsBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          binaryStatistics_ = value;
+          onChanged();
+        } else {
+          binaryStatisticsBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000080;
+        return this;
+      }
+      /**
+       * <code>optional .org.apache.hadoop.hive.ql.io.orc.BinaryStatistics binaryStatistics = 8;</code>
+       */
+      public Builder setBinaryStatistics(
+          org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics.Builder builderForValue) {
+        if (binaryStatisticsBuilder_ == null) {
+          binaryStatistics_ = builderForValue.build();
+          onChanged();
+        } else {
+          binaryStatisticsBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000080;
+        return this;
+      }
+      /**
+       * <code>optional .org.apache.hadoop.hive.ql.io.orc.BinaryStatistics binaryStatistics = 8;</code>
+       */
+      public Builder mergeBinaryStatistics(org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics value) {
+        if (binaryStatisticsBuilder_ == null) {
+          if (((bitField0_ & 0x00000080) == 0x00000080) &&
+              binaryStatistics_ != org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics.getDefaultInstance()) {
+            binaryStatistics_ =
+              org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics.newBuilder(binaryStatistics_).mergeFrom(value).buildPartial();
+          } else {
+            binaryStatistics_ = value;
+          }
+          onChanged();
+        } else {
+          binaryStatisticsBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000080;
+        return this;
+      }
+      /**
+       * <code>optional .org.apache.hadoop.hive.ql.io.orc.BinaryStatistics binaryStatistics = 8;</code>
+       */
+      public Builder clearBinaryStatistics() {
+        if (binaryStatisticsBuilder_ == null) {
+          binaryStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics.getDefaultInstance();
+          onChanged();
+        } else {
+          binaryStatisticsBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000080);
+        return this;
+      }
+      /**
+       * <code>optional .org.apache.hadoop.hive.ql.io.orc.BinaryStatistics binaryStatistics = 8;</code>
+       */
+      public org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics.Builder getBinaryStatisticsBuilder() {
+        bitField0_ |= 0x00000080;
+        onChanged();
+        return getBinaryStatisticsFieldBuilder().getBuilder();
+      }
+      /**
+       * <code>optional .org.apache.hadoop.hive.ql.io.orc.BinaryStatistics binaryStatistics = 8;</code>
+       */
+      public org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatisticsOrBuilder getBinaryStatisticsOrBuilder() {
+        if (binaryStatisticsBuilder_ != null) {
+          return binaryStatisticsBuilder_.getMessageOrBuilder();
+        } else {
+          return binaryStatistics_;
+        }
+      }
+      /**
+       * <code>optional .org.apache.hadoop.hive.ql.io.orc.BinaryStatistics binaryStatistics = 8;</code>
+       */
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics, org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics.Builder, org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatisticsOrBuilder> 
+          getBinaryStatisticsFieldBuilder() {
+        if (binaryStatisticsBuilder_ == null) {
+          binaryStatisticsBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+              org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics, org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics.Builder, org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatisticsOrBuilder>(
+                  binaryStatistics_,
+                  getParentForChildren(),
+                  isClean());
+          binaryStatistics_ = null;
+        }
+        return binaryStatisticsBuilder_;
+      }
+
       // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.ql.io.orc.ColumnStatistics)
     }
 
@@ -14321,6 +15054,11 @@ public final class OrcProto {
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
       internal_static_org_apache_hadoop_hive_ql_io_orc_DateStatistics_fieldAccessorTable;
   private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_org_apache_hadoop_hive_ql_io_orc_BinaryStatistics_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_org_apache_hadoop_hive_ql_io_orc_BinaryStatistics_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
     internal_static_org_apache_hadoop_hive_ql_io_orc_ColumnStatistics_descriptor;
   private static
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
@@ -14388,71 +15126,74 @@ public final class OrcProto {
       "e.ql.io.orc\"B\n\021IntegerStatistics\022\017\n\007mini" +
       "mum\030\001 \001(\022\022\017\n\007maximum\030\002 \001(\022\022\013\n\003sum\030\003 \001(\022\"" +
       "A\n\020DoubleStatistics\022\017\n\007minimum\030\001 \001(\001\022\017\n\007" +
-      "maximum\030\002 \001(\001\022\013\n\003sum\030\003 \001(\001\"4\n\020StringStat" +
+      "maximum\030\002 \001(\001\022\013\n\003sum\030\003 \001(\001\"A\n\020StringStat" +
       "istics\022\017\n\007minimum\030\001 \001(\t\022\017\n\007maximum\030\002 \001(\t" +
-      "\"%\n\020BucketStatistics\022\021\n\005count\030\001 \003(\004B\002\020\001\"" +
-      "B\n\021DecimalStatistics\022\017\n\007minimum\030\001 \001(\t\022\017\n" +
-      "\007maximum\030\002 \001(\t\022\013\n\003sum\030\003 \001(\t\"2\n\016DateStati" +
-      "stics\022\017\n\007minimum\030\001 \001(\021\022\017\n\007maximum\030\002 \001(\021\"",
-      "\372\003\n\020ColumnStatistics\022\026\n\016numberOfValues\030\001" +
-      " \001(\004\022J\n\rintStatistics\030\002 \001(\01323.org.apache" +
-      ".hadoop.hive.ql.io.orc.IntegerStatistics" +
-      "\022L\n\020doubleStatistics\030\003 \001(\01322.org.apache." +
-      "hadoop.hive.ql.io.orc.DoubleStatistics\022L" +
-      "\n\020stringStatistics\030\004 \001(\01322.org.apache.ha" +
-      "doop.hive.ql.io.orc.StringStatistics\022L\n\020" +
-      "bucketStatistics\030\005 \001(\01322.org.apache.hado" +
-      "op.hive.ql.io.orc.BucketStatistics\022N\n\021de" +
-      "cimalStatistics\030\006 \001(\01323.org.apache.hadoo",
-      "p.hive.ql.io.orc.DecimalStatistics\022H\n\016da" +
-      "teStatistics\030\007 \001(\01320.org.apache.hadoop.h" +
-      "ive.ql.io.orc.DateStatistics\"n\n\rRowIndex" +
-      "Entry\022\025\n\tpositions\030\001 \003(\004B\002\020\001\022F\n\nstatisti" +
-      "cs\030\002 \001(\01322.org.apache.hadoop.hive.ql.io." +
-      "orc.ColumnStatistics\"J\n\010RowIndex\022>\n\005entr" +
-      "y\030\001 \003(\0132/.org.apache.hadoop.hive.ql.io.o" +
-      "rc.RowIndexEntry\"\331\001\n\006Stream\022;\n\004kind\030\001 \002(" +
-      "\0162-.org.apache.hadoop.hive.ql.io.orc.Str" +
-      "eam.Kind\022\016\n\006column\030\002 \001(\r\022\016\n\006length\030\003 \001(\004",
-      "\"r\n\004Kind\022\013\n\007PRESENT\020\000\022\010\n\004DATA\020\001\022\n\n\006LENGT" +
-      "H\020\002\022\023\n\017DICTIONARY_DATA\020\003\022\024\n\020DICTIONARY_C" +
-      "OUNT\020\004\022\r\n\tSECONDARY\020\005\022\r\n\tROW_INDEX\020\006\"\263\001\n" +
-      "\016ColumnEncoding\022C\n\004kind\030\001 \002(\01625.org.apac" +
-      "he.hadoop.hive.ql.io.orc.ColumnEncoding." +
-      "Kind\022\026\n\016dictionarySize\030\002 \001(\r\"D\n\004Kind\022\n\n\006" +
-      "DIRECT\020\000\022\016\n\nDICTIONARY\020\001\022\r\n\tDIRECT_V2\020\002\022" +
-      "\021\n\rDICTIONARY_V2\020\003\"\214\001\n\014StripeFooter\0229\n\007s" +
-      "treams\030\001 \003(\0132(.org.apache.hadoop.hive.ql" +
-      ".io.orc.Stream\022A\n\007columns\030\002 \003(\01320.org.ap",
-      "ache.hadoop.hive.ql.io.orc.ColumnEncodin" +
-      "g\"\314\002\n\004Type\0229\n\004kind\030\001 \002(\0162+.org.apache.ha" +
-      "doop.hive.ql.io.orc.Type.Kind\022\024\n\010subtype" +
-      "s\030\002 \003(\rB\002\020\001\022\022\n\nfieldNames\030\003 \003(\t\022\025\n\rmaxim" +
-      "umLength\030\004 \001(\r\"\307\001\n\004Kind\022\013\n\007BOOLEAN\020\000\022\010\n\004" +
-      "BYTE\020\001\022\t\n\005SHORT\020\002\022\007\n\003INT\020\003\022\010\n\004LONG\020\004\022\t\n\005" +
-      "FLOAT\020\005\022\n\n\006DOUBLE\020\006\022\n\n\006STRING\020\007\022\n\n\006BINAR" +
-      "Y\020\010\022\r\n\tTIMESTAMP\020\t\022\010\n\004LIST\020\n\022\007\n\003MAP\020\013\022\n\n" +
-      "\006STRUCT\020\014\022\t\n\005UNION\020\r\022\013\n\007DECIMAL\020\016\022\010\n\004DAT" +
-      "E\020\017\022\013\n\007VARCHAR\020\020\"x\n\021StripeInformation\022\016\n",
-      "\006offset\030\001 \001(\004\022\023\n\013indexLength\030\002 \001(\004\022\022\n\nda" +
-      "taLength\030\003 \001(\004\022\024\n\014footerLength\030\004 \001(\004\022\024\n\014" +
-      "numberOfRows\030\005 \001(\004\"/\n\020UserMetadataItem\022\014" +
-      "\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \002(\014\"\356\002\n\006Footer\022\024" +
-      "\n\014headerLength\030\001 \001(\004\022\025\n\rcontentLength\030\002 " +
-      "\001(\004\022D\n\007stripes\030\003 \003(\01323.org.apache.hadoop" +
-      ".hive.ql.io.orc.StripeInformation\0225\n\005typ" +
-      "es\030\004 \003(\0132&.org.apache.hadoop.hive.ql.io." +
-      "orc.Type\022D\n\010metadata\030\005 \003(\01322.org.apache." +
-      "hadoop.hive.ql.io.orc.UserMetadataItem\022\024",
-      "\n\014numberOfRows\030\006 \001(\004\022F\n\nstatistics\030\007 \003(\013" +
-      "22.org.apache.hadoop.hive.ql.io.orc.Colu" +
-      "mnStatistics\022\026\n\016rowIndexStride\030\010 \001(\r\"\255\001\n" +
-      "\nPostScript\022\024\n\014footerLength\030\001 \001(\004\022F\n\013com" +
-      "pression\030\002 \001(\01621.org.apache.hadoop.hive." +
-      "ql.io.orc.CompressionKind\022\034\n\024compression" +
-      "BlockSize\030\003 \001(\004\022\023\n\007version\030\004 \003(\rB\002\020\001\022\016\n\005" +
-      "magic\030\300> \001(\t*:\n\017CompressionKind\022\010\n\004NONE\020" +
-      "\000\022\010\n\004ZLIB\020\001\022\n\n\006SNAPPY\020\002\022\007\n\003LZO\020\003"
+      "\022\013\n\003sum\030\003 \001(\022\"%\n\020BucketStatistics\022\021\n\005cou" +
+      "nt\030\001 \003(\004B\002\020\001\"B\n\021DecimalStatistics\022\017\n\007min" +
+      "imum\030\001 \001(\t\022\017\n\007maximum\030\002 \001(\t\022\013\n\003sum\030\003 \001(\t" +
+      "\"2\n\016DateStatistics\022\017\n\007minimum\030\001 \001(\021\022\017\n\007m",
+      "aximum\030\002 \001(\021\"\037\n\020BinaryStatistics\022\013\n\003sum\030" +
+      "\001 \001(\022\"\310\004\n\020ColumnStatistics\022\026\n\016numberOfVa" +
+      "lues\030\001 \001(\004\022J\n\rintStatistics\030\002 \001(\01323.org." +
+      "apache.hadoop.hive.ql.io.orc.IntegerStat" +
+      "istics\022L\n\020doubleStatistics\030\003 \001(\01322.org.a" +
+      "pache.hadoop.hive.ql.io.orc.DoubleStatis" +
+      "tics\022L\n\020stringStatistics\030\004 \001(\01322.org.apa" +
+      "che.hadoop.hive.ql.io.orc.StringStatisti" +
+      "cs\022L\n\020bucketStatistics\030\005 \001(\01322.org.apach" +
+      "e.hadoop.hive.ql.io.orc.BucketStatistics",
+      "\022N\n\021decimalStatistics\030\006 \001(\01323.org.apache" +
+      ".hadoop.hive.ql.io.orc.DecimalStatistics" +
+      "\022H\n\016dateStatistics\030\007 \001(\01320.org.apache.ha" +
+      "doop.hive.ql.io.orc.DateStatistics\022L\n\020bi" +
+      "naryStatistics\030\010 \001(\01322.org.apache.hadoop" +
+      ".hive.ql.io.orc.BinaryStatistics\"n\n\rRowI" +
+      "ndexEntry\022\025\n\tpositions\030\001 \003(\004B\002\020\001\022F\n\nstat" +
+      "istics\030\002 \001(\01322.org.apache.hadoop.hive.ql" +
+      ".io.orc.ColumnStatistics\"J\n\010RowIndex\022>\n\005" +
+      "entry\030\001 \003(\0132/.org.apache.hadoop.hive.ql.",
+      "io.orc.RowIndexEntry\"\331\001\n\006Stream\022;\n\004kind\030" +
+      "\001 \002(\0162-.org.apache.hadoop.hive.ql.io.orc" +
+      ".Stream.Kind\022\016\n\006column\030\002 \001(\r\022\016\n\006length\030\003" +
+      " \001(\004\"r\n\004Kind\022\013\n\007PRESENT\020\000\022\010\n\004DATA\020\001\022\n\n\006L" +
+      "ENGTH\020\002\022\023\n\017DICTIONARY_DATA\020\003\022\024\n\020DICTIONA" +
+      "RY_COUNT\020\004\022\r\n\tSECONDARY\020\005\022\r\n\tROW_INDEX\020\006" +
+      "\"\263\001\n\016ColumnEncoding\022C\n\004kind\030\001 \002(\01625.org." +
+      "apache.hadoop.hive.ql.io.orc.ColumnEncod" +
+      "ing.Kind\022\026\n\016dictionarySize\030\002 \001(\r\"D\n\004Kind" +
+      "\022\n\n\006DIRECT\020\000\022\016\n\nDICTIONARY\020\001\022\r\n\tDIRECT_V",
+      "2\020\002\022\021\n\rDICTIONARY_V2\020\003\"\214\001\n\014StripeFooter\022" +
+      "9\n\007streams\030\001 \003(\0132(.org.apache.hadoop.hiv" +
+      "e.ql.io.orc.Stream\022A\n\007columns\030\002 \003(\01320.or" +
+      "g.apache.hadoop.hive.ql.io.orc.ColumnEnc" +
+      "oding\"\314\002\n\004Type\0229\n\004kind\030\001 \002(\0162+.org.apach" +
+      "e.hadoop.hive.ql.io.orc.Type.Kind\022\024\n\010sub" +
+      "types\030\002 \003(\rB\002\020\001\022\022\n\nfieldNames\030\003 \003(\t\022\025\n\rm" +
+      "aximumLength\030\004 \001(\r\"\307\001\n\004Kind\022\013\n\007BOOLEAN\020\000" +
+      "\022\010\n\004BYTE\020\001\022\t\n\005SHORT\020\002\022\007\n\003INT\020\003\022\010\n\004LONG\020\004" +
+      "\022\t\n\005FLOAT\020\005\022\n\n\006DOUBLE\020\006\022\n\n\006STRING\020\007\022\n\n\006B",
+      "INARY\020\010\022\r\n\tTIMESTAMP\020\t\022\010\n\004LIST\020\n\022\007\n\003MAP\020" +
+      "\013\022\n\n\006STRUCT\020\014\022\t\n\005UNION\020\r\022\013\n\007DECIMAL\020\016\022\010\n" +
+      "\004DATE\020\017\022\013\n\007VARCHAR\020\020\"x\n\021StripeInformatio" +
+      "n\022\016\n\006offset\030\001 \001(\004\022\023\n\013indexLength\030\002 \001(\004\022\022" +
+      "\n\ndataLength\030\003 \001(\004\022\024\n\014footerLength\030\004 \001(\004" +
+      "\022\024\n\014numberOfRows\030\005 \001(\004\"/\n\020UserMetadataIt" +
+      "em\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \002(\014\"\356\002\n\006Foot" +
+      "er\022\024\n\014headerLength\030\001 \001(\004\022\025\n\rcontentLengt" +
+      "h\030\002 \001(\004\022D\n\007stripes\030\003 \003(\01323.org.apache.ha" +
+      "doop.hive.ql.io.orc.StripeInformation\0225\n",
+      "\005types\030\004 \003(\0132&.org.apache.hadoop.hive.ql" +
+      ".io.orc.Type\022D\n\010metadata\030\005 \003(\01322.org.apa" +
+      "che.hadoop.hive.ql.io.orc.UserMetadataIt" +
+      "em\022\024\n\014numberOfRows\030\006 \001(\004\022F\n\nstatistics\030\007" +
+      " \003(\01322.org.apache.hadoop.hive.ql.io.orc." +
+      "ColumnStatistics\022\026\n\016rowIndexStride\030\010 \001(\r" +
+      "\"\255\001\n\nPostScript\022\024\n\014footerLength\030\001 \001(\004\022F\n" +
+      "\013compression\030\002 \001(\01621.org.apache.hadoop.h" +
+      "ive.ql.io.orc.CompressionKind\022\034\n\024compres" +
+      "sionBlockSize\030\003 \001(\004\022\023\n\007version\030\004 \003(\rB\002\020\001",
+      "\022\016\n\005magic\030\300> \001(\t*:\n\017CompressionKind\022\010\n\004N" +
+      "ONE\020\000\022\010\n\004ZLIB\020\001\022\n\n\006SNAPPY\020\002\022\007\n\003LZO\020\003"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -14476,7 +15217,7 @@ public final class OrcProto {
           internal_static_org_apache_hadoop_hive_ql_io_orc_StringStatistics_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_org_apache_hadoop_hive_ql_io_orc_StringStatistics_descriptor,
-              new java.lang.String[] { "Minimum", "Maximum", });
+              new java.lang.String[] { "Minimum", "Maximum", "Sum", });
           internal_static_org_apache_hadoop_hive_ql_io_orc_BucketStatistics_descriptor =
             getDescriptor().getMessageTypes().get(3);
           internal_static_org_apache_hadoop_hive_ql_io_orc_BucketStatistics_fieldAccessorTable = new
@@ -14495,68 +15236,74 @@ public final class OrcProto {
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_org_apache_hadoop_hive_ql_io_orc_DateStatistics_descriptor,
               new java.lang.String[] { "Minimum", "Maximum", });
-          internal_static_org_apache_hadoop_hive_ql_io_orc_ColumnStatistics_descriptor =
+          internal_static_org_apache_hadoop_hive_ql_io_orc_BinaryStatistics_descriptor =
             getDescriptor().getMessageTypes().get(6);
+          internal_static_org_apache_hadoop_hive_ql_io_orc_BinaryStatistics_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_org_apache_hadoop_hive_ql_io_orc_BinaryStatistics_descriptor,
+              new java.lang.String[] { "Sum", });
+          internal_static_org_apache_hadoop_hive_ql_io_orc_ColumnStatistics_descriptor =
+            getDescriptor().getMessageTypes().get(7);
           internal_static_org_apache_hadoop_hive_ql_io_orc_ColumnStatistics_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_org_apache_hadoop_hive_ql_io_orc_ColumnStatistics_descriptor,
-              new java.lang.String[] { "NumberOfValues", "IntStatistics", "DoubleStatistics", "StringStatistics", "BucketStatistics", "DecimalStatistics", "DateStatistics", });
+              new java.lang.String[] { "NumberOfValues", "IntStatistics", "DoubleStatistics", "StringStatistics", "BucketStatistics", "DecimalStatistics", "DateStatistics", "BinaryStatistics", });
           internal_static_org_apache_hadoop_hive_ql_io_orc_RowIndexEntry_descriptor =
-            getDescriptor().getMessageTypes().get(7);
+            getDescriptor().getMessageTypes().get(8);
           internal_static_org_apache_hadoop_hive_ql_io_orc_RowIndexEntry_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_org_apache_hadoop_hive_ql_io_orc_RowIndexEntry_descriptor,
               new java.lang.String[] { "Positions", "Statistics", });
           internal_static_org_apache_hadoop_hive_ql_io_orc_RowIndex_descriptor =
-            getDescriptor().getMessageTypes().get(8);
+            getDescriptor().getMessageTypes().get(9);
           internal_static_org_apache_hadoop_hive_ql_io_orc_RowIndex_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_org_apache_hadoop_hive_ql_io_orc_RowIndex_descriptor,
               new java.lang.String[] { "Entry", });
           internal_static_org_apache_hadoop_hive_ql_io_orc_Stream_descriptor =
-            getDescriptor().getMessageTypes().get(9);
+            getDescriptor().getMessageTypes().get(10);
           internal_static_org_apache_hadoop_hive_ql_io_orc_Stream_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_org_apache_hadoop_hive_ql_io_orc_Stream_descriptor,
               new java.lang.String[] { "Kind", "Column", "Length", });
           internal_static_org_apache_hadoop_hive_ql_io_orc_ColumnEncoding_descriptor =
-            getDescriptor().getMessageTypes().get(10);
+            getDescriptor().getMessageTypes().get(11);
           internal_static_org_apache_hadoop_hive_ql_io_orc_ColumnEncoding_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_org_apache_hadoop_hive_ql_io_orc_ColumnEncoding_descriptor,
               new java.lang.String[] { "Kind", "DictionarySize", });
           internal_static_org_apache_hadoop_hive_ql_io_orc_StripeFooter_descriptor =
-            getDescriptor().getMessageTypes().get(11);
+            getDescriptor().getMessageTypes().get(12);
           internal_static_org_apache_hadoop_hive_ql_io_orc_StripeFooter_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_org_apache_hadoop_hive_ql_io_orc_StripeFooter_descriptor,
               new java.lang.String[] { "Streams", "Columns", });
           internal_static_org_apache_hadoop_hive_ql_io_orc_Type_descriptor =
-            getDescriptor().getMessageTypes().get(12);
+            getDescriptor().getMessageTypes().get(13);
           internal_static_org_apache_hadoop_hive_ql_io_orc_Type_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_org_apache_hadoop_hive_ql_io_orc_Type_descriptor,
               new java.lang.String[] { "Kind", "Subtypes", "FieldNames", "MaximumLength", });
           internal_static_org_apache_hadoop_hive_ql_io_orc_StripeInformation_descriptor =
-            getDescriptor().getMessageTypes().get(13);
+            getDescriptor().getMessageTypes().get(14);
           internal_static_org_apache_hadoop_hive_ql_io_orc_StripeInformation_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_org_apache_hadoop_hive_ql_io_orc_StripeInformation_descriptor,
               new java.lang.String[] { "Offset", "IndexLength", "DataLength", "FooterLength", "NumberOfRows", });
           internal_static_org_apache_hadoop_hive_ql_io_orc_UserMetadataItem_descriptor =
-            getDescriptor().getMessageTypes().get(14);
+            getDescriptor().getMessageTypes().get(15);
           internal_static_org_apache_hadoop_hive_ql_io_orc_UserMetadataItem_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_org_apache_hadoop_hive_ql_io_orc_UserMetadataItem_descriptor,
               new java.lang.String[] { "Name", "Value", });
           internal_static_org_apache_hadoop_hive_ql_io_orc_Footer_descriptor =
-            getDescriptor().getMessageTypes().get(15);
+            getDescriptor().getMessageTypes().get(16);
           internal_static_org_apache_hadoop_hive_ql_io_orc_Footer_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_org_apache_hadoop_hive_ql_io_orc_Footer_descriptor,
               new java.lang.String[] { "HeaderLength", "ContentLength", "Stripes", "Types", "Metadata", "NumberOfRows", "Statistics", "RowIndexStride", });
           internal_static_org_apache_hadoop_hive_ql_io_orc_PostScript_descriptor =
-            getDescriptor().getMessageTypes().get(16);
+            getDescriptor().getMessageTypes().get(17);
           internal_static_org_apache_hadoop_hive_ql_io_orc_PostScript_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_org_apache_hadoop_hive_ql_io_orc_PostScript_descriptor,

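The regenerated descriptor above adds a BinaryStatistics message (carrying a sum of byte lengths) and wires it into ColumnStatistics as field 8, shifting the later message-type indices up by one. A minimal sketch of consuming the new message from a decoded ColumnStatistics; the accessor names follow protobuf's generated-code conventions for the fields shown above, and the helper class itself is hypothetical:

    import org.apache.hadoop.hive.ql.io.orc.OrcProto;

    public final class BinaryStatsReader {
      // Returns the total byte length recorded for a binary column, or -1
      // when the writer did not record binary statistics. The stats message
      // is assumed to come from a parsed ORC footer or row index.
      public static long binarySum(OrcProto.ColumnStatistics stats) {
        if (stats.hasBinaryStatistics() && stats.getBinaryStatistics().hasSum()) {
          return stats.getBinaryStatistics().getSum();
        }
        return -1L;
      }
    }
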
Modified: hive/branches/maven/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
URL: http://svn.apache.org/viewvc/hive/branches/maven/ql/src/java/org/apache/hadoop/hive/ql/Driver.java?rev=1529308&r1=1529307&r2=1529308&view=diff
==============================================================================
--- hive/branches/maven/ql/src/java/org/apache/hadoop/hive/ql/Driver.java (original)
+++ hive/branches/maven/ql/src/java/org/apache/hadoop/hive/ql/Driver.java Fri Oct  4 21:30:38 2013
@@ -1,3 +1,4 @@
+
 /**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
@@ -137,6 +138,8 @@ public class Driver implements CommandPr
   private static final int SLEEP_TIME = 2000;
   protected int tryCount = Integer.MAX_VALUE;
 
+  private String userName;
+
   private boolean checkLockManager() {
     boolean supportConcurrency = conf.getBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY);
     if (!supportConcurrency) {
@@ -326,6 +329,11 @@ public class Driver implements CommandPr
   public Driver(HiveConf conf) {
     this.conf = conf;
   }
+  
+  public Driver(HiveConf conf, String userName) {
+    this(conf);
+    this.userName = userName;
+  }
 
   public Driver() {
     if (SessionState.get() != null) {
@@ -436,6 +444,7 @@ public class Driver implements CommandPr
       if (saHooks != null) {
         HiveSemanticAnalyzerHookContext hookCtx = new HiveSemanticAnalyzerHookContextImpl();
         hookCtx.setConf(conf);
+        hookCtx.setUserName(userName);
         for (HiveSemanticAnalyzerHook hook : saHooks) {
           tree = hook.preAnalyze(hookCtx, tree);
         }

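The new two-argument constructor lets a caller thread an end-user identity into semantic analysis, since the hunk above forwards it to HiveSemanticAnalyzerHookContext before the pre-analyze hooks run. A minimal usage sketch; the user name and query are illustrative:

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.ql.Driver;

    public class DriverUserNameExample {
      public static void main(String[] args) throws Exception {
        HiveConf conf = new HiveConf();
        // "hive_user" is an illustrative identity; it reaches the semantic
        // analyzer hooks via hookCtx.setUserName(userName) as shown above.
        Driver driver = new Driver(conf, "hive_user");
        driver.run("SHOW TABLES");
      }
    }
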
Modified: hive/branches/maven/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
URL: http://svn.apache.org/viewvc/hive/branches/maven/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java?rev=1529308&r1=1529307&r2=1529308&view=diff
==============================================================================
--- hive/branches/maven/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java (original)
+++ hive/branches/maven/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java Fri Oct  4 21:30:38 2013
@@ -362,6 +362,7 @@ public enum ErrorMsg {
   UNSUPPORTED_ALTER_TBL_OP(10245, "{0} alter table options is not supported"),
   INVALID_BIGTABLE_MAPJOIN(10246, "{0} table chosen for streaming is not valid", true),
   MISSING_OVER_CLAUSE(10247, "Missing over clause for function : "),
+  PARTITION_SPEC_TYPE_MISMATCH(10248, "Cannot add partition column {0} of type {1} as it cannot be converted to type {2}", true),
 
   SCRIPT_INIT_ERROR(20000, "Unable to initialize custom script."),
   SCRIPT_IO_ERROR(20001, "An error occurred while reading or writing to your custom script. "

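The new template takes three arguments: the partition column, the supplied type, and the target type. A hedged sketch of rendering it with java.text.MessageFormat, which matches the {0}/{1}/{2} placeholders in the message; the column and type names are illustrative:

    import java.text.MessageFormat;
    import org.apache.hadoop.hive.ql.ErrorMsg;

    public class PartitionSpecMismatchExample {
      public static void main(String[] args) {
        // "ds", "string" and "int" are illustrative placeholders.
        String msg = MessageFormat.format(
            ErrorMsg.PARTITION_SPEC_TYPE_MISMATCH.getMsg(), "ds", "string", "int");
        System.out.println(msg);
      }
    }
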
Modified: hive/branches/maven/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/maven/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java?rev=1529308&r1=1529307&r2=1529308&view=diff
==============================================================================
--- hive/branches/maven/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java (original)
+++ hive/branches/maven/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java Fri Oct  4 21:30:38 2013
@@ -373,6 +373,12 @@ public class FetchOperator implements Se
       job.set("mapred.input.dir", org.apache.hadoop.util.StringUtils.escapeString(currPath
           .toString()));
 
+      // The fetch operator is not vectorized, so turn the vectorization flag off
+      // here to ensure that a non-vectorized record reader is created below.
+      if (HiveConf.getBoolVar(job, HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED)) {
+        HiveConf.setBoolVar(job, HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED, false);
+      }
+
       PartitionDesc partDesc;
       if (currTbl == null) {
         partDesc = currPart;

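Because the flag is flipped on the operator's own JobConf copy, the change is local to the fetch path. A small freestanding sketch of the getBoolVar/setBoolVar pattern used in the hunk above; the standalone JobConf is illustrative:

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.mapred.JobConf;

    public class VectorizationToggleExample {
      public static void main(String[] args) {
        JobConf job = new JobConf();
        HiveConf.setBoolVar(job, HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED, true);
        // Mirrors the guard above: only flip the flag when it is currently set.
        if (HiveConf.getBoolVar(job, HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED)) {
          HiveConf.setBoolVar(job, HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED, false);
        }
      }
    }
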
Modified: hive/branches/maven/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/maven/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java?rev=1529308&r1=1529307&r2=1529308&view=diff
==============================================================================
--- hive/branches/maven/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java (original)
+++ hive/branches/maven/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java Fri Oct  4 21:30:38 2013
@@ -35,11 +35,13 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.apache.hadoop.hive.ql.io.FSRecordWriter;
+import org.apache.hadoop.hive.ql.io.FSRecordWriter.StatsProvidingRecordWriter;
 import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
 import org.apache.hadoop.hive.ql.io.HiveKey;
 import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
-import org.apache.hadoop.hive.ql.io.HivePassThroughOutputFormat;
 import org.apache.hadoop.hive.ql.io.HivePartitioner;
+import org.apache.hadoop.hive.ql.io.HivePassThroughOutputFormat;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.plan.DynamicPartitionCtx;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
@@ -84,11 +86,13 @@ public class FileSinkOperator extends Te
   protected transient int dpStartCol; // start column # for DP columns
   protected transient List<String> dpVals; // array of values corresponding to DP columns
   protected transient List<Object> dpWritables;
-  protected transient RecordWriter[] rowOutWriters; // row specific RecordWriters
+  protected transient FSRecordWriter[] rowOutWriters; // row specific RecordWriters
   protected transient int maxPartitions;
   protected transient ListBucketingCtx lbCtx;
   protected transient boolean isSkewedStoredAsSubDirectories;
-  private transient boolean statsCollectRawDataSize;
+  protected transient boolean statsCollectRawDataSize;
+  private transient boolean[] statsFromRecordWriter;
+  private transient boolean isCollectRWStats;
 
 
   private static final transient String[] FATAL_ERR_MSG = {
@@ -96,22 +100,12 @@ public class FileSinkOperator extends Te
       "Number of dynamic partitions exceeded hive.exec.max.dynamic.partitions.pernode."
   };
 
-  /**
-   * RecordWriter.
-   *
-   */
-  public static interface RecordWriter {
-    void write(Writable w) throws IOException;
-
-    void close(boolean abort) throws IOException;
-  }
-
   public class FSPaths implements Cloneable {
     Path tmpPath;
     Path taskOutputTempPath;
     Path[] outPaths;
     Path[] finalPaths;
-    RecordWriter[] outWriters;
+    FSRecordWriter[] outWriters;
     Stat stat;
 
     public FSPaths() {
@@ -122,7 +116,7 @@ public class FileSinkOperator extends Te
       taskOutputTempPath = Utilities.toTaskTempPath(specPath);
       outPaths = new Path[numFiles];
       finalPaths = new Path[numFiles];
-      outWriters = new RecordWriter[numFiles];
+      outWriters = new FSRecordWriter[numFiles];
       stat = new Stat();
     }
 
@@ -166,11 +160,11 @@ public class FileSinkOperator extends Te
       }
     }
 
-    public void setOutWriters(RecordWriter[] out) {
+    public void setOutWriters(FSRecordWriter[] out) {
       outWriters = out;
     }
 
-    public RecordWriter[] getOutWriters() {
+    public FSRecordWriter[] getOutWriters() {
       return outWriters;
     }
 
@@ -221,6 +215,10 @@ public class FileSinkOperator extends Te
         }
       }
     }
+
+    public Stat getStat() {
+      return stat;
+    }
   } // class FSPaths
 
   private static final long serialVersionUID = 1L;
@@ -228,7 +226,7 @@ public class FileSinkOperator extends Te
   protected transient Serializer serializer;
   protected transient BytesWritable commonKey = new BytesWritable();
   protected transient TableIdEnum tabIdEnum = null;
-  private transient LongWritable row_count;
+  protected transient LongWritable row_count;
   private transient boolean isNativeTable = true;
 
   /**
@@ -237,17 +235,17 @@ public class FileSinkOperator extends Te
    * each reducer can write 10 files - this way we effectively get 1000 files.
    */
   private transient ExprNodeEvaluator[] partitionEval;
-  private transient int totalFiles;
+  protected transient int totalFiles;
   private transient int numFiles;
-  private transient boolean multiFileSpray;
-  private transient final Map<Integer, Integer> bucketMap = new HashMap<Integer, Integer>();
+  protected transient boolean multiFileSpray;
+  protected transient final Map<Integer, Integer> bucketMap = new HashMap<Integer, Integer>();
 
   private transient ObjectInspector[] partitionObjectInspectors;
-  private transient HivePartitioner<HiveKey, Object> prtner;
-  private transient final HiveKey key = new HiveKey();
+  protected transient HivePartitioner<HiveKey, Object> prtner;
+  protected transient final HiveKey key = new HiveKey();
   private transient Configuration hconf;
-  private transient FSPaths fsp;
-  private transient boolean bDynParts;
+  protected transient FSPaths fsp;
+  protected transient boolean bDynParts;
   private transient SubStructObjectInspector subSetOI;
   private transient int timeOut; // JT timeout in msec.
   private transient long lastProgressReport = System.currentTimeMillis();
@@ -279,7 +277,7 @@ public class FileSinkOperator extends Te
   Class<? extends Writable> outputClass;
   String taskId;
 
-  private boolean filesCreated = false;
+  protected boolean filesCreated = false;
 
   private void initializeSpecPath() {
     // For a query of the type:
@@ -324,6 +322,7 @@ public class FileSinkOperator extends Te
       isCompressed = conf.getCompressed();
       parent = Utilities.toTempPath(conf.getDirName());
       statsCollectRawDataSize = conf.isStatsCollectRawDataSize();
+      statsFromRecordWriter = new boolean[numFiles];
 
       serializer = (Serializer) conf.getTableInfo().getDeserializerClass().newInstance();
       serializer.initialize(null, conf.getTableInfo().getProperties());
@@ -432,7 +431,7 @@ public class FileSinkOperator extends Te
     }
   }
 
-  private void createBucketFiles(FSPaths fsp) throws HiveException {
+  protected void createBucketFiles(FSPaths fsp) throws HiveException {
     try {
       int filesIdx = 0;
       Set<Integer> seenBuckets = new HashSet<Integer>();
@@ -516,6 +515,8 @@ public class FileSinkOperator extends Te
         fsp.outWriters[filesIdx] = HiveFileFormatUtils.getHiveRecordWriter(
             jc, conf.getTableInfo(), outputClass, conf, fsp.outPaths[filesIdx],
             reporter);
+        // If the record writer provides stats, get it from there instead of the serde
+        statsFromRecordWriter[filesIdx] = fsp.outWriters[filesIdx] instanceof StatsProvidingRecordWriter;
         // increment the CREATED_FILES counter
         if (reporter != null) {
           reporter.incrCounter(ProgressCounter.CREATED_FILES, 1);
@@ -544,7 +545,7 @@ public class FileSinkOperator extends Te
    *
    * @return true if a new progress update is reported, false otherwise.
    */
-  private boolean updateProgress() {
+  protected boolean updateProgress() {
     if (reporter != null &&
         (System.currentTimeMillis() - lastProgressReport) > timeOut) {
       reporter.progress();
@@ -555,7 +556,7 @@ public class FileSinkOperator extends Te
     }
   }
 
-  Writable recordValue;
+  protected Writable recordValue;
 
   @Override
   public void processOp(Object row, int tag) throws HiveException {
@@ -619,7 +620,11 @@ public class FileSinkOperator extends Te
       }
 
       rowOutWriters = fpaths.outWriters;
-      if (conf.isGatherStats()) {
+      // Check whether all record writers implement the statistics interface;
+      // if at least one record writer does not, fall back to the conventional
+      // way of gathering stats.
+      isCollectRWStats = areAllTrue(statsFromRecordWriter);
+      if (conf.isGatherStats() && !isCollectRWStats) {
         if (statsCollectRawDataSize) {
           SerDeStats stats = serializer.getSerDeStats();
           if (stats != null) {
@@ -630,12 +635,14 @@ public class FileSinkOperator extends Te
       }
 
 
+      FSRecordWriter rowOutWriter = null;
+
       if (row_count != null) {
         row_count.set(row_count.get() + 1);
       }
 
       if (!multiFileSpray) {
-        rowOutWriters[0].write(recordValue);
+        rowOutWriter = rowOutWriters[0];
       } else {
         int keyHashCode = 0;
         for (int i = 0; i < partitionEval.length; i++) {
@@ -646,8 +653,9 @@ public class FileSinkOperator extends Te
         key.setHashCode(keyHashCode);
         int bucketNum = prtner.getBucket(key, null, totalFiles);
         int idx = bucketMap.get(bucketNum);
-        rowOutWriters[idx].write(recordValue);
+        rowOutWriter = rowOutWriters[idx];
       }
+      rowOutWriter.write(recordValue);
     } catch (IOException e) {
       throw new HiveException(e);
     } catch (SerDeException e) {
@@ -655,13 +663,22 @@ public class FileSinkOperator extends Te
     }
   }
 
+  private boolean areAllTrue(boolean[] statsFromRW) {
+    for(boolean b : statsFromRW) {
+      if (!b) {
+        return false;
+      }
+    }
+    return true;
+  }
+
   /**
    * Lookup list bucketing path.
    * @param lbDirName
    * @return
    * @throws HiveException
    */
-  private FSPaths lookupListBucketingPaths(String lbDirName) throws HiveException {
+  protected FSPaths lookupListBucketingPaths(String lbDirName) throws HiveException {
     FSPaths fsp2 = valToPaths.get(lbDirName);
     if (fsp2 == null) {
       fsp2 = createNewPaths(lbDirName);
@@ -699,7 +716,7 @@ public class FileSinkOperator extends Te
    * @param row row to process.
    * @return directory name.
    */
-  private String generateListBucketingDirName(Object row) {
+  protected String generateListBucketingDirName(Object row) {
     if (!this.isSkewedStoredAsSubDirectories) {
       return null;
     }
@@ -740,7 +757,7 @@ public class FileSinkOperator extends Te
     return lbDirName;
   }
 
-  private FSPaths getDynOutPaths(List<String> row, String lbDirName) throws HiveException {
+  protected FSPaths getDynOutPaths(List<String> row, String lbDirName) throws HiveException {
 
     FSPaths fp;
 
@@ -864,6 +881,27 @@ public class FileSinkOperator extends Te
     if (!abort) {
       for (FSPaths fsp : valToPaths.values()) {
         fsp.closeWriters(abort);
+
+        // Before closing the operator, check whether statistics gathering was
+        // requested and is provided by the record writer. This is different from
+        // the statistics gathering done in processOp(): there, SerDe statistics
+        // are gathered and accumulated in a hash map for every row added, which
+        // adds overhead to the actual row processing. If the record writer
+        // already gathers the statistics, it can simply return the accumulated
+        // statistics, which are aggregated in the case of spray writers.
+        if (conf.isGatherStats() && isCollectRWStats) {
+          for (int idx = 0; idx < fsp.outWriters.length; idx++) {
+            FSRecordWriter outWriter = fsp.outWriters[idx];
+            if (outWriter != null) {
+              SerDeStats stats = ((StatsProvidingRecordWriter) outWriter).getStats();
+              if (stats != null) {
+                fsp.stat.addToStat(StatsSetupConst.RAW_DATA_SIZE, stats.getRawDataSize());
+                fsp.stat.addToStat(StatsSetupConst.ROW_COUNT, stats.getRowCount());
+              }
+            }
+          }
+        }
+
         if (isNativeTable) {
           fsp.commit(fs);
         }
@@ -934,7 +972,7 @@ public class FileSinkOperator extends Te
                  hiveOutputFormat = ReflectionUtils.newInstance(conf.getTableInfo().getOutputFileFormatClass(),job);
            }
           else {
-                 hiveOutputFormat = conf.getTableInfo().getOutputFileFormatClass().newInstance(); 
+                 hiveOutputFormat = conf.getTableInfo().getOutputFileFormatClass().newInstance();
           }
         }
         else {

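A hypothetical writer illustrating the StatsProvidingRecordWriter contract the operator now probes for: the writer accumulates its own counts and hands back a SerDeStats on request, so FileSinkOperator can skip the per-row SerDe accounting. This class is an example, not part of this commit:

    import java.io.IOException;
    import org.apache.hadoop.hive.ql.io.FSRecordWriter;
    import org.apache.hadoop.hive.serde2.SerDeStats;
    import org.apache.hadoop.io.Writable;

    public class CountingRecordWriter
        implements FSRecordWriter, FSRecordWriter.StatsProvidingRecordWriter {
      private final SerDeStats stats = new SerDeStats();
      private long rowCount;

      @Override
      public void write(Writable w) throws IOException {
        rowCount++;                   // a real writer would also persist w
      }

      @Override
      public void close(boolean abort) throws IOException {
        // nothing buffered in this sketch
      }

      @Override
      public SerDeStats getStats() {
        stats.setRowCount(rowCount);  // raw data size omitted in this sketch
        return stats;
      }
    }
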
Modified: hive/branches/maven/ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/maven/ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java?rev=1529308&r1=1529307&r2=1529308&view=diff
==============================================================================
--- hive/branches/maven/ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java (original)
+++ hive/branches/maven/ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java Fri Oct  4 21:30:38 2013
@@ -46,13 +46,14 @@ public class FilterOperator extends Oper
     FILTERED, PASSED
   }
 
-  private final transient LongWritable filtered_count, passed_count;
+  protected final transient LongWritable filtered_count;
+  protected final transient LongWritable passed_count;
   private transient ExprNodeEvaluator conditionEvaluator;
   private transient PrimitiveObjectInspector conditionInspector;
   private transient int consecutiveFails;
   private transient int consecutiveSearches;
   private transient IOContext ioContext;
-  transient int heartbeatInterval;
+  protected transient int heartbeatInterval;
 
   public FilterOperator() {
     super();

Modified: hive/branches/maven/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/maven/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java?rev=1529308&r1=1529307&r2=1529308&view=diff
==============================================================================
--- hive/branches/maven/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java (original)
+++ hive/branches/maven/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java Fri Oct  4 21:30:38 2013
@@ -141,8 +141,16 @@ public class GroupByOperator extends Ope
   transient StructObjectInspector newKeyObjectInspector;
   transient StructObjectInspector currentKeyObjectInspector;
   public static MemoryMXBean memoryMXBean;
-  private long maxMemory;
-  private float memoryThreshold;
+
+  /**
+   * Total amount of memory available to the JVM heap.
+   */
+  protected long maxMemory;
+
+  /**
+   * Configured percentage of heap memory usable by the query processor.
+   */
+  protected float memoryThreshold;
 
   private boolean groupingSetsPresent;
   private int groupingSetsPosition;
@@ -159,10 +167,18 @@ public class GroupByOperator extends Ope
   transient List<Field>[] aggrPositions;
 
   transient int fixedRowSize;
-  transient long maxHashTblMemory;
+
+  /**
+   * Max memory usable by the hashtable before it should flush.
+   */
+  protected transient long maxHashTblMemory;
   transient int totalVariableSize;
   transient int numEntriesVarSize;
-  transient int numEntriesHashTable;
+
+  /**
+   * Current number of entries in the hash table.
+   */
+  protected transient int numEntriesHashTable;
   transient int countAfterReport;   // report or forward
   transient int heartbeatInterval;
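These fields feed the hash-aggregation flush heuristic. A minimal freestanding sketch of how the hash-table budget is typically derived from them; the 0.5f threshold is illustrative and stands in for the hive.map.aggr.hash.percentmemory setting:

    import java.lang.management.ManagementFactory;
    import java.lang.management.MemoryMXBean;

    public class HashAggrMemoryBudget {
      public static void main(String[] args) {
        MemoryMXBean memoryMXBean = ManagementFactory.getMemoryMXBean();
        long maxMemory = memoryMXBean.getHeapMemoryUsage().getMax();
        float memoryThreshold = 0.5f; // illustrative; configured via
                                      // hive.map.aggr.hash.percentmemory
        long maxHashTblMemory = (long) (memoryThreshold * maxMemory);
        System.out.println("hash table budget: " + maxHashTblMemory + " bytes");
      }
    }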