Posted to commits@phoenix.apache.org by ja...@apache.org on 2014/09/20 01:43:30 UTC

[3/5] PHOENIX-180 Use stats to guide query parallelization (Ramkrishna S Vasudevan)
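This part of the patch adds a protobuf-generated coprocessor endpoint for collecting table statistics (StatCollectorProtos, first diff below) and rewires DefaultParallelIteratorRegionSplitter (second diff below) to drive query parallelization from those statistics. As a reading aid, here is a consolidated sketch, assembled only from the hunks shown below, of the two new splitter ingredients: a configurable guide-post depth and the table's guide posts. It is not a complete class, and the multi-column-family branch lies in the truncated portion of the last hunk, so it is merely stubbed out here.

import java.util.Collections;
import java.util.List;

import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.query.QueryServicesOptions;
import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.util.ReadOnlyProps;

class GuidePostSplitSketch {
    // The splitter now sizes its scan chunks from the histogram byte depth
    // rather than the old target/max concurrency settings.
    static long guidePostsDepth(ReadOnlyProps props) {
        return props.getLong(QueryServices.HISTOGRAM_BYTE_DEPTH_CONF_KEY,
                QueryServicesOptions.DEFAULT_HISTOGRAM_BYTE_DEPTH);
    }

    // Guide posts collected by the stats coprocessor are read off the table;
    // the multi-column-family case is handled in the truncated part of the
    // last hunk below, so it is only stubbed out in this sketch.
    static List<byte[]> guidePosts(PTable table) {
        if (table.getColumnFamilies().isEmpty()) {
            return table.getGuidePosts();
        }
        return Collections.emptyList();
    }
}

The property key and its default in this sketch are exactly the ones read in the constructor hunk of the splitter diff below.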

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5cdc938e/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/StatCollectorProtos.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/StatCollectorProtos.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/StatCollectorProtos.java
new file mode 100644
index 0000000..5008f22
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/StatCollectorProtos.java
@@ -0,0 +1,1269 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: StatisticsCollect.proto
+
+package org.apache.phoenix.coprocessor.generated;
+
+public final class StatCollectorProtos {
+  private StatCollectorProtos() {}
+  public static void registerAllExtensions(
+      com.google.protobuf.ExtensionRegistry registry) {
+  }
+  public interface StatCollectRequestOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // optional bytes startRow = 1;
+    /**
+     * <code>optional bytes startRow = 1;</code>
+     */
+    boolean hasStartRow();
+    /**
+     * <code>optional bytes startRow = 1;</code>
+     */
+    com.google.protobuf.ByteString getStartRow();
+
+    // optional bytes stopRow = 2;
+    /**
+     * <code>optional bytes stopRow = 2;</code>
+     */
+    boolean hasStopRow();
+    /**
+     * <code>optional bytes stopRow = 2;</code>
+     */
+    com.google.protobuf.ByteString getStopRow();
+  }
+  /**
+   * Protobuf type {@code StatCollectRequest}
+   */
+  public static final class StatCollectRequest extends
+      com.google.protobuf.GeneratedMessage
+      implements StatCollectRequestOrBuilder {
+    // Use StatCollectRequest.newBuilder() to construct.
+    private StatCollectRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private StatCollectRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final StatCollectRequest defaultInstance;
+    public static StatCollectRequest getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public StatCollectRequest getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private StatCollectRequest(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              bitField0_ |= 0x00000001;
+              startRow_ = input.readBytes();
+              break;
+            }
+            case 18: {
+              bitField0_ |= 0x00000002;
+              stopRow_ = input.readBytes();
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.phoenix.coprocessor.generated.StatCollectorProtos.internal_static_StatCollectRequest_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.phoenix.coprocessor.generated.StatCollectorProtos.internal_static_StatCollectRequest_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectRequest.class, org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectRequest.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<StatCollectRequest> PARSER =
+        new com.google.protobuf.AbstractParser<StatCollectRequest>() {
+      public StatCollectRequest parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new StatCollectRequest(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<StatCollectRequest> getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // optional bytes startRow = 1;
+    public static final int STARTROW_FIELD_NUMBER = 1;
+    private com.google.protobuf.ByteString startRow_;
+    /**
+     * <code>optional bytes startRow = 1;</code>
+     */
+    public boolean hasStartRow() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>optional bytes startRow = 1;</code>
+     */
+    public com.google.protobuf.ByteString getStartRow() {
+      return startRow_;
+    }
+
+    // optional bytes stopRow = 2;
+    public static final int STOPROW_FIELD_NUMBER = 2;
+    private com.google.protobuf.ByteString stopRow_;
+    /**
+     * <code>optional bytes stopRow = 2;</code>
+     */
+    public boolean hasStopRow() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    /**
+     * <code>optional bytes stopRow = 2;</code>
+     */
+    public com.google.protobuf.ByteString getStopRow() {
+      return stopRow_;
+    }
+
+    private void initFields() {
+      startRow_ = com.google.protobuf.ByteString.EMPTY;
+      stopRow_ = com.google.protobuf.ByteString.EMPTY;
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeBytes(1, startRow_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeBytes(2, stopRow_);
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(1, startRow_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(2, stopRow_);
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectRequest)) {
+        return super.equals(obj);
+      }
+      org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectRequest other = (org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectRequest) obj;
+
+      boolean result = true;
+      result = result && (hasStartRow() == other.hasStartRow());
+      if (hasStartRow()) {
+        result = result && getStartRow()
+            .equals(other.getStartRow());
+      }
+      result = result && (hasStopRow() == other.hasStopRow());
+      if (hasStopRow()) {
+        result = result && getStopRow()
+            .equals(other.getStopRow());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasStartRow()) {
+        hash = (37 * hash) + STARTROW_FIELD_NUMBER;
+        hash = (53 * hash) + getStartRow().hashCode();
+      }
+      if (hasStopRow()) {
+        hash = (37 * hash) + STOPROW_FIELD_NUMBER;
+        hash = (53 * hash) + getStopRow().hashCode();
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectRequest parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectRequest parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectRequest parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectRequest parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectRequest parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectRequest parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectRequest parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectRequest parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectRequest parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectRequest parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectRequest prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code StatCollectRequest}
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectRequestOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.phoenix.coprocessor.generated.StatCollectorProtos.internal_static_StatCollectRequest_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.phoenix.coprocessor.generated.StatCollectorProtos.internal_static_StatCollectRequest_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectRequest.class, org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectRequest.Builder.class);
+      }
+
+      // Construct using org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectRequest.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        startRow_ = com.google.protobuf.ByteString.EMPTY;
+        bitField0_ = (bitField0_ & ~0x00000001);
+        stopRow_ = com.google.protobuf.ByteString.EMPTY;
+        bitField0_ = (bitField0_ & ~0x00000002);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.phoenix.coprocessor.generated.StatCollectorProtos.internal_static_StatCollectRequest_descriptor;
+      }
+
+      public org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectRequest getDefaultInstanceForType() {
+        return org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectRequest.getDefaultInstance();
+      }
+
+      public org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectRequest build() {
+        org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectRequest result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectRequest buildPartial() {
+        org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectRequest result = new org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectRequest(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.startRow_ = startRow_;
+        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+          to_bitField0_ |= 0x00000002;
+        }
+        result.stopRow_ = stopRow_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectRequest) {
+          return mergeFrom((org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectRequest)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectRequest other) {
+        if (other == org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectRequest.getDefaultInstance()) return this;
+        if (other.hasStartRow()) {
+          setStartRow(other.getStartRow());
+        }
+        if (other.hasStopRow()) {
+          setStopRow(other.getStopRow());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectRequest parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectRequest) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // optional bytes startRow = 1;
+      private com.google.protobuf.ByteString startRow_ = com.google.protobuf.ByteString.EMPTY;
+      /**
+       * <code>optional bytes startRow = 1;</code>
+       */
+      public boolean hasStartRow() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>optional bytes startRow = 1;</code>
+       */
+      public com.google.protobuf.ByteString getStartRow() {
+        return startRow_;
+      }
+      /**
+       * <code>optional bytes startRow = 1;</code>
+       */
+      public Builder setStartRow(com.google.protobuf.ByteString value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x00000001;
+        startRow_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional bytes startRow = 1;</code>
+       */
+      public Builder clearStartRow() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        startRow_ = getDefaultInstance().getStartRow();
+        onChanged();
+        return this;
+      }
+
+      // optional bytes stopRow = 2;
+      private com.google.protobuf.ByteString stopRow_ = com.google.protobuf.ByteString.EMPTY;
+      /**
+       * <code>optional bytes stopRow = 2;</code>
+       */
+      public boolean hasStopRow() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      /**
+       * <code>optional bytes stopRow = 2;</code>
+       */
+      public com.google.protobuf.ByteString getStopRow() {
+        return stopRow_;
+      }
+      /**
+       * <code>optional bytes stopRow = 2;</code>
+       */
+      public Builder setStopRow(com.google.protobuf.ByteString value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x00000002;
+        stopRow_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional bytes stopRow = 2;</code>
+       */
+      public Builder clearStopRow() {
+        bitField0_ = (bitField0_ & ~0x00000002);
+        stopRow_ = getDefaultInstance().getStopRow();
+        onChanged();
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:StatCollectRequest)
+    }
+
+    static {
+      defaultInstance = new StatCollectRequest(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:StatCollectRequest)
+  }
+
+  public interface StatCollectResponseOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required uint64 rowsScanned = 1;
+    /**
+     * <code>required uint64 rowsScanned = 1;</code>
+     */
+    boolean hasRowsScanned();
+    /**
+     * <code>required uint64 rowsScanned = 1;</code>
+     */
+    long getRowsScanned();
+  }
+  /**
+   * Protobuf type {@code StatCollectResponse}
+   */
+  public static final class StatCollectResponse extends
+      com.google.protobuf.GeneratedMessage
+      implements StatCollectResponseOrBuilder {
+    // Use StatCollectResponse.newBuilder() to construct.
+    private StatCollectResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private StatCollectResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final StatCollectResponse defaultInstance;
+    public static StatCollectResponse getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public StatCollectResponse getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private StatCollectResponse(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 8: {
+              bitField0_ |= 0x00000001;
+              rowsScanned_ = input.readUInt64();
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.phoenix.coprocessor.generated.StatCollectorProtos.internal_static_StatCollectResponse_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.phoenix.coprocessor.generated.StatCollectorProtos.internal_static_StatCollectResponse_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectResponse.class, org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectResponse.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<StatCollectResponse> PARSER =
+        new com.google.protobuf.AbstractParser<StatCollectResponse>() {
+      public StatCollectResponse parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new StatCollectResponse(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<StatCollectResponse> getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // required uint64 rowsScanned = 1;
+    public static final int ROWSSCANNED_FIELD_NUMBER = 1;
+    private long rowsScanned_;
+    /**
+     * <code>required uint64 rowsScanned = 1;</code>
+     */
+    public boolean hasRowsScanned() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required uint64 rowsScanned = 1;</code>
+     */
+    public long getRowsScanned() {
+      return rowsScanned_;
+    }
+
+    private void initFields() {
+      rowsScanned_ = 0L;
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasRowsScanned()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeUInt64(1, rowsScanned_);
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeUInt64Size(1, rowsScanned_);
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectResponse)) {
+        return super.equals(obj);
+      }
+      org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectResponse other = (org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectResponse) obj;
+
+      boolean result = true;
+      result = result && (hasRowsScanned() == other.hasRowsScanned());
+      if (hasRowsScanned()) {
+        result = result && (getRowsScanned()
+            == other.getRowsScanned());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasRowsScanned()) {
+        hash = (37 * hash) + ROWSSCANNED_FIELD_NUMBER;
+        hash = (53 * hash) + hashLong(getRowsScanned());
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectResponse parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectResponse parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectResponse parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectResponse parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectResponse parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectResponse parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectResponse parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectResponse parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectResponse parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectResponse parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectResponse prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code StatCollectResponse}
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectResponseOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.phoenix.coprocessor.generated.StatCollectorProtos.internal_static_StatCollectResponse_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.phoenix.coprocessor.generated.StatCollectorProtos.internal_static_StatCollectResponse_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectResponse.class, org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectResponse.Builder.class);
+      }
+
+      // Construct using org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectResponse.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        rowsScanned_ = 0L;
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.phoenix.coprocessor.generated.StatCollectorProtos.internal_static_StatCollectResponse_descriptor;
+      }
+
+      public org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectResponse getDefaultInstanceForType() {
+        return org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectResponse.getDefaultInstance();
+      }
+
+      public org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectResponse build() {
+        org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectResponse result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectResponse buildPartial() {
+        org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectResponse result = new org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectResponse(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.rowsScanned_ = rowsScanned_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectResponse) {
+          return mergeFrom((org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectResponse)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectResponse other) {
+        if (other == org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectResponse.getDefaultInstance()) return this;
+        if (other.hasRowsScanned()) {
+          setRowsScanned(other.getRowsScanned());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasRowsScanned()) {
+          
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectResponse parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectResponse) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // required uint64 rowsScanned = 1;
+      private long rowsScanned_ ;
+      /**
+       * <code>required uint64 rowsScanned = 1;</code>
+       */
+      public boolean hasRowsScanned() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required uint64 rowsScanned = 1;</code>
+       */
+      public long getRowsScanned() {
+        return rowsScanned_;
+      }
+      /**
+       * <code>required uint64 rowsScanned = 1;</code>
+       */
+      public Builder setRowsScanned(long value) {
+        bitField0_ |= 0x00000001;
+        rowsScanned_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required uint64 rowsScanned = 1;</code>
+       */
+      public Builder clearRowsScanned() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        rowsScanned_ = 0L;
+        onChanged();
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:StatCollectResponse)
+    }
+
+    static {
+      defaultInstance = new StatCollectResponse(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:StatCollectResponse)
+  }
+
+  /**
+   * Protobuf service {@code StatCollectService}
+   */
+  public static abstract class StatCollectService
+      implements com.google.protobuf.Service {
+    protected StatCollectService() {}
+
+    public interface Interface {
+      /**
+       * <code>rpc collectStat(.StatCollectRequest) returns (.StatCollectResponse);</code>
+       */
+      public abstract void collectStat(
+          com.google.protobuf.RpcController controller,
+          org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectRequest request,
+          com.google.protobuf.RpcCallback<org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectResponse> done);
+
+    }
+
+    public static com.google.protobuf.Service newReflectiveService(
+        final Interface impl) {
+      return new StatCollectService() {
+        @java.lang.Override
+        public  void collectStat(
+            com.google.protobuf.RpcController controller,
+            org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectRequest request,
+            com.google.protobuf.RpcCallback<org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectResponse> done) {
+          impl.collectStat(controller, request, done);
+        }
+
+      };
+    }
+
+    public static com.google.protobuf.BlockingService
+        newReflectiveBlockingService(final BlockingInterface impl) {
+      return new com.google.protobuf.BlockingService() {
+        public final com.google.protobuf.Descriptors.ServiceDescriptor
+            getDescriptorForType() {
+          return getDescriptor();
+        }
+
+        public final com.google.protobuf.Message callBlockingMethod(
+            com.google.protobuf.Descriptors.MethodDescriptor method,
+            com.google.protobuf.RpcController controller,
+            com.google.protobuf.Message request)
+            throws com.google.protobuf.ServiceException {
+          if (method.getService() != getDescriptor()) {
+            throw new java.lang.IllegalArgumentException(
+              "Service.callBlockingMethod() given method descriptor for " +
+              "wrong service type.");
+          }
+          switch(method.getIndex()) {
+            case 0:
+              return impl.collectStat(controller, (org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectRequest)request);
+            default:
+              throw new java.lang.AssertionError("Can't get here.");
+          }
+        }
+
+        public final com.google.protobuf.Message
+            getRequestPrototype(
+            com.google.protobuf.Descriptors.MethodDescriptor method) {
+          if (method.getService() != getDescriptor()) {
+            throw new java.lang.IllegalArgumentException(
+              "Service.getRequestPrototype() given method " +
+              "descriptor for wrong service type.");
+          }
+          switch(method.getIndex()) {
+            case 0:
+              return org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectRequest.getDefaultInstance();
+            default:
+              throw new java.lang.AssertionError("Can't get here.");
+          }
+        }
+
+        public final com.google.protobuf.Message
+            getResponsePrototype(
+            com.google.protobuf.Descriptors.MethodDescriptor method) {
+          if (method.getService() != getDescriptor()) {
+            throw new java.lang.IllegalArgumentException(
+              "Service.getResponsePrototype() given method " +
+              "descriptor for wrong service type.");
+          }
+          switch(method.getIndex()) {
+            case 0:
+              return org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectResponse.getDefaultInstance();
+            default:
+              throw new java.lang.AssertionError("Can't get here.");
+          }
+        }
+
+      };
+    }
+
+    /**
+     * <code>rpc collectStat(.StatCollectRequest) returns (.StatCollectResponse);</code>
+     */
+    public abstract void collectStat(
+        com.google.protobuf.RpcController controller,
+        org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectRequest request,
+        com.google.protobuf.RpcCallback<org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectResponse> done);
+
+    public static final
+        com.google.protobuf.Descriptors.ServiceDescriptor
+        getDescriptor() {
+      return org.apache.phoenix.coprocessor.generated.StatCollectorProtos.getDescriptor().getServices().get(0);
+    }
+    public final com.google.protobuf.Descriptors.ServiceDescriptor
+        getDescriptorForType() {
+      return getDescriptor();
+    }
+
+    public final void callMethod(
+        com.google.protobuf.Descriptors.MethodDescriptor method,
+        com.google.protobuf.RpcController controller,
+        com.google.protobuf.Message request,
+        com.google.protobuf.RpcCallback<
+          com.google.protobuf.Message> done) {
+      if (method.getService() != getDescriptor()) {
+        throw new java.lang.IllegalArgumentException(
+          "Service.callMethod() given method descriptor for wrong " +
+          "service type.");
+      }
+      switch(method.getIndex()) {
+        case 0:
+          this.collectStat(controller, (org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectRequest)request,
+            com.google.protobuf.RpcUtil.<org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectResponse>specializeCallback(
+              done));
+          return;
+        default:
+          throw new java.lang.AssertionError("Can't get here.");
+      }
+    }
+
+    public final com.google.protobuf.Message
+        getRequestPrototype(
+        com.google.protobuf.Descriptors.MethodDescriptor method) {
+      if (method.getService() != getDescriptor()) {
+        throw new java.lang.IllegalArgumentException(
+          "Service.getRequestPrototype() given method " +
+          "descriptor for wrong service type.");
+      }
+      switch(method.getIndex()) {
+        case 0:
+          return org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectRequest.getDefaultInstance();
+        default:
+          throw new java.lang.AssertionError("Can't get here.");
+      }
+    }
+
+    public final com.google.protobuf.Message
+        getResponsePrototype(
+        com.google.protobuf.Descriptors.MethodDescriptor method) {
+      if (method.getService() != getDescriptor()) {
+        throw new java.lang.IllegalArgumentException(
+          "Service.getResponsePrototype() given method " +
+          "descriptor for wrong service type.");
+      }
+      switch(method.getIndex()) {
+        case 0:
+          return org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectResponse.getDefaultInstance();
+        default:
+          throw new java.lang.AssertionError("Can't get here.");
+      }
+    }
+
+    public static Stub newStub(
+        com.google.protobuf.RpcChannel channel) {
+      return new Stub(channel);
+    }
+
+    public static final class Stub extends org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectService implements Interface {
+      private Stub(com.google.protobuf.RpcChannel channel) {
+        this.channel = channel;
+      }
+
+      private final com.google.protobuf.RpcChannel channel;
+
+      public com.google.protobuf.RpcChannel getChannel() {
+        return channel;
+      }
+
+      public  void collectStat(
+          com.google.protobuf.RpcController controller,
+          org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectRequest request,
+          com.google.protobuf.RpcCallback<org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectResponse> done) {
+        channel.callMethod(
+          getDescriptor().getMethods().get(0),
+          controller,
+          request,
+          org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectResponse.getDefaultInstance(),
+          com.google.protobuf.RpcUtil.generalizeCallback(
+            done,
+            org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectResponse.class,
+            org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectResponse.getDefaultInstance()));
+      }
+    }
+
+    public static BlockingInterface newBlockingStub(
+        com.google.protobuf.BlockingRpcChannel channel) {
+      return new BlockingStub(channel);
+    }
+
+    public interface BlockingInterface {
+      public org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectResponse collectStat(
+          com.google.protobuf.RpcController controller,
+          org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectRequest request)
+          throws com.google.protobuf.ServiceException;
+    }
+
+    private static final class BlockingStub implements BlockingInterface {
+      private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
+        this.channel = channel;
+      }
+
+      private final com.google.protobuf.BlockingRpcChannel channel;
+
+      public org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectResponse collectStat(
+          com.google.protobuf.RpcController controller,
+          org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectRequest request)
+          throws com.google.protobuf.ServiceException {
+        return (org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectResponse) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(0),
+          controller,
+          request,
+          org.apache.phoenix.coprocessor.generated.StatCollectorProtos.StatCollectResponse.getDefaultInstance());
+      }
+
+    }
+
+    // @@protoc_insertion_point(class_scope:StatCollectService)
+  }
+
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_StatCollectRequest_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_StatCollectRequest_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_StatCollectResponse_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_StatCollectResponse_fieldAccessorTable;
+
+  public static com.google.protobuf.Descriptors.FileDescriptor
+      getDescriptor() {
+    return descriptor;
+  }
+  private static com.google.protobuf.Descriptors.FileDescriptor
+      descriptor;
+  static {
+    java.lang.String[] descriptorData = {
+      "\n\027StatisticsCollect.proto\"7\n\022StatCollect" +
+      "Request\022\020\n\010startRow\030\001 \001(\014\022\017\n\007stopRow\030\002 \001" +
+      "(\014\"*\n\023StatCollectResponse\022\023\n\013rowsScanned" +
+      "\030\001 \002(\0042N\n\022StatCollectService\0228\n\013collectS" +
+      "tat\022\023.StatCollectRequest\032\024.StatCollectRe" +
+      "sponseBG\n(org.apache.phoenix.coprocessor" +
+      ".generatedB\023StatCollectorProtosH\001\210\001\001\240\001\001"
+    };
+    com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+      new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+        public com.google.protobuf.ExtensionRegistry assignDescriptors(
+            com.google.protobuf.Descriptors.FileDescriptor root) {
+          descriptor = root;
+          internal_static_StatCollectRequest_descriptor =
+            getDescriptor().getMessageTypes().get(0);
+          internal_static_StatCollectRequest_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_StatCollectRequest_descriptor,
+              new java.lang.String[] { "StartRow", "StopRow", });
+          internal_static_StatCollectResponse_descriptor =
+            getDescriptor().getMessageTypes().get(1);
+          internal_static_StatCollectResponse_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_StatCollectResponse_descriptor,
+              new java.lang.String[] { "RowsScanned", });
+          return null;
+        }
+      };
+    com.google.protobuf.Descriptors.FileDescriptor
+      .internalBuildGeneratedFileFrom(descriptorData,
+        new com.google.protobuf.Descriptors.FileDescriptor[] {
+        }, assigner);
+  }
+
+  // @@protoc_insertion_point(outer_class_scope)
+}
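The generated class above exposes both an async stub (newStub) and a blocking stub (newBlockingStub) for the collectStat RPC. Below is a minimal sketch of driving the blocking stub over a key range; the RPC channel and controller are assumed to be supplied by the caller (for instance through an HBase coprocessor RPC channel), and the helper class name is hypothetical.

import com.google.protobuf.BlockingRpcChannel;
import com.google.protobuf.ByteString;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;

import org.apache.phoenix.coprocessor.generated.StatCollectorProtos;

class StatCollectClientSketch {
    // Builds a request for the given key range, invokes the blocking stub and
    // returns rowsScanned, the single required field of StatCollectResponse.
    static long collectStats(BlockingRpcChannel channel, RpcController controller,
            byte[] startRow, byte[] stopRow) throws ServiceException {
        StatCollectorProtos.StatCollectRequest request =
                StatCollectorProtos.StatCollectRequest.newBuilder()
                        .setStartRow(ByteString.copyFrom(startRow))
                        .setStopRow(ByteString.copyFrom(stopRow))
                        .build();
        StatCollectorProtos.StatCollectService.BlockingInterface stub =
                StatCollectorProtos.StatCollectService.newBlockingStub(channel);
        return stub.collectStat(controller, request).getRowsScanned();
    }
}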

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5cdc938e/phoenix-core/src/main/java/org/apache/phoenix/iterate/DefaultParallelIteratorRegionSplitter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/DefaultParallelIteratorRegionSplitter.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/DefaultParallelIteratorRegionSplitter.java
index 1498480..2f568db 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/DefaultParallelIteratorRegionSplitter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/DefaultParallelIteratorRegionSplitter.java
@@ -18,33 +18,28 @@
 package org.apache.phoenix.iterate;
 
 import java.sql.SQLException;
-import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
 
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.util.Bytes;
-
-import com.google.common.base.Preconditions;
-import com.google.common.base.Predicate;
-import com.google.common.collect.ArrayListMultimap;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.ListMultimap;
-import com.google.common.collect.Lists;
 import org.apache.phoenix.compile.StatementContext;
 import org.apache.phoenix.parse.HintNode;
-import org.apache.phoenix.parse.HintNode.Hint;
 import org.apache.phoenix.query.KeyRange;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
-import org.apache.phoenix.query.StatsManager;
+import org.apache.phoenix.schema.ColumnFamilyNotFoundException;
 import org.apache.phoenix.schema.PTable;
-import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.schema.TableRef;
-import org.apache.phoenix.schema.PTable.IndexType;
 import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.SchemaUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Predicate;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
 
 
 /**
@@ -56,12 +51,11 @@ import org.apache.phoenix.util.ReadOnlyProps;
  */
 public class DefaultParallelIteratorRegionSplitter implements ParallelIteratorRegionSplitter {
 
-    protected final int targetConcurrency;
-    protected final int maxConcurrency;
-    protected final int maxIntraRegionParallelization;
+    protected final long guidePostsDepth;
     protected final StatementContext context;
     protected final TableRef tableRef;
 
+    private static final Logger logger = LoggerFactory.getLogger(DefaultParallelIteratorRegionSplitter.class);
     public static DefaultParallelIteratorRegionSplitter getInstance(StatementContext context, TableRef table, HintNode hintNode) {
         return new DefaultParallelIteratorRegionSplitter(context, table, hintNode);
     }
@@ -70,22 +64,16 @@ public class DefaultParallelIteratorRegionSplitter implements ParallelIteratorRe
         this.context = context;
         this.tableRef = table;
         ReadOnlyProps props = context.getConnection().getQueryServices().getProps();
-        this.targetConcurrency = props.getInt(QueryServices.TARGET_QUERY_CONCURRENCY_ATTRIB,
-                QueryServicesOptions.DEFAULT_TARGET_QUERY_CONCURRENCY);
-        this.maxConcurrency = props.getInt(QueryServices.MAX_QUERY_CONCURRENCY_ATTRIB,
-                QueryServicesOptions.DEFAULT_MAX_QUERY_CONCURRENCY);
-        Preconditions.checkArgument(targetConcurrency >= 1, "Invalid target concurrency: " + targetConcurrency);
-        Preconditions.checkArgument(maxConcurrency >= targetConcurrency , "Invalid max concurrency: " + maxConcurrency);
-        this.maxIntraRegionParallelization = hintNode.hasHint(Hint.NO_INTRA_REGION_PARALLELIZATION) ? 1 : props.getInt(QueryServices.MAX_INTRA_REGION_PARALLELIZATION_ATTRIB,
-                QueryServicesOptions.DEFAULT_MAX_INTRA_REGION_PARALLELIZATION);
-        Preconditions.checkArgument(maxIntraRegionParallelization >= 1 , "Invalid max intra region parallelization: " + maxIntraRegionParallelization);
+        this.guidePostsDepth = props.getLong(QueryServices.HISTOGRAM_BYTE_DEPTH_CONF_KEY,
+                QueryServicesOptions.DEFAULT_HISTOGRAM_BYTE_DEPTH);
     }
 
     // Get the mapping between key range and the regions that contains them.
     protected List<HRegionLocation> getAllRegions() throws SQLException {
         Scan scan = context.getScan();
         PTable table = tableRef.getTable();
-        List<HRegionLocation> allTableRegions = context.getConnection().getQueryServices().getAllTableRegions(table.getPhysicalName().getBytes());
+        List<HRegionLocation> allTableRegions = context.getConnection().getQueryServices()
+                .getAllTableRegions(table.getPhysicalName().getBytes());
         // If we're not salting, then we've already intersected the minMaxRange with the scan range
         // so there's nothing to do here.
         return filterRegions(allTableRegions, scan.getStartRow(), scan.getStopRow());
@@ -109,7 +97,8 @@ public class DefaultParallelIteratorRegionSplitter implements ParallelIteratorRe
         regions = Iterables.filter(allTableRegions, new Predicate<HRegionLocation>() {
             @Override
             public boolean apply(HRegionLocation location) {
-                KeyRange regionKeyRange = KeyRange.getKeyRange(location.getRegionInfo().getStartKey(), location.getRegionInfo().getEndKey());
+                KeyRange regionKeyRange = KeyRange.getKeyRange(location.getRegionInfo().getStartKey(), location
+                        .getRegionInfo().getEndKey());
                 return keyRange.intersect(regionKeyRange) != KeyRange.EMPTY_RANGE;
             }
         });
@@ -117,121 +106,68 @@ public class DefaultParallelIteratorRegionSplitter implements ParallelIteratorRe
     }
 
     protected List<KeyRange> genKeyRanges(List<HRegionLocation> regions) {
-        if (regions.isEmpty()) {
-            return Collections.emptyList();
-        }
-        
-        StatsManager statsManager = context.getConnection().getQueryServices().getStatsManager();
-        // the splits are computed as follows:
-        //
-        // let's suppose:
-        // t = target concurrency
-        // m = max concurrency
-        // r = the number of regions we need to scan
-        //
-        // if r >= t:
-        //    scan using regional boundaries
-        // elif r > t/2:
-        //    split each region in s splits such that:
-        //    s = max(x) where s * x < m
-        // else:
-        //    split each region in s splits such that:
-        //    s = max(x) where s * x < t
-        //
-        // The idea is to align splits with region boundaries. If rows are not evenly
-        // distributed across regions, using this scheme compensates for regions that
-        // have more rows than others, by applying tighter splits and therefore spawning
-        // off more scans over the overloaded regions.
-        int splitsPerRegion = getSplitsPerRegion(regions.size());
-        // Create a multi-map of ServerName to List<KeyRange> which we'll use to round robin from to ensure
-        // that we keep each region server busy for each query.
-        ListMultimap<HRegionLocation,KeyRange> keyRangesPerRegion = ArrayListMultimap.create(regions.size(),regions.size() * splitsPerRegion);;
-        if (splitsPerRegion == 1) {
-            for (HRegionLocation region : regions) {
-                keyRangesPerRegion.put(region, ParallelIterators.TO_KEY_RANGE.apply(region));
-            }
+        if (regions.isEmpty()) { return Collections.emptyList(); }
+        Scan scan = context.getScan();
+        PTable table = this.tableRef.getTable();
+        byte[] defaultCF = SchemaUtil.getEmptyColumnFamily(table);
+        List<byte[]> gps = Lists.newArrayList();
+
+        if (table.getColumnFamilies().isEmpty()) {
+            // No column families are declared, so use the guideposts kept on the table itself
+            gps = table.getGuidePosts();
         } else {
-            // Maintain bucket for each server and then returns KeyRanges in round-robin
-            // order to ensure all servers are utilized.
-            for (HRegionLocation region : regions) {
-                byte[] startKey = region.getRegionInfo().getStartKey();
-                byte[] stopKey = region.getRegionInfo().getEndKey();
-                boolean lowerUnbound = Bytes.compareTo(startKey, HConstants.EMPTY_START_ROW) == 0;
-                boolean upperUnbound = Bytes.compareTo(stopKey, HConstants.EMPTY_END_ROW) == 0;
-                /*
-                 * If lower/upper unbound, get the min/max key from the stats manager.
-                 * We use this as the boundary to split on, but we still use the empty
-                 * byte as the boundary in the actual scan (in case our stats are out
-                 * of date).
-                 */
-                if (lowerUnbound) {
-                    startKey = statsManager.getMinKey(tableRef);
-                    if (startKey == null) {
-                        keyRangesPerRegion.put(region,ParallelIterators.TO_KEY_RANGE.apply(region));
-                        continue;
+            try {
+                if (scan.getFamilyMap().size() > 0) {
+                    if (scan.getFamilyMap().containsKey(defaultCF)) { // Favor using default CF if it's used in scan
+                        gps = table.getColumnFamily(defaultCF).getGuidePosts();
+                    } else { // Otherwise, just use first CF in use by scan
+                        gps = table.getColumnFamily(scan.getFamilyMap().keySet().iterator().next()).getGuidePosts();
                     }
-                }
-                if (upperUnbound) {
-                    stopKey = statsManager.getMaxKey(tableRef);
-                    if (stopKey == null) {
-                        keyRangesPerRegion.put(region,ParallelIterators.TO_KEY_RANGE.apply(region));
-                        continue;
-                    }
-                }
-                
-                byte[][] boundaries = null;
-                // Both startKey and stopKey will be empty the first time
-                if (Bytes.compareTo(startKey, stopKey) >= 0 || (boundaries = Bytes.split(startKey, stopKey, splitsPerRegion - 1)) == null) {
-                    // Bytes.split may return null if the key space
-                    // between start and end key is too small
-                    keyRangesPerRegion.put(region,ParallelIterators.TO_KEY_RANGE.apply(region));
                 } else {
-                    keyRangesPerRegion.put(region,KeyRange.getKeyRange(lowerUnbound ? KeyRange.UNBOUND : boundaries[0], boundaries[1]));
-                    if (boundaries.length > 1) {
-                        for (int i = 1; i < boundaries.length-2; i++) {
-                            keyRangesPerRegion.put(region,KeyRange.getKeyRange(boundaries[i], true, boundaries[i+1], false));
-                        }
-                        keyRangesPerRegion.put(region,KeyRange.getKeyRange(boundaries[boundaries.length-2], true, upperUnbound ? KeyRange.UNBOUND : boundaries[boundaries.length-1], false));
-                    }
+                    gps = table.getColumnFamily(defaultCF).getGuidePosts();
                 }
+            } catch (ColumnFamilyNotFoundException cfne) {
+                // The column family may have been dropped by ALTER TABLE; proceed without guideposts
             }
         }
-        List<KeyRange> splits = Lists.newArrayListWithCapacity(regions.size() * splitsPerRegion);
-        // as documented for ListMultimap
-        Collection<Collection<KeyRange>> values = keyRangesPerRegion.asMap().values();
-        List<Collection<KeyRange>> keyRangesList = Lists.newArrayList(values);
-        // Randomize range order to ensure that we don't hit the region servers in the same order every time
-        // thus helping to distribute the load more evenly
-        Collections.shuffle(keyRangesList);
-        // Transpose values in map to get regions in round-robin server order. This ensures that
-        // all servers will be used to process the set of parallel threads available in our executor.
-        int i = 0;
-        boolean done;
-        do {
-            done = true;
-            for (int j = 0; j < keyRangesList.size(); j++) {
-                List<KeyRange> keyRanges = (List<KeyRange>)keyRangesList.get(j);
-                if (i < keyRanges.size()) {
-                    splits.add(keyRanges.get(i));
-                    done = false;
+        List<KeyRange> guidePosts = Lists.newArrayListWithCapacity(regions.size());
+        byte[] currentKey = regions.get(0).getRegionInfo().getStartKey();
+        byte[] endKey = null;
+        int regionIndex = 0;
+        int guideIndex = 0;
+        int gpsSize = gps.size();
+        int regionSize = regions.size();
+        if (currentKey.length > 0) {
+            guideIndex = Collections.binarySearch(gps, currentKey, Bytes.BYTES_COMPARATOR);
+            guideIndex = (guideIndex < 0 ? -(guideIndex + 1) : (guideIndex + 1));
+        }
+        // Walk the regions in order, splitting each one at the guideposts that fall within it
+        while (regionIndex < regionSize) {
+            byte[] currentGuidePost;
+            currentKey = regions.get(regionIndex).getRegionInfo().getStartKey();
+            endKey = regions.get(regionIndex++).getRegionInfo().getEndKey();
+            while (guideIndex < gpsSize
+                    && (Bytes.compareTo(currentGuidePost = gps.get(guideIndex), endKey) <= 0 || endKey.length == 0)) {
+                KeyRange keyRange = KeyRange.getKeyRange(currentKey, currentGuidePost);
+                if (keyRange != KeyRange.EMPTY_RANGE) {
+                    guidePosts.add(keyRange);
                 }
+                currentKey = currentGuidePost;
+                guideIndex++;
+            }
+            KeyRange keyRange = KeyRange.getKeyRange(currentKey, endKey);
+            if (keyRange != KeyRange.EMPTY_RANGE) {
+                guidePosts.add(keyRange);
             }
-            i++;
-        } while (!done);
-        return splits;
+        }
+        if (logger.isDebugEnabled()) {
+            logger.debug("The captured guideposts are: " + guidePosts);
+        }
+        return guidePosts;
     }
-
+        
     @Override
     public List<KeyRange> getSplits() throws SQLException {
         return genKeyRanges(getAllRegions());
     }
-
-    @Override
-    public int getSplitsPerRegion(int numRegions) {
-        int splitsPerRegion =
-                numRegions >= targetConcurrency ? 1
-                        : (numRegions > targetConcurrency / 2 ? maxConcurrency : targetConcurrency)
-                                / numRegions;
-        return Math.min(splitsPerRegion, maxIntraRegionParallelization);
-    }
 }
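
The rewritten genKeyRanges above no longer splits regions by a concurrency target: it looks up the guideposts for the scanned column family (favoring the empty/default family, otherwise the first family referenced by the scan) and interleaves them with the region boundaries to form the parallel scan ranges, dropping any range that collapses to empty; the constructor now reads the histogram byte depth property in place of the old concurrency settings. Below is a minimal, self-contained sketch of that merge, using String keys instead of byte[] and "" for an unbounded key; the names and types here are illustrative stand-ins, not Phoenix APIs.

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    public class GuidePostMergeSketch {

        // Hypothetical stand-in for KeyRange: [start, end), with "" meaning unbounded.
        static final class Range {
            final String start, end;
            Range(String start, String end) { this.start = start; this.end = end; }
            @Override public String toString() { return "[" + start + ", " + end + ")"; }
        }

        // For each region [start, end), emit one range per guidepost falling inside it,
        // then a final range up to the region's end key, skipping empty ranges.
        static List<Range> merge(List<String[]> regions, List<String> guidePosts) {
            List<Range> ranges = new ArrayList<>();
            int guideIndex = 0;
            String firstStart = regions.get(0)[0];
            if (!firstStart.isEmpty()) {
                // Skip guideposts that precede the first region's start key.
                int pos = Collections.binarySearch(guidePosts, firstStart);
                guideIndex = pos < 0 ? -(pos + 1) : pos + 1;
            }
            for (String[] region : regions) {
                String currentKey = region[0];
                String endKey = region[1];
                while (guideIndex < guidePosts.size()
                        && (endKey.isEmpty() || guidePosts.get(guideIndex).compareTo(endKey) <= 0)) {
                    String gp = guidePosts.get(guideIndex++);
                    if (!gp.equals(currentKey)) {
                        ranges.add(new Range(currentKey, gp));
                    }
                    currentKey = gp;
                }
                if (endKey.isEmpty() || !currentKey.equals(endKey)) {
                    ranges.add(new Range(currentKey, endKey));
                }
            }
            return ranges;
        }

        public static void main(String[] args) {
            // Two regions split at "m"; guideposts at "d", "h" and "r".
            List<String[]> regions = new ArrayList<>();
            regions.add(new String[] { "", "m" });
            regions.add(new String[] { "m", "" });
            List<String> guidePosts = new ArrayList<>();
            Collections.addAll(guidePosts, "d", "h", "r");
            // Prints: [[, d), [d, h), [h, m), [m, r), [r, )]
            System.out.println(merge(regions, guidePosts));
        }
    }

As in the patch, region boundaries always survive as range edges, so every resulting range is served by exactly one region even if the guideposts are stale.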

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5cdc938e/phoenix-core/src/main/java/org/apache/phoenix/iterate/LocalIndexParallelIteratorRegionSplitter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/LocalIndexParallelIteratorRegionSplitter.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/LocalIndexParallelIteratorRegionSplitter.java
index c3a38d5..14da71e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/LocalIndexParallelIteratorRegionSplitter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/LocalIndexParallelIteratorRegionSplitter.java
@@ -39,9 +39,5 @@ public class LocalIndexParallelIteratorRegionSplitter extends DefaultParallelIte
     protected List<HRegionLocation> getAllRegions() throws SQLException {
         return context.getConnection().getQueryServices().getAllTableRegions(tableRef.getTable().getPhysicalName().getBytes());
     }
-
-    @Override
-    public int getSplitsPerRegion(int numRegions) {
-        return 1;
-    }
+    
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5cdc938e/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIteratorRegionSplitter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIteratorRegionSplitter.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIteratorRegionSplitter.java
index efd9eec..74029fd 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIteratorRegionSplitter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIteratorRegionSplitter.java
@@ -32,6 +32,4 @@ public interface ParallelIteratorRegionSplitter {
 
     public List<KeyRange> getSplits() throws SQLException;
 
-    public int getSplitsPerRegion(int numRegions);
-
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5cdc938e/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
index 99b5910..a800fd9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
@@ -106,6 +106,10 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
     public static final byte[] SYSTEM_CATALOG_TABLE_BYTES = Bytes.toBytes(SYSTEM_CATALOG_SCHEMA);
     public static final String SYSTEM_CATALOG_NAME = SchemaUtil.getTableName(SYSTEM_CATALOG_SCHEMA, SYSTEM_CATALOG_TABLE);
     public static final byte[] SYSTEM_CATALOG_NAME_BYTES = SchemaUtil.getTableNameAsBytes(SYSTEM_CATALOG_TABLE_BYTES, SYSTEM_CATALOG_SCHEMA_BYTES);
+    public static final String SYSTEM_STATS_TABLE = "STATS";
+    public static final byte[] SYSTEM_STATS_BYTES = Bytes.toBytes(SYSTEM_STATS_TABLE);
+    public static final String SYSTEM_STATS_NAME = SchemaUtil.getTableName(SYSTEM_CATALOG_SCHEMA, SYSTEM_STATS_TABLE);
+    public static final byte[] SYSTEM_STATS_NAME_BYTES = SchemaUtil.getTableNameAsBytes(SYSTEM_CATALOG_TABLE_BYTES, SYSTEM_STATS_BYTES);
     
     public static final String SYSTEM_CATALOG_ALIAS = "\"SYSTEM.TABLE\"";
 
@@ -113,6 +117,8 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
     public static final byte[] TABLE_NAME_BYTES = Bytes.toBytes(TABLE_NAME);
     public static final String TABLE_TYPE = "TABLE_TYPE";
     public static final byte[] TABLE_TYPE_BYTES = Bytes.toBytes(TABLE_TYPE);
+    public static final String PHYSICAL_NAME = "PHYSICAL_NAME";
+    public static final byte[] PHYSICAL_NAME_BYTES = Bytes.toBytes(PHYSICAL_NAME);
     
     public static final String COLUMN_FAMILY = "COLUMN_FAMILY";
     public static final String TABLE_CAT = "TABLE_CAT";
@@ -224,6 +230,16 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
     public static final String INDEX_DISABLE_TIMESTAMP = "INDEX_DISABLE_TIMESTAMP";
     public static final byte[] INDEX_DISABLE_TIMESTAMP_BYTES = Bytes.toBytes(INDEX_DISABLE_TIMESTAMP);
     
+    public static final String REGION_NAME = "REGION_NAME";
+    public static final byte[] REGION_NAME_BYTES = Bytes.toBytes(REGION_NAME);
+    public static final String GUIDE_POSTS = "GUIDE_POSTS";
+    public static final byte[] GUIDE_POSTS_BYTES = Bytes.toBytes(GUIDE_POSTS);
+    public static final String MIN_KEY = "MIN_KEY";
+    public static final byte[] MIN_KEY_BYTES = Bytes.toBytes(MIN_KEY);
+    public static final String MAX_KEY = "MAX_KEY";
+    public static final byte[] MAX_KEY_BYTES = Bytes.toBytes(MAX_KEY);
+    public static final String LAST_STATS_UPDATE_TIME = "LAST_STATS_UPDATE_TIME";
+    public static final byte[] LAST_STATS_UPDATE_TIME_BYTES = Bytes.toBytes(LAST_STATS_UPDATE_TIME);
     private final PhoenixConnection connection;
     private final ResultSet emptyResultSet;
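
The constants added here declare the SYSTEM.STATS table name and the statistics columns (REGION_NAME, GUIDE_POSTS, MIN_KEY, MAX_KEY, LAST_STATS_UPDATE_TIME), plus a PHYSICAL_NAME column for the catalog. A hedged example of inspecting collected stats over JDBC follows; the connection URL is a placeholder, and it assumes these constants surface as SQL columns of SYSTEM.STATS (the table's DDL lives elsewhere in this patch).

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class ReadStatsSketch {
        public static void main(String[] args) throws Exception {
            // "jdbc:phoenix:localhost" is a placeholder connection URL.
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                 Statement stmt = conn.createStatement();
                 ResultSet rs = stmt.executeQuery(
                         "SELECT REGION_NAME, MIN_KEY, MAX_KEY, LAST_STATS_UPDATE_TIME"
                         + " FROM SYSTEM.STATS")) {
                while (rs.next()) {
                    // Column types are assumptions; getObject avoids guessing them.
                    System.out.println(rs.getObject("REGION_NAME") + " last analyzed at "
                            + rs.getObject("LAST_STATS_UPDATE_TIME"));
                }
            }
        }
    }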
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5cdc938e/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
index 00d271d..f7d6e14 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
@@ -91,6 +91,7 @@ import org.apache.phoenix.parse.SQLParser;
 import org.apache.phoenix.parse.SelectStatement;
 import org.apache.phoenix.parse.TableName;
 import org.apache.phoenix.parse.TableNode;
+import org.apache.phoenix.parse.UpdateStatisticsStatement;
 import org.apache.phoenix.parse.UpsertStatement;
 import org.apache.phoenix.query.KeyRange;
 import org.apache.phoenix.query.QueryConstants;
@@ -650,6 +651,49 @@ public class PhoenixStatement implements Statement, SQLCloseable, org.apache.pho
             };
         }
     }
+    
+    private static class ExecutableUpdateStatisticsStatement extends UpdateStatisticsStatement implements
+            CompilableStatement {
+
+        public ExecutableUpdateStatisticsStatement(NamedTableNode table) {
+            super(table);
+        }
+
+        @SuppressWarnings("unchecked")
+        @Override
+        public MutationPlan compilePlan(final PhoenixStatement stmt, Sequence.ValueOp seqAction) throws SQLException {
+            final StatementContext context = new StatementContext(stmt);
+            return new MutationPlan() {
+
+                @Override
+                public StatementContext getContext() {
+                    return context;
+                }
+
+                @Override
+                public ParameterMetaData getParameterMetaData() {
+                    return PhoenixParameterMetaData.EMPTY_PARAMETER_META_DATA;
+                }
+
+                @Override
+                public ExplainPlan getExplainPlan() throws SQLException {
+                    return new ExplainPlan(Collections.singletonList("ANALYZE"));
+                }
+
+                @Override
+                public PhoenixConnection getConnection() {
+                    return stmt.getConnection();
+                }
+
+                @Override
+                public MutationState execute() throws SQLException {
+                    MetaDataClient client = new MetaDataClient(getConnection());
+                    return client.updateStatistics(ExecutableUpdateStatisticsStatement.this);
+                }
+            };
+        }
+
+    }
 
     private static class ExecutableAddColumnStatement extends AddColumnStatement implements CompilableStatement {
 
@@ -806,6 +850,11 @@ public class PhoenixStatement implements Statement, SQLCloseable, org.apache.pho
         public ExplainStatement explain(BindableStatement statement) {
             return new ExecutableExplainStatement(statement);
         }
+
+        @Override
+        public UpdateStatisticsStatement updateStatistics(NamedTableNode table) {
+            return new ExecutableUpdateStatisticsStatement(table);
+        }
     }
     
     static class PhoenixStatementParser extends SQLParser {
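
ExecutableUpdateStatisticsStatement compiles into a MutationPlan whose execute() delegates to MetaDataClient.updateStatistics and whose explain plan reports "ANALYZE". A small client-side sketch, assuming the grammar change elsewhere in this patch exposes the statement as ANALYZE <table>; the table name and URL below are placeholders.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.Statement;

    public class AnalyzeSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                 Statement stmt = conn.createStatement()) {
                // Compiles to ExecutableUpdateStatisticsStatement; its MutationPlan
                // invokes MetaDataClient.updateStatistics(...) on execute().
                stmt.execute("ANALYZE MY_TABLE");
            }
        }
    }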

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5cdc938e/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
index e16849f..16423e0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
@@ -339,6 +339,10 @@ public class ParseNodeFactory {
     public DivideParseNode divide(List<ParseNode> children) {
         return new DivideParseNode(children);
     }
+    
+    public UpdateStatisticsStatement updateStatistics(NamedTableNode table) {
+      return new UpdateStatisticsStatement(table);
+    }
 
 
     public FunctionParseNode functionDistinct(String name, List<ParseNode> args) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5cdc938e/phoenix-core/src/main/java/org/apache/phoenix/parse/UpdateStatisticsStatement.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/UpdateStatisticsStatement.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/UpdateStatisticsStatement.java
new file mode 100644
index 0000000..9eff74a
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/UpdateStatisticsStatement.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.parse;
+
+public class UpdateStatisticsStatement extends SingleTableStatement {
+
+    public UpdateStatisticsStatement(NamedTableNode table) {
+        super(table, 0);
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5cdc938e/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
index ddf6b61..0c1f45d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
@@ -63,8 +63,6 @@ public interface ConnectionQueryServices extends QueryServices, MetaDataMutated
 
     public HTableDescriptor getTableDescriptor(byte[] tableName) throws SQLException;
 
-    public StatsManager getStatsManager();
-
     public List<HRegionLocation> getAllTableRegions(byte[] tableName) throws SQLException;
 
     public PhoenixConnection connect(String url, Properties info) throws SQLException;
@@ -96,6 +94,9 @@ public interface ConnectionQueryServices extends QueryServices, MetaDataMutated
     void addConnection(PhoenixConnection connection) throws SQLException;
     void removeConnection(PhoenixConnection connection) throws SQLException;
 
+    long updateStatistics(KeyRange keyRange, byte[] tableName)
+            throws SQLException;
+
     /**
      * @return the {@link KeyValueBuilder} that is valid for the locally installed version of HBase.
      */
@@ -105,4 +106,5 @@ public interface ConnectionQueryServices extends QueryServices, MetaDataMutated
     public boolean supportsFeature(Feature feature);
     
     public String getUserName();
+    public void clearCacheForTable(final byte[] tenantId, final byte[] schemaName, final byte[] tableName, long clientTS) throws SQLException;
 }
\ No newline at end of file
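
ConnectionQueryServices drops the old StatsManager accessor and instead exposes updateStatistics(KeyRange, byte[]) together with clearCacheForTable(...). The sketch below shows one plausible way a caller could chain the two: collect stats over the table's key space, then invalidate the cached table so fresh guideposts are read. The real driver is MetaDataClient.updateStatistics, which is not part of this hunk, so the ordering and the LATEST_TIMESTAMP argument are assumptions; the snippet compiles only against the Phoenix classpath of this patch.

    import java.sql.SQLException;

    import org.apache.hadoop.hbase.HConstants;
    import org.apache.phoenix.query.ConnectionQueryServices;
    import org.apache.phoenix.query.KeyRange;

    public class StatsRefreshSketch {
        // Collect stats over the whole key space, then drop the cached PTable so the
        // next query planning pass sees the new guideposts. LATEST_TIMESTAMP is a
        // placeholder; a real caller would pass the client's timestamp when one is set.
        static long collectAndRefresh(ConnectionQueryServices services, byte[] tenantId,
                byte[] schemaName, byte[] tableName) throws SQLException {
            long result = services.updateStatistics(KeyRange.EVERYTHING_RANGE, tableName);
            services.clearCacheForTable(tenantId, schemaName, tableName, HConstants.LATEST_TIMESTAMP);
            return result;
        }
    }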