Posted to commits@phoenix.apache.org by st...@apache.org on 2023/12/12 06:19:53 UTC

(phoenix) branch master updated: PHOENIX-6053 Split Server Side Code into a Separate Module

This is an automated email from the ASF dual-hosted git repository.

stoty pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
     new 7cd93529be PHOENIX-6053 Split Server Side Code into a Separate Module
7cd93529be is described below

commit 7cd93529bef856091fa42414e7874c6de7310a7e
Author: Aron Meszaros <me...@gmail.com>
AuthorDate: Mon Sep 11 14:34:43 2023 +0200

    PHOENIX-6053 Split Server Side Code into a Separate Module
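    [Editor's sketch, not part of the commit message: the diffstat below moves
    client-side classes such as org.apache.phoenix.jdbc.PhoenixDriver and
    PhoenixConnection into the new phoenix-core-client module. As a rough
    illustration of what a client-only dependency is meant to support, here is a
    minimal JDBC usage sketch; the ZooKeeper quorum address is a placeholder and
    the artifact coordinates are not specified by this commit.]

        import java.sql.Connection;
        import java.sql.DriverManager;
        import java.sql.ResultSet;
        import java.sql.Statement;

        public class ClientOnlyExample {
            public static void main(String[] args) throws Exception {
                // The thick-client driver (org.apache.phoenix.jdbc.PhoenixDriver,
                // in phoenix-core-client per the diffstat) self-registers with
                // DriverManager. "zk-host:2181" is a hypothetical quorum address.
                try (Connection conn =
                         DriverManager.getConnection("jdbc:phoenix:zk-host:2181");
                     Statement stmt = conn.createStatement();
                     // SYSTEM.CATALOG is Phoenix's metadata table.
                     ResultSet rs = stmt.executeQuery(
                         "SELECT TABLE_NAME FROM SYSTEM.CATALOG LIMIT 5")) {
                    while (rs.next()) {
                        System.out.println(rs.getString(1));
                    }
                }
            }
        }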
---
 .../phoenix-client-embedded/pom.xml                |    2 +-
 phoenix-core-client/pom.xml                        |  434 ++
 .../src/build/phoenix-core.xml                     |    0
 .../src/main/antlr3/PhoenixSQL.g                   |    0
 .../org/apache/hadoop/hbase/PhoenixTagType.java    |    0
 .../apache/hadoop/hbase/client/RegionInfoUtil.java |    0
 .../ipc/controller/ClientRpcControllerFactory.java |    0
 .../hbase/ipc/controller/IndexRpcController.java   |   57 +
 ...InterRegionServerIndexRpcControllerFactory.java |    0
 .../ipc/controller/MetadataRpcController.java      |   74 +
 .../controller/ServerSideRPCControllerFactory.java |    0
 .../controller/ServerToServerRpcController.java    |    0
 .../ServerToServerRpcControllerImpl.java           |   75 +
 .../java/org/apache/phoenix/cache/HashCache.java   |    0
 .../apache/phoenix/cache/IndexMetaDataCache.java   |    0
 .../apache/phoenix/cache/JodaTimezoneCache.java    |    0
 .../apache/phoenix/cache/ServerCacheClient.java    |  553 ++
 .../java/org/apache/phoenix/cache/TenantCache.java |   42 +
 .../org/apache/phoenix/cache/TenantCacheImpl.java  |  287 +
 .../java/org/apache/phoenix/call/CallRunner.java   |    0
 .../java/org/apache/phoenix/call/CallWrapper.java  |    0
 .../apache/phoenix/compile/AggregationManager.java |  115 +
 .../apache/phoenix/compile/BaseMutationPlan.java   |    0
 .../org/apache/phoenix/compile/BindManager.java    |    0
 .../phoenix/compile/CloseStatementCompiler.java    |    0
 .../ColumnNameTrackingExpressionCompiler.java      |    0
 .../apache/phoenix/compile/ColumnProjector.java    |    0
 .../org/apache/phoenix/compile/ColumnResolver.java |    0
 .../org/apache/phoenix/compile/CompiledOffset.java |    0
 .../phoenix/compile/CreateFunctionCompiler.java    |    0
 .../phoenix/compile/CreateIndexCompiler.java       |    0
 .../phoenix/compile/CreateSchemaCompiler.java      |    0
 .../phoenix/compile/CreateSequenceCompiler.java    |    0
 .../phoenix/compile/CreateTableCompiler.java       |    0
 .../phoenix/compile/DeclareCursorCompiler.java     |    0
 .../phoenix/compile/DelegateMutationPlan.java      |    0
 .../org/apache/phoenix/compile/DeleteCompiler.java | 1030 ++++
 .../phoenix/compile/DropSequenceCompiler.java      |    0
 .../org/apache/phoenix/compile/ExplainPlan.java    |    0
 .../phoenix/compile/ExplainPlanAttributes.java     |    0
 .../apache/phoenix/compile/ExpressionCompiler.java |    0
 .../apache/phoenix/compile/ExpressionManager.java  |    0
 .../phoenix/compile/ExpressionProjector.java       |    0
 .../org/apache/phoenix/compile/FromCompiler.java   | 1233 ++++
 .../apache/phoenix/compile/GroupByCompiler.java    |  476 ++
 .../org/apache/phoenix/compile/HavingCompiler.java |    0
 .../phoenix/compile/IndexExpressionCompiler.java   |    0
 .../phoenix/compile/IndexStatementRewriter.java    |    0
 .../org/apache/phoenix/compile/JoinCompiler.java   | 1589 +++++
 .../java/org/apache/phoenix/compile/KeyPart.java   |    0
 .../org/apache/phoenix/compile/LimitCompiler.java  |    0
 .../apache/phoenix/compile/ListJarsQueryPlan.java  |    0
 .../compile/MutatingParallelIteratorFactory.java   |    0
 .../org/apache/phoenix/compile/MutationPlan.java   |    0
 .../org/apache/phoenix/compile/OffsetCompiler.java |    0
 .../phoenix/compile/OpenStatementCompiler.java     |    0
 .../apache/phoenix/compile/OrderByCompiler.java    |    0
 .../phoenix/compile/OrderPreservingTracker.java    |    0
 .../apache/phoenix/compile/PostDDLCompiler.java    |  372 ++
 .../phoenix/compile/PostIndexDDLCompiler.java      |    0
 .../phoenix/compile/PostLocalIndexDDLCompiler.java |  133 +
 .../apache/phoenix/compile/ProjectionCompiler.java |  796 +++
 .../org/apache/phoenix/compile/QueryCompiler.java  |  814 +++
 .../java/org/apache/phoenix/compile/QueryPlan.java |    0
 .../apache/phoenix/compile/RVCOffsetCompiler.java  |    0
 .../org/apache/phoenix/compile/RowProjector.java   |    0
 .../org/apache/phoenix/compile/ScanRanges.java     |  785 +++
 .../apache/phoenix/compile/SequenceManager.java    |    0
 .../phoenix/compile/SequenceValueExpression.java   |    0
 .../phoenix/compile/ServerBuildIndexCompiler.java  |  161 +
 .../ServerBuildTransformingTableCompiler.java      |   99 +
 .../compile/StatelessExpressionCompiler.java       |    0
 .../apache/phoenix/compile/StatementContext.java   |    0
 .../phoenix/compile/StatementNormalizer.java       |    0
 .../org/apache/phoenix/compile/StatementPlan.java  |    0
 .../apache/phoenix/compile/SubqueryRewriter.java   |    0
 .../apache/phoenix/compile/SubselectRewriter.java  |    0
 .../org/apache/phoenix/compile/TraceQueryPlan.java |    0
 .../phoenix/compile/TupleProjectionCompiler.java   |    0
 .../org/apache/phoenix/compile/UnionCompiler.java  |    0
 .../org/apache/phoenix/compile/UpsertCompiler.java | 1455 +++++
 .../org/apache/phoenix/compile/WhereCompiler.java  |  958 +++
 .../org/apache/phoenix/compile/WhereOptimizer.java |    0
 .../BaseScannerRegionObserverConstants.java        |  157 +
 .../HashJoinCacheNotFoundException.java            |   45 +
 .../MetaDataEndpointImplConstants.java             |   35 +
 .../coprocessorclient/MetaDataProtocol.java        |  521 ++
 .../ScanRegionObserverConstants.java               |   29 +
 .../SequenceRegionObserverConstants.java           |   25 +
 .../coprocessorclient/ServerCachingProtocol.java   |   61 +
 .../phoenix/coprocessorclient/TableInfo.java       |   79 +
 .../UngroupedAggregateRegionObserverHelper.java    |   59 +
 .../coprocessorclient/WhereConstantParser.java     |  110 +
 .../MetricsPhoenixCoprocessorSourceFactory.java    |   45 +
 .../metrics/MetricsPhoenixTTLSource.java           |   59 +
 .../metrics/MetricsPhoenixTTLSourceImpl.java       |   58 +
 .../tasks/IndexRebuildTaskConstants.java           |   25 +
 .../exception/DataExceedsCapacityException.java    |    0
 .../phoenix/exception/FailoverSQLException.java    |    0
 .../InvalidRegionSplitPolicyException.java         |    0
 .../phoenix/exception/PhoenixIOException.java      |    0
 .../PhoenixNonRetryableRuntimeException.java       |    0
 .../phoenix/exception/PhoenixParserException.java  |    0
 .../exception/RetriableUpgradeException.java       |    0
 .../apache/phoenix/exception/SQLExceptionCode.java |  694 +++
 .../apache/phoenix/exception/SQLExceptionInfo.java |    0
 .../exception/UndecodableByteException.java        |    0
 .../exception/UnknownFunctionException.java        |    0
 .../exception/UpgradeInProgressException.java      |   31 +
 .../exception/UpgradeNotRequiredException.java     |    0
 .../exception/UpgradeRequiredException.java        |    0
 .../org/apache/phoenix/execute/AggregatePlan.java  |  405 ++
 .../org/apache/phoenix/execute/BaseQueryPlan.java  |  579 ++
 .../phoenix/execute/ClientAggregatePlan.java       |  384 ++
 .../phoenix/execute/ClientProcessingPlan.java      |    0
 .../org/apache/phoenix/execute/ClientScanPlan.java |    0
 .../apache/phoenix/execute/CommitException.java    |    0
 .../apache/phoenix/execute/CursorFetchPlan.java    |    0
 .../org/apache/phoenix/execute/DelegateHTable.java |    0
 .../apache/phoenix/execute/DelegateQueryPlan.java  |    0
 .../execute/DescVarLengthFastByteComparisons.java  |    0
 .../org/apache/phoenix/execute/HashJoinPlan.java   |  700 +++
 .../execute/LiteralResultIterationPlan.java        |    0
 .../org/apache/phoenix/execute/MutationState.java  | 2274 +++++++
 .../execute/PhoenixTxIndexMutationGenerator.java   |  522 ++
 .../org/apache/phoenix/execute/RuntimeContext.java |    0
 .../apache/phoenix/execute/RuntimeContextImpl.java |    0
 .../java/org/apache/phoenix/execute/ScanPlan.java  |  405 ++
 .../apache/phoenix/execute/SortMergeJoinPlan.java  |    0
 .../phoenix/execute/TupleProjectionPlan.java       |    0
 .../org/apache/phoenix/execute/TupleProjector.java |  495 ++
 .../java/org/apache/phoenix/execute/UnionPlan.java |    0
 .../apache/phoenix/execute/UnnestArrayPlan.java    |    0
 .../execute/visitor/AvgRowWidthVisitor.java        |    0
 .../phoenix/execute/visitor/ByteCountVisitor.java  |    0
 .../phoenix/execute/visitor/QueryPlanVisitor.java  |    0
 .../phoenix/execute/visitor/RowCountVisitor.java   |    0
 .../apache/phoenix/expression/AddExpression.java   |    0
 .../apache/phoenix/expression/AndExpression.java   |    0
 .../apache/phoenix/expression/AndOrExpression.java |    0
 .../phoenix/expression/ArithmeticExpression.java   |    0
 .../expression/ArrayConstructorExpression.java     |    0
 .../expression/BaseAddSubtractExpression.java      |    0
 .../phoenix/expression/BaseCompoundExpression.java |    0
 .../BaseDecimalAddSubtractExpression.java          |    0
 .../apache/phoenix/expression/BaseExpression.java  |    0
 .../phoenix/expression/BaseSingleExpression.java   |    0
 .../phoenix/expression/BaseTerminalExpression.java |    0
 .../expression/ByteBasedLikeExpression.java        |    0
 .../apache/phoenix/expression/CaseExpression.java  |    0
 .../phoenix/expression/CoerceExpression.java       |    0
 .../phoenix/expression/ColumnExpression.java       |    0
 .../phoenix/expression/ComparisonExpression.java   |    0
 .../CorrelateVariableFieldAccessExpression.java    |    0
 .../expression/CurrentDateTimeFunction.java        |    0
 .../phoenix/expression/DateAddExpression.java      |    0
 .../phoenix/expression/DateSubtractExpression.java |    0
 .../phoenix/expression/DecimalAddExpression.java   |    0
 .../expression/DecimalDivideExpression.java        |    0
 .../expression/DecimalMultiplyExpression.java      |    0
 .../expression/DecimalSubtractExpression.java      |    0
 .../phoenix/expression/DelegateExpression.java     |    0
 .../org/apache/phoenix/expression/Determinism.java |    0
 .../phoenix/expression/DivideExpression.java       |    0
 .../phoenix/expression/DoubleAddExpression.java    |    0
 .../phoenix/expression/DoubleDivideExpression.java |    0
 .../expression/DoubleMultiplyExpression.java       |    0
 .../expression/DoubleSubtractExpression.java       |    0
 .../org/apache/phoenix/expression/Expression.java  |    0
 .../apache/phoenix/expression/ExpressionType.java  |    0
 .../phoenix/expression/InListExpression.java       |    0
 .../phoenix/expression/IsNullExpression.java       |    0
 .../expression/KeyValueColumnExpression.java       |    0
 .../apache/phoenix/expression/LikeExpression.java  |    0
 .../phoenix/expression/LiteralExpression.java      |    0
 .../phoenix/expression/LongAddExpression.java      |    0
 .../phoenix/expression/LongDivideExpression.java   |    0
 .../phoenix/expression/LongMultiplyExpression.java |    0
 .../phoenix/expression/LongSubtractExpression.java |    0
 .../phoenix/expression/ModulusExpression.java      |    0
 .../phoenix/expression/MultiplyExpression.java     |    0
 .../apache/phoenix/expression/NotExpression.java   |    0
 .../apache/phoenix/expression/OrExpression.java    |    0
 .../phoenix/expression/OrderByExpression.java      |    0
 .../expression/ProjectedColumnExpression.java      |    0
 .../phoenix/expression/RowKeyColumnExpression.java |    0
 .../phoenix/expression/RowKeyExpression.java       |    0
 .../expression/RowValueConstructorExpression.java  |    0
 .../expression/SingleCellColumnExpression.java     |    0
 .../SingleCellConstructorExpression.java           |    0
 .../expression/StringBasedLikeExpression.java      |    0
 .../phoenix/expression/StringConcatExpression.java |    0
 .../phoenix/expression/SubtractExpression.java     |    0
 .../phoenix/expression/TimestampAddExpression.java |    0
 .../expression/TimestampSubtractExpression.java    |    0
 .../phoenix/expression/aggregator/Aggregator.java  |    0
 .../phoenix/expression/aggregator/Aggregators.java |    0
 .../expression/aggregator/BaseAggregator.java      |    0
 .../aggregator/BaseDecimalStddevAggregator.java    |    0
 .../aggregator/BaseStddevAggregator.java           |    0
 .../expression/aggregator/ClientAggregators.java   |    0
 .../expression/aggregator/CountAggregator.java     |    0
 .../aggregator/DecimalStddevPopAggregator.java     |    0
 .../aggregator/DecimalStddevSampAggregator.java    |    0
 .../aggregator/DecimalSumAggregator.java           |    0
 .../aggregator/DistinctCountClientAggregator.java  |    0
 .../aggregator/DistinctValueClientAggregator.java  |    0
 .../DistinctValueWithCountClientAggregator.java    |    0
 .../DistinctValueWithCountServerAggregator.java    |    0
 .../expression/aggregator/DoubleSumAggregator.java |    0
 .../FirstLastValueBaseClientAggregator.java        |    0
 .../aggregator/FirstLastValueServerAggregator.java |    0
 .../expression/aggregator/IntSumAggregator.java    |    0
 .../expression/aggregator/LongSumAggregator.java   |    0
 .../expression/aggregator/MaxAggregator.java       |    0
 .../expression/aggregator/MinAggregator.java       |    0
 .../NonSizeTrackingServerAggregators.java          |    0
 .../expression/aggregator/NumberSumAggregator.java |    0
 .../aggregator/PercentRankClientAggregator.java    |    0
 .../aggregator/PercentileClientAggregator.java     |    0
 .../aggregator/PercentileDiscClientAggregator.java |    0
 .../expression/aggregator/ServerAggregators.java   |    0
 .../aggregator/SizeTrackingServerAggregators.java  |    0
 .../expression/aggregator/StddevPopAggregator.java |    0
 .../aggregator/StddevSampAggregator.java           |    0
 .../aggregator/UnsignedIntSumAggregator.java       |    0
 .../aggregator/UnsignedLongSumAggregator.java      |    0
 .../phoenix/expression/function/AbsFunction.java   |    0
 .../expression/function/AggregateFunction.java     |    0
 .../function/ArrayAllComparisonExpression.java     |    0
 .../function/ArrayAnyComparisonExpression.java     |    0
 .../expression/function/ArrayAppendFunction.java   |    0
 .../expression/function/ArrayConcatFunction.java   |    0
 .../function/ArrayElemRefExpression.java           |    0
 .../expression/function/ArrayFillFunction.java     |    0
 .../expression/function/ArrayIndexFunction.java    |    0
 .../expression/function/ArrayLengthFunction.java   |    0
 .../expression/function/ArrayModifierFunction.java |    0
 .../expression/function/ArrayPrependFunction.java  |    0
 .../expression/function/ArrayRemoveFunction.java   |    0
 .../expression/function/ArrayToStringFunction.java |    0
 .../expression/function/AvgAggregateFunction.java  |    0
 .../function/ByteBasedRegexpReplaceFunction.java   |    0
 .../function/ByteBasedRegexpSplitFunction.java     |    0
 .../function/ByteBasedRegexpSubstrFunction.java    |    0
 .../phoenix/expression/function/CbrtFunction.java  |    0
 .../expression/function/CeilDateExpression.java    |    0
 .../expression/function/CeilDecimalExpression.java |    0
 .../phoenix/expression/function/CeilFunction.java  |    0
 .../expression/function/CeilMonthExpression.java   |    0
 .../function/CeilTimestampExpression.java          |    0
 .../expression/function/CeilWeekExpression.java    |    0
 .../expression/function/CeilYearExpression.java    |    0
 .../expression/function/CoalesceFunction.java      |    0
 .../expression/function/CollationKeyFunction.java  |    0
 .../function/CompositeAggregateFunction.java       |    0
 .../function/ConvertTimezoneFunction.java          |    0
 .../phoenix/expression/function/CosFunction.java   |    0
 .../function/CountAggregateFunction.java           |    0
 .../expression/function/CurrentDateFunction.java   |    0
 .../expression/function/CurrentTimeFunction.java   |    0
 .../expression/function/DateScalarFunction.java    |    0
 .../expression/function/DayOfMonthFunction.java    |    0
 .../expression/function/DayOfWeekFunction.java     |    0
 .../expression/function/DayOfYearFunction.java     |    0
 .../expression/function/DecodeFunction.java        |    0
 .../function/DefaultValueExpression.java           |    0
 .../DelegateConstantToCountAggregateFunction.java  |    0
 .../function/DistinctCountAggregateFunction.java   |    0
 .../DistinctCountHyperLogLogAggregateFunction.java |    0
 .../function/DistinctValueAggregateFunction.java   |    0
 .../DistinctValueWithCountAggregateFunction.java   |    0
 .../phoenix/expression/function/EncodeFormat.java  |    0
 .../expression/function/EncodeFunction.java        |    0
 .../phoenix/expression/function/ExpFunction.java   |    0
 .../function/ExternalSqlTypeIdFunction.java        |    0
 .../function/FirstLastValueBaseFunction.java       |    0
 .../expression/function/FirstValueFunction.java    |    0
 .../expression/function/FirstValuesFunction.java   |    0
 .../expression/function/FloorDateExpression.java   |    0
 .../function/FloorDecimalExpression.java           |    0
 .../phoenix/expression/function/FloorFunction.java |    0
 .../expression/function/FloorMonthExpression.java  |    0
 .../expression/function/FloorWeekExpression.java   |    0
 .../expression/function/FloorYearExpression.java   |    0
 .../expression/function/FunctionArgumentType.java  |    0
 .../expression/function/FunctionExpression.java    |    0
 .../expression/function/GetBitFunction.java        |    0
 .../expression/function/GetByteFunction.java       |    0
 .../phoenix/expression/function/HourFunction.java  |    0
 .../function/IndexStateNameFunction.java           |    0
 .../phoenix/expression/function/InstrFunction.java |    0
 .../expression/function/InvertFunction.java        |    0
 .../function/JavaMathOneArgumentFunction.java      |    0
 .../function/JavaMathTwoArgumentFunction.java      |    0
 .../phoenix/expression/function/LTrimFunction.java |    0
 .../expression/function/LastValueFunction.java     |    0
 .../expression/function/LastValuesFunction.java    |    0
 .../expression/function/LengthFunction.java        |    0
 .../phoenix/expression/function/LnFunction.java    |    0
 .../phoenix/expression/function/LogFunction.java   |    0
 .../phoenix/expression/function/LowerFunction.java |    0
 .../phoenix/expression/function/LpadFunction.java  |    0
 .../phoenix/expression/function/MD5Function.java   |    0
 .../expression/function/MathPIFunction.java        |    0
 .../expression/function/MaxAggregateFunction.java  |    0
 .../expression/function/MinAggregateFunction.java  |    0
 .../expression/function/MinuteFunction.java        |    0
 .../phoenix/expression/function/MonthFunction.java |    0
 .../phoenix/expression/function/NowFunction.java   |    0
 .../expression/function/NthValueFunction.java      |    0
 .../expression/function/OctetLengthFunction.java   |    0
 .../function/PercentRankAggregateFunction.java     |    0
 .../function/PercentileContAggregateFunction.java  |    0
 .../function/PercentileDiscAggregateFunction.java  |    0
 .../function/PhoenixRowTimestampFunction.java      |    0
 .../phoenix/expression/function/PowerFunction.java |    0
 .../expression/function/PrefixFunction.java        |    0
 .../phoenix/expression/function/RTrimFunction.java |    0
 .../expression/function/RandomFunction.java        |    0
 .../expression/function/RegexpReplaceFunction.java |    0
 .../expression/function/RegexpSplitFunction.java   |    0
 .../expression/function/RegexpSubstrFunction.java  |    0
 .../expression/function/ReverseFunction.java       |    0
 .../expression/function/RoundDateExpression.java   |    0
 .../function/RoundDecimalExpression.java           |    0
 .../phoenix/expression/function/RoundFunction.java |    0
 .../function/RoundJodaDateExpression.java          |    0
 .../expression/function/RoundMonthExpression.java  |    0
 .../function/RoundTimestampExpression.java         |    0
 .../expression/function/RoundWeekExpression.java   |    0
 .../expression/function/RoundYearExpression.java   |    0
 .../function/RowKeyBytesStringFunction.java        |    0
 .../expression/function/SQLIndexTypeFunction.java  |    0
 .../expression/function/SQLTableTypeFunction.java  |    0
 .../expression/function/SQLViewTypeFunction.java   |    0
 .../expression/function/ScalarFunction.java        |    0
 .../expression/function/SecondFunction.java        |    0
 .../expression/function/SetBitFunction.java        |    0
 .../expression/function/SetByteFunction.java       |    0
 .../phoenix/expression/function/SignFunction.java  |    0
 .../phoenix/expression/function/SinFunction.java   |    0
 .../function/SingleAggregateFunction.java          |    0
 .../expression/function/SqlTypeNameFunction.java   |    0
 .../phoenix/expression/function/SqrtFunction.java  |    0
 .../expression/function/StddevPopFunction.java     |    0
 .../expression/function/StddevSampFunction.java    |    0
 .../function/StringBasedRegexpReplaceFunction.java |    0
 .../function/StringBasedRegexpSplitFunction.java   |    0
 .../function/StringBasedRegexpSubstrFunction.java  |    0
 .../expression/function/StringToArrayFunction.java |    0
 .../expression/function/SubstrFunction.java        |    0
 .../expression/function/SumAggregateFunction.java  |    0
 .../phoenix/expression/function/TanFunction.java   |    0
 .../phoenix/expression/function/TimeUnit.java      |    0
 .../function/TimezoneOffsetFunction.java           |    0
 .../expression/function/ToCharFunction.java        |    0
 .../expression/function/ToDateFunction.java        |    0
 .../expression/function/ToNumberFunction.java      |    0
 .../expression/function/ToTimeFunction.java        |    0
 .../expression/function/ToTimestampFunction.java   |    0
 .../function/TransactionProviderNameFunction.java  |    0
 .../phoenix/expression/function/TrimFunction.java  |    0
 .../phoenix/expression/function/TruncFunction.java |    0
 .../phoenix/expression/function/UDFExpression.java |    0
 .../phoenix/expression/function/UpperFunction.java |    0
 .../phoenix/expression/function/WeekFunction.java  |    0
 .../phoenix/expression/function/YearFunction.java  |    0
 .../RowValueConstructorExpressionRewriter.java     |    0
 .../expression/util/regex/AbstractBasePattern.java |    0
 .../util/regex/AbstractBaseSplitter.java           |    0
 .../expression/util/regex/GuavaSplitter.java       |    0
 .../phoenix/expression/util/regex/JONIPattern.java |    0
 .../phoenix/expression/util/regex/JavaPattern.java |    0
 .../expression/visitor/BaseExpressionVisitor.java  |    0
 .../expression/visitor/CloneExpressionVisitor.java |    0
 .../expression/visitor/ExpressionVisitor.java      |    0
 .../visitor/KeyValueExpressionVisitor.java         |    0
 .../visitor/ProjectedColumnExpressionVisitor.java  |    0
 .../ReplaceArrayFunctionExpressionVisitor.java     |    0
 .../visitor/RowKeyExpressionVisitor.java           |    0
 .../visitor/SingleAggregateFunctionVisitor.java    |    0
 .../StatelessTraverseAllExpressionVisitor.java     |    0
 .../StatelessTraverseNoExpressionVisitor.java      |    0
 .../visitor/TraverseAllExpressionVisitor.java      |    0
 .../visitor/TraverseNoExpressionVisitor.java       |    0
 .../filter/AllVersionsIndexRebuildFilter.java      |    0
 .../phoenix/filter/BooleanExpressionFilter.java    |  135 +
 .../phoenix/filter/ColumnProjectionFilter.java     |    0
 .../org/apache/phoenix/filter/DelegateFilter.java  |    0
 .../phoenix/filter/DistinctPrefixFilter.java       |    0
 .../phoenix/filter/EmptyColumnOnlyFilter.java      |    0
 .../EncodedQualifiersColumnProjectionFilter.java   |    0
 .../filter/MultiCFCQKeyValueComparisonFilter.java  |    0
 .../filter/MultiCQKeyValueComparisonFilter.java    |    0
 .../MultiEncodedCQKeyValueComparisonFilter.java    |  415 ++
 .../filter/MultiKeyValueComparisonFilter.java      |  291 +
 .../org/apache/phoenix/filter/PagingFilter.java    |    0
 .../phoenix/filter/RowKeyComparisonFilter.java     |    0
 .../filter/SingleCFCQKeyValueComparisonFilter.java |    0
 .../filter/SingleCQKeyValueComparisonFilter.java   |    0
 .../filter/SingleKeyValueComparisonFilter.java     |    0
 .../org/apache/phoenix/filter/SkipScanFilter.java  |    0
 .../filter/SystemCatalogViewIndexIdFilter.java     |  161 +
 .../apache/phoenix/filter/UnverifiedRowFilter.java |    0
 .../phoenix/hbase/index/AbstractValueGetter.java   |    0
 .../apache/phoenix/hbase/index/BaseIndexCodec.java |   34 +
 .../apache/phoenix/hbase/index/MultiMutation.java  |    0
 .../org/apache/phoenix/hbase/index/OffsetCell.java |    0
 .../apache/phoenix/hbase/index/ValueGetter.java    |    0
 .../FatalIndexBuildingFailureException.java        |    0
 .../builder/IndexBuildingFailureException.java     |    0
 .../apache/phoenix/hbase/index/covered/Batch.java  |    0
 .../phoenix/hbase/index/covered/IndexCodec.java    |   88 +
 .../phoenix/hbase/index/covered/IndexMetaData.java |   55 +
 .../phoenix/hbase/index/covered/IndexUpdate.java   |    0
 .../phoenix/hbase/index/covered/KeyValueStore.java |    0
 .../phoenix/hbase/index/covered/TableState.java    |   73 +
 .../index/covered/data/DelegateComparator.java     |    0
 .../hbase/index/covered/data/LazyValueGetter.java  |    0
 .../hbase/index/covered/data/LocalHBaseState.java  |    0
 .../filter/ApplyAndFilterDeletesFilter.java        |    0
 .../ColumnTrackingNextLargestTimestampFilter.java  |    0
 .../index/covered/filter/MaxTimestampFilter.java   |    0
 .../index/covered/filter/NewerTimestampFilter.java |    0
 .../index/covered/update/ColumnReference.java      |    0
 .../hbase/index/covered/update/ColumnTracker.java  |    0
 .../index/covered/update/IndexUpdateManager.java   |    0
 .../index/covered/update/IndexedColumnGroup.java   |    0
 .../hbase/index/exception/IndexWriteException.java |    0
 .../exception/MultiIndexWriteFailureException.java |    0
 .../SingleIndexWriteFailureException.java          |    0
 .../index/metrics/GlobalIndexCheckerSource.java    |    0
 .../metrics/GlobalIndexCheckerSourceImpl.java      |    0
 .../hbase/index/metrics/MetricsIndexerSource.java  |  212 +
 .../index/metrics/MetricsIndexerSourceFactory.java |    0
 .../index/metrics/MetricsIndexerSourceImpl.java    |    0
 .../hbase/index/parallel/BaseTaskRunner.java       |    0
 .../hbase/index/parallel/EarlyExitFailure.java     |    0
 .../index/parallel/QuickFailingTaskRunner.java     |    0
 .../apache/phoenix/hbase/index/parallel/Task.java  |    0
 .../phoenix/hbase/index/parallel/TaskBatch.java    |    0
 .../phoenix/hbase/index/parallel/TaskRunner.java   |    0
 .../hbase/index/parallel/ThreadPoolBuilder.java    |    0
 .../parallel/WaitForCompletionTaskRunner.java      |    0
 .../phoenix/hbase/index/scanner/EmptyScanner.java  |    0
 .../index/scanner/FilteredKeyValueScanner.java     |    0
 .../hbase/index/scanner/ReseekableScanner.java     |    0
 .../phoenix/hbase/index/scanner/Scanner.java       |    0
 .../hbase/index/scanner/ScannerBuilder.java        |    0
 .../phoenix/hbase/index/table/HTableFactory.java   |    0
 .../index/table/HTableInterfaceReference.java      |    0
 .../hbase/index/util/GenericKeyValueBuilder.java   |    0
 .../hbase/index/util/ImmutableBytesPtr.java        |    0
 .../hbase/index/util/IndexManagementUtil.java      |  294 +
 .../phoenix/hbase/index/util/KeyValueBuilder.java  |    0
 .../phoenix/hbase/index/util/VersionUtil.java      |    0
 .../org/apache/phoenix/index/IndexMaintainer.java  | 2299 ++++++++
 .../phoenix/index/IndexMetaDataCacheClient.java    |  158 +
 .../phoenix/index/IndexMetaDataCacheFactory.java   |   82 +
 .../phoenix/index/PhoenixIndexBuilderHelper.java   |  146 +
 .../apache/phoenix/index/PhoenixIndexCodec.java    |  139 +
 .../index/PhoenixIndexFailurePolicyHelper.java     |  245 +
 .../apache/phoenix/index/PhoenixIndexMetaData.java |  101 +
 .../phoenix/iterate/AggregatingResultIterator.java |    0
 .../BaseGroupedAggregatingResultIterator.java      |    0
 .../apache/phoenix/iterate/BaseResultIterator.java |    0
 .../phoenix/iterate/BaseResultIterators.java       | 1770 ++++++
 .../org/apache/phoenix/iterate/BufferedQueue.java  |    0
 .../phoenix/iterate/BufferedSortedQueue.java       |    0
 .../apache/phoenix/iterate/BufferedTupleQueue.java |    0
 .../phoenix/iterate/ChunkedResultIterator.java     |  249 +
 .../ClientHashAggregatingResultIterator.java       |    0
 .../phoenix/iterate/ConcatResultIterator.java      |  150 +
 .../phoenix/iterate/CursorResultIterator.java      |    0
 .../iterate/DefaultParallelScanGrouper.java        |    0
 .../iterate/DefaultTableResultIteratorFactory.java |    0
 .../phoenix/iterate/DelegateResultIterator.java    |    0
 .../iterate/DistinctAggregatingResultIterator.java |    0
 .../org/apache/phoenix/iterate/ExplainTable.java   |  550 ++
 .../iterate/FilterAggregatingResultIterator.java   |    0
 .../phoenix/iterate/FilterResultIterator.java      |    0
 .../iterate/GroupedAggregatingResultIterator.java  |    0
 .../iterate/LimitingPeekingResultIterator.java     |    0
 .../phoenix/iterate/LimitingResultIterator.java    |    0
 .../phoenix/iterate/LookAheadResultIterator.java   |    0
 .../MaterializedComparableResultIterator.java      |    0
 .../iterate/MaterializedResultIterator.java        |    0
 .../phoenix/iterate/MergeSortResultIterator.java   |    0
 .../iterate/MergeSortRowKeyResultIterator.java     |    0
 .../iterate/MergeSortTopNResultIterator.java       |    0
 .../phoenix/iterate/OffsetResultIterator.java      |    0
 .../iterate/OrderedAggregatingResultIterator.java  |    0
 .../phoenix/iterate/OrderedResultIterator.java     |  426 ++
 .../phoenix/iterate/ParallelIteratorFactory.java   |    0
 .../iterate/ParallelIteratorRegionSplitter.java    |    0
 .../apache/phoenix/iterate/ParallelIterators.java  |    0
 .../phoenix/iterate/ParallelScanGrouper.java       |    0
 .../phoenix/iterate/ParallelScansCollector.java    |    0
 .../phoenix/iterate/PeekingResultIterator.java     |    0
 .../org/apache/phoenix/iterate/PhoenixQueues.java  |    0
 .../org/apache/phoenix/iterate/ResultIterator.java |    0
 .../apache/phoenix/iterate/ResultIterators.java    |    0
 .../phoenix/iterate/RoundRobinResultIterator.java  |  354 ++
 .../RowKeyOrderedAggregateResultIterator.java      |  200 +
 .../phoenix/iterate/ScanningResultIterator.java    |  236 +
 .../phoenix/iterate/ScansWithRegionLocations.java  |    0
 .../phoenix/iterate/SequenceResultIterator.java    |    0
 .../apache/phoenix/iterate/SerialIterators.java    |  231 +
 .../org/apache/phoenix/iterate/SizeAwareQueue.java |    0
 .../org/apache/phoenix/iterate/SizeBoundQueue.java |    0
 .../iterate/SpoolTooBigToDiskException.java        |    0
 .../phoenix/iterate/SpoolingResultIterator.java    |  381 ++
 .../phoenix/iterate/TableResultIterator.java       |  344 ++
 .../iterate/TableResultIteratorFactory.java        |    0
 .../phoenix/iterate/TableSamplerPredicate.java     |    0
 .../UngroupedAggregatingResultIterator.java        |    0
 .../phoenix/iterate/UnionResultIterators.java      |  166 +
 .../phoenix/jdbc/AbstractRPCConnectionInfo.java    |    0
 .../org/apache/phoenix/jdbc/ClusterRoleRecord.java |    0
 .../jdbc/ClusterRoleRecordGeneratorTool.java       |    0
 .../org/apache/phoenix/jdbc/ConnectionInfo.java    |    0
 .../apache/phoenix/jdbc/DelegateConnection.java    |    0
 .../phoenix/jdbc/DelegatePreparedStatement.java    |    0
 .../org/apache/phoenix/jdbc/DelegateResultSet.java |    0
 .../org/apache/phoenix/jdbc/DelegateStatement.java |    0
 .../phoenix/jdbc/FailoverPhoenixConnection.java    |    0
 .../org/apache/phoenix/jdbc/FailoverPolicy.java    |    0
 .../apache/phoenix/jdbc/HighAvailabilityGroup.java |    0
 .../phoenix/jdbc/HighAvailabilityPolicy.java       |    0
 .../phoenix/jdbc/LoggingPhoenixConnection.java     |    0
 .../jdbc/LoggingPhoenixPreparedStatement.java      |    0
 .../phoenix/jdbc/LoggingPhoenixResultSet.java      |    0
 .../phoenix/jdbc/LoggingPhoenixStatement.java      |    0
 .../apache/phoenix/jdbc/MasterConnectionInfo.java  |    0
 .../phoenix/jdbc/ParallelPhoenixConnection.java    |    0
 .../phoenix/jdbc/ParallelPhoenixContext.java       |    0
 .../phoenix/jdbc/ParallelPhoenixMetrics.java       |    0
 .../ParallelPhoenixNullComparingResultSet.java     |    0
 .../jdbc/ParallelPhoenixPreparedStatement.java     |    0
 .../phoenix/jdbc/ParallelPhoenixResultSet.java     |    0
 .../jdbc/ParallelPhoenixResultSetFactory.java      |    0
 .../phoenix/jdbc/ParallelPhoenixStatement.java     |    0
 .../apache/phoenix/jdbc/ParallelPhoenixUtil.java   |    0
 .../org/apache/phoenix/jdbc/PhoenixConnection.java |    0
 .../phoenix/jdbc/PhoenixDatabaseMetaData.java      | 1951 ++++++
 .../org/apache/phoenix/jdbc/PhoenixDriver.java     |    0
 .../apache/phoenix/jdbc/PhoenixEmbeddedDriver.java |  198 +
 .../apache/phoenix/jdbc/PhoenixHAAdminTool.java    |    0
 .../jdbc/PhoenixHAExecutorServiceProvider.java     |    0
 .../apache/phoenix/jdbc/PhoenixHAGroupMetrics.java |    0
 .../apache/phoenix/jdbc/PhoenixMetricsHolder.java  |    0
 .../org/apache/phoenix/jdbc/PhoenixMetricsLog.java |    0
 .../phoenix/jdbc/PhoenixMonitoredConnection.java   |    0
 .../jdbc/PhoenixMonitoredPreparedStatement.java    |    0
 .../phoenix/jdbc/PhoenixMonitoredResultSet.java    |    0
 .../phoenix/jdbc/PhoenixMonitoredStatement.java    |    0
 .../phoenix/jdbc/PhoenixParameterMetaData.java     |    0
 .../phoenix/jdbc/PhoenixPreparedStatement.java     |    0
 .../org/apache/phoenix/jdbc/PhoenixResultSet.java  | 1649 ++++++
 .../phoenix/jdbc/PhoenixResultSetMetaData.java     |    0
 .../org/apache/phoenix/jdbc/PhoenixStatement.java  | 2598 ++++++++
 .../phoenix/jdbc/PhoenixStatementFactory.java      |    0
 .../org/apache/phoenix/jdbc/RPCConnectionInfo.java |    0
 .../org/apache/phoenix/jdbc/ZKConnectionInfo.java  |    0
 .../phoenix/job/AbstractRoundRobinQueue.java       |    0
 .../java/org/apache/phoenix/job/JobManager.java    |    0
 .../org/apache/phoenix/join/HashCacheClient.java   |  228 +
 .../org/apache/phoenix/join/HashCacheFactory.java  |  174 +
 .../java/org/apache/phoenix/join/HashJoinInfo.java |    0
 .../join/MaxServerCacheSizeExceededException.java  |    0
 .../org/apache/phoenix/log/ActivityLogInfo.java    |    0
 .../org/apache/phoenix/log/AuditQueryLogger.java   |    0
 .../apache/phoenix/log/BaseConnectionLimiter.java  |    0
 .../phoenix/log/ConnectionActivityLogger.java      |    0
 .../org/apache/phoenix/log/ConnectionLimiter.java  |    0
 .../phoenix/log/DefaultConnectionLimiter.java      |    0
 .../main/java/org/apache/phoenix/log/LogLevel.java |    0
 .../java/org/apache/phoenix/log/LogWriter.java     |    0
 .../phoenix/log/LoggingConnectionLimiter.java      |    0
 .../phoenix/log/QueryLogDetailsWorkHandler.java    |    0
 .../java/org/apache/phoenix/log/QueryLogInfo.java  |    0
 .../java/org/apache/phoenix/log/QueryLogger.java   |    0
 .../log/QueryLoggerDefaultExceptionHandler.java    |    0
 .../apache/phoenix/log/QueryLoggerDisruptor.java   |    0
 .../org/apache/phoenix/log/QueryLoggerUtil.java    |    0
 .../java/org/apache/phoenix/log/QueryStatus.java   |    0
 .../org/apache/phoenix/log/RingBufferEvent.java    |    0
 .../phoenix/log/RingBufferEventTranslator.java     |    0
 .../org/apache/phoenix/log/TableLogWriter.java     |    0
 .../phoenix/mapreduce/util/ConnectionUtil.java     |  141 +
 .../util/PhoenixConfigurationUtilHelper.java       |  148 +
 .../apache/phoenix/memory/ChildMemoryManager.java  |    0
 .../phoenix/memory/DelegatingMemoryManager.java    |    0
 .../apache/phoenix/memory/GlobalMemoryManager.java |    0
 .../memory/InsufficientMemoryException.java        |    0
 .../org/apache/phoenix/memory/MemoryManager.java   |    0
 .../org/apache/phoenix/metrics/MetricInfo.java     |    0
 .../java/org/apache/phoenix/metrics/Metrics.java   |    0
 .../apache/phoenix/monitoring/AtomicMetric.java    |    0
 .../phoenix/monitoring/CombinableMetric.java       |    0
 .../phoenix/monitoring/CombinableMetricImpl.java   |    0
 .../monitoring/ConnectionQueryServicesMetric.java  |    0
 .../ConnectionQueryServicesMetricImpl.java         |    0
 .../phoenix/monitoring/GlobalClientMetrics.java    |    0
 .../apache/phoenix/monitoring/GlobalMetric.java    |    0
 .../phoenix/monitoring/GlobalMetricImpl.java       |    0
 .../monitoring/GlobalMetricRegistriesAdapter.java  |    0
 .../phoenix/monitoring/HistogramDistribution.java  |    0
 .../monitoring/HistogramDistributionImpl.java      |    0
 .../phoenix/monitoring/JmxMetricProvider.java      |    0
 .../phoenix/monitoring/LatencyHistogram.java       |   46 +
 .../phoenix/monitoring/MemoryMetricsHolder.java    |    0
 .../java/org/apache/phoenix/monitoring/Metric.java |    0
 .../monitoring/MetricPublisherSupplierFactory.java |    0
 .../phoenix/monitoring/MetricServiceResolver.java  |    0
 .../org/apache/phoenix/monitoring/MetricType.java  |    0
 .../org/apache/phoenix/monitoring/MetricUtil.java  |    0
 .../apache/phoenix/monitoring/MetricsRegistry.java |    0
 .../phoenix/monitoring/MetricsStopWatch.java       |    0
 .../phoenix/monitoring/MutationMetricQueue.java    |    0
 .../phoenix/monitoring/NoOpGlobalMetricImpl.java   |    0
 .../monitoring/NoOpTableMetricsManager.java        |    0
 .../apache/phoenix/monitoring/NonAtomicMetric.java |    0
 .../phoenix/monitoring/OverAllQueryMetrics.java    |    0
 .../phoenix/monitoring/PhoenixTableMetric.java     |    0
 .../phoenix/monitoring/PhoenixTableMetricImpl.java |    0
 .../apache/phoenix/monitoring/RangeHistogram.java  |    0
 .../apache/phoenix/monitoring/ReadMetricQueue.java |    0
 .../phoenix/monitoring/ScanMetricsHolder.java      |    0
 .../apache/phoenix/monitoring/SizeHistogram.java   |   47 +
 .../phoenix/monitoring/SpoolingMetricsHolder.java  |    0
 .../phoenix/monitoring/TableClientMetrics.java     |    0
 .../apache/phoenix/monitoring/TableHistograms.java |    0
 .../phoenix/monitoring/TableMetricsManager.java    |    0
 .../monitoring/TaskExecutionMetricsHolder.java     |    0
 .../ConnectionQueryServicesHistogram.java          |   43 +
 .../ConnectionQueryServicesMetrics.java            |    0
 .../ConnectionQueryServicesMetricsHistograms.java  |    0
 .../ConnectionQueryServicesMetricsManager.java     |    0
 .../NoOpConnectionQueryServicesMetricsManager.java |    0
 .../java/org/apache/phoenix/optimize/Cost.java     |    0
 .../optimize/GenSubqueryParamValuesRewriter.java   |    0
 .../apache/phoenix/optimize/QueryOptimizer.java    |    0
 .../apache/phoenix/parse/AddColumnStatement.java   |    0
 .../org/apache/phoenix/parse/AddJarsStatement.java |    0
 .../org/apache/phoenix/parse/AddParseNode.java     |    0
 .../phoenix/parse/AggregateFunctionParseNode.java  |    0
 .../AggregateFunctionWithinGroupParseNode.java     |    0
 .../java/org/apache/phoenix/parse/AliasedNode.java |    0
 .../apache/phoenix/parse/AlterIndexStatement.java  |    0
 .../phoenix/parse/AlterSessionStatement.java       |    0
 .../apache/phoenix/parse/AlterTableStatement.java  |    0
 .../phoenix/parse/AndBooleanParseNodeVisitor.java  |    0
 .../org/apache/phoenix/parse/AndParseNode.java     |    0
 .../parse/AndRewriterBooleanParseNodeVisitor.java  |    0
 .../apache/phoenix/parse/ArithmeticParseNode.java  |    0
 .../phoenix/parse/ArrayAllAnyComparisonNode.java   |    0
 .../phoenix/parse/ArrayAllComparisonNode.java      |    0
 .../phoenix/parse/ArrayAnyComparisonNode.java      |    0
 .../apache/phoenix/parse/ArrayConstructorNode.java |    0
 .../org/apache/phoenix/parse/ArrayElemRefNode.java |    0
 .../phoenix/parse/ArrayModifierParseNode.java      |    0
 .../phoenix/parse/AvgAggregateParseNode.java       |    0
 .../apache/phoenix/parse/BaseParseNodeVisitor.java |    0
 .../org/apache/phoenix/parse/BetweenParseNode.java |    0
 .../org/apache/phoenix/parse/BinaryParseNode.java  |    0
 .../org/apache/phoenix/parse/BindParseNode.java    |    0
 .../org/apache/phoenix/parse/BindTableNode.java    |    0
 .../apache/phoenix/parse/BindableStatement.java    |    0
 .../phoenix/parse/BooleanParseNodeVisitor.java     |    0
 .../org/apache/phoenix/parse/CaseParseNode.java    |    0
 .../org/apache/phoenix/parse/CastParseNode.java    |    0
 .../org/apache/phoenix/parse/CeilParseNode.java    |    0
 .../apache/phoenix/parse/ChangePermsStatement.java |    0
 .../org/apache/phoenix/parse/CloseStatement.java   |    0
 .../java/org/apache/phoenix/parse/ColumnDef.java   |    0
 .../phoenix/parse/ColumnDefInPkConstraint.java     |    0
 .../org/apache/phoenix/parse/ColumnFamilyDef.java  |    0
 .../java/org/apache/phoenix/parse/ColumnName.java  |    0
 .../org/apache/phoenix/parse/ColumnParseNode.java  |    0
 .../apache/phoenix/parse/ComparisonParseNode.java  |    0
 .../apache/phoenix/parse/CompoundParseNode.java    |    0
 .../apache/phoenix/parse/ConcreteTableNode.java    |    0
 .../phoenix/parse/CreateFunctionStatement.java     |    0
 .../apache/phoenix/parse/CreateIndexStatement.java |    0
 .../phoenix/parse/CreateSchemaStatement.java       |    0
 .../phoenix/parse/CreateSequenceStatement.java     |    0
 .../apache/phoenix/parse/CreateTableStatement.java |    0
 .../apache/phoenix/parse/CurrentDateParseNode.java |    0
 .../apache/phoenix/parse/CurrentTimeParseNode.java |    0
 .../java/org/apache/phoenix/parse/CursorName.java  |    0
 .../org/apache/phoenix/parse/DMLStatement.java     |    0
 .../phoenix/parse/DeclareCursorStatement.java      |    0
 .../parse/DelegateConstantToCountParseNode.java    |    0
 .../apache/phoenix/parse/DeleteJarStatement.java   |    0
 .../org/apache/phoenix/parse/DeleteStatement.java  |    0
 .../org/apache/phoenix/parse/DerivedTableNode.java |    0
 ...DistinctCountHyperLogLogAggregateParseNode.java |    0
 .../phoenix/parse/DistinctCountParseNode.java      |    0
 .../org/apache/phoenix/parse/DivideParseNode.java  |    0
 .../apache/phoenix/parse/DropColumnStatement.java  |    0
 .../phoenix/parse/DropFunctionStatement.java       |    0
 .../apache/phoenix/parse/DropIndexStatement.java   |    0
 .../apache/phoenix/parse/DropSchemaStatement.java  |    0
 .../phoenix/parse/DropSequenceStatement.java       |    0
 .../apache/phoenix/parse/DropTableStatement.java   |    0
 .../org/apache/phoenix/parse/EqualParseNode.java   |    0
 .../phoenix/parse/ExecuteUpgradeStatement.java     |    0
 .../org/apache/phoenix/parse/ExistsParseNode.java  |    0
 .../org/apache/phoenix/parse/ExplainStatement.java |    0
 .../java/org/apache/phoenix/parse/ExplainType.java |    0
 .../phoenix/parse/FamilyWildcardParseNode.java     |    0
 .../org/apache/phoenix/parse/FetchStatement.java   |    0
 .../apache/phoenix/parse/FilterableStatement.java  |    0
 .../parse/FirstValueAggregateParseNode.java        |    0
 .../parse/FirstValuesAggregateParseNode.java       |    0
 .../org/apache/phoenix/parse/FloorParseNode.java   |    0
 .../apache/phoenix/parse/FunctionParseNode.java    |    0
 .../phoenix/parse/GreaterThanOrEqualParseNode.java |    0
 .../apache/phoenix/parse/GreaterThanParseNode.java |    0
 .../java/org/apache/phoenix/parse/HintNode.java    |    0
 .../org/apache/phoenix/parse/InListParseNode.java  |    0
 .../java/org/apache/phoenix/parse/InParseNode.java |    0
 .../parse/IndexExpressionParseNodeRewriter.java    |    0
 .../apache/phoenix/parse/IndexKeyConstraint.java   |    0
 .../org/apache/phoenix/parse/IsNullParseNode.java  |    0
 .../org/apache/phoenix/parse/JoinTableNode.java    |    0
 .../phoenix/parse/LastValueAggregateParseNode.java |    0
 .../parse/LastValuesAggregateParseNode.java        |    0
 .../phoenix/parse/LessThanOrEqualParseNode.java    |    0
 .../apache/phoenix/parse/LessThanParseNode.java    |    0
 .../org/apache/phoenix/parse/LikeParseNode.java    |    0
 .../java/org/apache/phoenix/parse/LimitNode.java   |    0
 .../apache/phoenix/parse/ListJarsStatement.java    |    0
 .../org/apache/phoenix/parse/LiteralParseNode.java |    0
 .../phoenix/parse/MaxAggregateParseNode.java       |    0
 .../phoenix/parse/MinAggregateParseNode.java       |    0
 .../org/apache/phoenix/parse/ModulusParseNode.java |    0
 .../apache/phoenix/parse/MultiplyParseNode.java    |    0
 .../org/apache/phoenix/parse/MutableStatement.java |    0
 .../java/org/apache/phoenix/parse/NamedNode.java   |    0
 .../org/apache/phoenix/parse/NamedParseNode.java   |    0
 .../org/apache/phoenix/parse/NamedTableNode.java   |    0
 .../apache/phoenix/parse/NotEqualParseNode.java    |    0
 .../org/apache/phoenix/parse/NotParseNode.java     |    0
 .../phoenix/parse/NthValueAggregateParseNode.java  |    0
 .../java/org/apache/phoenix/parse/OffsetNode.java  |    0
 .../org/apache/phoenix/parse/OpenStatement.java    |    0
 .../java/org/apache/phoenix/parse/OrParseNode.java |    0
 .../java/org/apache/phoenix/parse/OrderByNode.java |    0
 .../java/org/apache/phoenix/parse/PFunction.java   |    0
 .../java/org/apache/phoenix/parse/PSchema.java     |    0
 .../org/apache/phoenix/parse/ParseContext.java     |    0
 .../org/apache/phoenix/parse/ParseException.java   |    0
 .../java/org/apache/phoenix/parse/ParseNode.java   |    0
 .../org/apache/phoenix/parse/ParseNodeFactory.java |    0
 .../apache/phoenix/parse/ParseNodeRewriter.java    |    0
 .../org/apache/phoenix/parse/ParseNodeVisitor.java |    0
 .../parse/PhoenixRowTimestampParseNode.java        |   98 +
 .../apache/phoenix/parse/PrimaryKeyConstraint.java |    0
 .../org/apache/phoenix/parse/PropertyName.java     |    0
 .../phoenix/parse/RegexpReplaceParseNode.java      |    0
 .../apache/phoenix/parse/RegexpSplitParseNode.java |    0
 .../phoenix/parse/RegexpSubstrParseNode.java       |    0
 .../org/apache/phoenix/parse/RoundParseNode.java   |    0
 .../parse/RowValueConstructorParseNode.java        |    0
 .../java/org/apache/phoenix/parse/SQLParser.java   |    0
 .../org/apache/phoenix/parse/SelectStatement.java  |    0
 .../phoenix/parse/SelectStatementRewriter.java     |    0
 .../phoenix/parse/SequenceValueParseNode.java      |    0
 .../org/apache/phoenix/parse/ShowCreateTable.java  |    0
 .../phoenix/parse/ShowCreateTableStatement.java    |    0
 .../apache/phoenix/parse/ShowSchemasStatement.java |    0
 .../org/apache/phoenix/parse/ShowStatement.java    |    0
 .../apache/phoenix/parse/ShowTablesStatement.java  |    0
 .../apache/phoenix/parse/SingleTableStatement.java |    0
 .../StatelessTraverseAllParseNodeVisitor.java      |    0
 .../phoenix/parse/StringConcatParseNode.java       |    0
 .../apache/phoenix/parse/SubqueryParseNode.java    |    0
 .../apache/phoenix/parse/SubtractParseNode.java    |    0
 .../phoenix/parse/SumAggregateParseNode.java       |    0
 .../java/org/apache/phoenix/parse/TableName.java   |    0
 .../java/org/apache/phoenix/parse/TableNode.java   |    0
 .../org/apache/phoenix/parse/TableNodeVisitor.java |    0
 .../phoenix/parse/TableWildcardParseNode.java      |    0
 .../apache/phoenix/parse/TerminalParseNode.java    |    0
 .../org/apache/phoenix/parse/ToCharParseNode.java  |    0
 .../org/apache/phoenix/parse/ToDateParseNode.java  |    0
 .../apache/phoenix/parse/ToNumberParseNode.java    |    0
 .../org/apache/phoenix/parse/ToTimeParseNode.java  |    0
 .../apache/phoenix/parse/ToTimestampParseNode.java |    0
 .../org/apache/phoenix/parse/TraceStatement.java   |    0
 .../phoenix/parse/TraverseAllParseNodeVisitor.java |    0
 .../phoenix/parse/TraverseNoParseNodeVisitor.java  |    0
 .../org/apache/phoenix/parse/UDFParseNode.java     |    0
 .../org/apache/phoenix/parse/UnaryParseNode.java   |    0
 .../parse/UnsupportedAllParseNodeVisitor.java      |    0
 .../phoenix/parse/UpdateStatisticsStatement.java   |    0
 .../org/apache/phoenix/parse/UpsertStatement.java  |    0
 .../apache/phoenix/parse/UseSchemaStatement.java   |    0
 .../apache/phoenix/parse/WildcardParseNode.java    |    0
 .../org/apache/phoenix/protobuf/ProtobufUtil.java  |    0
 .../phoenix/query/AdminUtilWithFallback.java       |    0
 .../phoenix/query/BaseQueryServicesImpl.java       |    0
 .../query/ChildLinkMetaDataServiceCallBack.java    |    0
 .../apache/phoenix/query/ChildQueryServices.java   |    0
 .../apache/phoenix/query/ConfigurationFactory.java |    0
 .../phoenix/query/ConnectionQueryServices.java     |  230 +
 .../phoenix/query/ConnectionQueryServicesImpl.java | 6183 +++++++++++++++++++
 .../query/ConnectionlessQueryServicesImpl.java     |  817 +++
 .../query/DefaultGuidePostsCacheFactory.java       |    0
 .../query/DelegateConnectionQueryServices.java     |  424 ++
 .../phoenix/query/DelegateQueryServices.java       |    0
 .../org/apache/phoenix/query/EmptyStatsLoader.java |    0
 .../org/apache/phoenix/query/GuidePostsCache.java  |    0
 .../phoenix/query/GuidePostsCacheFactory.java      |    0
 .../apache/phoenix/query/GuidePostsCacheImpl.java  |    0
 .../phoenix/query/GuidePostsCacheProvider.java     |    0
 .../phoenix/query/GuidePostsCacheWrapper.java      |    0
 .../apache/phoenix/query/HBaseFactoryProvider.java |    0
 .../apache/phoenix/query/HConnectionFactory.java   |    0
 .../org/apache/phoenix/query/HTableFactory.java    |    0
 .../phoenix/query/ITGuidePostsCacheFactory.java    |    0
 .../java/org/apache/phoenix/query/KeyRange.java    |    0
 .../org/apache/phoenix/query/MetaDataMutated.java  |    0
 .../phoenix/query/PhoenixStatsCacheLoader.java     |    0
 .../apache/phoenix/query/PhoenixStatsLoader.java   |    0
 .../phoenix/query/PropertyNotAllowedException.java |    0
 .../org/apache/phoenix/query/PropertyPolicy.java   |    0
 .../phoenix/query/PropertyPolicyProvider.java      |    0
 .../org/apache/phoenix/query/QueryConstants.java   |  617 ++
 .../org/apache/phoenix/query/QueryServices.java    |  470 ++
 .../apache/phoenix/query/QueryServicesImpl.java    |    0
 .../apache/phoenix/query/QueryServicesOptions.java |  972 +++
 .../org/apache/phoenix/query/StatsLoaderImpl.java  |    0
 .../phoenix/schema/AmbiguousColumnException.java   |    0
 .../phoenix/schema/AmbiguousTableException.java    |    0
 .../schema/ArgumentTypeMismatchException.java      |    0
 .../schema/ColumnAlreadyExistsException.java       |    0
 .../schema/ColumnFamilyNotFoundException.java      |    0
 .../apache/phoenix/schema/ColumnMetaDataOps.java   |    0
 .../org/apache/phoenix/schema/ColumnModifier.java  |    0
 .../phoenix/schema/ColumnNotFoundException.java    |    0
 .../java/org/apache/phoenix/schema/ColumnRef.java  |    0
 .../apache/phoenix/schema/ColumnValueDecoder.java  |    0
 .../apache/phoenix/schema/ColumnValueEncoder.java  |    0
 .../schema/ConcurrentTableMutationException.java   |    0
 .../apache/phoenix/schema/ConnectionProperty.java  |    0
 .../schema/ConstraintViolationException.java       |    0
 .../org/apache/phoenix/schema/DelegateColumn.java  |    0
 .../org/apache/phoenix/schema/DelegateDatum.java   |    0
 .../phoenix/schema/DelegateSQLException.java       |    0
 .../org/apache/phoenix/schema/DelegateTable.java   |    0
 .../schema/EmptySequenceCacheException.java        |    0
 .../schema/ExecuteQueryNotApplicableException.java |    0
 .../ExecuteUpdateNotApplicableException.java       |    0
 .../schema/FunctionAlreadyExistsException.java     |    0
 .../phoenix/schema/FunctionNotFoundException.java  |    0
 .../phoenix/schema/IllegalDataException.java       |    0
 .../phoenix/schema/IndexNotFoundException.java     |    0
 .../schema/IndexUncoveredDataColumnRef.java        |    0
 .../org/apache/phoenix/schema/KeyValueSchema.java  |    0
 .../MaxMutationSizeBytesExceededException.java     |    0
 .../schema/MaxMutationSizeExceededException.java   |    0
 .../MaxPhoenixColumnSizeExceededException.java     |    0
 .../org/apache/phoenix/schema/MetaDataClient.java  | 6019 +++++++++++++++++++
 .../schema/MetaDataEntityNotFoundException.java    |    0
 .../NewerFunctionAlreadyExistsException.java       |    0
 .../schema/NewerSchemaAlreadyExistsException.java  |    0
 .../schema/NewerTableAlreadyExistsException.java   |    0
 .../java/org/apache/phoenix/schema/PColumn.java    |    0
 .../org/apache/phoenix/schema/PColumnFamily.java   |    0
 .../apache/phoenix/schema/PColumnFamilyImpl.java   |    0
 .../org/apache/phoenix/schema/PColumnImpl.java     |    0
 .../java/org/apache/phoenix/schema/PDatum.java     |    0
 .../org/apache/phoenix/schema/PIndexState.java     |    0
 .../java/org/apache/phoenix/schema/PMetaData.java  |    0
 .../org/apache/phoenix/schema/PMetaDataCache.java  |    0
 .../org/apache/phoenix/schema/PMetaDataEntity.java |    0
 .../org/apache/phoenix/schema/PMetaDataImpl.java   |    0
 .../main/java/org/apache/phoenix/schema/PName.java |    0
 .../org/apache/phoenix/schema/PNameFactory.java    |    0
 .../java/org/apache/phoenix/schema/PNameImpl.java  |    0
 .../main/java/org/apache/phoenix/schema/PRow.java  |   87 +
 .../phoenix/schema/PSynchronizedMetaData.java      |    0
 .../java/org/apache/phoenix/schema/PTable.java     |    0
 .../java/org/apache/phoenix/schema/PTableImpl.java | 2467 ++++++++
 .../java/org/apache/phoenix/schema/PTableKey.java  |    0
 .../java/org/apache/phoenix/schema/PTableRef.java  |    0
 .../apache/phoenix/schema/PTableRefFactory.java    |    0
 .../org/apache/phoenix/schema/PTableRefImpl.java   |    0
 .../java/org/apache/phoenix/schema/PTableType.java |    0
 .../org/apache/phoenix/schema/ProjectedColumn.java |    0
 .../phoenix/schema/ReadOnlyTableException.java     |    0
 .../org/apache/phoenix/schema/RowKeySchema.java    |    0
 .../apache/phoenix/schema/RowKeyValueAccessor.java |    0
 ...lueConstructorOffsetInternalErrorException.java |    0
 ...onstructorOffsetNotAllowedInQueryException.java |    0
 ...alueConstructorOffsetNotCoercibleException.java |    0
 .../org/apache/phoenix/schema/SaltingUtil.java     |    0
 .../schema/SchemaAlreadyExistsException.java       |    0
 .../phoenix/schema}/SchemaExtractionProcessor.java |    0
 .../phoenix/schema/SchemaNotFoundException.java    |    0
 .../java/org/apache/phoenix/schema/Sequence.java   |  639 ++
 .../apache/phoenix/schema/SequenceAllocation.java  |    0
 .../schema/SequenceAlreadyExistsException.java     |    0
 .../org/apache/phoenix/schema/SequenceInfo.java    |    0
 .../org/apache/phoenix/schema/SequenceKey.java     |    0
 .../phoenix/schema/SequenceNotFoundException.java  |    0
 .../apache/phoenix/schema/SerializedPTableRef.java |    0
 .../phoenix/schema/SerializedPTableRefFactory.java |    0
 .../java/org/apache/phoenix/schema/SortOrder.java  |    0
 .../schema/StaleRegionBoundaryCacheException.java  |    0
 .../schema/TableAlreadyExistsException.java        |    0
 .../phoenix/schema/TableNotFoundException.java     |    0
 .../org/apache/phoenix/schema/TableProperty.java   |    0
 .../java/org/apache/phoenix/schema/TableRef.java   |    0
 .../phoenix/schema/TablesNotInSyncException.java   |    0
 .../phoenix/schema/TypeMismatchException.java      |    0
 .../UpsertColumnsValuesMismatchException.java      |    0
 .../org/apache/phoenix/schema/ValueBitSet.java     |    0
 .../apache/phoenix/schema/ValueRangeExcpetion.java |    0
 .../org/apache/phoenix/schema/ValueSchema.java     |    0
 .../export/DefaultSchemaRegistryRepository.java    |    0
 .../phoenix/schema/export/DefaultSchemaWriter.java |    0
 .../phoenix/schema/export/SchemaImporter.java      |    0
 .../schema/export/SchemaRegistryRepository.java    |    0
 .../export/SchemaRegistryRepositoryFactory.java    |    0
 .../apache/phoenix/schema/export/SchemaWriter.java |    0
 .../phoenix/schema/export/SchemaWriterFactory.java |    0
 .../schema/metrics/MetricsMetadataSource.java      |    0
 .../metrics/MetricsMetadataSourceFactory.java      |    0
 .../schema/metrics/MetricsMetadataSourceImpl.java  |    0
 .../phoenix/schema/stats/GuidePostsInfo.java       |    0
 .../schema/stats/GuidePostsInfoBuilder.java        |    0
 .../apache/phoenix/schema/stats/GuidePostsKey.java |    0
 .../stats/StatisticsCollectionRunTracker.java      |    0
 .../schema/stats/StatisticsCollectionScope.java    |    0
 .../phoenix/schema/stats/StatisticsUtil.java       |  253 +
 .../StatsCollectionDisabledOnServerException.java  |    0
 .../phoenix/schema/task/SystemTaskParams.java      |    0
 .../java/org/apache/phoenix/schema/task/Task.java  |  423 ++
 .../phoenix/schema/tool/SchemaProcessor.java       |    0
 .../apache/phoenix/schema/tool/SchemaSQLUtil.java  |    0
 .../schema/tool/SchemaSynthesisProcessor.java      |    0
 .../org/apache/phoenix/schema/tool/SchemaTool.java |    0
 .../schema/transform/SystemTransformRecord.java    |    0
 .../phoenix/schema/transform/TransformClient.java  |  432 ++
 .../schema/transform/TransformMaintainer.java      |    0
 .../org/apache/phoenix/schema/tuple/BaseTuple.java |    0
 .../apache/phoenix/schema/tuple/DelegateTuple.java |    0
 .../tuple/EncodedColumnQualiferCellsList.java      |    0
 .../phoenix/schema/tuple/MultiKeyValueTuple.java   |    0
 .../tuple/PositionBasedMultiKeyValueTuple.java     |    0
 .../schema/tuple/PositionBasedResultTuple.java     |    0
 .../apache/phoenix/schema/tuple/ResultTuple.java   |    0
 .../phoenix/schema/tuple/SingleKeyValueTuple.java  |    0
 .../org/apache/phoenix/schema/tuple/Tuple.java     |    0
 .../phoenix/schema/tuple/ValueGetterTuple.java     |    0
 .../phoenix/schema/types/PArrayDataType.java       |    0
 .../schema/types/PArrayDataTypeDecoder.java        |    0
 .../schema/types/PArrayDataTypeEncoder.java        |    0
 .../org/apache/phoenix/schema/types/PBinary.java   |    0
 .../apache/phoenix/schema/types/PBinaryArray.java  |    0
 .../apache/phoenix/schema/types/PBinaryBase.java   |    0
 .../org/apache/phoenix/schema/types/PBoolean.java  |    0
 .../apache/phoenix/schema/types/PBooleanArray.java |    0
 .../org/apache/phoenix/schema/types/PChar.java     |    0
 .../apache/phoenix/schema/types/PCharArray.java    |    0
 .../org/apache/phoenix/schema/types/PDataType.java |    0
 .../phoenix/schema/types/PDataTypeFactory.java     |    0
 .../org/apache/phoenix/schema/types/PDate.java     |    0
 .../apache/phoenix/schema/types/PDateArray.java    |    0
 .../org/apache/phoenix/schema/types/PDecimal.java  |    0
 .../apache/phoenix/schema/types/PDecimalArray.java |    0
 .../org/apache/phoenix/schema/types/PDouble.java   |    0
 .../apache/phoenix/schema/types/PDoubleArray.java  |    0
 .../org/apache/phoenix/schema/types/PFloat.java    |    0
 .../apache/phoenix/schema/types/PFloatArray.java   |    0
 .../org/apache/phoenix/schema/types/PInteger.java  |    0
 .../apache/phoenix/schema/types/PIntegerArray.java |    0
 .../org/apache/phoenix/schema/types/PLong.java     |    0
 .../apache/phoenix/schema/types/PLongArray.java    |    0
 .../apache/phoenix/schema/types/PNumericType.java  |    0
 .../apache/phoenix/schema/types/PRealNumber.java   |    0
 .../org/apache/phoenix/schema/types/PSmallint.java |    0
 .../phoenix/schema/types/PSmallintArray.java       |    0
 .../org/apache/phoenix/schema/types/PTime.java     |    0
 .../apache/phoenix/schema/types/PTimeArray.java    |    0
 .../apache/phoenix/schema/types/PTimestamp.java    |    0
 .../phoenix/schema/types/PTimestampArray.java      |    0
 .../org/apache/phoenix/schema/types/PTinyint.java  |    0
 .../apache/phoenix/schema/types/PTinyintArray.java |    0
 .../apache/phoenix/schema/types/PUnsignedDate.java |    0
 .../phoenix/schema/types/PUnsignedDateArray.java   |    0
 .../phoenix/schema/types/PUnsignedDouble.java      |    0
 .../phoenix/schema/types/PUnsignedDoubleArray.java |    0
 .../phoenix/schema/types/PUnsignedFloat.java       |    0
 .../phoenix/schema/types/PUnsignedFloatArray.java  |    0
 .../apache/phoenix/schema/types/PUnsignedInt.java  |    0
 .../phoenix/schema/types/PUnsignedIntArray.java    |    0
 .../apache/phoenix/schema/types/PUnsignedLong.java |    0
 .../phoenix/schema/types/PUnsignedLongArray.java   |    0
 .../phoenix/schema/types/PUnsignedSmallint.java    |    0
 .../schema/types/PUnsignedSmallintArray.java       |    0
 .../apache/phoenix/schema/types/PUnsignedTime.java |    0
 .../phoenix/schema/types/PUnsignedTimeArray.java   |    0
 .../phoenix/schema/types/PUnsignedTimestamp.java   |    0
 .../schema/types/PUnsignedTimestampArray.java      |    0
 .../phoenix/schema/types/PUnsignedTinyint.java     |    0
 .../schema/types/PUnsignedTinyintArray.java        |    0
 .../apache/phoenix/schema/types/PVarbinary.java    |    0
 .../phoenix/schema/types/PVarbinaryArray.java      |    0
 .../org/apache/phoenix/schema/types/PVarchar.java  |    0
 .../apache/phoenix/schema/types/PVarcharArray.java |    0
 .../apache/phoenix/schema/types/PWholeNumber.java  |    0
 .../apache/phoenix/schema/types/PhoenixArray.java  |    0
 .../org/apache/phoenix/trace/MetricsInfoImpl.java  |    0
 .../apache/phoenix/trace/PhoenixMetricsSink.java   |    0
 .../java/org/apache/phoenix/trace/TraceReader.java |    0
 .../apache/phoenix/trace/TraceSpanReceiver.java    |    0
 .../java/org/apache/phoenix/trace/TraceWriter.java |    0
 .../org/apache/phoenix/trace/TracingIterator.java  |    0
 .../org/apache/phoenix/trace/TracingUtils.java     |    0
 .../phoenix/trace/util/ConfigurationAdapter.java   |    0
 .../org/apache/phoenix/trace/util/NullSpan.java    |    0
 .../org/apache/phoenix/trace/util/Tracing.java     |    0
 .../NotAvailableTransactionProvider.java           |    0
 .../transaction/OmidTransactionContext.java        |    0
 .../transaction/OmidTransactionProvider.java       |  134 +
 .../phoenix/transaction/OmidTransactionTable.java  |    0
 .../transaction/PhoenixTransactionClient.java      |    0
 .../transaction/PhoenixTransactionContext.java     |    0
 .../transaction/PhoenixTransactionProvider.java    |    0
 .../phoenix/transaction/TransactionFactory.java    |   86 +
 .../org/apache/phoenix/util/Base62Encoder.java     |    0
 .../org/apache/phoenix/util/BigDecimalUtil.java    |    0
 .../main/java/org/apache/phoenix/util/BitSet.java  |    0
 .../java/org/apache/phoenix/util/ByteUtil.java     |    0
 .../org/apache/phoenix/util/CSVCommonsLoader.java  |    0
 .../java/org/apache/phoenix/util/ClientUtil.java   |  195 +
 .../java/org/apache/phoenix/util/Closeables.java   |    0
 .../java/org/apache/phoenix/util/ColumnInfo.java   |    0
 .../java/org/apache/phoenix/util/ConfigUtil.java   |    0
 .../java/org/apache/phoenix/util/CostUtil.java     |    0
 .../java/org/apache/phoenix/util/CursorUtil.java   |    0
 .../java/org/apache/phoenix/util/DateUtil.java     |    0
 .../phoenix/util/DefaultEnvironmentEdge.java       |    0
 .../apache/phoenix/util/DeferredStringBuilder.java |    0
 .../apache/phoenix/util/EncodedColumnsUtil.java    |  207 +
 .../org/apache/phoenix/util/EnvironmentEdge.java   |    0
 .../phoenix/util/EnvironmentEdgeManager.java       |    0
 .../phoenix/util/EquiDepthStreamHistogram.java     |    0
 .../org/apache/phoenix/util/ExpressionUtil.java    |    0
 .../util/FirstLastNthValueDataContainer.java       |    0
 .../java/org/apache/phoenix/util/IndexUtil.java    |  996 ++++
 .../org/apache/phoenix/util/InstanceResolver.java  |    0
 .../java/org/apache/phoenix/util/JDBCUtil.java     |    0
 .../java/org/apache/phoenix/util/JacksonUtil.java  |    0
 .../main/java/org/apache/phoenix/util/LogUtil.java |    0
 .../org/apache/phoenix/util/MajorMinorVersion.java |    0
 .../apache/phoenix/util/ManualEnvironmentEdge.java |    0
 .../java/org/apache/phoenix/util/MetaDataUtil.java | 1185 ++++
 .../java/org/apache/phoenix/util/NumberUtil.java   |    0
 .../org/apache/phoenix/util/ParseNodeUtil.java     |    0
 .../phoenix/util/PhoenixContextExecutor.java       |    0
 .../apache/phoenix/util/PhoenixKeyValueUtil.java   |    0
 .../org/apache/phoenix/util/PhoenixRuntime.java    | 1713 ++++++
 .../org/apache/phoenix/util/PhoenixStopWatch.java  |    0
 .../org/apache/phoenix/util/PrefixByteCodec.java   |    0
 .../org/apache/phoenix/util/PrefixByteDecoder.java |    0
 .../org/apache/phoenix/util/PrefixByteEncoder.java |    0
 .../org/apache/phoenix/util/PropertiesUtil.java    |    0
 .../java/org/apache/phoenix/util/QueryBuilder.java |    0
 .../java/org/apache/phoenix/util/QueryUtil.java    |    0
 .../org/apache/phoenix/util/ReadOnlyProps.java     |    0
 .../java/org/apache/phoenix/util/ResultUtil.java   |    0
 .../java/org/apache/phoenix/util/SQLCloseable.java |    0
 .../org/apache/phoenix/util/SQLCloseables.java     |    0
 .../java/org/apache/phoenix/util/ScanUtil.java     | 1618 +++++
 .../java/org/apache/phoenix/util/SchemaUtil.java   |    0
 .../java/org/apache/phoenix/util/SequenceUtil.java |    0
 .../java/org/apache/phoenix/util/SizedUtil.java    |    0
 .../java/org/apache/phoenix/util/StringUtil.java   |    0
 .../apache/phoenix/util/TableViewFinderResult.java |   49 +
 .../phoenix/util/TaskMetaDataServiceCallBack.java  |    0
 .../java/org/apache/phoenix/util/TimeKeeper.java   |    0
 .../org/apache/phoenix/util/TransactionUtil.java   |  171 +
 .../phoenix/util/TrustedByteArrayOutputStream.java |    0
 .../java/org/apache/phoenix/util/TupleUtil.java    |    0
 .../java/org/apache/phoenix/util/UpgradeUtil.java  | 2923 +++++++++
 .../org/apache/phoenix/util/UpsertExecutor.java    |    0
 .../apache/phoenix/util/VarBinaryFormatter.java    |    0
 .../phoenix/util/ViewIndexIdRetrieveUtil.java      |    0
 .../java/org/apache/phoenix/util/ViewUtil.java     |  889 +++
 .../org/apache/phoenix/util/WALAnnotationUtil.java |   58 +
 .../apache/phoenix/util/csv/CsvUpsertExecutor.java |    0
 .../phoenix/util/csv/StringToArrayConverter.java   |    0
 .../apache/phoenix/util/i18n/LinguisticSort.java   |    0
 .../org/apache/phoenix/util/i18n/LocaleUtils.java  |    0
 .../org/apache/phoenix/util/i18n/OracleUpper.java  |    0
 .../apache/phoenix/util/i18n/OracleUpperTable.java |    0
 .../org/apache/phoenix/util/i18n/package-info.java |    0
 .../phoenix/util/json/JsonUpsertExecutor.java      |    0
 .../phoenix/util/json/ObjectToArrayConverter.java  |    0
 .../phoenix/util/regex/RegexUpsertExecutor.java    |    0
 .../src/main/java/overview.html                    |    0
 .../main/protobuf/ChildLinkMetaDataService.proto   |    0
 .../src/main/protobuf/DynamicColumnMetaData.proto  |    0
 .../src/main/protobuf/MetaDataService.proto        |    0
 .../src/main/protobuf/PFunction.proto              |    0
 .../src/main/protobuf/PGuidePosts.proto            |    0
 .../src/main/protobuf/PSchema.proto                |    0
 .../src/main/protobuf/PTable.proto                 |    0
 .../src/main/protobuf/ServerCacheFactory.proto     |    0
 .../src/main/protobuf/ServerCachingService.proto   |    0
 .../src/main/protobuf/TaskMetaDataService.proto    |    0
 .../resources/META-INF/services/java.sql.Driver    |    0
 ...oenix.monitoring.MetricPublisherSupplierFactory |    0
 ...org.apache.phoenix.query.GuidePostsCacheFactory |    0
 .../resources/phoenix-canary-file-sink.properties  |    0
 phoenix-core-server/pom.xml                        |  195 +
 .../hadoop/hbase/ipc/PhoenixRpcScheduler.java      |    0
 .../hbase/ipc/PhoenixRpcSchedulerFactory.java      |   86 +
 .../java/org/apache/hadoop/hbase/ipc/RpcUtil.java  |    0
 ...erRegionServerMetadataRpcControllerFactory.java |    0
 .../ipc/controller/ServerRpcControllerFactory.java |    0
 .../DataTableLocalIndexRegionScanner.java          |    0
 .../regionserver/IndexHalfStoreFileReader.java     |  174 +
 .../IndexHalfStoreFileReaderGenerator.java         |  310 +
 .../regionserver/IndexKeyValueSkipListSet.java     |    0
 .../hbase/regionserver/KeyValueSkipListSet.java    |    0
 .../hbase/regionserver/LocalIndexSplitter.java     |    0
 .../regionserver/LocalIndexStoreFileScanner.java   |    0
 .../hbase/regionserver/ScannerContextUtil.java     |    0
 .../wal/BinaryCompatibleBaseDecoder.java           |    0
 .../hbase/regionserver/wal/IndexedHLogReader.java  |    0
 .../regionserver/wal/IndexedWALEditCodec.java      |    0
 .../java/org/apache/phoenix/cache/GlobalCache.java |    0
 .../apache/phoenix/cache/aggcache/SpillFile.java   |    0
 .../phoenix/cache/aggcache/SpillManager.java       |    0
 .../apache/phoenix/cache/aggcache/SpillMap.java    |    0
 .../cache/aggcache/SpillableGroupByCache.java      |    0
 .../phoenix/coprocessor/AddColumnMutator.java      |  457 ++
 .../coprocessor/BaseMetaDataEndpointObserver.java  |    0
 .../phoenix/coprocessor/BaseRegionScanner.java     |    0
 .../coprocessor/BaseScannerRegionObserver.java     |  500 ++
 .../coprocessor/ChildLinkMetaDataEndpoint.java     |    0
 .../apache/phoenix/coprocessor/ColumnMutator.java  |   67 +
 .../phoenix/coprocessor/CompactionScanner.java     |    0
 .../DelegateRegionCoprocessorEnvironment.java      |    0
 .../coprocessor/DelegateRegionObserver.java        |    0
 .../phoenix/coprocessor/DelegateRegionScanner.java |    0
 .../phoenix/coprocessor/DropColumnMutator.java     |  306 +
 .../coprocessor/GlobalIndexRegionScanner.java      | 1518 +++++
 .../apache/phoenix/coprocessor/GroupByCache.java   |    0
 .../GroupedAggregateRegionObserver.java            |  613 ++
 .../phoenix/coprocessor/HashJoinRegionScanner.java |  414 ++
 .../coprocessor/IndexRebuildRegionScanner.java     |  388 ++
 .../coprocessor/IndexRepairRegionScanner.java      |  459 ++
 .../coprocessor/IndexToolVerificationResult.java   |    0
 .../phoenix/coprocessor/IndexerRegionScanner.java  |  457 ++
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  | 4757 +++++++++++++++
 .../coprocessor/MetaDataEndpointObserver.java      |    0
 .../coprocessor/MetaDataRegionObserver.java        |  706 +++
 .../phoenix/coprocessor/OmidGCProcessor.java       |    0
 .../coprocessor/OmidTransactionalProcessor.java    |    0
 .../phoenix/coprocessor/PagingRegionScanner.java   |    0
 .../coprocessor/PhoenixAccessController.java       |    0
 .../phoenix/coprocessor/PhoenixCoprocessor.java    |    0
 .../PhoenixMetaDataCoprocessorHost.java            |    0
 .../coprocessor/PhoenixTTLRegionObserver.java      |  341 ++
 .../coprocessor/ReplicationSinkEndpoint.java       |    0
 .../phoenix/coprocessor/ScanRegionObserver.java    |  188 +
 .../coprocessor/SequenceRegionObserver.java        |  474 ++
 .../coprocessor/ServerCachingEndpointImpl.java     |  123 +
 .../apache/phoenix/coprocessor/SuffixFilter.java   |    0
 .../coprocessor/SystemCatalogRegionObserver.java   |    0
 .../phoenix/coprocessor/TTLRegionScanner.java      |  228 +
 .../phoenix/coprocessor/TaskMetaDataEndpoint.java  |    0
 .../phoenix/coprocessor/TaskRegionObserver.java    |  289 +
 .../coprocessor/TephraTransactionalProcessor.java  |    0
 .../UncoveredGlobalIndexRegionScanner.java         |  234 +
 .../coprocessor/UncoveredIndexRegionScanner.java   |  407 ++
 .../UncoveredLocalIndexRegionScanner.java          |  132 +
 .../UngroupedAggregateRegionObserver.java          |  948 +++
 .../UngroupedAggregateRegionScanner.java           |  721 +++
 .../apache/phoenix/coprocessor/tasks/BaseTask.java |    0
 .../coprocessor/tasks/DropChildViewsTask.java      |  107 +
 .../coprocessor/tasks/IndexRebuildTask.java        |  191 +
 .../coprocessor/tasks/TransformMonitorTask.java    |  193 +
 .../phoenix/hbase/index/CapturingAbortable.java    |    0
 .../phoenix/hbase/index/IndexRegionObserver.java   | 1637 ++++++
 .../hbase/index/IndexRegionSplitPolicy.java        |    0
 .../org/apache/phoenix/hbase/index/Indexer.java    |  755 +++
 .../apache/phoenix/hbase/index/LockManager.java    |    0
 .../hbase/index/balancer/IndexLoadBalancer.java    |    0
 .../hbase/index/builder/BaseIndexBuilder.java      |  138 +
 .../hbase/index/builder/IndexBuildManager.java     |  183 +
 .../phoenix/hbase/index/builder/IndexBuilder.java  |  153 +
 .../hbase/index/covered/CoveredColumns.java        |    0
 .../hbase/index/covered/LocalTableState.java       |    0
 .../hbase/index/covered/NonTxIndexBuilder.java     |    0
 .../hbase/index/covered/data/CachedLocalTable.java |    0
 .../hbase/index/covered/data/IndexMemStore.java    |    0
 .../hbase/index/parallel/ThreadPoolManager.java    |    0
 .../phoenix/hbase/index/wal/IndexedKeyValue.java   |    0
 .../phoenix/hbase/index/wal/KeyValueCodec.java     |    0
 .../AbstractParallelWriterIndexCommitter.java      |  241 +
 .../index/write/DelegateIndexFailurePolicy.java    |    0
 .../phoenix/hbase/index/write/IndexCommitter.java  |    0
 .../hbase/index/write/IndexFailurePolicy.java      |    0
 .../phoenix/hbase/index/write/IndexWriter.java     |    0
 .../hbase/index/write/IndexWriterUtils.java        |    0
 .../index/write/KillServerOnFailurePolicy.java     |    0
 .../write/LazyParallelWriterIndexCommitter.java    |    0
 .../index/write/LeaveIndexActiveFailurePolicy.java |    0
 .../index/write/ParallelWriterIndexCommitter.java  |    0
 .../hbase/index/write/RecoveryIndexWriter.java     |    0
 .../TrackingParallelWriterIndexCommitter.java      |  302 +
 .../write/recovery/PerRegionIndexWriteCache.java   |    0
 .../write/recovery/StoreFailuresInCachePolicy.java |    0
 .../apache/phoenix/index/GlobalIndexChecker.java   |  666 +++
 .../apache/phoenix/index/PhoenixIndexBuilder.java  |  278 +
 .../phoenix/index/PhoenixIndexFailurePolicy.java   |  392 ++
 .../phoenix/index/PhoenixIndexMetaDataBuilder.java |  107 +
 .../phoenix/index/PhoenixTransactionalIndexer.java |  241 +
 .../iterate/MapReduceParallelScanGrouper.java      |    0
 .../iterate/NonAggregateRegionScannerFactory.java  |  410 ++
 .../phoenix/iterate/RegionScannerFactory.java      |  467 ++
 .../iterate/RegionScannerResultIterator.java       |   88 +
 .../apache/phoenix/iterate/SnapshotScanner.java    |  279 +
 .../iterate/TableSnapshotResultIterator.java       |  220 +
 .../phoenix/mapreduce/AbstractBulkLoadTool.java    |    0
 .../phoenix/mapreduce/CsvBulkImportUtil.java       |    0
 .../apache/phoenix/mapreduce/CsvBulkLoadTool.java  |    0
 .../phoenix/mapreduce/CsvToKeyValueMapper.java     |    0
 .../mapreduce/FormatToBytesWritableMapper.java     |    0
 .../phoenix/mapreduce/FormatToKeyValueReducer.java |    0
 .../ImportPreUpsertKeyValueProcessor.java          |    0
 .../apache/phoenix/mapreduce/JsonBulkLoadTool.java |    0
 .../phoenix/mapreduce/JsonToKeyValueMapper.java    |    0
 .../phoenix/mapreduce/MultiHfileOutputFormat.java  |    0
 .../apache/phoenix/mapreduce/OrphanViewTool.java   |    0
 .../phoenix/mapreduce/PhoenixInputFormat.java      |  293 +
 .../phoenix/mapreduce/PhoenixInputSplit.java       |    0
 .../phoenix/mapreduce/PhoenixJobCounters.java      |    0
 .../mapreduce/PhoenixMultiViewInputFormat.java     |    0
 .../mapreduce/PhoenixMultiViewInputSplit.java      |    0
 .../phoenix/mapreduce/PhoenixMultiViewReader.java  |    0
 .../phoenix/mapreduce/PhoenixOutputCommitter.java  |    0
 .../phoenix/mapreduce/PhoenixOutputFormat.java     |    0
 .../phoenix/mapreduce/PhoenixRecordReader.java     |  197 +
 .../phoenix/mapreduce/PhoenixRecordWritable.java   |    0
 .../phoenix/mapreduce/PhoenixRecordWriter.java     |    0
 .../PhoenixServerBuildIndexInputFormat.java        |  218 +
 .../mapreduce/PhoenixTTLDeleteJobMapper.java       |  241 +
 .../apache/phoenix/mapreduce/PhoenixTTLTool.java   |    0
 .../phoenix/mapreduce/PhoenixTextInputFormat.java  |    0
 .../phoenix/mapreduce/RegexBulkLoadTool.java       |    0
 .../phoenix/mapreduce/RegexToKeyValueMapper.java   |    0
 .../mapreduce/bulkload/TableRowkeyPair.java        |    0
 .../phoenix/mapreduce/bulkload/TargetTableRef.java |    0
 .../bulkload/TargetTableRefFunctions.java          |    0
 .../mapreduce/index/DirectHTableWriter.java        |    0
 .../mapreduce/index/IndexScrutinyMapper.java       |  530 ++
 .../index/IndexScrutinyMapperForTest.java          |    0
 .../mapreduce/index/IndexScrutinyTableOutput.java  |    0
 .../phoenix/mapreduce/index/IndexScrutinyTool.java |  542 ++
 .../apache/phoenix/mapreduce/index/IndexTool.java  | 1247 ++++
 .../phoenix/mapreduce/index/IndexToolUtil.java     |    0
 .../phoenix/mapreduce/index/IndexUpgradeTool.java  |  930 +++
 .../index/IndexVerificationOutputRepository.java   |  406 ++
 .../index/IndexVerificationOutputRow.java          |    0
 .../index/IndexVerificationResultRepository.java   |  415 ++
 .../mapreduce/index/PhoenixIndexDBWritable.java    |    0
 .../index/PhoenixIndexImportDirectMapper.java      |    0
 .../index/PhoenixIndexImportDirectReducer.java     |    0
 .../index/PhoenixIndexPartialBuildMapper.java      |  193 +
 .../index/PhoenixIndexToolJobCounters.java         |    0
 .../index/PhoenixScrutinyJobCounters.java          |    0
 .../index/PhoenixServerBuildIndexDBWritable.java   |    0
 .../index/PhoenixServerBuildIndexMapper.java       |    0
 .../mapreduce/index/SourceTargetColumnNames.java   |    0
 .../index/automation/PhoenixAsyncIndex.java        |    0
 .../index/automation/PhoenixMRJobCallable.java     |    0
 .../index/automation/PhoenixMRJobSubmitter.java    |    0
 .../index/automation/YarnApplication.java          |    0
 .../transform/PhoenixTransformReducer.java         |    0
 .../transform/PhoenixTransformRepairMapper.java    |  205 +
 .../PhoenixTransformWithViewsInputFormat.java      |  129 +
 .../phoenix/mapreduce/transform/TransformTool.java | 1007 ++++
 .../util/ColumnInfoToStringEncoderDecoder.java     |    0
 .../util/DefaultMultiViewJobStatusTracker.java     |    0
 .../util/DefaultMultiViewSplitStrategy.java        |    0
 .../util/DefaultPhoenixMultiViewListProvider.java  |    0
 .../phoenix/mapreduce/util/IndexColumnNames.java   |    0
 .../mapreduce/util/MultiViewJobStatusTracker.java  |    0
 .../mapreduce/util/MultiViewSplitStrategy.java     |    0
 .../mapreduce/util/PhoenixConfigurationUtil.java   |  897 +++
 .../mapreduce/util/PhoenixMapReduceUtil.java       |    0
 .../mapreduce/util/PhoenixMultiInputUtil.java      |    0
 .../util/PhoenixMultiViewListProvider.java         |    0
 .../phoenix/mapreduce/util/ViewInfoTracker.java    |    0
 .../phoenix/mapreduce/util/ViewInfoWritable.java   |    0
 .../replication/SystemCatalogWALEntryFilter.java   |    0
 .../apache/phoenix/schema/MetaDataSplitPolicy.java |    0
 .../schema/SplitOnLeadingVarCharColumnsPolicy.java |    0
 .../phoenix/schema/SystemFunctionSplitPolicy.java  |    0
 .../phoenix/schema/SystemStatsSplitPolicy.java     |    0
 .../phoenix/schema/SystemTaskSplitPolicy.java      |    0
 .../schema/stats/DefaultStatisticsCollector.java   |  394 ++
 .../schema/stats/NoOpStatisticsCollector.java      |    0
 .../phoenix/schema/stats/StatisticsCollector.java  |    0
 .../schema/stats/StatisticsCollectorFactory.java   |    0
 .../phoenix/schema/stats/StatisticsScanner.java    |    0
 .../phoenix/schema/stats/StatisticsWriter.java     |  312 +
 .../phoenix/schema/stats/UpdateStatisticsTool.java |    0
 .../org/apache/phoenix/schema/task/ServerTask.java |  116 +
 .../apache/phoenix/schema/transform/Transform.java |  456 ++
 .../util/MergeViewIndexIdSequencesTool.java        |    0
 .../org/apache/phoenix/util/PhoenixMRJobUtil.java  |    0
 .../java/org/apache/phoenix/util/RepairUtil.java   |    0
 .../org/apache/phoenix/util/ServerIndexUtil.java   |  108 +
 .../java/org/apache/phoenix/util/ServerUtil.java   |  283 +
 .../org/apache/phoenix/util/ServerViewUtil.java    |  143 +
 .../phoenix/util/ZKBasedMasterElectionUtil.java    |    0
 phoenix-core/pom.xml                               |  999 ++--
 ...WALReplayWithIndexWritesAndCompressedWALIT.java |    1 -
 .../phoenix/end2end/BackwardCompatibilityIT.java   |    4 -
 .../end2end/BackwardCompatibilityTestUtil.java     |    6 +-
 .../apache/phoenix/end2end/BasePermissionsIT.java  |    2 +-
 .../end2end/ConcurrentMutationsExtendedIT.java     |    4 +-
 .../ConcurrentUpsertsWithoutIndexedColsIT.java     |    4 +-
 .../org/apache/phoenix/end2end/CreateSchemaIT.java |   10 +-
 .../org/apache/phoenix/end2end/CreateTableIT.java  |    6 +-
 .../org/apache/phoenix/end2end/DropSchemaIT.java   |    8 +-
 .../end2end/ExplainPlanWithStatsEnabledIT.java     |    4 +-
 .../apache/phoenix/end2end/IndexExtendedIT.java    |    3 +-
 .../apache/phoenix/end2end/IndexRebuildTaskIT.java |    4 +-
 .../end2end/IndexRepairRegionScannerIT.java        |    4 +-
 .../end2end/IndexScrutinyWithMaxLookbackIT.java    |    4 +-
 .../end2end/IndexToolForNonTxGlobalIndexIT.java    |    3 +-
 .../end2end/IndexVerificationOldDesignIT.java      |    3 +-
 .../phoenix/end2end/LogicalTableNameBaseIT.java    |    4 +-
 .../phoenix/end2end/MaxLookbackExtendedIT.java     |    4 +-
 .../org/apache/phoenix/end2end/MaxLookbackIT.java  |    4 +-
 .../apache/phoenix/end2end/MetaDataEndPointIT.java |    2 +-
 .../MigrateSystemTablesToSystemNamespaceIT.java    |    2 +-
 .../phoenix/end2end/ParallelIteratorsIT.java       |    4 +-
 .../phoenix/end2end/ParallelStatsDisabledIT.java   |    5 +-
 .../apache/phoenix/end2end/PhoenixTTLToolIT.java   |    4 +-
 .../end2end/PreMatureTimelyAbortScanIt.java        |    4 +-
 .../phoenix/end2end/SchemaRegistryFailureIT.java   |    2 +-
 .../org/apache/phoenix/end2end/StoreNullsIT.java   |    4 +-
 .../SystemTablesCreationOnConnectionIT.java        |    3 +-
 .../phoenix/end2end/SystemTablesUpgradeIT.java     |    2 +-
 .../org/apache/phoenix/end2end/TableTTLIT.java     |    4 +-
 .../java/org/apache/phoenix/end2end/UpgradeIT.java |    6 +-
 .../org/apache/phoenix/end2end/UpsertSelectIT.java |    4 +-
 .../org/apache/phoenix/end2end/ViewMetadataIT.java |    2 +-
 .../java/org/apache/phoenix/end2end/ViewTTLIT.java |   26 +-
 .../org/apache/phoenix/end2end/ViewUtilIT.java     |    6 +-
 .../index/IndexRebuildIncrementDisableCountIT.java |    2 +-
 .../phoenix/end2end/index/IndexTestUtil.java       |    5 +-
 .../end2end/index/IndexTwoPhaseCreateIT.java       |    5 +-
 .../index/IndexVerificationOutputRepositoryIT.java |    2 +-
 .../index/IndexVerificationResultRepositoryIT.java |    4 +-
 .../index/InvalidIndexStateClientSideIT.java       |    6 +-
 .../apache/phoenix/end2end/index/LocalIndexIT.java |    5 +-
 .../end2end/index/PartialIndexRebuilderIT.java     |    7 +-
 .../phoenix/end2end/join/HashJoinCacheIT.java      |    2 +-
 .../phoenix/end2end/transform/TransformIT.java     |    4 +-
 .../transform/TransformMonitorExtendedIT.java      |    4 +-
 .../end2end/transform/TransformMonitorIT.java      |    3 +-
 .../phoenix/end2end/transform/TransformToolIT.java |    6 +-
 .../phoenix/trace/PhoenixTracingEndToEndIT.java    |    4 +-
 .../hbase/ipc/PhoenixRpcSchedulerFactory.java      |  100 -
 .../hbase/ipc/controller/IndexRpcController.java   |   57 -
 .../ipc/controller/MetadataRpcController.java      |   74 -
 .../ServerToServerRpcControllerImpl.java           |   75 -
 .../regionserver/IndexHalfStoreFileReader.java     |  174 -
 .../IndexHalfStoreFileReaderGenerator.java         |  305 -
 .../apache/phoenix/cache/ServerCacheClient.java    |  554 --
 .../java/org/apache/phoenix/cache/TenantCache.java |   42 -
 .../org/apache/phoenix/cache/TenantCacheImpl.java  |  287 -
 .../apache/phoenix/compile/AggregationManager.java |  115 -
 .../org/apache/phoenix/compile/DeleteCompiler.java | 1030 ----
 .../org/apache/phoenix/compile/FromCompiler.java   | 1234 ----
 .../apache/phoenix/compile/GroupByCompiler.java    |  476 --
 .../org/apache/phoenix/compile/JoinCompiler.java   | 1590 -----
 .../apache/phoenix/compile/PostDDLCompiler.java    |  372 --
 .../phoenix/compile/PostLocalIndexDDLCompiler.java |  133 -
 .../apache/phoenix/compile/ProjectionCompiler.java |  796 ---
 .../org/apache/phoenix/compile/QueryCompiler.java  |  815 ---
 .../org/apache/phoenix/compile/ScanRanges.java     |  785 ---
 .../phoenix/compile/ServerBuildIndexCompiler.java  |  161 -
 .../ServerBuildTransformingTableCompiler.java      |   99 -
 .../org/apache/phoenix/compile/UpsertCompiler.java | 1455 -----
 .../org/apache/phoenix/compile/WhereCompiler.java  |  958 ---
 .../phoenix/coprocessor/AddColumnMutator.java      |  467 --
 .../coprocessor/BaseScannerRegionObserver.java     |  634 --
 .../apache/phoenix/coprocessor/ColumnMutator.java  |   66 -
 .../phoenix/coprocessor/DropColumnMutator.java     |  305 -
 .../coprocessor/GlobalIndexRegionScanner.java      | 1517 -----
 .../GroupedAggregateRegionObserver.java            |  642 --
 .../HashJoinCacheNotFoundException.java            |   45 -
 .../phoenix/coprocessor/HashJoinRegionScanner.java |  413 --
 .../coprocessor/IndexRebuildRegionScanner.java     |  389 --
 .../coprocessor/IndexRepairRegionScanner.java      |  460 --
 .../phoenix/coprocessor/IndexerRegionScanner.java  |  455 --
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  | 4762 ---------------
 .../phoenix/coprocessor/MetaDataProtocol.java      |  521 --
 .../coprocessor/MetaDataRegionObserver.java        |  672 ---
 .../coprocessor/PhoenixTTLRegionObserver.java      |  340 --
 .../phoenix/coprocessor/ScanRegionObserver.java    |  220 -
 .../coprocessor/SequenceRegionObserver.java        |  471 --
 .../coprocessor/ServerCachingEndpointImpl.java     |  123 -
 .../phoenix/coprocessor/ServerCachingProtocol.java |   61 -
 .../phoenix/coprocessor/TTLRegionScanner.java      |  228 -
 .../org/apache/phoenix/coprocessor/TableInfo.java  |   79 -
 .../phoenix/coprocessor/TaskRegionObserver.java    |  288 -
 .../UncoveredGlobalIndexRegionScanner.java         |  233 -
 .../coprocessor/UncoveredIndexRegionScanner.java   |  402 --
 .../UncoveredLocalIndexRegionScanner.java          |  132 -
 .../UngroupedAggregateRegionObserver.java          |  979 ---
 .../UngroupedAggregateRegionScanner.java           |  719 ---
 .../phoenix/coprocessor/WhereConstantParser.java   |  110 -
 .../MetricsPhoenixCoprocessorSourceFactory.java    |   45 -
 .../metrics/MetricsPhoenixTTLSource.java           |   61 -
 .../metrics/MetricsPhoenixTTLSourceImpl.java       |   58 -
 .../coprocessor/tasks/DropChildViewsTask.java      |  107 -
 .../coprocessor/tasks/IndexRebuildTask.java        |  194 -
 .../coprocessor/tasks/TransformMonitorTask.java    |  216 -
 .../apache/phoenix/exception/SQLExceptionCode.java |  694 ---
 .../exception/UpgradeInProgressException.java      |   31 -
 .../org/apache/phoenix/execute/AggregatePlan.java  |  371 --
 .../org/apache/phoenix/execute/BaseQueryPlan.java  |  582 --
 .../phoenix/execute/ClientAggregatePlan.java       |  384 --
 .../org/apache/phoenix/execute/HashJoinPlan.java   |  700 ---
 .../org/apache/phoenix/execute/MutationState.java  | 2275 -------
 .../execute/PhoenixTxIndexMutationGenerator.java   |  523 --
 .../java/org/apache/phoenix/execute/ScanPlan.java  |  378 --
 .../org/apache/phoenix/execute/TupleProjector.java |  495 --
 .../phoenix/filter/BooleanExpressionFilter.java    |  135 -
 .../MultiEncodedCQKeyValueComparisonFilter.java    |  415 --
 .../filter/MultiKeyValueComparisonFilter.java      |  291 -
 .../filter/SystemCatalogViewIndexIdFilter.java     |  161 -
 .../phoenix/hbase/index/IndexRegionObserver.java   | 1676 ------
 .../org/apache/phoenix/hbase/index/Indexer.java    |  780 ---
 .../hbase/index/builder/BaseIndexBuilder.java      |  138 -
 .../hbase/index/builder/BaseIndexCodec.java        |   36 -
 .../hbase/index/builder/IndexBuildManager.java     |  182 -
 .../phoenix/hbase/index/builder/IndexBuilder.java  |  153 -
 .../phoenix/hbase/index/covered/IndexCodec.java    |   88 -
 .../phoenix/hbase/index/covered/IndexMetaData.java |   55 -
 .../phoenix/hbase/index/covered/TableState.java    |   76 -
 .../hbase/index/metrics/MetricsIndexerSource.java  |  213 -
 .../hbase/index/util/IndexManagementUtil.java      |  293 -
 .../AbstractParallelWriterIndexCommitter.java      |  241 -
 .../TrackingParallelWriterIndexCommitter.java      |  302 -
 .../apache/phoenix/index/GlobalIndexChecker.java   |  658 ---
 .../org/apache/phoenix/index/IndexMaintainer.java  | 2300 --------
 .../phoenix/index/IndexMetaDataCacheClient.java    |  158 -
 .../phoenix/index/IndexMetaDataCacheFactory.java   |   82 -
 .../apache/phoenix/index/PhoenixIndexBuilder.java  |  394 --
 .../apache/phoenix/index/PhoenixIndexCodec.java    |  139 -
 .../phoenix/index/PhoenixIndexFailurePolicy.java   |  596 --
 .../apache/phoenix/index/PhoenixIndexMetaData.java |  101 -
 .../phoenix/index/PhoenixIndexMetaDataBuilder.java |  107 -
 .../phoenix/index/PhoenixTransactionalIndexer.java |  246 -
 .../phoenix/iterate/BaseResultIterators.java       | 1771 ------
 .../phoenix/iterate/ChunkedResultIterator.java     |  249 -
 .../phoenix/iterate/ConcatResultIterator.java      |  150 -
 .../org/apache/phoenix/iterate/ExplainTable.java   |  550 --
 .../iterate/NonAggregateRegionScannerFactory.java  |  410 --
 .../phoenix/iterate/OrderedResultIterator.java     |  426 --
 .../phoenix/iterate/RegionScannerFactory.java      |  464 --
 .../iterate/RegionScannerResultIterator.java       |   88 -
 .../phoenix/iterate/RoundRobinResultIterator.java  |  354 --
 .../RowKeyOrderedAggregateResultIterator.java      |  200 -
 .../phoenix/iterate/ScanningResultIterator.java    |  236 -
 .../apache/phoenix/iterate/SerialIterators.java    |  231 -
 .../apache/phoenix/iterate/SnapshotScanner.java    |  279 -
 .../phoenix/iterate/SpoolingResultIterator.java    |  381 --
 .../phoenix/iterate/TableResultIterator.java       |  344 --
 .../iterate/TableSnapshotResultIterator.java       |  220 -
 .../phoenix/iterate/UnionResultIterators.java      |  166 -
 .../phoenix/jdbc/PhoenixDatabaseMetaData.java      | 1951 ------
 .../apache/phoenix/jdbc/PhoenixEmbeddedDriver.java |  198 -
 .../org/apache/phoenix/jdbc/PhoenixResultSet.java  | 1649 ------
 .../org/apache/phoenix/jdbc/PhoenixStatement.java  | 2598 --------
 .../org/apache/phoenix/join/HashCacheClient.java   |  228 -
 .../org/apache/phoenix/join/HashCacheFactory.java  |  174 -
 .../phoenix/mapreduce/PhoenixInputFormat.java      |  293 -
 .../phoenix/mapreduce/PhoenixRecordReader.java     |  197 -
 .../PhoenixServerBuildIndexInputFormat.java        |  218 -
 .../mapreduce/PhoenixTTLDeleteJobMapper.java       |  241 -
 .../mapreduce/index/IndexScrutinyMapper.java       |  530 --
 .../phoenix/mapreduce/index/IndexScrutinyTool.java |  542 --
 .../apache/phoenix/mapreduce/index/IndexTool.java  | 1247 ----
 .../phoenix/mapreduce/index/IndexUpgradeTool.java  |  930 ---
 .../index/IndexVerificationOutputRepository.java   |  406 --
 .../index/IndexVerificationResultRepository.java   |  415 --
 .../index/PhoenixIndexPartialBuildMapper.java      |  194 -
 .../transform/PhoenixTransformRepairMapper.java    |  205 -
 .../PhoenixTransformWithViewsInputFormat.java      |  129 -
 .../phoenix/mapreduce/transform/TransformTool.java | 1006 ----
 .../phoenix/mapreduce/util/ConnectionUtil.java     |  141 -
 .../mapreduce/util/PhoenixConfigurationUtil.java   | 1071 ----
 .../phoenix/monitoring/LatencyHistogram.java       |   46 -
 .../apache/phoenix/monitoring/SizeHistogram.java   |   47 -
 .../ConnectionQueryServicesHistogram.java          |   43 -
 .../parse/PhoenixRowTimestampParseNode.java        |   98 -
 .../phoenix/query/ConnectionQueryServices.java     |  230 -
 .../phoenix/query/ConnectionQueryServicesImpl.java | 6206 --------------------
 .../query/ConnectionlessQueryServicesImpl.java     |  817 ---
 .../query/DelegateConnectionQueryServices.java     |  424 --
 .../org/apache/phoenix/query/QueryConstants.java   |  590 --
 .../org/apache/phoenix/query/QueryServices.java    |  469 --
 .../apache/phoenix/query/QueryServicesOptions.java |  974 ---
 .../org/apache/phoenix/schema/MetaDataClient.java  | 6019 -------------------
 .../main/java/org/apache/phoenix/schema/PRow.java  |   89 -
 .../java/org/apache/phoenix/schema/PTableImpl.java | 2469 --------
 .../java/org/apache/phoenix/schema/Sequence.java   |  639 --
 .../schema/stats/DefaultStatisticsCollector.java   |  394 --
 .../phoenix/schema/stats/StatisticsUtil.java       |  252 -
 .../phoenix/schema/stats/StatisticsWriter.java     |  312 -
 .../java/org/apache/phoenix/schema/task/Task.java  |  529 --
 .../apache/phoenix/schema/transform/Transform.java |  795 ---
 .../transaction/OmidTransactionProvider.java       |  136 -
 .../phoenix/transaction/TransactionFactory.java    |   86 -
 .../apache/phoenix/util/EncodedColumnsUtil.java    |  207 -
 .../java/org/apache/phoenix/util/IndexUtil.java    | 1019 ----
 .../java/org/apache/phoenix/util/MetaDataUtil.java | 1243 ----
 .../org/apache/phoenix/util/PhoenixRuntime.java    | 1713 ------
 .../java/org/apache/phoenix/util/ScanUtil.java     | 1589 -----
 .../java/org/apache/phoenix/util/ServerUtil.java   |  435 --
 .../apache/phoenix/util/TableViewFinderResult.java |   49 -
 .../org/apache/phoenix/util/TransactionUtil.java   |  171 -
 .../java/org/apache/phoenix/util/UpgradeUtil.java  | 2925 ---------
 .../java/org/apache/phoenix/util/ViewUtil.java     |  994 ----
 .../org/apache/phoenix/util/WALAnnotationUtil.java |   78 -
 .../org/apache/phoenix/cache/TenantCacheTest.java  |    4 +-
 .../apache/phoenix/compile/QueryCompilerTest.java  |   10 +-
 .../apache/phoenix/compile/QueryOptimizerTest.java |    4 +-
 .../execute/LiteralResultIteratorPlanTest.java     |    2 +-
 .../index/covered/CoveredColumnIndexCodec.java     |    2 +-
 .../CoveredColumnIndexSpecifierBuilder.java        |   11 +-
 .../index/covered/CoveredIndexCodecForTesting.java |    2 +-
 .../hbase/index/covered/LocalTableStateTest.java   |    2 +-
 .../hbase/index/covered/NonTxIndexBuilderTest.java |    2 +-
 .../org/apache/phoenix/index/ShouldVerifyTest.java |    8 +-
 .../phoenix/iterate/OrderedResultIteratorTest.java |    4 +-
 .../util/PhoenixConfigurationUtilTest.java         |   20 +-
 .../java/org/apache/phoenix/query/BaseTest.java    |    5 -
 .../apache/phoenix/schema/MetaDataClientTest.java  |    2 +-
 .../org/apache/phoenix/util/ClientUtilTest.java    |   93 +
 .../org/apache/phoenix/util/MetaDataUtilTest.java  |    5 +-
 .../apache/phoenix/util/PropertiesUtilTest.java    |    7 +-
 .../java/org/apache/phoenix/util/ScanUtilTest.java |    4 +-
 .../org/apache/phoenix/util/ServerUtilTest.java    |   93 -
 phoenix-pherf/pom.xml                              |   15 +-
 pom.xml                                            |   12 +
 1565 files changed, 99388 insertions(+), 98560 deletions(-)

diff --git a/phoenix-client-parent/phoenix-client-embedded/pom.xml b/phoenix-client-parent/phoenix-client-embedded/pom.xml
index 50cecce512..e9b79750dc 100644
--- a/phoenix-client-parent/phoenix-client-embedded/pom.xml
+++ b/phoenix-client-parent/phoenix-client-embedded/pom.xml
@@ -68,7 +68,7 @@
     <!-- Depend on all other internal projects -->
     <dependency>
       <groupId>org.apache.phoenix</groupId>
-      <artifactId>phoenix-core</artifactId>
+      <artifactId>phoenix-core-server</artifactId>
       <exclusions>
         <exclusion>
           <groupId>org.slf4j</groupId>
diff --git a/phoenix-core-client/pom.xml b/phoenix-core-client/pom.xml
new file mode 100644
index 0000000000..1d886b3bae
--- /dev/null
+++ b/phoenix-core-client/pom.xml
@@ -0,0 +1,434 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <!-- trigger end2end tests -->
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.phoenix</groupId>
+    <artifactId>phoenix</artifactId>
+    <version>5.2.0-SNAPSHOT</version>
+  </parent>
+  <artifactId>phoenix-core-client</artifactId>
+  <name>Phoenix Core Client</name>
+  <description>Core Phoenix Client codebase</description>
+
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-site-plugin</artifactId>
+      </plugin>
+      <!-- If this is configured in the top-level pluginManagement,
+      it will run on the compat module even if the plugin is not enabled.
+      Sounds like another maven bug -->
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-enforcer-plugin</artifactId>
+        <configuration>
+          <rules>
+            <evaluateBeanshell>
+              <condition>
+                import java.util.regex.Pattern;
+                import java.lang.Integer;
+
+                versionPattern = Pattern.compile("(\\d+)\\.(\\d+)\\.(\\d+)[^.]*$");
+                versionMatcher = versionPattern.matcher("${hbase.version}");
+                versionMatcher.find();
+
+                hbaseMajor = Integer.parseInt(versionMatcher.group(1));
+                hbaseMinor = Integer.parseInt(versionMatcher.group(2));
+                hbasePatch = Integer.parseInt(versionMatcher.group(3));
+
+                hbaseMajor == 2 &amp;&amp; (
+                  ("${hbase.compat.version}".equals("2.4.0")
+                    &amp;&amp; hbaseMinor == 4
+                    &amp;&amp; hbasePatch == 0)
+                  || ("${hbase.compat.version}".equals("2.4.1")
+                    &amp;&amp; hbaseMinor == 4
+                    &amp;&amp; hbasePatch &gt;= 1)
+                  || ("${hbase.compat.version}".equals("2.5.0")
+                    &amp;&amp; hbaseMinor == 5
+                    &amp;&amp; hbasePatch &gt;= 0)
+                  || ("${hbase.compat.version}".equals("2.5.4")
+                    &amp;&amp; hbaseMinor == 5
+                    &amp;&amp; hbasePatch &gt;= 4)
+                )
+              </condition>
+            </evaluateBeanshell>
+          </rules>
+        </configuration>
+        <executions>
+          <execution>
+            <id>check-hbase-compatibility</id>
+            <phase>validate</phase>
+            <goals>
+              <goal>enforce</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+      <!-- Add the antlr-generated sources to the source path -->
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>build-helper-maven-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>add-source</id>
+            <phase>generate-sources</phase>
+            <goals>
+              <goal>add-source</goal>
+            </goals>
+            <configuration>
+              <sources>
+                <source>${antlr-output.dir}</source>
+                <source>${antlr-input.dir}</source>
+              </sources>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <!-- Compile the antlr sources -->
+      <plugin>
+        <groupId>org.antlr</groupId>
+        <artifactId>antlr3-maven-plugin</artifactId>
+        <executions>
+          <execution>
+            <goals>
+              <goal>antlr</goal>
+            </goals>
+          </execution>
+        </executions>
+        <configuration>
+          <outputDirectory>${antlr-output.dir}/org/apache/phoenix/parse</outputDirectory>
+        </configuration>
+      </plugin>
+      <!-- Setup eclipse -->
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-eclipse-plugin</artifactId>
+        <configuration>
+          <buildcommands>
+            <buildcommand>org.jamon.project.templateBuilder</buildcommand>
+            <buildcommand>org.eclipse.jdt.core.javabuilder</buildcommand>
+          </buildcommands>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-dependency-plugin</artifactId>
+        <executions>
+          <execution>
+            <!-- generates the file that will be used by the sandbox script in the dev env -->
+            <id>create-phoenix-generated-classpath</id>
+            <goals>
+              <goal>build-classpath</goal>
+            </goals>
+            <configuration>
+              <outputFile>${project.build.directory}/cached_classpath.txt</outputFile>
+            </configuration>
+          </execution>
+          <!-- copies libraries for use by sqlline when it is started from source dir -->
+          <execution>
+            <id>copy-for-sqlline</id>
+            <goals>
+              <goal>copy</goal>
+            </goals>
+            <configuration>
+              <artifactItems>
+                <artifactItem>
+                  <groupId>org.apache.logging.log4j</groupId>
+                  <artifactId>log4j-api</artifactId>
+                </artifactItem>
+                <artifactItem>
+                  <groupId>org.apache.logging.log4j</groupId>
+                  <artifactId>log4j-core</artifactId>
+                </artifactItem>
+                <artifactItem>
+                  <groupId>org.apache.logging.log4j</groupId>
+                  <artifactId>log4j-slf4j-impl</artifactId>
+                </artifactItem>
+                <artifactItem>
+                  <groupId>org.apache.logging.log4j</groupId>
+                  <artifactId>log4j-1.2-api</artifactId>
+                </artifactItem>
+                <artifactItem>
+                  <groupId>sqlline</groupId>
+                  <artifactId>sqlline</artifactId>
+                  <classifier>jar-with-dependencies</classifier>
+                </artifactItem>
+              </artifactItems>
+              <outputDirectory>${project.basedir}/../lib</outputDirectory>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.rat</groupId>
+        <artifactId>apache-rat-plugin</artifactId>
+        <configuration>
+          <excludes>
+            <exclude>src/main/java/org/apache/phoenix/coprocessor/generated/*.java</exclude>
+            <exclude>src/main/resources/META-INF/services/java.sql.Driver</exclude>
+          </excludes>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>org.xolstice.maven.plugins</groupId>
+        <artifactId>protobuf-maven-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>compile-protoc</id>
+            <phase>generate-sources</phase>
+            <goals>
+              <goal>compile</goal>
+            </goals>
+            <configuration>
+              <protocArtifact>${protobuf.group}:protoc:${protoc.version}:exe:${protoc.arch}</protocArtifact>
+              <protoSourceRoot>${basedir}/src/main/protobuf/</protoSourceRoot>
+              <clearOutputDirectory>false</clearOutputDirectory>
+              <checkStaleness>true</checkStaleness>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+
+  <dependencies>
+    <!-- shaded thirdparty dependencies -->
+    <dependency>
+      <groupId>org.apache.phoenix.thirdparty</groupId>
+      <artifactId>phoenix-shaded-guava</artifactId>
+    </dependency>
+
+    <!-- HBase compat dependency -->
+    <dependency>
+      <groupId>org.apache.phoenix</groupId>
+      <artifactId>phoenix-hbase-compat-${hbase.compat.version}</artifactId>
+      <optional>true</optional>
+    </dependency>
+
+    <!-- Hadoop dependencies -->
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-auth</artifactId>
+    </dependency>
+
+    <!-- HBase dependencies -->
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-common</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-metrics-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-client</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-hadoop-compat</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-hadoop2-compat</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-zookeeper</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-protocol</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-protocol-shaded</artifactId>
+    </dependency>
+
+    <!-- HBase Adjacent Dependencies -->
+    <dependency>
+      <groupId>org.apache.curator</groupId>
+      <artifactId>curator-client</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.curator</groupId>
+      <artifactId>curator-framework</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.curator</groupId>
+      <artifactId>curator-recipes</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.zookeeper</groupId>
+      <artifactId>zookeeper</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.zookeeper</groupId>
+      <artifactId>zookeeper-jute</artifactId>
+    </dependency>
+
+    <!-- Transaction dependencies -->
+    <!-- Omid dependencies -->
+    <dependency>
+      <groupId>org.apache.omid</groupId>
+      <artifactId>omid-hbase-client</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.omid</groupId>
+      <artifactId>omid-hbase-common</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.omid</groupId>
+      <artifactId>omid-timestamp-storage</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.omid</groupId>
+      <artifactId>omid-hbase-commit-table</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.omid</groupId>
+      <artifactId>omid-transaction-client</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.omid</groupId>
+      <artifactId>omid-commit-table</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.omid</groupId>
+      <artifactId>omid-common</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.omid</groupId>
+      <artifactId>omid-codahale-metrics</artifactId>
+    </dependency>
+
+    <!-- Other dependencies -->
+    <dependency>
+      <groupId>org.antlr</groupId>
+      <artifactId>antlr-runtime</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>joda-time</groupId>
+      <artifactId>joda-time</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.github.stephenc.findbugs</groupId>
+      <artifactId>findbugs-annotations</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.github.stephenc.jcip</groupId>
+      <artifactId>jcip-annotations</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.google.code.findbugs</groupId>
+      <artifactId>jsr305</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.iq80.snappy</groupId>
+      <artifactId>snappy</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.fasterxml.jackson.core</groupId>
+      <artifactId>jackson-core</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.fasterxml.jackson.core</groupId>
+      <artifactId>jackson-databind</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.fasterxml.jackson.core</groupId>
+      <artifactId>jackson-annotations</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.google.protobuf</groupId>
+      <artifactId>protobuf-java</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.htrace</groupId>
+      <artifactId>htrace-core</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.phoenix.thirdparty</groupId>
+      <artifactId>phoenix-shaded-commons-cli</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>commons-codec</groupId>
+      <artifactId>commons-codec</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>commons-collections</groupId>
+      <artifactId>commons-collections</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.commons</groupId>
+      <artifactId>commons-csv</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.commons</groupId>
+      <artifactId>commons-lang3</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.commons</groupId>
+      <artifactId>commons-configuration2</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>commons-io</groupId>
+      <artifactId>commons-io</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.clearspring.analytics</groupId>
+      <artifactId>stream</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.ibm.icu</groupId>
+      <artifactId>icu4j</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.ibm.icu</groupId>
+      <artifactId>icu4j-localespi</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.lmax</groupId>
+      <artifactId>disruptor</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.jruby.joni</groupId>
+      <artifactId>joni</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.jruby.jcodings</groupId>
+      <artifactId>jcodings</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.hdrhistogram</groupId>
+      <artifactId>HdrHistogram</artifactId>
+    </dependency>
+  </dependencies>
+</project>
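
The enforcer rule in the pom above pins each phoenix-hbase-compat artifact to
a single HBase minor line with a minimum patch level. As a reading aid only,
here is a minimal Java sketch of the same check, with "2.4.17" and "2.4.1"
standing in for ${hbase.version} and ${hbase.compat.version} (both values are
hypothetical examples, not part of this commit):

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class HBaseCompatCheck {
        public static void main(String[] args) {
            String hbaseVersion = "2.4.17";  // stands in for ${hbase.version}
            String compatVersion = "2.4.1";  // stands in for ${hbase.compat.version}
            Matcher m = Pattern.compile("(\\d+)\\.(\\d+)\\.(\\d+)[^.]*$")
                    .matcher(hbaseVersion);
            if (!m.find()) {
                throw new IllegalArgumentException("Unparseable HBase version: " + hbaseVersion);
            }
            int major = Integer.parseInt(m.group(1));
            int minor = Integer.parseInt(m.group(2));
            int patch = Integer.parseInt(m.group(3));
            // Mirrors the Beanshell condition: each compat version accepts
            // one minor line at or above a patch floor.
            boolean ok = major == 2 && (
                   (compatVersion.equals("2.4.0") && minor == 4 && patch == 0)
                || (compatVersion.equals("2.4.1") && minor == 4 && patch >= 1)
                || (compatVersion.equals("2.5.0") && minor == 5 && patch >= 0)
                || (compatVersion.equals("2.5.4") && minor == 5 && patch >= 4));
            System.out.println(ok ? "compatible" : "incompatible");
        }
    }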
diff --git a/phoenix-core/src/build/phoenix-core.xml b/phoenix-core-client/src/build/phoenix-core.xml
similarity index 100%
rename from phoenix-core/src/build/phoenix-core.xml
rename to phoenix-core-client/src/build/phoenix-core.xml
diff --git a/phoenix-core/src/main/antlr3/PhoenixSQL.g b/phoenix-core-client/src/main/antlr3/PhoenixSQL.g
similarity index 100%
rename from phoenix-core/src/main/antlr3/PhoenixSQL.g
rename to phoenix-core-client/src/main/antlr3/PhoenixSQL.g
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/PhoenixTagType.java b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/PhoenixTagType.java
similarity index 100%
rename from phoenix-core/src/main/java/org/apache/hadoop/hbase/PhoenixTagType.java
rename to phoenix-core-client/src/main/java/org/apache/hadoop/hbase/PhoenixTagType.java
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/client/RegionInfoUtil.java b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoUtil.java
similarity index 100%
rename from phoenix-core/src/main/java/org/apache/hadoop/hbase/client/RegionInfoUtil.java
rename to phoenix-core-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoUtil.java
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/ClientRpcControllerFactory.java b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ClientRpcControllerFactory.java
similarity index 100%
rename from phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/ClientRpcControllerFactory.java
rename to phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ClientRpcControllerFactory.java
diff --git a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/IndexRpcController.java b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/IndexRpcController.java
new file mode 100644
index 0000000000..0e876fe6ae
--- /dev/null
+++ b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/IndexRpcController.java
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.ipc.controller;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.ipc.DelegatingHBaseRpcController;
+import org.apache.hadoop.hbase.ipc.HBaseRpcController;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.query.QueryServicesOptions;
+
+import com.google.protobuf.RpcController;
+import org.apache.phoenix.util.IndexUtil;
+
+/**
+ * {@link RpcController} that sets the appropriate priority of RPC calls destined for Phoenix index
+ * tables.
+ */
+class IndexRpcController extends DelegatingHBaseRpcController {
+
+    private final int priority;
+    private final String tracingTableName;
+    
+    public IndexRpcController(HBaseRpcController delegate, Configuration conf) {
+        super(delegate);
+        this.priority = IndexUtil.getIndexPriority(conf);
+        this.tracingTableName = conf.get(QueryServices.TRACING_STATS_TABLE_NAME_ATTRIB,
+                QueryServicesOptions.DEFAULT_TRACING_STATS_TABLE_NAME);
+    }
+    
+    @Override
+    public void setPriority(final TableName tn) {
+        if (!tn.isSystemTable() && !tn.getNameAsString().equals(tracingTableName)) {
+            setPriority(this.priority);
+        }
+        else {
+            super.setPriority(tn);
+        }
+    }
+    
+
+}
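Note: a minimal sketch of how a client opts into these RPC priorities, assuming HBase's standard controller-factory override key (RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY); the wiring below is illustrative, not part of this commit:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ipc.RpcControllerFactory;

    public class IndexPriorityConfigSketch {
        public static void main(String[] args) {
            // Route client RPCs through Phoenix's controller factory so that calls
            // to index tables pick up the priority assigned by IndexRpcController.
            Configuration conf = HBaseConfiguration.create();
            conf.set(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY,
                    "org.apache.hadoop.hbase.ipc.controller.ClientRpcControllerFactory");
        }
    }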
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/InterRegionServerIndexRpcControllerFactory.java b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/InterRegionServerIndexRpcControllerFactory.java
similarity index 100%
rename from phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/InterRegionServerIndexRpcControllerFactory.java
rename to phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/InterRegionServerIndexRpcControllerFactory.java
diff --git a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/MetadataRpcController.java b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/MetadataRpcController.java
new file mode 100644
index 0000000000..16ad439442
--- /dev/null
+++ b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/MetadataRpcController.java
@@ -0,0 +1,74 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.ipc.controller;
+
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.ipc.DelegatingHBaseRpcController;
+import org.apache.hadoop.hbase.ipc.HBaseRpcController;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
+import org.apache.phoenix.util.IndexUtil;
+import org.apache.phoenix.util.SchemaUtil;
+
+import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList;
+import com.google.protobuf.RpcController;
+
+/**
+ * {@link RpcController} that sets the appropriate priority of RPC calls destined for Phoenix SYSTEM
+ * tables
+ */
+class MetadataRpcController extends DelegatingHBaseRpcController {
+
+    private int priority;
+    // list of system tables
+    private static final List<String> SYSTEM_TABLE_NAMES = new ImmutableList.Builder<String>()
+            .add(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME)
+            .add(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME)
+            .add(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME)
+            .add(PhoenixDatabaseMetaData.SYSTEM_FUNCTION_NAME)
+            .add(PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME)
+            .add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, true)
+                    .getNameAsString())
+            .add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME_BYTES, true)
+                    .getNameAsString())
+            .add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, true)
+                    .getNameAsString())
+            .add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_FUNCTION_NAME_BYTES, true)
+                    .getNameAsString())
+            .add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME_BYTES, true)
+                    .getNameAsString())
+            .build();
+
+    public MetadataRpcController(HBaseRpcController delegate,
+            Configuration conf) {
+        super(delegate);
+        this.priority = IndexUtil.getMetadataPriority(conf);
+    }
+
+    @Override
+    public void setPriority(final TableName tn) {
+        if (SYSTEM_TABLE_NAMES.contains(tn.getNameAsString())) {
+            setPriority(this.priority);
+        } else {
+            super.setPriority(tn);
+        }
+    }
+
+
+}
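Note: the priority applied here comes from configuration via IndexUtil.getMetadataPriority(conf); a hedged sketch of tuning it (the property names below are the Phoenix settings these helpers read, as far as I know; the numeric values are arbitrary examples):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RpcPrioritySketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Priority for RPCs to SYSTEM tables (read by IndexUtil.getMetadataPriority).
            conf.setInt("phoenix.metadata.rpc.priority", 2000);
            // Priority for RPCs to index tables (read by IndexUtil.getIndexPriority).
            conf.setInt("phoenix.index.rpc.priority", 1000);
        }
    }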
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerSideRPCControllerFactory.java b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerSideRPCControllerFactory.java
similarity index 100%
rename from phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerSideRPCControllerFactory.java
rename to phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerSideRPCControllerFactory.java
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerToServerRpcController.java b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerToServerRpcController.java
similarity index 100%
rename from phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerToServerRpcController.java
rename to phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerToServerRpcController.java
diff --git a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerToServerRpcControllerImpl.java b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerToServerRpcControllerImpl.java
new file mode 100644
index 0000000000..8e12d2ec2a
--- /dev/null
+++ b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerToServerRpcControllerImpl.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ 
+package org.apache.hadoop.hbase.ipc.controller;
+
+import com.google.protobuf.RpcController;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.ipc.ServerRpcController;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
+import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList;
+import org.apache.phoenix.util.IndexUtil;
+import org.apache.phoenix.util.SchemaUtil;
+
+import java.util.List;
+
+/**
+ * {@link RpcController} that sets the appropriate priority of server-server RPC calls destined
+ * for Phoenix SYSTEM tables.
+ */
+public class ServerToServerRpcControllerImpl extends ServerRpcController implements
+        ServerToServerRpcController {
+
+    private int priority;
+    // list of system tables that can possibly have server-server rpc's
+    private static final List<String> SYSTEM_TABLE_NAMES = new ImmutableList.Builder<String>()
+            .add(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME)
+            .add(PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME)
+            .add(PhoenixDatabaseMetaData.SYSTEM_TASK_NAME)
+            .add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, true)
+                    .getNameAsString())
+            .add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME_BYTES, true)
+                    .getNameAsString())
+            .add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_TASK_NAME_BYTES, true)
+                    .getNameAsString())
+            .build();
+
+    public ServerToServerRpcControllerImpl(
+            Configuration conf) {
+        super();
+        this.priority = IndexUtil.getServerSidePriority(conf);
+    }
+
+    @Override
+    public void setPriority(final TableName tn) {
+        if (SYSTEM_TABLE_NAMES.contains(tn.getNameAsString())) {
+            setPriority(this.priority);
+        }
+    }
+
+
+    @Override public void setPriority(int priority) {
+        this.priority = priority;
+    }
+
+
+    @Override public int getPriority() {
+        return this.priority;
+    }
+}
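Note: a small usage sketch relying only on the public API above; the table name literal is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.ipc.controller.ServerToServerRpcControllerImpl;

    public class ServerToServerPrioritySketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            ServerToServerRpcControllerImpl controller = new ServerToServerRpcControllerImpl(conf);
            // Only tables in SYSTEM_TABLE_NAMES keep the elevated server-side priority
            // from IndexUtil.getServerSidePriority(conf); other tables are left alone.
            controller.setPriority(TableName.valueOf("SYSTEM.CATALOG"));
            System.out.println("priority = " + controller.getPriority());
        }
    }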
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/cache/HashCache.java b/phoenix-core-client/src/main/java/org/apache/phoenix/cache/HashCache.java
similarity index 100%
rename from phoenix-core/src/main/java/org/apache/phoenix/cache/HashCache.java
rename to phoenix-core-client/src/main/java/org/apache/phoenix/cache/HashCache.java
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/cache/IndexMetaDataCache.java b/phoenix-core-client/src/main/java/org/apache/phoenix/cache/IndexMetaDataCache.java
similarity index 100%
rename from phoenix-core/src/main/java/org/apache/phoenix/cache/IndexMetaDataCache.java
rename to phoenix-core-client/src/main/java/org/apache/phoenix/cache/IndexMetaDataCache.java
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/cache/JodaTimezoneCache.java b/phoenix-core-client/src/main/java/org/apache/phoenix/cache/JodaTimezoneCache.java
similarity index 100%
rename from phoenix-core/src/main/java/org/apache/phoenix/cache/JodaTimezoneCache.java
rename to phoenix-core-client/src/main/java/org/apache/phoenix/cache/JodaTimezoneCache.java
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java b/phoenix-core-client/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
new file mode 100644
index 0000000000..6700ce7c50
--- /dev/null
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
@@ -0,0 +1,553 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.cache;
+
+import static org.apache.phoenix.monitoring.TaskExecutionMetricsHolder.NO_OP_INSTANCE;
+import static org.apache.phoenix.util.LogUtil.addCustomAnnotations;
+
+import java.io.Closeable;
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.file.Files;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.coprocessor.Batch;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils.BlockingRpcCallback;
+import org.apache.hadoop.hbase.ipc.ServerRpcController;
+import org.apache.hadoop.hbase.util.ByteStringer;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.compile.QueryPlan;
+import org.apache.phoenix.compile.ScanRanges;
+import org.apache.phoenix.coprocessorclient.MetaDataProtocol;
+import org.apache.phoenix.coprocessorclient.ServerCachingProtocol.ServerCacheFactory;
+import org.apache.phoenix.coprocessor.generated.ServerCacheFactoryProtos;
+import org.apache.phoenix.coprocessor.generated.ServerCachingProtos.AddServerCacheRequest;
+import org.apache.phoenix.coprocessor.generated.ServerCachingProtos.AddServerCacheResponse;
+import org.apache.phoenix.coprocessor.generated.ServerCachingProtos.RemoveServerCacheRequest;
+import org.apache.phoenix.coprocessor.generated.ServerCachingProtos.RemoveServerCacheResponse;
+import org.apache.phoenix.coprocessor.generated.ServerCachingProtos.ServerCachingService;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.job.JobManager.JobCallable;
+import org.apache.phoenix.join.HashCacheFactory;
+import org.apache.phoenix.memory.InsufficientMemoryException;
+import org.apache.phoenix.memory.MemoryManager.MemoryChunk;
+import org.apache.phoenix.monitoring.TaskExecutionMetricsHolder;
+import org.apache.phoenix.query.ConnectionQueryServices;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.query.QueryServicesOptions;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTable.IndexType;
+import org.apache.phoenix.util.Closeables;
+import org.apache.phoenix.util.EnvironmentEdgeManager;
+import org.apache.phoenix.util.SQLCloseable;
+import org.apache.phoenix.util.SQLCloseables;
+import org.apache.phoenix.util.ScanUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * 
+ * Client for sending a serialized cache to each region server.
+ * 
+ * 
+ * @since 0.1
+ */
+public class ServerCacheClient {
+    public static final int UUID_LENGTH = Bytes.SIZEOF_LONG;
+    public static final byte[] KEY_IN_FIRST_REGION = new byte[]{0};
+    private static final Logger LOGGER = LoggerFactory.getLogger(ServerCacheClient.class);
+    private static final Random RANDOM = new Random();
+    public static final String HASH_JOIN_SERVER_CACHE_RESEND_PER_SERVER = "hash.join.server.cache.resend.per.server";
+    private final PhoenixConnection connection;
+    private final Map<Integer, PTable> cacheUsingTableMap = new ConcurrentHashMap<Integer, PTable>();
+
+    /**
+     * Construct client used to create a serialized cached snapshot of a table and send it to each region server
+     * for caching during hash join processing.
+     * @param connection the client connection
+     * 
+     * TODO: instead of minMaxKeyRange, have an interface for iterating through ranges, as we may be sending
+     * to servers unnecessarily when the min is in the first region and the max is in the last, especially for point queries.
+     */
+    public ServerCacheClient(PhoenixConnection connection) {
+        this.connection = connection;
+    }
+
+    public PhoenixConnection getConnection() {
+        return connection;
+    }
+    
+    /**
+     * Client-side representation of a server cache.  Call {@link #close()} when usage
+     * is complete to free the cache up on the region servers.
+     *
+     * 
+     * @since 0.1
+     */
+    public class ServerCache implements SQLCloseable {
+        private final int size;
+        private final byte[] id;
+        private final Map<HRegionLocation, Long> servers;
+        private ImmutableBytesWritable cachePtr;
+        private MemoryChunk chunk;
+        private File outputFile;
+        private long maxServerCacheTTL;
+        
+        
+        public ServerCache(byte[] id, Set<HRegionLocation> servers, ImmutableBytesWritable cachePtr,
+                ConnectionQueryServices services, boolean storeCacheOnClient) throws IOException {
+            maxServerCacheTTL = services.getProps().getInt(
+                    QueryServices.MAX_SERVER_CACHE_TIME_TO_LIVE_MS_ATTRIB,
+                    QueryServicesOptions.DEFAULT_MAX_SERVER_CACHE_TIME_TO_LIVE_MS);
+            this.id = id;
+            this.servers = new HashMap<>();
+            long currentTime = EnvironmentEdgeManager.currentTimeMillis();
+            for(HRegionLocation loc : servers) {
+                this.servers.put(loc, currentTime);
+            }
+            this.size =  cachePtr.getLength();
+            if (storeCacheOnClient) {
+                try {
+                    this.chunk = services.getMemoryManager().allocate(cachePtr.getLength());
+                    this.cachePtr = cachePtr;
+                } catch (InsufficientMemoryException e) {
+                    this.outputFile = File.createTempFile("HashJoinCacheSpooler", ".bin", new File(services.getProps()
+                            .get(QueryServices.SPOOL_DIRECTORY, QueryServicesOptions.DEFAULT_SPOOL_DIRECTORY)));
+                    try (OutputStream fio = Files.newOutputStream(outputFile.toPath())) {
+                        fio.write(cachePtr.get(), cachePtr.getOffset(), cachePtr.getLength());
+                    }
+                }
+            }
+            
+        }
+
+        public ImmutableBytesWritable getCachePtr() throws IOException {
+            if (this.outputFile != null) {
+                try (InputStream fio = Files.newInputStream(outputFile.toPath())) {
+                    byte[] b = new byte[this.size];
+                    // InputStream.read may return fewer bytes than requested, so loop
+                    // until the entire spooled cache has been read back.
+                    int off = 0;
+                    while (off < b.length) {
+                        int n = fio.read(b, off, b.length - off);
+                        if (n < 0) {
+                            throw new IOException("Unexpected EOF while reading spooled cache " + outputFile);
+                        }
+                        off += n;
+                    }
+                    cachePtr = new ImmutableBytesWritable(b);
+                }
+            }
+            return cachePtr;
+        }
+
+        /**
+         * Gets the size in bytes of hash cache
+         */
+        public int getSize() {
+            return size;
+        }
+
+        /**
+         * Gets the unique identifier for this hash cache
+         */
+        public byte[] getId() {
+            return id;
+        }
+
+        public boolean addServer(HRegionLocation loc) {
+            if (this.servers.containsKey(loc)) {
+                return false;
+            } else {
+                this.servers.put(loc, EnvironmentEdgeManager.currentTimeMillis());
+                return true;
+            }
+        }
+
+        public boolean isExpired(HRegionLocation loc) {
+            if (this.servers.containsKey(loc)) {
+                Long time = this.servers.get(loc);
+                if (EnvironmentEdgeManager.currentTimeMillis() - time > maxServerCacheTTL) {
+                    return true; // cache was sent more than maxServerCacheTTL ms ago; assume it has expired
+                }
+                return false; // cache should still be on the server
+            }
+            return false; // unknown region location; the cache still needs to be sent
+        }
+
+
+        
+        /**
+         * Call to free up cache on region servers when no longer needed
+         */
+        @Override
+        public void close() throws SQLException {
+            try {
+                removeServerCache(this, servers.keySet());
+            } finally {
+                cachePtr = null;
+                if (chunk != null) {
+                    chunk.close();
+                }
+                if (outputFile != null) {
+                    outputFile.delete();
+                }
+            }
+        }
+    }
+
+    public ServerCache createServerCache(byte[] cacheId, QueryPlan delegate)
+            throws SQLException, IOException {
+        PTable cacheUsingTable = delegate.getTableRef().getTable();
+        ConnectionQueryServices services = delegate.getContext().getConnection().getQueryServices();
+        List<HRegionLocation> locations = services.getAllTableRegions(
+                cacheUsingTable.getPhysicalName().getBytes());
+        int nRegions = locations.size();
+        Set<HRegionLocation> servers = new HashSet<>(nRegions);
+        cacheUsingTableMap.put(Bytes.mapKey(cacheId), cacheUsingTable);
+        return new ServerCache(cacheId, servers, new ImmutableBytesWritable(
+                new byte[]{}), services, false);
+    }
+
+    public ServerCache addServerCache(
+            ScanRanges keyRanges, final ImmutableBytesWritable cachePtr, final byte[] txState,
+            final ServerCacheFactory cacheFactory, final PTable cacheUsingTable)
+            throws SQLException {
+        return addServerCache(keyRanges, cachePtr, txState, cacheFactory, cacheUsingTable, false);
+    }
+
+    public ServerCache addServerCache(
+            ScanRanges keyRanges, final ImmutableBytesWritable cachePtr, final byte[] txState,
+            final ServerCacheFactory cacheFactory, final PTable cacheUsingTable,
+            boolean storeCacheOnClient) throws SQLException {
+        final byte[] cacheId = ServerCacheClient.generateId();
+        return addServerCache(keyRanges, cacheId, cachePtr, txState, cacheFactory,
+                cacheUsingTable, false, storeCacheOnClient);
+    }
+
+    public ServerCache addServerCache(
+            ScanRanges keyRanges, final byte[] cacheId, final ImmutableBytesWritable cachePtr,
+            final byte[] txState, final ServerCacheFactory cacheFactory,
+            final PTable cacheUsingTable, final boolean usePersistentCache,
+            boolean storeCacheOnClient) throws SQLException {
+        ConnectionQueryServices services = connection.getQueryServices();
+        List<Closeable> closeables = new ArrayList<Closeable>();
+        ServerCache hashCacheSpec = null;
+        SQLException firstException = null;
+        /**
+         * Execute EndPoint in parallel on each server to send compressed hash cache 
+         */
+        // TODO: generalize and package as a per region server EndPoint caller
+        // (ideally this would be functionality provided by the coprocessor framework)
+        boolean success = false;
+        ExecutorService executor = services.getExecutor();
+        List<Future<Boolean>> futures = Collections.emptyList();
+        try {
+            List<HRegionLocation> locations = services.getAllTableRegions(cacheUsingTable.getPhysicalName().getBytes());
+            int nRegions = locations.size();
+            // Size these based on worst case
+            futures = new ArrayList<Future<Boolean>>(nRegions);
+            Set<HRegionLocation> servers = new HashSet<HRegionLocation>(nRegions);
+            for (HRegionLocation entry : locations) {
+                // Keep track of servers we've sent to and only send once
+                byte[] regionStartKey = entry.getRegion().getStartKey();
+                byte[] regionEndKey = entry.getRegion().getEndKey();
+                if (!servers.contains(entry) &&
+                        keyRanges.intersectRegion(regionStartKey, regionEndKey,
+                                cacheUsingTable.getIndexType() == IndexType.LOCAL)) {
+                    // Call RPC once per server
+                    servers.add(entry);
+                    if (LOGGER.isDebugEnabled()) {
+                        LOGGER.debug(addCustomAnnotations(
+                                "Adding cache entry to be sent for " + entry, connection));
+                    }
+                    final byte[] key = getKeyInRegion(entry.getRegion().getStartKey());
+                    final Table htable = services.getTable(cacheUsingTable.getPhysicalName().getBytes());
+                    closeables.add(htable);
+                    futures.add(executor.submit(new JobCallable<Boolean>() {
+                        
+                        @Override
+                        public Boolean call() throws Exception {
+                            return addServerCache(htable, key, cacheUsingTable, cacheId, cachePtr, cacheFactory, txState, usePersistentCache);
+                        }
+
+                        /**
+                         * Defines the grouping for round robin behavior.  All threads spawned to process
+                         * this scan will be grouped together and time sliced with other simultaneously
+                         * executing parallel scans.
+                         */
+                        @Override
+                        public Object getJobId() {
+                            return ServerCacheClient.this;
+                        }
+                        
+                        @Override
+                        public TaskExecutionMetricsHolder getTaskExecutionMetric() {
+                            return NO_OP_INSTANCE;
+                        }
+                    }));
+                } else {
+                    if (LOGGER.isDebugEnabled()) {
+                        LOGGER.debug(addCustomAnnotations(
+                                "NOT adding cache entry to be sent for " + entry +
+                                        " since one already exists for that entry", connection));
+                    }
+                }
+            }
+
+            hashCacheSpec = new ServerCache(cacheId, servers, cachePtr, services, storeCacheOnClient);
+            // Execute in parallel
+            int timeoutMs = services.getProps().getInt(QueryServices.THREAD_TIMEOUT_MS_ATTRIB, QueryServicesOptions.DEFAULT_THREAD_TIMEOUT_MS);
+            for (Future<Boolean> future : futures) {
+                future.get(timeoutMs, TimeUnit.MILLISECONDS);
+            }
+
+            cacheUsingTableMap.put(Bytes.mapKey(cacheId), cacheUsingTable);
+            success = true;
+        } catch (SQLException e) {
+            firstException = e;
+        } catch (Exception e) {
+            firstException = new SQLException(e);
+        } finally {
+            try {
+                if (!success) {
+                    if (hashCacheSpec != null) {
+                        SQLCloseables.closeAllQuietly(Collections.singletonList(hashCacheSpec));
+                    }
+                    for (Future<Boolean> future : futures) {
+                        future.cancel(true);
+                    }
+                }
+            } finally {
+                try {
+                    Closeables.closeAll(closeables);
+                } catch (IOException e) {
+                    if (firstException == null) {
+                        firstException = new SQLException(e);
+                    }
+                } finally {
+                    if (firstException != null) {
+                        throw firstException;
+                    }
+                }
+            }
+        }
+        if (LOGGER.isDebugEnabled()) {
+            LOGGER.debug(addCustomAnnotations("Cache " + cacheId +
+                    " successfully added to servers.", connection));
+        }
+        return hashCacheSpec;
+    }
+    
+    /**
+     * Remove the cached table from all region servers
+     * @throws SQLException
+     * @throws IllegalStateException if hashed table cannot be removed on any region server on which it was added
+     */
+    private void removeServerCache(final ServerCache cache, Set<HRegionLocation> remainingOnServers) throws SQLException {
+        Table iterateOverTable = null;
+        final byte[] cacheId = cache.getId();
+        try {
+            ConnectionQueryServices services = connection.getQueryServices();
+            Throwable lastThrowable = null;
+            final PTable cacheUsingTable = cacheUsingTableMap.get(Bytes.mapKey(cacheId));
+            byte[] tableName = cacheUsingTable.getPhysicalName().getBytes();
+            iterateOverTable = services.getTable(tableName);
+
+            List<HRegionLocation> locations = services.getAllTableRegions(tableName);
+            /**
+             * Allow for the possibility that the region we used to decide where to send our cache has split
+             * and been relocated to another region server *after* we sent the cache, but before we removed it.
+             * To accommodate this, we iterate through the current metadata boundaries and remove the cache
+             * once for each server to which we originally sent it.
+             */
+            if (LOGGER.isDebugEnabled()) {
+                LOGGER.debug(addCustomAnnotations(
+                        "Removing Cache " + cacheId + " from servers.", connection));
+            }
+            for (HRegionLocation entry : locations) {
+                // Call once per server
+                if (remainingOnServers.contains(entry)) { 
+                    try {
+                        byte[] key = getKeyInRegion(entry.getRegion().getStartKey());
+                        iterateOverTable.coprocessorService(ServerCachingService.class, key, key,
+                                new Batch.Call<ServerCachingService, RemoveServerCacheResponse>() {
+                                    @Override
+                                    public RemoveServerCacheResponse call(ServerCachingService instance)
+                                            throws IOException {
+                                        ServerRpcController controller = new ServerRpcController();
+                                        BlockingRpcCallback<RemoveServerCacheResponse> rpcCallback = new BlockingRpcCallback<RemoveServerCacheResponse>();
+                                        RemoveServerCacheRequest.Builder builder = RemoveServerCacheRequest
+                                                .newBuilder();
+                                        final byte[] tenantIdBytes;
+                                        if (cacheUsingTable.isMultiTenant()) {
+                                            try {
+                                                tenantIdBytes = connection.getTenantId() == null ? null
+                                                        : ScanUtil.getTenantIdBytes(cacheUsingTable.getRowKeySchema(),
+                                                                cacheUsingTable.getBucketNum() != null,
+                                                                connection.getTenantId(),
+                                                                cacheUsingTable.getViewIndexId() != null);
+                                            } catch (SQLException e) {
+                                                throw new IOException(e);
+                                            }
+                                        } else {
+                                            tenantIdBytes = connection.getTenantId() == null ? null
+                                                    : connection.getTenantId().getBytes();
+                                        }
+                                        if (tenantIdBytes != null) {
+                                            builder.setTenantId(ByteStringer.wrap(tenantIdBytes));
+                                        }
+                                        builder.setCacheId(ByteStringer.wrap(cacheId));
+                                        instance.removeServerCache(controller, builder.build(), rpcCallback);
+                                        if (controller.getFailedOn() != null) { throw controller.getFailedOn(); }
+                                        return rpcCallback.get();
+                                    }
+                                });
+                        remainingOnServers.remove(entry);
+                    } catch (Throwable t) {
+                        lastThrowable = t;
+                        LOGGER.error(addCustomAnnotations(
+                                "Error trying to remove hash cache for " + entry,
+                                connection), t);
+                    }
+                }
+            }
+            if (!remainingOnServers.isEmpty()) {
+                LOGGER.warn(addCustomAnnotations("Unable to remove hash cache for "
+                                + remainingOnServers, connection),
+                        lastThrowable);
+            }
+        } finally {
+            cacheUsingTableMap.remove(Bytes.mapKey(cacheId));
+            Closeables.closeQuietly(iterateOverTable);
+        }
+    }
+
+    /**
+     * Create an ID that keeps the cached information independent across operations.
+     * A simple random long suffices, since the window in which it must remain
+     * unique is very limited.
+     */
+    public static byte[] generateId() {
+        long rand = RANDOM.nextLong();
+        return Bytes.toBytes(rand);
+    }
+    
+    public static String idToString(byte[] uuid) {
+        assert(uuid.length == Bytes.SIZEOF_LONG);
+        return Long.toString(Bytes.toLong(uuid));
+    }
+
+    private static byte[] getKeyInRegion(byte[] regionStartKey) {
+        assert (regionStartKey != null);
+        if (Bytes.equals(regionStartKey, HConstants.EMPTY_START_ROW)) {
+            return KEY_IN_FIRST_REGION;
+        }
+        return regionStartKey;
+    }
+
+    public boolean addServerCache(byte[] startkeyOfRegion, ServerCache cache, HashCacheFactory cacheFactory,
+             byte[] txState, PTable pTable) throws Exception {
+        Table table = null;
+        boolean success = true;
+        byte[] cacheId = cache.getId();
+        try {
+            ConnectionQueryServices services = connection.getQueryServices();
+            
+            byte[] tableName = pTable.getPhysicalName().getBytes();
+            table = services.getTable(tableName);
+            HRegionLocation tableRegionLocation = services.getTableRegionLocation(tableName, startkeyOfRegion);
+            if (cache.isExpired(tableRegionLocation)) {
+                return false;
+            }
+            if (cache.addServer(tableRegionLocation)
+                    || services.getProps().getBoolean(HASH_JOIN_SERVER_CACHE_RESEND_PER_SERVER, false)) {
+                success = addServerCache(table, startkeyOfRegion, pTable, cacheId, cache.getCachePtr(),
+                        cacheFactory, txState, false);
+            }
+            return success;
+        } finally {
+            Closeables.closeQuietly(table);
+        }
+    }
+    
+    public boolean addServerCache(Table htable, byte[] key, final PTable cacheUsingTable, final byte[] cacheId,
+            final ImmutableBytesWritable cachePtr, final ServerCacheFactory cacheFactory, final byte[] txState, final boolean usePersistentCache)
+            throws Exception {
+        byte[] keyInRegion = getKeyInRegion(key);
+        final Map<byte[], AddServerCacheResponse> results;
+
+        AddServerCacheRequest.Builder builder = AddServerCacheRequest.newBuilder();
+        final byte[] tenantIdBytes;
+        if (cacheUsingTable.isMultiTenant()) {
+            try {
+                tenantIdBytes = connection.getTenantId() == null ? null
+                        : ScanUtil.getTenantIdBytes(cacheUsingTable.getRowKeySchema(),
+                        cacheUsingTable.getBucketNum() != null, connection.getTenantId(),
+                        cacheUsingTable.getViewIndexId() != null);
+            } catch (SQLException e) {
+                throw new IOException(e);
+            }
+        } else {
+            tenantIdBytes = connection.getTenantId() == null ? null
+                    : connection.getTenantId().getBytes();
+        }
+        if (tenantIdBytes != null) {
+            builder.setTenantId(ByteStringer.wrap(tenantIdBytes));
+        }
+        builder.setCacheId(ByteStringer.wrap(cacheId));
+        builder.setUsePersistentCache(usePersistentCache);
+        builder.setCachePtr(org.apache.phoenix.protobuf.ProtobufUtil.toProto(cachePtr));
+        builder.setHasProtoBufIndexMaintainer(true);
+        ServerCacheFactoryProtos.ServerCacheFactory.Builder svrCacheFactoryBuilder =
+                ServerCacheFactoryProtos.ServerCacheFactory.newBuilder();
+        svrCacheFactoryBuilder.setClassName(cacheFactory.getClass().getName());
+        builder.setCacheFactory(svrCacheFactoryBuilder.build());
+        builder.setTxState(ByteStringer.wrap(txState));
+        builder.setClientVersion(MetaDataProtocol.PHOENIX_VERSION);
+        final AddServerCacheRequest request = builder.build();
+
+        try {
+            results = htable.coprocessorService(ServerCachingService.class, keyInRegion, keyInRegion,
+                    new Batch.Call<ServerCachingService, AddServerCacheResponse>() {
+                        @Override
+                        public AddServerCacheResponse call(ServerCachingService instance) throws IOException {
+                            ServerRpcController controller = new ServerRpcController();
+                            BlockingRpcCallback<AddServerCacheResponse> rpcCallback = new BlockingRpcCallback<AddServerCacheResponse>();
+                            instance.addServerCache(controller, request, rpcCallback);
+                            if (controller.getFailedOn() != null) { throw controller.getFailedOn(); }
+                            return rpcCallback.get();
+                        }
+                    });
+        } catch (Throwable t) {
+            throw new Exception(t);
+        }
+        if (results != null && results.size() == 1) { return results.values().iterator().next().getReturn(); }
+        return false;
+    }
+    
+}
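Note: the cache-ID helpers at the bottom of this class are self-contained; a quick sketch of the round trip (no cluster required):

    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.phoenix.cache.ServerCacheClient;

    public class CacheIdSketch {
        public static void main(String[] args) {
            // generateId() returns a random long; uniqueness only matters for the
            // short window during which the cache lives on the region servers.
            byte[] id = ServerCacheClient.generateId();
            System.out.println("length = " + id.length); // UUID_LENGTH (SIZEOF_LONG == 8)
            System.out.println("asText = " + ServerCacheClient.idToString(id));
            System.out.println("asLong = " + Bytes.toLong(id)); // same value, decoded directly
        }
    }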
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/cache/TenantCache.java b/phoenix-core-client/src/main/java/org/apache/phoenix/cache/TenantCache.java
new file mode 100644
index 0000000000..e36fd09a98
--- /dev/null
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/cache/TenantCache.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.cache;
+
+import java.io.Closeable;
+import java.sql.SQLException;
+
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.phoenix.coprocessorclient.ServerCachingProtocol.ServerCacheFactory;
+import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
+import org.apache.phoenix.memory.MemoryManager;
+
+
+/**
+ * 
+ * Interface to get and set cached values for a tenant
+ *
+ * 
+ * @since 0.1
+ */
+public interface TenantCache {
+    MemoryManager getMemoryManager();
+    Closeable getServerCache(ImmutableBytesPtr cacheId);
+    Closeable addServerCache(ImmutableBytesPtr cacheId, ImmutableBytesWritable cachePtr, byte[] txState, ServerCacheFactory cacheFactory, boolean useProtoForIndexMaintainer, boolean usePersistentCache, int clientVersion) throws SQLException;
+    void removeServerCache(ImmutableBytesPtr cacheId);
+    void removeAllServerCache();
+}
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/cache/TenantCacheImpl.java b/phoenix-core-client/src/main/java/org/apache/phoenix/cache/TenantCacheImpl.java
new file mode 100644
index 0000000000..54afaf6c4d
--- /dev/null
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/cache/TenantCacheImpl.java
@@ -0,0 +1,287 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.cache;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.sql.SQLException;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.coprocessorclient.ServerCachingProtocol.ServerCacheFactory;
+import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
+import org.apache.phoenix.memory.MemoryManager;
+import org.apache.phoenix.memory.MemoryManager.MemoryChunk;
+import org.apache.phoenix.util.Closeables;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.phoenix.thirdparty.com.google.common.base.Ticker;
+import org.apache.phoenix.thirdparty.com.google.common.cache.Cache;
+import org.apache.phoenix.thirdparty.com.google.common.cache.CacheBuilder;
+import org.apache.phoenix.thirdparty.com.google.common.cache.RemovalListener;
+import org.apache.phoenix.thirdparty.com.google.common.cache.RemovalNotification;
+
+/**
+ * 
+ * Cache per tenant on the server side.  Tracks memory usage for each
+ * tenant as well, rolling usage up to the global memory manager.
+ * 
+ * 
+ * @since 0.1
+ */
+public class TenantCacheImpl implements TenantCache {
+    private static final Logger LOGGER = LoggerFactory.getLogger(TenantCacheImpl.class);
+    private final int maxTimeToLiveMs;
+    private final int maxPersistenceTimeToLiveMs;
+    private final MemoryManager memoryManager;
+    private final Ticker ticker;
+
+    // Two caches exist: the "serverCaches" cache which is used for handling live
+    // queries, and the "persistentServerCaches" cache which is used to store data
+    // between queries. If we are out of memory, attempt to clear out entries from
+    // the persistent cache before throwing an exception.
+    private volatile Cache<ImmutableBytesPtr, CacheEntry> serverCaches;
+    private volatile Cache<ImmutableBytesPtr, CacheEntry> persistentServerCaches;
+
+    private final long EVICTION_MARGIN_BYTES = 10000000;
+
+    private static class CacheEntry implements Comparable<CacheEntry>, Closeable {
+        private ImmutableBytesPtr cacheId;
+        private ImmutableBytesWritable cachePtr;
+        private int hits;
+        private int liveQueriesCount;
+        private boolean usePersistentCache;
+        private long size;
+        private Closeable closeable;
+
+        public CacheEntry(ImmutableBytesPtr cacheId, ImmutableBytesWritable cachePtr,
+                ServerCacheFactory cacheFactory, byte[] txState, MemoryChunk chunk,
+                boolean usePersistentCache, boolean useProtoForIndexMaintainer,
+                int clientVersion) throws SQLException {
+            this.cacheId = cacheId;
+            this.cachePtr = cachePtr;
+            this.size = cachePtr.getLength();
+            this.hits = 0;
+            this.liveQueriesCount = 0;
+            this.usePersistentCache = usePersistentCache;
+            this.closeable = cacheFactory.newCache(cachePtr, txState, chunk, useProtoForIndexMaintainer, clientVersion);
+        }
+
+        public void close() throws IOException {
+            this.closeable.close();
+        }
+
+        public synchronized void incrementLiveQueryCount() {
+            liveQueriesCount++;
+            hits++;
+        }
+
+        public synchronized void decrementLiveQueryCount() {
+            liveQueriesCount--;
+        }
+
+        public synchronized boolean isLive() {
+            return liveQueriesCount > 0;
+        }
+
+        public boolean getUsePersistentCache() {
+            return usePersistentCache;
+        }
+
+        public ImmutableBytesPtr getCacheId() {
+            return cacheId;
+        }
+
+        private Float rank() {
+            return (float)hits;
+        }
+
+        @Override
+        public int compareTo(CacheEntry o) {
+            return rank().compareTo(o.rank());
+        }
+    }
+
+    public TenantCacheImpl(MemoryManager memoryManager, int maxTimeToLiveMs, int maxPersistenceTimeToLiveMs) {
+        this(memoryManager, maxTimeToLiveMs, maxPersistenceTimeToLiveMs, Ticker.systemTicker());
+    }
+    
+    public TenantCacheImpl(MemoryManager memoryManager, int maxTimeToLiveMs, int maxPersistenceTimeToLiveMs, Ticker ticker) {
+        this.memoryManager = memoryManager;
+        this.maxTimeToLiveMs = maxTimeToLiveMs;
+        this.maxPersistenceTimeToLiveMs = maxPersistenceTimeToLiveMs;
+        this.ticker = ticker;
+    }
+    
+    public Ticker getTicker() {
+        return ticker;
+    }
+    
+    // For testing
+    public void cleanUp() {
+        synchronized(this) {
+            if (serverCaches != null) {
+                serverCaches.cleanUp();
+            }
+            if (persistentServerCaches != null) {
+                persistentServerCaches.cleanUp();
+            }
+        }
+    }
+    
+    @Override
+    public MemoryManager getMemoryManager() {
+        return memoryManager;
+    }
+
+    private Cache<ImmutableBytesPtr,CacheEntry> getServerCaches() {
+        /* Delay creation of this map until it's needed */
+        if (serverCaches == null) {
+            synchronized(this) {
+                if (serverCaches == null) {
+                    serverCaches = buildCache(maxTimeToLiveMs, false);
+                }
+            }
+        }
+        return serverCaches;
+    }
+
+    private Cache<ImmutableBytesPtr,CacheEntry> getPersistentServerCaches() {
+        /* Delay creation of this map until it's needed */
+        if (persistentServerCaches == null) {
+            synchronized(this) {
+                if (persistentServerCaches == null) {
+                    persistentServerCaches = buildCache(maxPersistenceTimeToLiveMs, true);
+                }
+            }
+        }
+        return persistentServerCaches;
+    }
+
+    private Cache<ImmutableBytesPtr, CacheEntry> buildCache(final int ttl, final boolean isPersistent) {
+        CacheBuilder<Object, Object> builder = CacheBuilder.newBuilder();
+        if (isPersistent) {
+            builder.expireAfterWrite(ttl, TimeUnit.MILLISECONDS);
+        } else {
+            builder.expireAfterAccess(ttl, TimeUnit.MILLISECONDS);
+        }
+        return builder
+            .ticker(getTicker())
+            .removalListener(new RemovalListener<ImmutableBytesPtr, CacheEntry>(){
+                @Override
+                public void onRemoval(RemovalNotification<ImmutableBytesPtr, CacheEntry> notification) {
+                    if (isPersistent || !notification.getValue().getUsePersistentCache()) {
+                        Closeables.closeAllQuietly(Collections.singletonList(notification.getValue()));
+                    }
+                }
+            })
+            .build();
+    }
+
+    private synchronized void evictInactiveEntries(long bytesNeeded) {
+        LOGGER.debug("Trying to evict inactive cache entries to free up " + bytesNeeded + " bytes");
+        CacheEntry[] entries = getPersistentServerCaches().asMap().values().toArray(new CacheEntry[]{});
+        Arrays.sort(entries);
+        long available = this.getMemoryManager().getAvailableMemory();
+        for (int i = 0; i < entries.length && available < bytesNeeded; i++) {
+            CacheEntry entry = entries[i];
+            ImmutableBytesPtr cacheId = entry.getCacheId();
+            getPersistentServerCaches().invalidate(cacheId);
+            available = this.getMemoryManager().getAvailableMemory();
+            LOGGER.debug("Evicted cache ID " + Bytes.toLong(cacheId.get()) + ", we now have "
+                    + available + " bytes available");
+        }
+    }
+
+    private CacheEntry getIfPresent(ImmutableBytesPtr cacheId) {
+        CacheEntry entry = getPersistentServerCaches().getIfPresent(cacheId);
+        if (entry != null) {
+            return entry;
+        }
+        return getServerCaches().getIfPresent(cacheId);
+    }
+
+    @Override
+    public Closeable getServerCache(ImmutableBytesPtr cacheId) {
+        getServerCaches().cleanUp();
+        CacheEntry entry = getIfPresent(cacheId);
+        if (entry == null) {
+            return null;
+        }
+        return entry.closeable;
+    }
+
+    @Override
+    public Closeable addServerCache(ImmutableBytesPtr cacheId, ImmutableBytesWritable cachePtr, byte[] txState, ServerCacheFactory cacheFactory, boolean useProtoForIndexMaintainer, boolean usePersistentCache, int clientVersion) throws SQLException {
+        getServerCaches().cleanUp();
+        long available = this.getMemoryManager().getAvailableMemory();
+        int size = cachePtr.getLength() + txState.length;
+        if (size > available) {
+            evictInactiveEntries(size - available + EVICTION_MARGIN_BYTES);
+        }
+        MemoryChunk chunk = this.getMemoryManager().allocate(size);
+        boolean success = false;
+        try {
+            CacheEntry entry;
+            synchronized(this) {
+                entry = getIfPresent(cacheId);
+                if (entry == null) {
+                    entry = new CacheEntry(
+                        cacheId, cachePtr, cacheFactory, txState, chunk,
+                        usePersistentCache, useProtoForIndexMaintainer,
+                        clientVersion);
+                    getServerCaches().put(cacheId, entry);
+                    if (usePersistentCache) {
+                        getPersistentServerCaches().put(cacheId, entry);
+                    }
+                }
+                entry.incrementLiveQueryCount();
+            }
+            success = true;
+            return entry;
+        } finally {
+            if (!success) {
+                Closeables.closeAllQuietly(Collections.singletonList(chunk));
+            }
+        }
+    }
+
+    @Override
+    public synchronized void removeServerCache(ImmutableBytesPtr cacheId) {
+        CacheEntry entry = getServerCaches().getIfPresent(cacheId);
+        if (entry == null) {
+            return;
+        }
+        entry.decrementLiveQueryCount();
+        if (!entry.isLive()) {
+            LOGGER.debug("Cache ID " + Bytes.toLong(cacheId.get())
+                    + " is no longer live, invalidate it");
+            getServerCaches().invalidate(cacheId);
+        }
+    }
+
+    @Override
+    public void removeAllServerCache() {
+        getServerCaches().invalidateAll();
+        getPersistentServerCaches().invalidateAll();
+    }
+}
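Note: a hedged, test-style sketch of the TTL mechanics above, using a manual Ticker so no sleeping is needed; GlobalMemoryManager's single-argument constructor and the sizes/TTLs below are assumptions for illustration:

    import org.apache.phoenix.cache.TenantCacheImpl;
    import org.apache.phoenix.memory.GlobalMemoryManager;
    import org.apache.phoenix.thirdparty.com.google.common.base.Ticker;

    public class TenantCacheTtlSketch {
        // Manual ticker: lets us advance "time" deterministically.
        static final class ManualTicker extends Ticker {
            private long nanos;
            @Override public long read() { return nanos; }
            void advanceMillis(long ms) { nanos += ms * 1_000_000L; }
        }

        public static void main(String[] args) {
            ManualTicker ticker = new ManualTicker();
            TenantCacheImpl cache = new TenantCacheImpl(
                    new GlobalMemoryManager(10 * 1024 * 1024), // 10 MB bound (example value)
                    1000,    // live-query cache TTL, ms
                    60_000,  // persistent cache TTL, ms
                    ticker);
            // Entries added via addServerCache() would age out after the TTLs above;
            // cleanUp() (exposed for testing) forces expired entries to be evicted now.
            ticker.advanceMillis(2_000);
            cache.cleanUp();
        }
    }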
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/call/CallRunner.java b/phoenix-core-client/src/main/java/org/apache/phoenix/call/CallRunner.java
similarity index 100%
rename from phoenix-core/src/main/java/org/apache/phoenix/call/CallRunner.java
rename to phoenix-core-client/src/main/java/org/apache/phoenix/call/CallRunner.java
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/call/CallWrapper.java b/phoenix-core-client/src/main/java/org/apache/phoenix/call/CallWrapper.java
similarity index 100%
rename from phoenix-core/src/main/java/org/apache/phoenix/call/CallWrapper.java
rename to phoenix-core-client/src/main/java/org/apache/phoenix/call/CallWrapper.java
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/AggregationManager.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/AggregationManager.java
new file mode 100644
index 0000000000..e43edb21b4
--- /dev/null
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/AggregationManager.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compile;
+
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants;
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.expression.aggregator.ClientAggregators;
+import org.apache.phoenix.expression.aggregator.ServerAggregators;
+import org.apache.phoenix.expression.function.SingleAggregateFunction;
+import org.apache.phoenix.expression.visitor.SingleAggregateFunctionVisitor;
+
+import org.apache.phoenix.thirdparty.com.google.common.collect.Sets;
+
+/**
+ * 
+ * Class that manages aggregations during query compilation
+ *
+ * 
+ * @since 0.1
+ */
+public class AggregationManager {
+    private ClientAggregators aggregators;
+    private int position = 0;
+    
+    public AggregationManager() {
+    }
+
+    public ClientAggregators getAggregators() {
+        return aggregators;
+    }
+    
+    public boolean isEmpty() {
+        return aggregators == null || aggregators.getAggregatorCount() == 0;
+    }
+    
+    /**
+     * @return the next available zero-based positional index
+     * for the client-side aggregate function
+     */
+    protected int nextPosition() {
+        return position++;
+    }
+    
+    public void setAggregators(ClientAggregators clientAggregator) {
+        this.aggregators = clientAggregator;
+    }
+    /**
+     * Compiles projection by:
+     * 1) Adding RowCount aggregate function if not present when limiting rows. We need this
+     *    to track how many rows have been scanned.
+     * 2) Reordering aggregation functions (by putting fixed length aggregates first) to
+     *    optimize the positional access of the aggregated value.
+     */
+    public void compile(StatementContext context, GroupByCompiler.GroupBy groupBy) throws
+            SQLException {
+        final Set<SingleAggregateFunction> aggFuncSet = Sets.newHashSetWithExpectedSize(context.getExpressionManager().getExpressionCount());
+
+        Iterator<Expression> expressions = context.getExpressionManager().getExpressions();
+        while (expressions.hasNext()) {
+            Expression expression = expressions.next();
+            expression.accept(new SingleAggregateFunctionVisitor() {
+                @Override
+                public Iterator<Expression> visitEnter(SingleAggregateFunction function) {
+                    aggFuncSet.add(function);
+                    return Collections.emptyIterator();
+                }
+            });
+        }
+        if (aggFuncSet.isEmpty() && groupBy.isEmpty()) {
+            return;
+        }
+        List<SingleAggregateFunction> aggFuncs = new ArrayList<SingleAggregateFunction>(aggFuncSet);
+        Collections.sort(aggFuncs, SingleAggregateFunction.SCHEMA_COMPARATOR);
+
+        int minNullableIndex = getMinNullableIndex(aggFuncs,groupBy.isEmpty());
+        context.getScan().setAttribute(BaseScannerRegionObserverConstants.AGGREGATORS, ServerAggregators.serialize(aggFuncs, minNullableIndex));
+        ClientAggregators clientAggregators = new ClientAggregators(aggFuncs, minNullableIndex);
+        context.getAggregationManager().setAggregators(clientAggregators);
+    }
+
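+    // Illustrative example (not part of the original source): after sorting, a list such
+    // as [COUNT(1), SUM(v)] where only SUM(v) is nullable yields minNullableIndex = 1,
+    // so null tracking is presumably only needed from that position onwards.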
+    private static int getMinNullableIndex(List<SingleAggregateFunction> aggFuncs, boolean isUngroupedAggregation) {
+        int minNullableIndex = aggFuncs.size();
+        for (int i = 0; i < aggFuncs.size(); i++) {
+            SingleAggregateFunction aggFunc = aggFuncs.get(i);
+            if (isUngroupedAggregation ? aggFunc.getAggregator().isNullable() : aggFunc.getAggregatorExpression().isNullable()) {
+                minNullableIndex = i;
+                break;
+            }
+        }
+        return minNullableIndex;
+    }
+
+}
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/BaseMutationPlan.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/BaseMutationPlan.java
similarity index 100%
rename from phoenix-core/src/main/java/org/apache/phoenix/compile/BaseMutationPlan.java
rename to phoenix-core-client/src/main/java/org/apache/phoenix/compile/BaseMutationPlan.java
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/BindManager.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/BindManager.java
similarity index 100%
rename from phoenix-core/src/main/java/org/apache/phoenix/compile/BindManager.java
rename to phoenix-core-client/src/main/java/org/apache/phoenix/compile/BindManager.java
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/CloseStatementCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/CloseStatementCompiler.java
similarity index 100%
rename from phoenix-core/src/main/java/org/apache/phoenix/compile/CloseStatementCompiler.java
rename to phoenix-core-client/src/main/java/org/apache/phoenix/compile/CloseStatementCompiler.java
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/ColumnNameTrackingExpressionCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ColumnNameTrackingExpressionCompiler.java
similarity index 100%
rename from phoenix-core/src/main/java/org/apache/phoenix/compile/ColumnNameTrackingExpressionCompiler.java
rename to phoenix-core-client/src/main/java/org/apache/phoenix/compile/ColumnNameTrackingExpressionCompiler.java
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/ColumnProjector.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ColumnProjector.java
similarity index 100%
rename from phoenix-core/src/main/java/org/apache/phoenix/compile/ColumnProjector.java
rename to phoenix-core-client/src/main/java/org/apache/phoenix/compile/ColumnProjector.java
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/ColumnResolver.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ColumnResolver.java
similarity index 100%
rename from phoenix-core/src/main/java/org/apache/phoenix/compile/ColumnResolver.java
rename to phoenix-core-client/src/main/java/org/apache/phoenix/compile/ColumnResolver.java
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/CompiledOffset.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/CompiledOffset.java
similarity index 100%
rename from phoenix-core/src/main/java/org/apache/phoenix/compile/CompiledOffset.java
rename to phoenix-core-client/src/main/java/org/apache/phoenix/compile/CompiledOffset.java
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/CreateFunctionCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/CreateFunctionCompiler.java
similarity index 100%
rename from phoenix-core/src/main/java/org/apache/phoenix/compile/CreateFunctionCompiler.java
rename to phoenix-core-client/src/main/java/org/apache/phoenix/compile/CreateFunctionCompiler.java
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/CreateIndexCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/CreateIndexCompiler.java
similarity index 100%
rename from phoenix-core/src/main/java/org/apache/phoenix/compile/CreateIndexCompiler.java
rename to phoenix-core-client/src/main/java/org/apache/phoenix/compile/CreateIndexCompiler.java
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/CreateSchemaCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/CreateSchemaCompiler.java
similarity index 100%
rename from phoenix-core/src/main/java/org/apache/phoenix/compile/CreateSchemaCompiler.java
rename to phoenix-core-client/src/main/java/org/apache/phoenix/compile/CreateSchemaCompiler.java
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/CreateSequenceCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/CreateSequenceCompiler.java
similarity index 100%
rename from phoenix-core/src/main/java/org/apache/phoenix/compile/CreateSequenceCompiler.java
rename to phoenix-core-client/src/main/java/org/apache/phoenix/compile/CreateSequenceCompiler.java
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/CreateTableCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/CreateTableCompiler.java
similarity index 100%
rename from phoenix-core/src/main/java/org/apache/phoenix/compile/CreateTableCompiler.java
rename to phoenix-core-client/src/main/java/org/apache/phoenix/compile/CreateTableCompiler.java
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeclareCursorCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/DeclareCursorCompiler.java
similarity index 100%
rename from phoenix-core/src/main/java/org/apache/phoenix/compile/DeclareCursorCompiler.java
rename to phoenix-core-client/src/main/java/org/apache/phoenix/compile/DeclareCursorCompiler.java
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/DelegateMutationPlan.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/DelegateMutationPlan.java
similarity index 100%
rename from phoenix-core/src/main/java/org/apache/phoenix/compile/DelegateMutationPlan.java
rename to phoenix-core-client/src/main/java/org/apache/phoenix/compile/DelegateMutationPlan.java
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
new file mode 100644
index 0000000000..052f691fa8
--- /dev/null
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
@@ -0,0 +1,1030 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compile;
+
+import static org.apache.phoenix.execute.MutationState.RowTimestampColInfo.NULL_ROWTIMESTAMP_INFO;
+import static org.apache.phoenix.util.NumberUtil.add;
+
+import java.io.IOException;
+import java.sql.ParameterMetaData;
+import java.sql.SQLException;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.phoenix.cache.ServerCacheClient;
+import org.apache.phoenix.cache.ServerCacheClient.ServerCache;
+import org.apache.phoenix.compile.ExplainPlanAttributes.ExplainPlanAttributesBuilder;
+import org.apache.phoenix.compile.GroupByCompiler.GroupBy;
+import org.apache.phoenix.compile.OrderByCompiler.OrderBy;
+import org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants;
+import org.apache.phoenix.coprocessorclient.MetaDataProtocol;
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.exception.SQLExceptionInfo;
+import org.apache.phoenix.execute.AggregatePlan;
+import org.apache.phoenix.execute.MutationState;
+import org.apache.phoenix.execute.MutationState.MultiRowMutationState;
+import org.apache.phoenix.execute.MutationState.RowMutationState;
+import org.apache.phoenix.filter.SkipScanFilter;
+import org.apache.phoenix.hbase.index.AbstractValueGetter;
+import org.apache.phoenix.hbase.index.ValueGetter;
+import org.apache.phoenix.hbase.index.covered.update.ColumnReference;
+import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
+import org.apache.phoenix.index.IndexMaintainer;
+import org.apache.phoenix.index.PhoenixIndexCodec;
+import org.apache.phoenix.iterate.ResultIterator;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixResultSet;
+import org.apache.phoenix.jdbc.PhoenixStatement;
+import org.apache.phoenix.jdbc.PhoenixStatement.Operation;
+import org.apache.phoenix.optimize.QueryOptimizer;
+import org.apache.phoenix.parse.AliasedNode;
+import org.apache.phoenix.parse.DeleteStatement;
+import org.apache.phoenix.parse.HintNode;
+import org.apache.phoenix.parse.HintNode.Hint;
+import org.apache.phoenix.parse.NamedTableNode;
+import org.apache.phoenix.parse.ParseNode;
+import org.apache.phoenix.parse.ParseNodeFactory;
+import org.apache.phoenix.parse.SelectStatement;
+import org.apache.phoenix.parse.TableName;
+import org.apache.phoenix.query.ConnectionQueryServices;
+import org.apache.phoenix.query.KeyRange;
+import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.query.QueryServicesOptions;
+import org.apache.phoenix.schema.DelegateColumn;
+import org.apache.phoenix.schema.PColumn;
+import org.apache.phoenix.schema.PIndexState;
+import org.apache.phoenix.schema.PName;
+import org.apache.phoenix.schema.PRow;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTable.IndexType;
+import org.apache.phoenix.schema.PTableImpl;
+import org.apache.phoenix.schema.PTableType;
+import org.apache.phoenix.schema.ReadOnlyTableException;
+import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.schema.TableRef;
+import org.apache.phoenix.schema.tuple.Tuple;
+import org.apache.phoenix.schema.types.PLong;
+import org.apache.phoenix.transaction.PhoenixTransactionProvider.Feature;
+import org.apache.phoenix.util.ByteUtil;
+import org.apache.phoenix.util.IndexUtil;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.ScanUtil;
+import org.apache.phoenix.util.SchemaUtil;
+
+import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions;
+import org.apache.phoenix.thirdparty.com.google.common.collect.Lists;
+
+import edu.umd.cs.findbugs.annotations.NonNull;
+
+public class DeleteCompiler {
+    private static final ParseNodeFactory FACTORY = new ParseNodeFactory();
+    
+    private final PhoenixStatement statement;
+    private final Operation operation;
+    
+    public DeleteCompiler(PhoenixStatement statement, Operation operation) {
+        this.statement = statement;
+        this.operation = operation;
+    }
+    
+    /**
+     * Handles client side deletion of rows for a DELETE statement. We determine the "best" plan to drive the query using
+     * our standard optimizer. The plan may be based on using an index, in which case we need to translate the index row
+     * key to get the data row key used to form the delete mutation. We always collect up the data table mutations, but we
+     * only collect and send the index mutations for global, immutable indexes. Local indexes and mutable indexes are always
+     * maintained on the server side.
+     * @param context StatementContext for the scan being executed
+     * @param iterator ResultIterator for the scan being executed
+     * @param bestPlan QueryPlan used to produce the iterator
+     * @param projectedTableRef TableRef containing all indexed and covered columns across all indexes on the data table
+     * @param otherTableRefs other TableRefs that need to be maintained apart from the one over which the scan is executing.
+     *  Might be other index tables (if we're driving off of the data table), the data table (if we're driving off of
+     *  an index table), or a mix of the data table and additional index tables.
+     * @return MutationState representing the uncommitted data across the data table and indexes. Will be joined with the
+     *  MutationState on the connection over which the delete is occurring.
+     * @throws SQLException
+     */
+    private static MutationState deleteRows(StatementContext context, ResultIterator iterator, QueryPlan bestPlan, TableRef projectedTableRef, List<TableRef> otherTableRefs) throws SQLException {
+        RowProjector projector = bestPlan.getProjector();
+        TableRef tableRef = bestPlan.getTableRef();
+        PTable table = tableRef.getTable();
+        PhoenixStatement statement = context.getStatement();
+        PhoenixConnection connection = statement.getConnection();
+        PName tenantId = connection.getTenantId();
+        byte[] tenantIdBytes = null;
+        if (tenantId != null) {
+            tenantIdBytes = ScanUtil.getTenantIdBytes(table.getRowKeySchema(), table.getBucketNum() != null, tenantId, table.getViewIndexId() != null);
+        }
+        // we automatically flush the mutations when either auto commit is enabled, or
+        // the target table is transactional (in that case changes are not visible until we commit)
+        final boolean autoFlush = connection.getAutoCommit() || tableRef.getTable().isTransactional();
+        ConnectionQueryServices services = connection.getQueryServices();
+        final int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
+        final long maxSizeBytes = services.getProps()
+                .getLongBytes(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB,
+                        QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES);
+        final int batchSize = Math.min(connection.getMutateBatchSize(), maxSize);
+        MultiRowMutationState mutations = new MultiRowMutationState(batchSize);
+        List<MultiRowMutationState> otherMutations = null;
+        // If otherTableRefs is not empty, we're deleting the rows from both the index table and
+        // the data table through a single query to save executing an additional one (since we
+        // can always get the data table row key from an index row key).
+        if (!otherTableRefs.isEmpty()) {
+            otherMutations = Lists.newArrayListWithExpectedSize(otherTableRefs.size());
+            for (int i = 0; i < otherTableRefs.size(); i++) {
+                otherMutations.add(new MultiRowMutationState(batchSize));
+            }
+        }
+        List<PColumn> pkColumns = table.getPKColumns();
+        boolean isMultiTenant = table.isMultiTenant() && tenantIdBytes != null;
+        boolean isSharedViewIndex = table.getViewIndexId() != null;
+        int offset = (table.getBucketNum() == null ? 0 : 1);
+        byte[][] values = new byte[pkColumns.size()][];
+        if (isSharedViewIndex) {
+            values[offset++] = table.getviewIndexIdType().toBytes(table.getViewIndexId());
+        }
+        if (isMultiTenant) {
+            values[offset++] = tenantIdBytes;
+        }
+        try (final PhoenixResultSet rs = new PhoenixResultSet(iterator, projector, context)) {
+            ValueGetter getter = null;
+            if (!otherTableRefs.isEmpty()) {
+                getter = new AbstractValueGetter() {
+                    final ImmutableBytesWritable valuePtr = new ImmutableBytesWritable();
+                    final ImmutableBytesWritable rowKeyPtr = new ImmutableBytesWritable();
+    
+                    @Override
+                    public ImmutableBytesWritable getLatestValue(ColumnReference ref, long ts) throws IOException {
+                        Cell cell = rs.getCurrentRow().getValue(ref.getFamily(), ref.getQualifier());
+                        if (cell == null) {
+                            return null;
+                        }
+                        valuePtr.set(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
+                        return valuePtr;
+                    }
+    
+                    @Override
+                    public byte[] getRowKey() {
+                        rs.getCurrentRow().getKey(rowKeyPtr);
+                        return ByteUtil.copyKeyBytesIfNecessary(rowKeyPtr);
+                    }
+                };
+            }
+            IndexMaintainer scannedIndexMaintainer = null;
+            IndexMaintainer[] maintainers = null;
+            PTable dataTable = table;
+            if (table.getType() == PTableType.INDEX) {
+                if (!otherTableRefs.isEmpty()) {
+                    // The data table is always the last one in the list if it's
+                    // not chosen as the best of the possible plans.
+                    dataTable = otherTableRefs.get(otherTableRefs.size()-1).getTable();
+                    if (!isMaintainedOnClient(table)) {
+                        // dataTable is a projected table and may not include all the indexed columns and so we need to get
+                        // the actual data table
+                        dataTable = PhoenixRuntime.getTable(connection,
+                                SchemaUtil.getTableName(dataTable.getSchemaName().getString(), dataTable.getTableName().getString()));
+                    }
+                    scannedIndexMaintainer = IndexMaintainer.create(dataTable, table, connection);
+                }
+                maintainers = new IndexMaintainer[otherTableRefs.size()];
+                for (int i = 0; i < otherTableRefs.size(); i++) {
+                    // Create IndexMaintainer based on projected table (i.e. SELECT expressions) so that client-side
+                    // expressions are used instead of server-side ones.
+                    PTable otherTable = otherTableRefs.get(i).getTable();
+                    if (otherTable.getType() == PTableType.INDEX) {
+                        // In this case, we'll convert from index row -> data row -> other index row
+                        maintainers[i] = IndexMaintainer.create(dataTable, otherTable, connection);
+                    } else {
+                        maintainers[i] = scannedIndexMaintainer;
+                    }
+                }
+            } else if (!otherTableRefs.isEmpty()) {
+                dataTable = table;
+                maintainers = new IndexMaintainer[otherTableRefs.size()];
+                for (int i = 0; i < otherTableRefs.size(); i++) {
+                    // Create IndexMaintainer based on projected table (i.e. SELECT expressions) so that client-side
+                    // expressions are used instead of server-side ones.
+                    maintainers[i] = IndexMaintainer.create(projectedTableRef.getTable(), otherTableRefs.get(i).getTable(), connection);
+                }
+
+            }
+            byte[][] viewConstants = IndexUtil.getViewConstants(dataTable);
+            int rowCount = 0;
+            while (rs.next()) {
+                ImmutableBytesPtr rowKeyPtr = new ImmutableBytesPtr();  // allocate new as this is a key in a Map
+                rs.getCurrentRow().getKey(rowKeyPtr);
+                // When issuing deletes, we do not care about the row time ranges. Also, if the table had a row timestamp column, then the
+                // row key will already have its value.
+                // The check for otherTableRefs being empty is required when deleting directly from the index
+                if (otherTableRefs.isEmpty() || isMaintainedOnClient(table)) {
+                    mutations.put(rowKeyPtr, new RowMutationState(PRow.DELETE_MARKER, 0, statement.getConnection().getStatementExecutionCounter(), NULL_ROWTIMESTAMP_INFO, null));
+                }
+                for (int i = 0; i < otherTableRefs.size(); i++) {
+                    PTable otherTable = otherTableRefs.get(i).getTable();
+                    ImmutableBytesPtr otherRowKeyPtr = new ImmutableBytesPtr(); // allocate new as this is a key in a Map
+                    // Translate the data table row to the index table row
+                    if (table.getType() == PTableType.INDEX) {
+                        otherRowKeyPtr.set(scannedIndexMaintainer.buildDataRowKey(rowKeyPtr, viewConstants));
+                        if (otherTable.getType() == PTableType.INDEX) {
+                            otherRowKeyPtr.set(maintainers[i].buildRowKey(getter, otherRowKeyPtr, null, null, rs.getCurrentRow().getValue(0).getTimestamp()));
+                        }
+                    } else {
+                        otherRowKeyPtr.set(maintainers[i].buildRowKey(getter, rowKeyPtr, null, null, rs.getCurrentRow().getValue(0).getTimestamp()));
+                    }
+                    otherMutations.get(i).put(otherRowKeyPtr, new RowMutationState(PRow.DELETE_MARKER, 0, statement.getConnection().getStatementExecutionCounter(), NULL_ROWTIMESTAMP_INFO, null));
+                }
+                if (mutations.size() > maxSize) {
+                    throw new IllegalArgumentException("MutationState size of " + mutations.size() + " is bigger than max allowed size of " + maxSize);
+                }
+                rowCount++;
+                // Commit a batch if we are flushing automatically and we're at our batch size
+                if (autoFlush && rowCount % batchSize == 0) {
+                    MutationState state = new MutationState(tableRef, mutations, 0, maxSize, maxSizeBytes, connection);
+                    connection.getMutationState().join(state);
+                    for (int i = 0; i < otherTableRefs.size(); i++) {
+                        MutationState indexState = new MutationState(otherTableRefs.get(i), otherMutations.get(i), 0, maxSize, maxSizeBytes, connection);
+                        connection.getMutationState().join(indexState);
+                    }
+                    connection.getMutationState().send();
+                    mutations.clear();
+                    if (otherMutations != null) {
+                        for (MultiRowMutationState multiRowMutationState : otherMutations) {
+                            multiRowMutationState.clear();
+                        }
+                    }
+                }
+            }
+
+            // If auto flush is true, this last batch will be committed upon return
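+            // Integer division rounds down to the largest multiple of batchSize, e.g.
+            // rowCount = 25 with batchSize = 10 gives 20 rows already sent in full batches.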
+            int nCommittedRows = autoFlush ? (rowCount / batchSize * batchSize) : 0;
+
+            // tableRef can be index if the index table is selected by the query plan or if we do the DELETE
+            // directly on the index table. In other cases it refers to the data table
+            MutationState tableState =
+                new MutationState(tableRef, mutations, nCommittedRows, maxSize, maxSizeBytes, connection);
+            MutationState state;
+            if (otherTableRefs.isEmpty()) {
+                state = tableState;
+            } else {
+                state = new MutationState(maxSize, maxSizeBytes, connection);
+                // if there are other table references we need to start with an empty mutation state and
+                // then join the other states. We only need to count the data table rows that will be deleted.
+                // MutationState.join() correctly maintains that accounting and ignores the index table rows.
+                // This way we always return the correct number of rows that are deleted.
+                state.join(tableState);
+            }
+            for (int i = 0; i < otherTableRefs.size(); i++) {
+                MutationState indexState = new MutationState(otherTableRefs.get(i), otherMutations.get(i), 0, maxSize, maxSizeBytes, connection);
+                state.join(indexState);
+            }
+            return state;
+        }
+    }
+    
+    private static class DeletingParallelIteratorFactory extends MutatingParallelIteratorFactory {
+        private QueryPlan queryPlan;
+        private List<TableRef> otherTableRefs;
+        private TableRef projectedTableRef;
+        
+        private DeletingParallelIteratorFactory(PhoenixConnection connection) {
+            super(connection);
+        }
+        
+        @Override
+        protected MutationState mutate(StatementContext parentContext, ResultIterator iterator, PhoenixConnection connection) throws SQLException {
+            PhoenixStatement statement = new PhoenixStatement(connection);
+            /*
+             * We don't want to collect any read metrics within the child context. This is because any read metrics that
+             * need to be captured are already getting collected in the parent statement context enclosed in the result
+             * iterator being used for reading rows out.
+             */
+            StatementContext context = new StatementContext(statement, false);
+            MutationState state = deleteRows(context, iterator, queryPlan, projectedTableRef, otherTableRefs);
+            return state;
+        }
+        
+        public void setQueryPlan(QueryPlan queryPlan) {
+            this.queryPlan = queryPlan;
+        }
+        
+        public void setOtherTableRefs(List<TableRef> otherTableRefs) {
+            this.otherTableRefs = otherTableRefs;
+        }
+        
+        public void setProjectedTableRef(TableRef projectedTableRef) {
+            this.projectedTableRef = projectedTableRef;
+        }
+    }
+    
+    private List<PTable> getClientSideMaintainedIndexes(TableRef tableRef) {
+        PTable table = tableRef.getTable();
+        if (!table.getIndexes().isEmpty()) {
+            List<PTable> nonDisabledIndexes = Lists.newArrayListWithExpectedSize(table.getIndexes().size());
+            for (PTable index : table.getIndexes()) {
+                if (!index.getIndexState().isDisabled() && isMaintainedOnClient(index)) {
+                    nonDisabledIndexes.add(index);
+                }
+            }
+            return nonDisabledIndexes;
+        }
+        return Collections.emptyList();
+    }
+
+    /**
+     * Implementation of MutationPlan that is selected if
+     * 1) the query is strictly a point lookup, and
+     * 2) the query has no LIMIT clause.
+     */
+    public class MultiRowDeleteMutationPlan implements MutationPlan {
+        private final List<MutationPlan> plans;
+        private final MutationPlan firstPlan;
+        private final QueryPlan dataPlan;
+
+        public MultiRowDeleteMutationPlan(QueryPlan dataPlan, @NonNull List<MutationPlan> plans) {
+            Preconditions.checkArgument(!plans.isEmpty());
+            this.plans = plans;
+            this.firstPlan = plans.get(0);
+            this.dataPlan = dataPlan;
+        }
+        
+        @Override
+        public StatementContext getContext() {
+            return firstPlan.getContext();
+        }
+
+        @Override
+        public ParameterMetaData getParameterMetaData() {
+            return firstPlan.getParameterMetaData();
+        }
+
+        @Override
+        public ExplainPlan getExplainPlan() throws SQLException {
+            return firstPlan.getExplainPlan();
+        }
+
+        @Override
+        public MutationState execute() throws SQLException {
+            MutationState state = firstPlan.execute();
+            statement.getConnection().getMutationState().join(state);
+            for (MutationPlan plan : plans.subList(1, plans.size())) {
+                statement.getConnection().getMutationState().join(plan.execute());
+            }
+            return state;
+        }
+
+        @Override
+        public TableRef getTargetRef() {
+            return firstPlan.getTargetRef();
+        }
+
+        @Override
+        public Set<TableRef> getSourceRefs() {
+            return firstPlan.getSourceRefs();
+        }
+
+        @Override
+        public Operation getOperation() {
+            return operation;
+        }
+
+        @Override
+        public Long getEstimatedRowsToScan() throws SQLException {
+            Long estRows = null;
+            for (MutationPlan plan : plans) {
+                /*
+                 * If any of the plans doesn't have estimate information available, then we
+                 * cannot provide an estimate for the overall plan.
+                 */
+                if (plan.getEstimatedRowsToScan() == null) {
+                    return null;
+                }
+                estRows = add(estRows, plan.getEstimatedRowsToScan());
+            }
+            return estRows;
+        }
+
+        @Override
+        public Long getEstimatedBytesToScan() throws SQLException {
+            Long estBytes = null;
+            for (MutationPlan plan : plans) {
+                /*
+                 * If any of the plans doesn't have estimate information available, then we
+                 * cannot provide an estimate for the overall plan.
+                 */
+                if (plan.getEstimatedBytesToScan() == null) {
+                    return null;
+                }
+                estBytes = add(estBytes, plan.getEstimatedBytesToScan());
+            }
+            return estBytes;
+        }
+
+        @Override
+        public Long getEstimateInfoTimestamp() throws SQLException {
+            Long estInfoTimestamp = Long.MAX_VALUE;
+            for (MutationPlan plan : plans) {
+                Long timestamp = plan.getEstimateInfoTimestamp();
+                /*
+                 * If any of the plans doesn't have estimate information available, then we
+                 * cannot provide an estimate for the overall plan.
+                 */
+                if (timestamp == null) {
+                    return timestamp;
+                }
+                estInfoTimestamp = Math.min(estInfoTimestamp, timestamp);
+            }
+            return estInfoTimestamp;
+        }
+
+        @Override
+        public QueryPlan getQueryPlan() {
+            return dataPlan;
+        }
+    }
+
+    public MutationPlan compile(DeleteStatement delete) throws SQLException {
+        final PhoenixConnection connection = statement.getConnection();
+        final boolean isAutoCommit = connection.getAutoCommit();
+        final boolean hasPostProcessing = delete.getLimit() != null;
+        final ConnectionQueryServices services = connection.getQueryServices();
+        List<QueryPlan> queryPlans;
+        boolean allowServerMutations =
+                services.getProps().getBoolean(QueryServices.ENABLE_SERVER_SIDE_DELETE_MUTATIONS,
+                        QueryServicesOptions.DEFAULT_ENABLE_SERVER_SIDE_DELETE_MUTATIONS);
+        NamedTableNode tableNode = delete.getTable();
+        String tableName = tableNode.getName().getTableName();
+        String schemaName = tableNode.getName().getSchemaName();
+        SelectStatement select = null;
+        ColumnResolver resolverToBe = null;
+        DeletingParallelIteratorFactory parallelIteratorFactoryToBe;
+        resolverToBe = FromCompiler.getResolverForMutation(delete, connection);
+        final TableRef targetTableRef = resolverToBe.getTables().get(0);
+        PTable table = targetTableRef.getTable();
+        // Cannot update:
+        // - read-only VIEW 
+        // - transactional table with a connection having an SCN
+        // TODO: SchemaUtil.isReadOnly(PTable, connection)?
+        if (table.getType() == PTableType.VIEW && table.getViewType().isReadOnly()) {
+            throw new ReadOnlyTableException(schemaName, tableName);
+        } else if (table.isTransactional() && connection.getSCN() != null) {
+            throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_SPECIFY_SCN_FOR_TXN_TABLE)
+                    .setSchemaName(schemaName).setTableName(tableName).build().buildException();
+        }
+        
+        List<PTable> clientSideIndexes = getClientSideMaintainedIndexes(targetTableRef);
+        final boolean hasClientSideIndexes = !clientSideIndexes.isEmpty();
+
+        boolean isSalted = table.getBucketNum() != null;
+        boolean isMultiTenant = connection.getTenantId() != null && table.isMultiTenant();
+        boolean isSharedViewIndex = table.getViewIndexId() != null;
+        int pkColumnOffset = (isSalted ? 1 : 0) + (isMultiTenant ? 1 : 0) + (isSharedViewIndex ? 1 : 0);
+        final int pkColumnCount = table.getPKColumns().size() - pkColumnOffset;
+        int selectColumnCount = pkColumnCount;
+        for (PTable index : clientSideIndexes) {
+            selectColumnCount += index.getPKColumns().size() - pkColumnCount;
+        }
+        Set<PColumn> projectedColumns = new LinkedHashSet<PColumn>(selectColumnCount + pkColumnOffset);
+        List<AliasedNode> aliasedNodes = Lists.newArrayListWithExpectedSize(selectColumnCount);
+        for (int i = isSalted ? 1 : 0; i < pkColumnOffset; i++) {
+            PColumn column = table.getPKColumns().get(i);
+            projectedColumns.add(column);
+        }
+        for (int i = pkColumnOffset; i < table.getPKColumns().size(); i++) {
+            PColumn column = table.getPKColumns().get(i);
+            projectedColumns.add(column);
+            aliasedNodes.add(FACTORY.aliasedNode(null, FACTORY.column(null, '"' + column.getName().getString() + '"', null)));
+        }
+        // Project all non PK indexed columns so that we can do the proper index maintenance on the indexes for which
+        // mutations are generated on the client side. Indexed columns are needed to identify index rows to be deleted
+        for (PTable index : table.getIndexes()) {
+            if (isMaintainedOnClient(index)) {
+                IndexMaintainer maintainer = index.getIndexMaintainer(table, connection);
+                // Go through maintainer as it handles functional indexes correctly
+                for (Pair<String, String> columnInfo : maintainer.getIndexedColumnInfo()) {
+                    String familyName = columnInfo.getFirst();
+                    if (familyName != null) {
+                        String columnName = columnInfo.getSecond();
+                        boolean hasNoColumnFamilies = table.getColumnFamilies().isEmpty();
+                        PColumn column = hasNoColumnFamilies ? table.getColumnForColumnName(columnName) : table.getColumnFamily(familyName).getPColumnForColumnName(columnName);
+                        if (!projectedColumns.contains(column)) {
+                            projectedColumns.add(column);
+                            aliasedNodes.add(FACTORY.aliasedNode(null, FACTORY.column(hasNoColumnFamilies ? null : TableName.create(null, familyName), '"' + columnName + '"', null)));
+                        }
+                    }
+                }
+            }
+        }
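+        // Illustrative example (not in the original source): DELETE FROM T WHERE k2 = 'a'
+        // is rewritten here as roughly SELECT "K1", "K2" (plus any client-maintained
+        // indexed columns) FROM T WHERE k2 = 'a', reusing the DELETE's hint, ORDER BY and
+        // LIMIT, so the scan returns the row keys needed to build the delete mutations.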
+        select = FACTORY.select(delete.getTable(), delete.getHint(), false, aliasedNodes, delete.getWhere(),
+                Collections.<ParseNode> emptyList(), null, delete.getOrderBy(), delete.getLimit(), null,
+                delete.getBindCount(), false, false, Collections.<SelectStatement> emptyList(),
+                delete.getUdfParseNodes());
+        select = StatementNormalizer.normalize(select, resolverToBe);
+        
+        SelectStatement transformedSelect = SubqueryRewriter.transform(select, resolverToBe, connection);
+        boolean hasPreProcessing = transformedSelect != select;
+        if (transformedSelect != select) {
+            resolverToBe = FromCompiler.getResolverForQuery(transformedSelect, connection, false, delete.getTable().getName());
+            select = StatementNormalizer.normalize(transformedSelect, resolverToBe);
+        }
+        final boolean hasPreOrPostProcessing = hasPreProcessing || hasPostProcessing;
+        boolean noQueryReqd = !hasPreOrPostProcessing;
+        // No limit and no sub queries, joins, etc in where clause
+        // Can't run on same server for transactional data, as we need the row keys for the data
+        // that is being upserted for conflict detection purposes.
+        // If we have immutable indexes, we'd increase the number of bytes scanned by executing
+        // separate queries against each index, so better to drive from a single table in that case.
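+        // In the typical server-side case (illustrative): auto commit on, a
+        // non-transactional table, no client-maintained indexes and no LIMIT lets the
+        // DELETE run entirely on the server as an ungrouped aggregate (see
+        // ServerSelectDeleteMutationPlan below).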
+        boolean runOnServer = isAutoCommit && !hasPreOrPostProcessing && !table.isTransactional() && !hasClientSideIndexes && allowServerMutations;
+        HintNode hint = delete.getHint();
+        if (runOnServer && !delete.getHint().hasHint(Hint.USE_INDEX_OVER_DATA_TABLE)) {
+            select = SelectStatement.create(select, HintNode.create(hint, Hint.USE_DATA_OVER_INDEX_TABLE));
+        }
+        
+        parallelIteratorFactoryToBe = hasPreOrPostProcessing ? null : new DeletingParallelIteratorFactory(connection);
+        QueryOptimizer optimizer = new QueryOptimizer(services);
+        QueryCompiler compiler = new QueryCompiler(statement, select, resolverToBe, Collections.<PColumn>emptyList(), parallelIteratorFactoryToBe, new SequenceManager(statement));
+        final QueryPlan dataPlan = compiler.compile();
+        // TODO: the select clause should know that there's a sub query, but doesn't seem to currently
+        queryPlans = Lists.newArrayList(!clientSideIndexes.isEmpty()
+                ? optimizer.getApplicablePlans(dataPlan, statement, select, resolverToBe, Collections.<PColumn>emptyList(), parallelIteratorFactoryToBe)
+                : optimizer.getBestPlan(dataPlan, statement, select, resolverToBe, Collections.<PColumn>emptyList(), parallelIteratorFactoryToBe));
+
+        runOnServer &= queryPlans.get(0).getTableRef().getTable().getType() != PTableType.INDEX;
+
+        // We need to have all indexed columns available in all immutable indexes in order
+        // to generate the delete markers from the query. We also cannot have any filters
+        // except for our SkipScanFilter for point lookups.
+        // A simple check of the non existence of a where clause in the parse node is not sufficient, as the where clause
+        // may have been optimized out. Instead, we check that there's a single SkipScanFilter
+        // If we can generate a plan for every index, that means all the required columns are available in every index,
+        // hence we can drive the delete from any of the plans.
+        noQueryReqd &= queryPlans.size() == 1 + clientSideIndexes.size();
+        int queryPlanIndex = 0;
+        while (noQueryReqd && queryPlanIndex < queryPlans.size()) {
+            QueryPlan plan = queryPlans.get(queryPlanIndex++);
+            StatementContext context = plan.getContext();
+            noQueryReqd &= (!context.getScan().hasFilter()
+                    || context.getScan().getFilter() instanceof SkipScanFilter)
+                && context.getScanRanges().isPointLookup();
+        }
+
+        final int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
+        final long maxSizeBytes = services.getProps()
+                .getLongBytes(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB,
+                        QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES);
+ 
+        // If we're deleting a set of rows that is fully specified by point lookups, then we don't need to contact the server at all.
+        if (noQueryReqd) {
+            // Create a mutationPlan for each queryPlan. One plan will be for the deletion of the rows
+            // from the data table, while the others will be for deleting rows from immutable indexes.
+            List<MutationPlan> mutationPlans = Lists.newArrayListWithExpectedSize(queryPlans.size());
+            for (final QueryPlan plan : queryPlans) {
+                mutationPlans.add(new SingleRowDeleteMutationPlan(plan, connection, maxSize, maxSizeBytes));
+            }
+            return new MultiRowDeleteMutationPlan(dataPlan, mutationPlans);
+        } else if (runOnServer) {
+            // TODO: better abstraction
+            final StatementContext context = dataPlan.getContext();
+            Scan scan = context.getScan();
+            scan.setAttribute(BaseScannerRegionObserverConstants.DELETE_AGG, QueryConstants.TRUE);
+
+            // Build an ungrouped aggregate query: select COUNT(*) from <table> where <where>
+            // The coprocessor will delete each row returned from the scan
+            // Ignoring ORDER BY, since with auto commit on and no limit it makes no difference
+            SelectStatement aggSelect = SelectStatement.create(SelectStatement.COUNT_ONE, delete.getHint());
+            RowProjector projectorToBe = ProjectionCompiler.compile(context, aggSelect, GroupBy.EMPTY_GROUP_BY);
+            context.getAggregationManager().compile(context, GroupBy.EMPTY_GROUP_BY);
+            if (dataPlan.getProjector().projectEveryRow()) {
+                projectorToBe = new RowProjector(projectorToBe, true);
+            }
+            final RowProjector projector = projectorToBe;
+            final QueryPlan aggPlan = new AggregatePlan(context, select, dataPlan.getTableRef(), projector, null, null,
+                    OrderBy.EMPTY_ORDER_BY, null, GroupBy.EMPTY_GROUP_BY, null, dataPlan);
+            return new ServerSelectDeleteMutationPlan(dataPlan, connection, aggPlan, projector, maxSize, maxSizeBytes);
+        } else {
+            final DeletingParallelIteratorFactory parallelIteratorFactory = parallelIteratorFactoryToBe;
+            List<PColumn> adjustedProjectedColumns = Lists.newArrayListWithExpectedSize(projectedColumns.size());
+            final int offset = table.getBucketNum() == null ? 0 : 1;
+            Iterator<PColumn> projectedColsItr = projectedColumns.iterator();
+            int i = 0;
+            while (projectedColsItr.hasNext()) {
+                final int position = i++;
+                adjustedProjectedColumns.add(new DelegateColumn(projectedColsItr.next()) {
+                    @Override
+                    public int getPosition() {
+                        return position + offset;
+                    }
+                });
+            }
+            PTable projectedTable = PTableImpl.builderWithColumns(table, adjustedProjectedColumns)
+                    .setType(PTableType.PROJECTED)
+                    .build();
+            final TableRef projectedTableRef = new TableRef(projectedTable, targetTableRef.getLowerBoundTimeStamp(), targetTableRef.getTimeStamp());
+
+            QueryPlan bestPlanToBe = dataPlan;
+            for (QueryPlan plan : queryPlans) {
+                PTable planTable = plan.getTableRef().getTable();
+                if (planTable.getIndexState() != PIndexState.BUILDING) {
+                    bestPlanToBe = plan;
+                    break;
+                }
+            }
+            final QueryPlan bestPlan = bestPlanToBe;
+            final List<TableRef> otherTableRefs = Lists.newArrayListWithExpectedSize(clientSideIndexes.size());
+            for (PTable index : clientSideIndexes) {
+                if (!bestPlan.getTableRef().getTable().equals(index)) {
+                    otherTableRefs.add(new TableRef(index, targetTableRef.getLowerBoundTimeStamp(), targetTableRef.getTimeStamp()));
+                }
+            }
+            
+            if (!bestPlan.getTableRef().getTable().equals(targetTableRef.getTable())) {
+                otherTableRefs.add(projectedTableRef);
+            }
+            return new ClientSelectDeleteMutationPlan(targetTableRef, dataPlan, bestPlan, hasPreOrPostProcessing,
+                    parallelIteratorFactory, otherTableRefs, projectedTableRef, maxSize, maxSizeBytes, connection);
+        }
+    }
+
+    /**
+     * Implementation of MutationPlan for composing a MultiRowDeleteMutationPlan.
+     */
+    private class SingleRowDeleteMutationPlan implements MutationPlan {
+
+        private final QueryPlan dataPlan;
+        private final PhoenixConnection connection;
+        private final int maxSize;
+        private final StatementContext context;
+        private final long maxSizeBytes;
+
+        public SingleRowDeleteMutationPlan(QueryPlan dataPlan, PhoenixConnection connection, int maxSize, long maxSizeBytes) {
+            this.dataPlan = dataPlan;
+            this.connection = connection;
+            this.maxSize = maxSize;
+            this.context = dataPlan.getContext();
+            this.maxSizeBytes = maxSizeBytes;
+        }
+
+        @Override
+        public ParameterMetaData getParameterMetaData() {
+            return context.getBindManager().getParameterMetaData();
+        }
+
+        @Override
+        public MutationState execute() throws SQLException {
+            // We have a point lookup, so we know we have a simple set of fully qualified
+            // keys for our ranges
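+            // Illustrative: DELETE FROM T WHERE pk IN (1, 2, 3) compiles to three point
+            // lookups, so three delete markers are produced here without any server scan.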
+            ScanRanges ranges = context.getScanRanges();
+            Iterator<KeyRange> iterator = ranges.getPointLookupKeyIterator();
+            MultiRowMutationState mutation = new MultiRowMutationState(ranges.getPointLookupCount());
+            while (iterator.hasNext()) {
+                mutation.put(new ImmutableBytesPtr(iterator.next().getLowerRange()),
+                        new RowMutationState(PRow.DELETE_MARKER, 0,
+                                statement.getConnection().getStatementExecutionCounter(), NULL_ROWTIMESTAMP_INFO, null));
+            }
+            return new MutationState(dataPlan.getTableRef(), mutation, 0, maxSize, maxSizeBytes, connection);
+        }
+
+        @Override
+        public ExplainPlan getExplainPlan() throws SQLException {
+            return new ExplainPlan(Collections.singletonList("DELETE SINGLE ROW"));
+        }
+
+        @Override
+        public QueryPlan getQueryPlan() {
+            return dataPlan;
+        }
+
+        @Override
+        public StatementContext getContext() {
+            return context;
+        }
+
+        @Override
+        public TableRef getTargetRef() {
+            return dataPlan.getTableRef();
+        }
+
+        @Override
+        public Set<TableRef> getSourceRefs() {
+            // Don't include the target
+            return Collections.emptySet();
+        }
+
+        @Override
+        public Operation getOperation() {
+          return operation;
+        }
+
+        @Override
+        public Long getEstimatedRowsToScan() throws SQLException {
+            return 0L;
+        }
+
+        @Override
+        public Long getEstimatedBytesToScan() throws SQLException {
+            return 0L;
+        }
+
+        @Override
+        public Long getEstimateInfoTimestamp() throws SQLException {
+            return 0L;
+        }
+    }
+
+    /**
+     * Implementation of MutationPlan that is selected if
+     * 1) there is no immutable index present on the table,
+     * 2) both auto commit and server-side delete mutations are enabled,
+     * 3) the table is not transactional,
+     * 4) the query has no LIMIT clause, and
+     * 5) the query has a WHERE clause and is not strictly a point lookup.
+     */
+    public class ServerSelectDeleteMutationPlan implements MutationPlan {
+        private final StatementContext context;
+        private final QueryPlan dataPlan;
+        private final PhoenixConnection connection;
+        private final QueryPlan aggPlan;
+        private final RowProjector projector;
+        private final int maxSize;
+        private final long maxSizeBytes;
+
+        public ServerSelectDeleteMutationPlan(QueryPlan dataPlan, PhoenixConnection connection, QueryPlan aggPlan,
+                                              RowProjector projector, int maxSize, long maxSizeBytes) {
+            this.context = dataPlan.getContext();
+            this.dataPlan = dataPlan;
+            this.connection = connection;
+            this.aggPlan = aggPlan;
+            this.projector = projector;
+            this.maxSize = maxSize;
+            this.maxSizeBytes = maxSizeBytes;
+        }
+
+        @Override
+        public ParameterMetaData getParameterMetaData() {
+            return context.getBindManager().getParameterMetaData();
+        }
+
+        @Override
+        public StatementContext getContext() {
+            return context;
+        }
+
+        @Override
+        public TableRef getTargetRef() {
+            return dataPlan.getTableRef();
+        }
+
+        @Override
+        public Set<TableRef> getSourceRefs() {
+            return dataPlan.getSourceRefs();
+        }
+
+        @Override
+        public Operation getOperation() {
+          return operation;
+        }
+
+        @Override
+        public MutationState execute() throws SQLException {
+            // TODO: share this block of code with UPSERT SELECT
+            ImmutableBytesWritable ptr = context.getTempPtr();
+            PTable table = dataPlan.getTableRef().getTable();
+            table.getIndexMaintainers(ptr, context.getConnection());
+            ScanUtil.annotateScanWithMetadataAttributes(table, context.getScan());
+            byte[] txState = table.isTransactional() ? connection.getMutationState().encodeTransaction() : ByteUtil.EMPTY_BYTE_ARRAY;
+            ServerCache cache = null;
+            try {
+                if (ptr.getLength() > 0) {
+                    byte[] uuidValue = ServerCacheClient.generateId();
+                    context.getScan().setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
+                    context.getScan().setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD, ptr.get());
+                    context.getScan().setAttribute(BaseScannerRegionObserverConstants.TX_STATE, txState);
+                    ScanUtil.setClientVersion(context.getScan(), MetaDataProtocol.PHOENIX_VERSION);
+                    String sourceOfDelete = statement.getConnection().getSourceOfOperation();
+                    if (sourceOfDelete != null) {
+                        context.getScan().setAttribute(QueryServices.SOURCE_OPERATION_ATTRIB,
+                                Bytes.toBytes(sourceOfDelete));
+                    }
+                }
+                ResultIterator iterator = aggPlan.iterator();
+                try {
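+                    // The ungrouped aggregate returns a single row whose only column is
+                    // the server-computed count of deleted rows; it becomes the update count.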
+                    Tuple row = iterator.next();
+                    final long mutationCount = (Long) projector.getColumnProjector(0).getValue(row, PLong.INSTANCE, ptr);
+                    return new MutationState(maxSize, maxSizeBytes, connection) {
+                        @Override
+                        public long getUpdateCount() {
+                            return mutationCount;
+                        }
+                    };
+                } finally {
+                    iterator.close();
+                }
+            } finally {
+                if (cache != null) {
+                    cache.close();
+                }
+            }
+        }
+
+        @Override
+        public ExplainPlan getExplainPlan() throws SQLException {
+            ExplainPlan explainPlan = aggPlan.getExplainPlan();
+            List<String> queryPlanSteps = explainPlan.getPlanSteps();
+            ExplainPlanAttributes explainPlanAttributes =
+                explainPlan.getPlanStepsAsAttributes();
+            List<String> planSteps =
+                Lists.newArrayListWithExpectedSize(queryPlanSteps.size() + 1);
+            ExplainPlanAttributesBuilder newBuilder =
+                new ExplainPlanAttributesBuilder(explainPlanAttributes);
+            newBuilder.setAbstractExplainPlan("DELETE ROWS SERVER SELECT");
+            planSteps.add("DELETE ROWS SERVER SELECT");
+            planSteps.addAll(queryPlanSteps);
+            return new ExplainPlan(planSteps, newBuilder.build());
+        }
+
+        @Override
+        public Long getEstimatedRowsToScan() throws SQLException {
+            return aggPlan.getEstimatedRowsToScan();
+        }
+
+        @Override
+        public Long getEstimatedBytesToScan() throws SQLException {
+            return aggPlan.getEstimatedBytesToScan();
+        }
+
+        @Override
+        public Long getEstimateInfoTimestamp() throws SQLException {
+            return aggPlan.getEstimateInfoTimestamp();
+        }
+
+        @Override
+        public QueryPlan getQueryPlan() {
+            return aggPlan;
+        }
+    }
+
+    /**
+     * Implementation of MutationPlan that is selected if the query doesn't match the criteria of
+     * ServerSelectDeleteMutationPlan.
+     */
+    public class ClientSelectDeleteMutationPlan implements MutationPlan {
+        private final StatementContext context;
+        private final TableRef targetTableRef;
+        private final QueryPlan dataPlan;
+        private final QueryPlan bestPlan;
+        private final boolean hasPreOrPostProcessing;
+        private final DeletingParallelIteratorFactory parallelIteratorFactory;
+        private final List<TableRef> otherTableRefs;
+        private final TableRef projectedTableRef;
+        private final int maxSize;
+        private final long maxSizeBytes;
+        private final PhoenixConnection connection;
+
+        public ClientSelectDeleteMutationPlan(TableRef targetTableRef, QueryPlan dataPlan, QueryPlan bestPlan,
+                                              boolean hasPreOrPostProcessing,
+                                              DeletingParallelIteratorFactory parallelIteratorFactory,
+                                              List<TableRef> otherTableRefs, TableRef projectedTableRef, int maxSize,
+                                              long maxSizeBytes, PhoenixConnection connection) {
+            this.context = bestPlan.getContext();
+            this.targetTableRef = targetTableRef;
+            this.dataPlan = dataPlan;
+            this.bestPlan = bestPlan;
+            this.hasPreOrPostProcessing = hasPreOrPostProcessing;
+            this.parallelIteratorFactory = parallelIteratorFactory;
+            this.otherTableRefs = otherTableRefs;
+            this.projectedTableRef = projectedTableRef;
+            this.maxSize = maxSize;
+            this.maxSizeBytes = maxSizeBytes;
+            this.connection = connection;
+        }
+
+        @Override
+        public ParameterMetaData getParameterMetaData() {
+            return context.getBindManager().getParameterMetaData();
+        }
+
+        @Override
+        public StatementContext getContext() {
+            return context;
+        }
+
+        @Override
+        public TableRef getTargetRef() {
+            return targetTableRef;
+        }
+
+        @Override
+        public Set<TableRef> getSourceRefs() {
+            return dataPlan.getSourceRefs();
+        }
+
+        @Override
+        public Operation getOperation() {
+          return operation;
+        }
+
+        @Override
+        public MutationState execute() throws SQLException {
+            ResultIterator iterator = bestPlan.iterator();
+            try {
+                // If we're not doing any pre or post processing, we can produce the delete mutations directly
+                // in the parallel threads executed for the scan
+                if (!hasPreOrPostProcessing) {
+                    Tuple tuple;
+                    long totalRowCount = 0;
+                    if (parallelIteratorFactory != null) {
+                        parallelIteratorFactory.setQueryPlan(bestPlan);
+                        parallelIteratorFactory.setOtherTableRefs(otherTableRefs);
+                        parallelIteratorFactory.setProjectedTableRef(projectedTableRef);
+                    }
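+                    // Each tuple returned here carries the number of rows deleted by one
+                    // parallel chunk, encoded as a PLong; the chunks are summed for the total.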
+                    while ((tuple = iterator.next()) != null) { // Runs query
+                        Cell kv = tuple.getValue(0);
+                        totalRowCount += PLong.INSTANCE.getCodec().decodeLong(kv.getValueArray(), kv.getValueOffset(), SortOrder.getDefault());
+                    }
+                    // Return the total number of rows that have been deleted from the table. If auto commit is off,
+                    // the mutations will all be in the mutation state of the current connection.
+                    MutationState state = new MutationState(maxSize, maxSizeBytes, connection, totalRowCount);
+
+                    // Set the read metrics accumulated in the parent context so that they can be published when the mutations are committed.
+                    state.setReadMetricQueue(context.getReadMetricsQueue());
+
+                    return state;
+                } else {
+                    // Otherwise, we have to execute the query and produce the delete mutations in the single thread
+                    // producing the query results.
+                    return deleteRows(context, iterator, bestPlan, projectedTableRef, otherTableRefs);
+                }
+            } finally {
+                iterator.close();
+            }
+        }
+
+        @Override
+        public ExplainPlan getExplainPlan() throws SQLException {
+            ExplainPlan explainPlan = bestPlan.getExplainPlan();
+            List<String> queryPlanSteps = explainPlan.getPlanSteps();
+            ExplainPlanAttributes explainPlanAttributes =
+                explainPlan.getPlanStepsAsAttributes();
+            List<String> planSteps = Lists.newArrayListWithExpectedSize(queryPlanSteps.size()+1);
+            ExplainPlanAttributesBuilder newBuilder =
+                new ExplainPlanAttributesBuilder(explainPlanAttributes);
+            newBuilder.setAbstractExplainPlan("DELETE ROWS CLIENT SELECT");
+            planSteps.add("DELETE ROWS CLIENT SELECT");
+            planSteps.addAll(queryPlanSteps);
+            return new ExplainPlan(planSteps, newBuilder.build());
+        }
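+        // Illustrative sketch of the resulting plan (assumed output; MY_TABLE and the filter
+        // step are placeholders, not part of this change): the client step is rendered above
+        // the SELECT's own steps:
+        //
+        //   DELETE ROWS CLIENT SELECT
+        //   CLIENT PARALLEL 1-WAY FULL SCAN OVER MY_TABLE
+        //       SERVER FILTER BY ...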
+
+        @Override
+        public Long getEstimatedRowsToScan() throws SQLException {
+            return bestPlan.getEstimatedRowsToScan();
+        }
+
+        @Override
+        public Long getEstimatedBytesToScan() throws SQLException {
+            return bestPlan.getEstimatedBytesToScan();
+        }
+
+        @Override
+        public Long getEstimateInfoTimestamp() throws SQLException {
+            return bestPlan.getEstimateInfoTimestamp();
+        }
+
+        @Override
+        public QueryPlan getQueryPlan() {
+            return bestPlan;
+        }
+    }
+    
+    private static boolean isMaintainedOnClient(PTable table) {
+        // Test for not being local (rather than for being GLOBAL) so that this doesn't fail
+        // when called with our projected table.
+        return (table.getIndexType() != IndexType.LOCAL && (table.isTransactional() || table.isImmutableRows())) ||
+               (table.getIndexType() == IndexType.LOCAL && table.isTransactional() &&
+                table.getTransactionProvider().getTransactionProvider().isUnsupported(Feature.MAINTAIN_LOCAL_INDEX_ON_SERVER));
+    }
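+    // Summary of the predicate above, for reference:
+    //   - non-LOCAL index on a transactional or immutable-rows table -> maintained on client
+    //   - LOCAL index on a transactional table whose transaction provider cannot maintain
+    //     local indexes on the server -> maintained on client
+    //   - anything else -> maintained on server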
+    
+}
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/DropSequenceCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/DropSequenceCompiler.java
similarity index 100%
rename from phoenix-core/src/main/java/org/apache/phoenix/compile/DropSequenceCompiler.java
rename to phoenix-core-client/src/main/java/org/apache/phoenix/compile/DropSequenceCompiler.java
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/ExplainPlan.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ExplainPlan.java
similarity index 100%
rename from phoenix-core/src/main/java/org/apache/phoenix/compile/ExplainPlan.java
rename to phoenix-core-client/src/main/java/org/apache/phoenix/compile/ExplainPlan.java
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/ExplainPlanAttributes.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ExplainPlanAttributes.java
similarity index 100%
rename from phoenix-core/src/main/java/org/apache/phoenix/compile/ExplainPlanAttributes.java
rename to phoenix-core-client/src/main/java/org/apache/phoenix/compile/ExplainPlanAttributes.java
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
similarity index 100%
rename from phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
rename to phoenix-core-client/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionManager.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ExpressionManager.java
similarity index 100%
rename from phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionManager.java
rename to phoenix-core-client/src/main/java/org/apache/phoenix/compile/ExpressionManager.java
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionProjector.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ExpressionProjector.java
similarity index 100%
rename from phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionProjector.java
rename to phoenix-core-client/src/main/java/org/apache/phoenix/compile/ExpressionProjector.java
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/FromCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/FromCompiler.java
new file mode 100644
index 0000000000..104095ad4c
--- /dev/null
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/FromCompiler.java
@@ -0,0 +1,1233 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compile;
+
+import java.sql.SQLException;
+import java.sql.SQLFeatureNotSupportedException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.coprocessorclient.MetaDataProtocol;
+import org.apache.phoenix.coprocessorclient.MetaDataProtocol.MetaDataMutationResult;
+import org.apache.phoenix.coprocessorclient.MetaDataProtocol.MutationCode;
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.parse.AliasedNode;
+import org.apache.phoenix.parse.BindTableNode;
+import org.apache.phoenix.parse.ColumnDef;
+import org.apache.phoenix.parse.CreateTableStatement;
+import org.apache.phoenix.parse.DMLStatement;
+import org.apache.phoenix.parse.DerivedTableNode;
+import org.apache.phoenix.parse.FamilyWildcardParseNode;
+import org.apache.phoenix.parse.JoinTableNode;
+import org.apache.phoenix.parse.NamedTableNode;
+import org.apache.phoenix.parse.PFunction;
+import org.apache.phoenix.parse.PSchema;
+import org.apache.phoenix.parse.ParseNode;
+import org.apache.phoenix.parse.ParseNodeFactory;
+import org.apache.phoenix.parse.SelectStatement;
+import org.apache.phoenix.parse.SingleTableStatement;
+import org.apache.phoenix.parse.TableName;
+import org.apache.phoenix.parse.TableNode;
+import org.apache.phoenix.parse.TableNodeVisitor;
+import org.apache.phoenix.parse.TableWildcardParseNode;
+import org.apache.phoenix.parse.UDFParseNode;
+import org.apache.phoenix.parse.UseSchemaStatement;
+import org.apache.phoenix.parse.WildcardParseNode;
+import org.apache.phoenix.query.ConnectionQueryServices;
+import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.schema.AmbiguousColumnException;
+import org.apache.phoenix.schema.AmbiguousTableException;
+import org.apache.phoenix.schema.ColumnFamilyNotFoundException;
+import org.apache.phoenix.schema.ColumnNotFoundException;
+import org.apache.phoenix.schema.ColumnRef;
+import org.apache.phoenix.schema.FunctionNotFoundException;
+import org.apache.phoenix.schema.IndexNotFoundException;
+import org.apache.phoenix.schema.MetaDataClient;
+import org.apache.phoenix.schema.MetaDataEntityNotFoundException;
+import org.apache.phoenix.schema.PColumn;
+import org.apache.phoenix.schema.PColumnFamily;
+import org.apache.phoenix.schema.PColumnFamilyImpl;
+import org.apache.phoenix.schema.PColumnImpl;
+import org.apache.phoenix.schema.PName;
+import org.apache.phoenix.schema.PNameFactory;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTable.ImmutableStorageScheme;
+import org.apache.phoenix.schema.PTable.IndexType;
+import org.apache.phoenix.schema.PTable.QualifierEncodingScheme;
+import org.apache.phoenix.schema.PTableImpl;
+import org.apache.phoenix.schema.PTableKey;
+import org.apache.phoenix.schema.PTableType;
+import org.apache.phoenix.schema.ProjectedColumn;
+import org.apache.phoenix.schema.RowKeySchema;
+import org.apache.phoenix.schema.SchemaNotFoundException;
+import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.schema.TableNotFoundException;
+import org.apache.phoenix.schema.TableRef;
+import org.apache.phoenix.util.Closeables;
+import org.apache.phoenix.util.IndexUtil;
+import org.apache.phoenix.util.LogUtil;
+import org.apache.phoenix.util.SchemaUtil;
+import org.apache.phoenix.util.TransactionUtil;
+import org.apache.phoenix.monitoring.TableMetricsManager;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions;
+import org.apache.phoenix.thirdparty.com.google.common.collect.ArrayListMultimap;
+import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList;
+import org.apache.phoenix.thirdparty.com.google.common.collect.ListMultimap;
+import org.apache.phoenix.thirdparty.com.google.common.collect.Lists;
+
+import static org.apache.phoenix.monitoring.MetricType.NUM_METADATA_LOOKUP_FAILURES;
+
+/**
+ * Validates the FROM clause and builds a ColumnResolver for resolving column references.
+ *
+ * @since 0.1
+ */
+public class FromCompiler {
+    private static final Logger LOGGER = LoggerFactory.getLogger(FromCompiler.class);
+
+    public static final ColumnResolver EMPTY_TABLE_RESOLVER = new ColumnResolver() {
+
+        @Override
+        public List<TableRef> getTables() {
+            return Collections.singletonList(TableRef.EMPTY_TABLE_REF);
+        }
+
+        @Override
+        public List<PFunction> getFunctions() {
+            return Collections.emptyList();
+        }
+
+        @Override
+        public TableRef resolveTable(String schemaName, String tableName)
+                throws SQLException {
+            throw new TableNotFoundException(schemaName, tableName);
+        }
+
+        @Override
+        public ColumnRef resolveColumn(String schemaName, String tableName, String colName) throws SQLException {
+            throw new ColumnNotFoundException(schemaName, tableName, null, colName);
+        }
+        
+        @Override
+        public PFunction resolveFunction(String functionName) throws SQLException {
+            throw new FunctionNotFoundException(functionName);
+        }
+
+        @Override
+        public boolean hasUDFs() {
+            return false;
+        }
+
+        @Override
+        public PSchema resolveSchema(String schemaName) throws SQLException {
+            throw new SchemaNotFoundException(schemaName);
+        }
+
+        @Override
+        public List<PSchema> getSchemas() {
+            return Collections.emptyList();
+        }
+
+    };
+
+    public static ColumnResolver getResolverForCreation(final CreateTableStatement statement, final PhoenixConnection connection)
+            throws SQLException {
+        TableName baseTable = statement.getBaseTableName();
+        String schemaName;
+        if (SchemaUtil.isSchemaCheckRequired(statement.getTableType(),
+                connection.getQueryServices().getProps())) {
+            // To ensure schema set through properties or connection
+            // string exists before creating table
+            schemaName = statement.getTableName().getSchemaName() != null
+                    ? statement.getTableName().getSchemaName() : connection.getSchema();
+            if (schemaName != null) {
+                // Create the SchemaResolver only to check whether its constructor throws an
+                // exception; no exception means the schema exists.
+                new SchemaResolver(connection, schemaName, true);
+            }
+        }
+        if (baseTable == null) {
+            return EMPTY_TABLE_RESOLVER;
+        }
+        NamedTableNode tableNode = NamedTableNode.create(null, baseTable, Collections.<ColumnDef>emptyList());
+        // Always use non-tenant-specific connection here
+        try {
+            SingleTableColumnResolver visitor = new SingleTableColumnResolver(connection, tableNode, true);
+            return visitor;
+        } catch (TableNotFoundException e) {
+            // Used for mapped VIEW, since we won't be able to resolve that.
+            // Instead, we create a table with just the dynamic columns.
+            // A tenant-specific connection may not create a mapped VIEW.
+            if (connection.getTenantId() == null && statement.getTableType() == PTableType.VIEW) {
+                ConnectionQueryServices services = connection.getQueryServices();
+                boolean isNamespaceMapped = SchemaUtil.isNamespaceMappingEnabled(statement.getTableType(), connection.getQueryServices().getProps());
+                byte[] fullTableName = SchemaUtil.getPhysicalHBaseTableName(
+                    baseTable.getSchemaName(), baseTable.getTableName(), isNamespaceMapped).getBytes();
+                Table htable = null;
+                try {
+                    htable = services.getTable(fullTableName);
+                } catch (UnsupportedOperationException ignore) {
+                    throw e; // For Connectionless
+                } finally {
+                    if (htable != null) Closeables.closeQuietly(htable);
+                }
+                tableNode = NamedTableNode.create(null, baseTable, statement.getColumnDefs());
+                return new SingleTableColumnResolver(connection, tableNode, e.getTimeStamp(), new HashMap<String, UDFParseNode>(1), isNamespaceMapped);
+            }
+            throw e;
+        }
+    }
+
+    public static ColumnResolver getResolverForQuery(SelectStatement statement, PhoenixConnection connection)
+            throws SQLException{
+        return getResolverForQuery(statement, connection, false, null);
+    }
+
+    /**
+     * Iterates through the nodes in the FROM clause to build a column resolver used to look up a
+     * column given its name and alias.
+     *
+     * @param statement the select statement
+     * @param connection the Phoenix connection
+     * @param alwaysHitServer if true, resolve tables against the server rather than relying on
+     *            the client-side metadata cache
+     * @param mutatingTableName the table being mutated when this select feeds a mutation, or null
+     * @return the column resolver
+     * @throws SQLException if resolution fails
+     * @throws SQLFeatureNotSupportedException if unsupported constructs appear in the FROM clause
+     * @throws TableNotFoundException if a table name is not found in the schema
+     */
+    public static ColumnResolver getResolverForQuery(SelectStatement statement, PhoenixConnection connection, boolean alwaysHitServer, TableName mutatingTableName)
+            throws SQLException {
+        TableNode fromNode = statement.getFrom();
+        if (fromNode == null) {
+            return new ColumnResolverWithUDF(connection, 1, true, statement.getUdfParseNodes());
+        }
+        if (fromNode instanceof NamedTableNode) {
+            return new SingleTableColumnResolver(connection, (NamedTableNode) fromNode, true, 1, statement.getUdfParseNodes(), alwaysHitServer, mutatingTableName);
+        }
+        MultiTableColumnResolver visitor = new MultiTableColumnResolver(connection, 1, statement.getUdfParseNodes(), mutatingTableName);
+        fromNode.accept(visitor);
+        return visitor;
+    }
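+    // Illustrative usage sketch (the query text and phoenixConnection are assumed, not part of
+    // this change): resolving the FROM clause of a parsed query, then looking up a table.
+    //
+    //   SelectStatement select = new SQLParser("SELECT * FROM S.T").parseQuery();
+    //   ColumnResolver resolver = FromCompiler.getResolverForQuery(select, phoenixConnection);
+    //   TableRef ref = resolver.resolveTable("S", "T");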
+
+    /**
+     * Refreshes the inner state of a {@link MultiTableColumnResolver} for the given
+     * derivedTableNode when the derivedTableNode is changed by some SQL optimization.
+     * @param columnResolver the resolver to refresh; must be a MultiTableColumnResolver
+     * @param derivedTableNode the changed derived table node
+     * @return the refreshed TableRef for the derived table
+     * @throws SQLException if the derived table cannot be re-resolved
+     */
+    public static TableRef refreshDerivedTableNode(
+            ColumnResolver columnResolver, DerivedTableNode derivedTableNode) throws SQLException {
+        if (!(columnResolver instanceof MultiTableColumnResolver)) {
+            throw new UnsupportedOperationException();
+        }
+        return ((MultiTableColumnResolver)columnResolver).refreshDerivedTableNode(derivedTableNode);
+    }
+
+    public static ColumnResolver getResolverForSchema(UseSchemaStatement statement, PhoenixConnection connection)
+            throws SQLException {
+        return new SchemaResolver(connection, SchemaUtil.normalizeIdentifier(statement.getSchemaName()), true);
+    }
+
+    public static ColumnResolver getResolverForSchema(String schema, PhoenixConnection connection) throws SQLException {
+        return new SchemaResolver(connection, schema, true);
+    }
+
+    public static ColumnResolver getResolver(NamedTableNode tableNode, PhoenixConnection connection) throws SQLException {
+        SingleTableColumnResolver visitor = new SingleTableColumnResolver(connection, tableNode, true);
+        return visitor;
+    }
+
+    public static ColumnResolver getResolver(NamedTableNode tableNode, PhoenixConnection connection, boolean updateCacheImmediately) throws SQLException {
+        SingleTableColumnResolver visitor = new SingleTableColumnResolver(connection, tableNode, updateCacheImmediately);
+        return visitor;
+    }
+
+    public static ColumnResolver getResolver(NamedTableNode tableNode, PhoenixConnection connection, Map<String, UDFParseNode> udfParseNodes) throws SQLException {
+        SingleTableColumnResolver visitor =
+                new SingleTableColumnResolver(connection, tableNode, true, 0, udfParseNodes);
+        return visitor;
+    }
+
+    public static ColumnResolver getResolver(SingleTableStatement statement, PhoenixConnection connection)
+            throws SQLException {
+        SingleTableColumnResolver visitor = new SingleTableColumnResolver(connection, statement.getTable(), true);
+        return visitor;
+    }
+
+    public static ColumnResolver getIndexResolver(SingleTableStatement statement,
+                              PhoenixConnection connection) throws SQLException {
+        try {
+            return getResolver(statement, connection);
+        } catch (TableNotFoundException e) {
+            throw new IndexNotFoundException(e.getSchemaName(), e.getTableName(), e.getTimeStamp());
+        }
+    }
+
+    public static ColumnResolver getResolver(SingleTableStatement statement, PhoenixConnection connection, Map<String, UDFParseNode> udfParseNodes)
+            throws SQLException {
+        SingleTableColumnResolver visitor = new SingleTableColumnResolver(connection, statement.getTable(), true, 0, udfParseNodes);
+        return visitor;
+    }
+
+    public static ColumnResolver getResolverForCompiledDerivedTable(PhoenixConnection connection, TableRef tableRef, RowProjector projector)
+            throws SQLException {
+        List<PColumn> projectedColumns = new ArrayList<PColumn>();
+        PTable table = tableRef.getTable();
+        for (PColumn column : table.getColumns()) {
+            Expression sourceExpression = projector.getColumnProjector(column.getPosition()).getExpression();
+            PColumnImpl projectedColumn = new PColumnImpl(column.getName(), column.getFamilyName(),
+                    sourceExpression.getDataType(), sourceExpression.getMaxLength(), sourceExpression.getScale(), sourceExpression.isNullable(),
+                    column.getPosition(), sourceExpression.getSortOrder(), column.getArraySize(), column.getViewConstant(), column.isViewReferenced(), column.getExpressionStr(), column.isRowTimestamp(), column.isDynamic(), column.getColumnQualifierBytes(),
+                column.getTimestamp());
+            projectedColumns.add(projectedColumn);
+        }
+        PTable t = PTableImpl.builderWithColumns(table, projectedColumns)
+                .build();
+        return new SingleTableColumnResolver(connection, new TableRef(tableRef.getTableAlias(), t, tableRef.getLowerBoundTimeStamp(), tableRef.hasDynamicCols()));
+    }
+
+    public static ColumnResolver getResolver(TableRef tableRef)
+            throws SQLException {
+        SingleTableColumnResolver visitor = new SingleTableColumnResolver(tableRef);
+        return visitor;
+    }
+
+    public static ColumnResolver getResolver(PhoenixConnection connection, TableRef tableRef, Map<String, UDFParseNode> udfParseNodes)
+            throws SQLException {
+        SingleTableColumnResolver visitor = new SingleTableColumnResolver(connection, tableRef, udfParseNodes, null);
+        return visitor;
+    }
+
+    public static ColumnResolver getResolverForMutation(DMLStatement statement, PhoenixConnection connection)
+            throws SQLException {
+        /*
+         * We validate the meta data at commit time for mutations, as this allows us to do many UPSERT VALUES calls
+         * without hitting the server each time to check if the meta data is up-to-date.
+         */
+        SingleTableColumnResolver visitor = new SingleTableColumnResolver(connection, statement.getTable(), false, 0,  statement.getUdfParseNodes());
+        return visitor;
+    }
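+    // Illustrative sketch in plain JDBC (table T is assumed): because this resolver does not
+    // refresh the cache per statement, a loop of UPSERT VALUES avoids a metadata RPC for every
+    // row; stale metadata is instead detected when the mutations are committed.
+    //
+    //   try (PreparedStatement ps = conn.prepareStatement("UPSERT INTO T VALUES (?)")) {
+    //       for (int i = 0; i < 1000; i++) {
+    //           ps.setInt(1, i);
+    //           ps.executeUpdate();
+    //       }
+    //   }
+    //   conn.commit(); // meta data validated here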
+    
+    public static ColumnResolver getResolverForProjectedTable(PTable projectedTable, PhoenixConnection connection, Map<String, UDFParseNode> udfParseNodes) throws SQLException {
+        return new ProjectedTableColumnResolver(projectedTable, connection, udfParseNodes);
+    }
+
+    private static class SchemaResolver extends BaseColumnResolver {
+        private final List<PSchema> schemas;
+
+        public SchemaResolver(PhoenixConnection conn, String schemaName, boolean updateCacheImmediately)
+                throws SQLException {
+            super(conn, 0, null);
+            schemaName = connection.getSchema() != null && schemaName == null ? connection.getSchema() : schemaName;
+            schemas = ImmutableList.of(createSchemaRef(schemaName, updateCacheImmediately));
+        }
+
+        @Override
+        public List<TableRef> getTables() {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public TableRef resolveTable(String schemaName, String tableName) throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public ColumnRef resolveColumn(String schemaName, String tableName, String colName) throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public PSchema resolveSchema(String schemaName) throws SQLException {
+            return schemas.get(0);
+        }
+
+        @Override
+        public List<PSchema> getSchemas() {
+            return schemas;
+        }
+
+    }
+
+    private static class SingleTableColumnResolver extends BaseColumnResolver {
+    	private final List<TableRef> tableRefs;
+    	private final String alias;
+        private final List<PSchema> schemas;
+
+        public SingleTableColumnResolver(PhoenixConnection connection, NamedTableNode table, long timeStamp, Map<String, UDFParseNode> udfParseNodes, boolean isNamespaceMapped) throws SQLException {
+            super(connection, 0, false, udfParseNodes, null);
+            List<PColumnFamily> families = Lists.newArrayListWithExpectedSize(table.getDynamicColumns().size());
+            for (ColumnDef def : table.getDynamicColumns()) {
+                if (def.getColumnDefName().getFamilyName() != null) {
+                    families.add(new PColumnFamilyImpl(PNameFactory.newName(def.getColumnDefName().getFamilyName()), Collections.<PColumn>emptyList()));
+                }
+            }
+            Long scn = connection.getSCN();
+            String schema = table.getName().getSchemaName();
+            if (connection.getSchema() != null) {
+                schema = schema != null ? schema : connection.getSchema();
+            }
+
+            // Storage scheme and encoding scheme don't matter here since the PTable is being used only for the purposes of create table.
+            // The actual values of these two will be determined by the metadata client.
+            PName tenantId = connection.getTenantId();
+            PTableImpl.checkTenantId(tenantId);
+            String tableName = table.getName().getTableName();
+            PName name = PNameFactory.newName(SchemaUtil.getTableName(schema, tableName));
+            PTable theTable = new PTableImpl.Builder()
+                    .setTenantId(tenantId)
+                    .setName(name)
+                    .setKey(new PTableKey(tenantId, name.getString()))
+                    .setSchemaName(PNameFactory.newName(schema))
+                    .setTableName(PNameFactory.newName(tableName))
+                    .setType(PTableType.VIEW)
+                    .setViewType(PTable.ViewType.MAPPED)
+                    .setTimeStamp(scn == null ? HConstants.LATEST_TIMESTAMP : scn)
+                    .setPkColumns(Collections.emptyList())
+                    .setAllColumns(Collections.emptyList())
+                    .setRowKeySchema(RowKeySchema.EMPTY_SCHEMA)
+                    .setIndexes(Collections.emptyList())
+                    .setFamilyAttributes(families)
+                    .setPhysicalNames(Collections.emptyList())
+                    .setNamespaceMapped(isNamespaceMapped)
+                    .build();
+            theTable = this.addDynamicColumns(table.getDynamicColumns(), theTable);
+            alias = null;
+            tableRefs = ImmutableList.of(new TableRef(alias, theTable, timeStamp, !table.getDynamicColumns().isEmpty()));
+            schemas = ImmutableList.of(new PSchema(theTable.getSchemaName().toString(), timeStamp));
+        }
+
+        public SingleTableColumnResolver(PhoenixConnection connection, NamedTableNode tableNode, boolean updateCacheImmediately) throws SQLException {
+            this(connection, tableNode, updateCacheImmediately, 0, new HashMap<String,UDFParseNode>(1));
+        }
+
+        public SingleTableColumnResolver(PhoenixConnection connection, NamedTableNode tableNode,
+                boolean updateCacheImmediately, boolean alwaysHitServer) throws SQLException {
+            this(connection, tableNode, updateCacheImmediately, 0, new HashMap<String, UDFParseNode>(1), alwaysHitServer, null);
+        }
+
+        public SingleTableColumnResolver(PhoenixConnection connection, NamedTableNode tableNode,
+                boolean updateCacheImmediately, int tsAddition,
+                Map<String, UDFParseNode> udfParseNodes) throws SQLException {
+            this(connection, tableNode, updateCacheImmediately, tsAddition, udfParseNodes, false, null);
+        }
+
+        public SingleTableColumnResolver(PhoenixConnection connection, NamedTableNode tableNode,
+                boolean updateCacheImmediately, int tsAddition,
+                Map<String, UDFParseNode> udfParseNodes, boolean alwaysHitServer, TableName mutatingTableName) throws SQLException {
+            super(connection, tsAddition, updateCacheImmediately, udfParseNodes, mutatingTableName);
+            alias = tableNode.getAlias();
+            TableRef tableRef = createTableRef(tableNode.getName().getSchemaName(), tableNode, updateCacheImmediately, alwaysHitServer);
+            PSchema schema = new PSchema(tableRef.getTable().getSchemaName().toString());
+            tableRefs = ImmutableList.of(tableRef);
+            schemas = ImmutableList.of(schema);
+        }
+
+        public SingleTableColumnResolver(PhoenixConnection connection, TableRef tableRef) {
+            super(connection, 0, null);
+            alias = tableRef.getTableAlias();
+            tableRefs = ImmutableList.of(tableRef);
+            schemas = ImmutableList.of(new PSchema(tableRef.getTable().getSchemaName().toString()));
+        }
+
+        public SingleTableColumnResolver(PhoenixConnection connection, TableRef tableRef, Map<String, UDFParseNode> udfParseNodes, TableName mutatingTableName) throws SQLException {
+            super(connection, 0, false, udfParseNodes, mutatingTableName);
+            alias = tableRef.getTableAlias();
+            tableRefs = ImmutableList.of(tableRef);
+            schemas = ImmutableList.of(new PSchema(tableRef.getTable().getSchemaName().toString()));
+        }
+
+        public SingleTableColumnResolver(TableRef tableRef) throws SQLException {
+            super(null, 0, null);
+            alias = tableRef.getTableAlias();
+            tableRefs = ImmutableList.of(tableRef);
+            schemas = ImmutableList.of(new PSchema(tableRef.getTable().getSchemaName().toString()));
+        }
+
+
+        @Override
+        public List<TableRef> getTables() {
+            return tableRefs;
+        }
+
+        @Override
+        public List<PFunction> getFunctions() {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public TableRef resolveTable(String schemaName, String tableName)
+                throws SQLException {
+            TableRef tableRef = tableRefs.get(0);
+            /*
+             * The only case we can definitely verify is when both a schemaName and a tableName
+             * are provided. Otherwise, the tableName might be a column family. In this case,
+             * this will be validated by resolveColumn.
+             */
+            if (schemaName != null || tableName != null) {
+                String resolvedTableName = tableRef.getTable().getTableName().getString();
+                String resolvedSchemaName = tableRef.getTable().getSchemaName().getString();
+                if (schemaName != null && tableName != null) {
+                    if (!(schemaName.equals(resolvedSchemaName)
+                            && tableName.equals(resolvedTableName))
+                            && !schemaName.equals(alias)) {
+                        throw new TableNotFoundException(schemaName, tableName);
+                    }
+                }
+            }
+            return tableRef;
+        }
+
+        @Override
+        public ColumnRef resolveColumn(String schemaName, String tableName,
+                String colName) throws SQLException {
+            TableRef tableRef = tableRefs.get(0);
+            boolean resolveCF = false;
+            if (schemaName != null || tableName != null) {
+                String resolvedTableName = tableRef.getTable().getTableName().getString();
+                String resolvedSchemaName = tableRef.getTable().getSchemaName().getString();
+                if (schemaName != null && tableName != null) {
+                    if (!(schemaName.equals(resolvedSchemaName)
+                            && tableName.equals(resolvedTableName))) {
+                        if (!(resolveCF = schemaName.equals(alias))) {
+                            throw new ColumnNotFoundException(schemaName, tableName, null, colName);
+                        }
+                    }
+                } else { // schemaName == null && tableName != null
+                    if (tableName != null && !tableName.equals(alias)
+                            && (!tableName.equals(resolvedTableName) || !resolvedSchemaName.equals(""))) {
+                        resolveCF = true;
+                    }
+                }
+            }
+            PColumn column = resolveCF
+                    ? tableRef.getTable().getColumnFamily(tableName).getPColumnForColumnName(colName)
+                    : tableRef.getTable().getColumnForColumnName(colName);
+            return new ColumnRef(tableRef, column.getPosition());
+        }
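+        // Illustrative example (CF1, COL and T are assumed names): for
+        //   SELECT CF1.COL FROM T
+        // resolveColumn(null, "CF1", "COL") cannot know up front whether CF1 is a table alias
+        // or a column family, so when CF1 matches neither the resolved table nor its alias the
+        // lookup falls through to getColumnFamily("CF1") above.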
+
+        @Override
+        public PSchema resolveSchema(String schemaName) throws SQLException {
+            return schemas.get(0);
+        }
+
+        @Override
+        public List<PSchema> getSchemas() {
+            return schemas;
+        }
+    }
+
+    private static class ColumnResolverWithUDF implements ColumnResolver {
+        protected final PhoenixConnection connection;
+        protected final MetaDataClient client;
+        // Fudge factor to add to the current time we calculate. We need this when we do a SELECT
+        // on Windows because the millisecond timestamp granularity is so coarse that we sometimes
+        // won't get back the data we just upserted.
+        protected final int tsAddition;
+        protected final Map<String, PFunction> functionMap;
+        protected List<PFunction> functions;
+        // PHOENIX-3823: Force a cache update when the mutating table and the select table are
+        // the same (UpsertSelect, or Delete with a select on the same table).
+
+        private ColumnResolverWithUDF(PhoenixConnection connection, int tsAddition,
+                                      boolean updateCacheImmediately, Map<String,
+                UDFParseNode> udfParseNodes) throws SQLException {
+            this.connection = connection;
+            this.client = connection == null ? null : new MetaDataClient(connection);
+            this.tsAddition = tsAddition;
+            functionMap = new HashMap<String, PFunction>(1);
+            if (udfParseNodes.isEmpty()) {
+                functions = Collections.<PFunction> emptyList();
+            } else {
+                functions = createFunctionRef(new ArrayList<String>(udfParseNodes.keySet()),
+                        updateCacheImmediately);
+                for (PFunction function : functions) {
+                    functionMap.put(function.getFunctionName(), function);
+                }
+            }
+        }
+
+        private ColumnResolverWithUDF(PhoenixConnection connection, int tsAddition) {
+            this.connection = connection;
+            this.client = connection == null ? null : new MetaDataClient(connection);
+            this.tsAddition = tsAddition;
+            functionMap = new HashMap<String, PFunction>(1);
+            this.functions = Collections.<PFunction>emptyList();
+        }
+
+        @Override
+        public List<PFunction> getFunctions() {
+            return functions;
+        }
+
+        private List<PFunction> createFunctionRef(List<String> functionNames,
+                                                  boolean updateCacheImmediately)
+                throws SQLException {
+            long timeStamp = QueryConstants.UNSET_TIMESTAMP;
+            int numFunctions = functionNames.size();
+            List<PFunction> functionsFound = new ArrayList<PFunction>(functionNames.size());
+            if (updateCacheImmediately || connection.getAutoCommit()) {
+                getFunctionFromCache(functionNames, functionsFound, true);
+                if (functionNames.isEmpty()) {
+                    return functionsFound;
+                }
+                MetaDataMutationResult result = client.updateCache(functionNames);
+                timeStamp = result.getMutationTime();
+                functionsFound = result.getFunctions();
+                if (functionNames.size() != functionsFound.size()) {
+                    throw new FunctionNotFoundException("Some of the functions in " +
+                            functionNames.toString()+" are not found");
+                }
+            } else {
+                getFunctionFromCache(functionNames, functionsFound, false);
+                // We always attempt to update the cache in the event of a FunctionNotFoundException
+                MetaDataMutationResult result = null;
+                if (!functionNames.isEmpty()) {
+                    result = client.updateCache(functionNames);
+                }
+                if (result != null) {
+                    if (!result.getFunctions().isEmpty()) {
+                        functionsFound.addAll(result.getFunctions());
+                    }
+                    if (result.wasUpdated()) {
+                        timeStamp = result.getMutationTime();
+                    }
+                }
+                if (functionsFound.size() != numFunctions) {
+                    throw new FunctionNotFoundException("Some of the functions in " +
+                            functionNames.toString() + " are not found", timeStamp);
+                }
+            }
+            if (timeStamp != QueryConstants.UNSET_TIMESTAMP) {
+                timeStamp += tsAddition;
+            }
+
+            if (LOGGER.isDebugEnabled() && timeStamp != QueryConstants.UNSET_TIMESTAMP) {
+                LOGGER.debug(LogUtil.addCustomAnnotations("Re-resolved stale function " +
+                        functionNames.toString() + " at timestamp " + timeStamp, connection));
+            }
+            return functionsFound;
+        }
+
+        private void getFunctionFromCache(List<String> functionNames,
+                                          List<PFunction> functionsFound,
+                                          boolean getOnlyTemporaryFunctions) {
+            Iterator<String> iterator = functionNames.iterator();
+            while (iterator.hasNext()) {
+                PFunction function = null;
+                String functionName = iterator.next();
+                try {
+                    function = connection.getMetaDataCache().getFunction(
+                            new PTableKey(connection.getTenantId(), functionName));
+                } catch (FunctionNotFoundException e1) {
+                    if (connection.getTenantId() != null) { // Check with null tenantId next
+                        try {
+                            function = connection.getMetaDataCache().getFunction(
+                                    new PTableKey(null, functionName));
+                        } catch (FunctionNotFoundException ignored) {
+                        }
+                    }
+                }
+                if (function != null) {
+                    if (getOnlyTemporaryFunctions) {
+                        if (function.isTemporaryFunction()) {
+                            functionsFound.add(function);
+                            iterator.remove();
+                        }
+                    } else {
+                        functionsFound.add(function);
+                        iterator.remove();
+                    }
+                }
+            }
+        }
+
+        @Override
+        public PFunction resolveFunction(String functionName) throws SQLException {
+            PFunction function = functionMap.get(functionName);
+            if (function == null) {
+                throw new FunctionNotFoundException(functionName);
+            }
+            return function;
+        }
+
+        @Override
+        public boolean hasUDFs() {
+            return !functions.isEmpty();
+        }
+
+        @Override
+        public List<TableRef> getTables() {
+            return Collections.singletonList(TableRef.EMPTY_TABLE_REF);
+        }
+
+        @Override
+        public TableRef resolveTable(String schemaName, String tableName)
+                throws SQLException {
+            throw new TableNotFoundException(schemaName, tableName);
+        }
+
+        @Override
+        public ColumnRef resolveColumn(String schemaName, String tableName, String colName)
+                throws SQLException {
+            throw new ColumnNotFoundException(schemaName, tableName, null, colName);
+        }
+
+        @Override
+        public PSchema resolveSchema(String schemaName) throws SQLException {
+            throw new SchemaNotFoundException(schemaName);
+        }
+
+        @Override
+        public List<PSchema> getSchemas() {
+            return Collections.emptyList();
+        }
+
+    }
+
+    private abstract static class BaseColumnResolver extends ColumnResolverWithUDF {
+        protected TableName mutatingTableName = null;
+
+        private BaseColumnResolver(PhoenixConnection connection, int tsAddition,
+                                   TableName mutatingTableName) {
+            super(connection, tsAddition);
+            this.mutatingTableName = mutatingTableName;
+        }
+
+        private BaseColumnResolver(PhoenixConnection connection, int tsAddition,
+                                   boolean updateCacheImmediately,
+                                   Map<String, UDFParseNode> udfParseNodes,
+                                   TableName mutatingTableName) throws SQLException {
+            super(connection, tsAddition, updateCacheImmediately, udfParseNodes);
+            this.mutatingTableName = mutatingTableName;
+        }
+
+        protected PSchema createSchemaRef(String schemaName, boolean updateCacheImmediately) throws SQLException {
+            long timeStamp = QueryConstants.UNSET_TIMESTAMP;
+            PSchema theSchema = null;
+            MetaDataClient client = new MetaDataClient(connection);
+            try {
+                if (updateCacheImmediately) {
+                    MetaDataMutationResult result = client.updateCache(schemaName, true);
+                    timeStamp = TransactionUtil.getResolvedTimestamp(connection, result);
+                    theSchema = result.getSchema();
+                    if (theSchema == null) {
+                        throw new SchemaNotFoundException(schemaName, timeStamp);
+                    }
+                } else {
+                    try {
+                        theSchema = connection.getSchema(new PTableKey(null, schemaName));
+                    } catch (SchemaNotFoundException e1) {
+                    }
+                    // We always attempt to update the cache in the event of a
+                    // SchemaNotFoundException
+                    if (theSchema == null) {
+                        MetaDataMutationResult result = client.updateCache(schemaName, true);
+                        if (result.wasUpdated()) {
+                            timeStamp = TransactionUtil.getResolvedTimestamp(connection, result);
+                            theSchema = result.getSchema();
+                        }
+                    }
+                    if (theSchema == null) {
+                        throw new SchemaNotFoundException(schemaName, timeStamp);
+                    }
+                }
+                return theSchema;
+            } catch(Throwable e) {
+                TableMetricsManager.updateMetricsForSystemCatalogTableMethod(null,
+                        NUM_METADATA_LOOKUP_FAILURES, 1);
+                throw e;
+            }
+        }
+
+        protected TableRef createTableRef(String connectionSchemaName, NamedTableNode tableNode,
+            boolean updateCacheImmediately, boolean alwaysHitServer) throws SQLException {
+            String tableName = tableNode.getName().getTableName();
+            String schemaName = tableNode.getName().getSchemaName();
+            schemaName = connection.getSchema() != null && schemaName == null ? connection.getSchema() : schemaName;
+            long timeStamp = QueryConstants.UNSET_TIMESTAMP;
+            String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
+            PName tenantId = connection.getTenantId();
+            PTable theTable = null;
+            boolean error = false;
+
+            try {
+                if (updateCacheImmediately) {
+                    // Force a cache update when the mutating table and the referenced table
+                    // are the same, except for meta tables.
+                    if (!QueryConstants.SYSTEM_SCHEMA_NAME.equals(schemaName) &&
+                            mutatingTableName != null && tableNode != null &&
+                            tableNode.getName().equals(mutatingTableName)) {
+                        alwaysHitServer = true;
+                    }
+
+                    try {
+                        MetaDataMutationResult result = client.updateCache(tenantId, schemaName, tableName, alwaysHitServer);
+                        timeStamp = TransactionUtil.getResolvedTimestamp(connection, result);
+                        theTable = result.getTable();
+                        MutationCode mutationCode = result.getMutationCode();
+                        if (theTable == null) {
+                            throw new TableNotFoundException(schemaName, tableName, timeStamp);
+                        }
+                    } catch (Throwable e) {
+                        error = true;
+                        throw e;
+                    }
+                } else {
+                    try {
+                        theTable = connection.getTable(new PTableKey(tenantId, fullTableName));
+                    } catch (TableNotFoundException e1) {
+                        if (tenantId != null) { // Check with null tenantId next
+                            try {
+                                theTable = connection.getTable(new PTableKey(null, fullTableName));
+                            } catch (TableNotFoundException e2) {
+                            }
+                        }
+                    }
+                    // We always attempt to update the cache in the event of a TableNotFoundException
+                    try {
+                        if (theTable == null) {
+                            MetaDataMutationResult result = client.updateCache(schemaName, tableName);
+                            if (result.wasUpdated()) {
+                                timeStamp = TransactionUtil.getResolvedTimestamp(connection, result);
+                            }
+                            theTable = result.getTable();
+                        }
+                        if (theTable == null) {
+                            throw new TableNotFoundException(schemaName, tableName, timeStamp);
+                        }
+                    } catch (Throwable e) {
+                        error = true;
+                        throw e;
+                    }
+                }
+                // Add any dynamic columns to the table declaration
+                List<ColumnDef> dynamicColumns = tableNode.getDynamicColumns();
+                theTable = addDynamicColumns(dynamicColumns, theTable);
+                if (timeStamp != QueryConstants.UNSET_TIMESTAMP) {
+                    timeStamp += tsAddition;
+                }
+                TableRef tableRef = new TableRef(tableNode.getAlias(), theTable, timeStamp, !dynamicColumns.isEmpty());
+                if (LOGGER.isDebugEnabled() && timeStamp != QueryConstants.UNSET_TIMESTAMP) {
+                    LOGGER.debug(LogUtil.addCustomAnnotations(
+                            "Re-resolved stale table " + fullTableName + " with seqNum "
+                                    + tableRef.getTable().getSequenceNumber() + " at timestamp "
+                                    + tableRef.getTable().getTimeStamp() + " with "
+                                    + tableRef.getTable().getColumns().size() + " columns: "
+                                    + tableRef.getTable().getColumns(), connection));
+                }
+                return tableRef;
+            } finally {
+                if (error) {
+                    TableMetricsManager.updateMetricsForSystemCatalogTableMethod(fullTableName,
+                            NUM_METADATA_LOOKUP_FAILURES, 1);
+                }
+            }
+        }
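+        // Illustrative example (table T is assumed): for a statement such as
+        //   DELETE FROM T WHERE ID IN (SELECT ID FROM T WHERE ...)
+        // the select side resolves T with mutatingTableName equal to T, so the branch above
+        // forces alwaysHitServer and refreshes the cached PTable before the mutation is
+        // compiled (see PHOENIX-3823).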
+
+        protected PTable addDynamicColumns(List<ColumnDef> dynColumns, PTable theTable)
+                throws SQLException {
+            if (!dynColumns.isEmpty()) {
+                List<PColumn> existingColumns = theTable.getColumns();
+                // Need to skip the salting column, as it's handled in the PTable builder call below
+                List<PColumn> allcolumns = new ArrayList<>(
+                        theTable.getBucketNum() == null ? existingColumns :
+                                existingColumns.subList(1, existingColumns.size()));
+                // Position is still computed including the salting column
+                int position = existingColumns.size();
+                PName defaultFamilyName = PNameFactory.newName(SchemaUtil.getEmptyColumnFamily(theTable));
+                for (ColumnDef dynColumn : dynColumns) {
+                    PName familyName = defaultFamilyName;
+                    PName name = PNameFactory.newName(dynColumn.getColumnDefName().getColumnName());
+                    String family = dynColumn.getColumnDefName().getFamilyName();
+                    if (family != null) {
+                        theTable.getColumnFamily(family); // Verifies that column family exists
+                        familyName = PNameFactory.newName(family);
+                    }
+                    allcolumns.add(new PColumnImpl(name, familyName, dynColumn.getDataType(), dynColumn.getMaxLength(),
+                            dynColumn.getScale(), dynColumn.isNull(), position, dynColumn.getSortOrder(), dynColumn.getArraySize(), null, false, dynColumn.getExpression(), false, true, Bytes.toBytes(dynColumn.getColumnDefName().getColumnName()),
+                        HConstants.LATEST_TIMESTAMP));
+                    position++;
+                }
+                theTable = PTableImpl.builderWithColumns(theTable, allcolumns)
+                        .build();
+            }
+            return theTable;
+        }
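+        // Illustrative SQL (T, EXTRA and ID are assumed names): dynamic columns are declared
+        // inline at query time, e.g.
+        //   SELECT EXTRA FROM T (EXTRA VARCHAR) WHERE ID = 1
+        // and addDynamicColumns() splices EXTRA into the resolved PTable so the projection
+        // compiles as if the column were part of the table metadata.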
+    }
+
+    private static class MultiTableColumnResolver extends BaseColumnResolver implements TableNodeVisitor<Void> {
+        protected final ListMultimap<String, TableRef> tableMap;
+        protected final List<TableRef> tables;
+        private String connectionSchemaName;
+
+        private MultiTableColumnResolver(PhoenixConnection connection, int tsAddition) {
+            super(connection, tsAddition, null);
+            tableMap = ArrayListMultimap.<String, TableRef> create();
+            tables = Lists.newArrayList();
+            try {
+                connectionSchemaName = connection.getSchema();
+            } catch (SQLException e) {
+                // ignore
+            }
+        }
+
+        private MultiTableColumnResolver(PhoenixConnection connection, int tsAddition, Map<String, UDFParseNode> udfParseNodes, TableName mutatingTableName) throws SQLException {
+            super(connection, tsAddition, false, udfParseNodes, mutatingTableName);
+            tableMap = ArrayListMultimap.<String, TableRef> create();
+            tables = Lists.newArrayList();
+        }
+
+        @Override
+        public List<TableRef> getTables() {
+            return tables;
+        }
+
+        @Override
+        public Void visit(BindTableNode boundTableNode) throws SQLException {
+            throw new SQLFeatureNotSupportedException();
+        }
+
+        @Override
+        public Void visit(JoinTableNode joinNode) throws SQLException {
+            joinNode.getLHS().accept(this);
+            joinNode.getRHS().accept(this);
+            return null;
+        }
+
+        @Override
+        public Void visit(NamedTableNode tableNode) throws SQLException {
+            String alias = tableNode.getAlias();
+            TableRef tableRef = createTableRef(connectionSchemaName, tableNode, true, false);
+            PTable theTable = tableRef.getTable();
+
+            if (alias != null) {
+                tableMap.put(alias, tableRef);
+            }
+
+            String name = theTable.getName().getString();
+            // Avoid having one name mapped to two identical TableRefs.
+            if (alias == null || !alias.equals(name)) {
+                tableMap.put(name, tableRef);
+            }
+            tables.add(tableRef);
+            return null;
+        }
+
+        @Override
+        public Void visit(DerivedTableNode subselectNode) throws SQLException {
+            List<AliasedNode> selectNodes = subselectNode.getSelect().getSelect();
+            List<PColumn> columns = new ArrayList<PColumn>();
+            int position = 0;
+            for (AliasedNode aliasedNode : selectNodes) {
+                String alias = aliasedNode.getAlias();
+                if (alias == null) {
+                    ParseNode node = aliasedNode.getNode();
+                    if (node instanceof WildcardParseNode
+                            || node instanceof TableWildcardParseNode
+                            || node instanceof FamilyWildcardParseNode)
+                        throw new SQLFeatureNotSupportedException("Wildcard in subqueries not supported.");
+
+                    alias = SchemaUtil.normalizeIdentifier(node.getAlias());
+                }
+                if (alias == null) {
+                    // Use position as column name for anonymous columns, which can be
+                    // referenced by an outer wild-card select.
+                    alias = String.valueOf(position);
+                }
+                PName name = PNameFactory.newName(alias);
+                PColumnImpl column = new PColumnImpl(PNameFactory.newName(alias),
+                        PNameFactory.newName(QueryConstants.DEFAULT_COLUMN_FAMILY),
+                        null, 0, 0, true, position++, SortOrder.ASC, null, null, false, null, false, false, name.getBytes(),
+                    HConstants.LATEST_TIMESTAMP);
+                columns.add(column);
+            }
+            PTable t = new PTableImpl.Builder()
+                    .setType(PTableType.SUBQUERY)
+                    .setTimeStamp(MetaDataProtocol.MIN_TABLE_TIMESTAMP)
+                    .setIndexDisableTimestamp(0L)
+                    .setSequenceNumber(PTable.INITIAL_SEQ_NUM)
+                    .setImmutableRows(false)
+                    .setDisableWAL(false)
+                    .setMultiTenant(false)
+                    .setStoreNulls(false)
+                    .setUpdateCacheFrequency(0)
+                    .setNamespaceMapped(SchemaUtil.isNamespaceMappingEnabled(PTableType.SUBQUERY,
+                            connection.getQueryServices().getProps()))
+                    .setAppendOnlySchema(false)
+                    .setImmutableStorageScheme(ImmutableStorageScheme.ONE_CELL_PER_COLUMN)
+                    .setQualifierEncodingScheme(QualifierEncodingScheme.NON_ENCODED_QUALIFIERS)
+                    .setBaseColumnCount(QueryConstants.BASE_TABLE_BASE_COLUMN_COUNT)
+                    .setEncodedCQCounter(PTable.EncodedCQCounter.NULL_COUNTER)
+                    .setUseStatsForParallelization(true)
+                    .setExcludedColumns(ImmutableList.of())
+                    .setSchemaName(PName.EMPTY_NAME)
+                    .setTableName(PName.EMPTY_NAME)
+                    .setRowKeyOrderOptimizable(false)
+                    .setIndexes(Collections.emptyList())
+                    .setPhysicalNames(ImmutableList.of())
+                    .setColumns(columns)
+                    .build();
+
+            String alias = subselectNode.getAlias();
+            TableRef tableRef = new TableRef(alias, t, MetaDataProtocol.MIN_TABLE_TIMESTAMP, false);
+            tableMap.put(alias, tableRef);
+            tables.add(tableRef);
+            return null;
+        }
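+        // Illustrative example: anonymous select items in a derived table are named by
+        // position, so in
+        //   SELECT * FROM (SELECT A, B + 1 FROM T) AS D
+        // the second item becomes column "1" of the synthetic subquery PTable, which an outer
+        // wildcard can then project.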
+
+        /**
+         * Invokes {@link #visit(DerivedTableNode)} again to refresh the inner state.
+         * @param derivedTableNode the changed derived table node
+         * @return the refreshed TableRef, or null if the node had not been resolved before
+         * @throws SQLException if re-resolution fails
+         */
+        public TableRef refreshDerivedTableNode(DerivedTableNode derivedTableNode) throws SQLException {
+            String tableAlias = derivedTableNode.getAlias();
+            List<TableRef> removedTableRefs = this.tableMap.removeAll(tableAlias);
+            if (removedTableRefs == null || removedTableRefs.isEmpty()) {
+                return null;
+            }
+            tables.removeAll(removedTableRefs);
+            this.visit(derivedTableNode);
+            return this.resolveTable(null, tableAlias);
+        }
+
+        private static class ColumnFamilyRef {
+            private final TableRef tableRef;
+            private final PColumnFamily family;
+
+            ColumnFamilyRef(TableRef tableRef, PColumnFamily family) {
+                this.tableRef = tableRef;
+                this.family = family;
+            }
+
+            public TableRef getTableRef() {
+                return tableRef;
+            }
+
+            public PColumnFamily getFamily() {
+                return family;
+            }
+        }
+
+        @Override
+        public TableRef resolveTable(String schemaName, String tableName) throws SQLException {
+            String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
+            List<TableRef> tableRefs = tableMap.get(fullTableName);
+            if (tableRefs.size() == 0) {
+                throw new TableNotFoundException(fullTableName);
+            } else if (tableRefs.size() > 1) {
+                throw new AmbiguousTableException(tableName);
+            } else {
+                return tableRefs.get(0);
+            }
+        }
+
+        private ColumnFamilyRef resolveColumnFamily(String tableName, String cfName) throws SQLException {
+            if (tableName == null) {
+                ColumnFamilyRef theColumnFamilyRef = null;
+                Iterator<TableRef> iterator = tables.iterator();
+                while (iterator.hasNext()) {
+                    TableRef tableRef = iterator.next();
+                    try {
+                        PColumnFamily columnFamily = tableRef.getTable().getColumnFamily(cfName);
+                        if (columnFamily == null) {
+                            throw new TableNotFoundException(cfName);
+                        }
+                        theColumnFamilyRef = new ColumnFamilyRef(tableRef, columnFamily);
+                    } catch (ColumnFamilyNotFoundException e) {
+                        // Ignore and keep searching the remaining tables.
+                    }
+                }
+                if (theColumnFamilyRef != null) { return theColumnFamilyRef; }
+                throw new TableNotFoundException(cfName);
+            } else {
+                TableRef tableRef = null;
+                try {
+                    tableRef = resolveTable(null, tableName);
+                } catch (TableNotFoundException e) {
+                    return resolveColumnFamily(null, cfName);
+                }
+                PColumnFamily columnFamily = tableRef.getTable().getColumnFamily(cfName);
+                return new ColumnFamilyRef(tableRef, columnFamily);
+            }
+        }
+
+        @Override
+        public ColumnRef resolveColumn(String schemaName, String tableName, String colName) throws SQLException {
+            if (tableName == null) {
+                int theColumnPosition = -1;
+                TableRef theTableRef = null;
+                Iterator<TableRef> iterator = tables.iterator();
+                while (iterator.hasNext()) {
+                    TableRef tableRef = iterator.next();
+                    try {
+                        PColumn column = tableRef.getTable().getColumnForColumnName(colName);
+                        if (theTableRef != null) { throw new AmbiguousColumnException(colName); }
+                        theTableRef = tableRef;
+                        theColumnPosition = column.getPosition();
+                    } catch (ColumnNotFoundException e) {
+                        // Ignore and keep searching the remaining tables.
+                    }
+                }
+                if (theTableRef != null) { return new ColumnRef(theTableRef, theColumnPosition); }
+                throw new ColumnNotFoundException(schemaName, tableName, null, colName);
+            } else {
+                try {
+                    TableRef tableRef = resolveTable(schemaName, tableName);
+                    PColumn column = tableRef.getTable().getColumnForColumnName(colName);
+                    return new ColumnRef(tableRef, column.getPosition());
+                } catch (TableNotFoundException e) {
+                    TableRef theTableRef = null;
+                    PColumn theColumn = null;
+                    PColumnFamily theColumnFamily = null;
+                    if (schemaName != null) {
+                        try {
+                            // Try schemaName as the tableName and use tableName as column family name
+                            theTableRef = resolveTable(null, schemaName);
+                            theColumnFamily = theTableRef.getTable().getColumnFamily(tableName);
+                            theColumn = theColumnFamily.getPColumnForColumnName(colName);
+                        } catch (MetaDataEntityNotFoundException e2) {
+                            // Not found; fall through and try tableName as a column family.
+                        }
+                    }
+                    if (theColumn == null) {
+                        // Try using the tableName as a columnFamily reference instead
+                        // and resolve column in each column family.
+                        Iterator<TableRef> iterator = tables.iterator();
+                        while (iterator.hasNext()) {
+                            TableRef tableRef = iterator.next();
+                            try {
+                                PColumnFamily columnFamily = tableRef.getTable().getColumnFamily(tableName);
+                                PColumn column = columnFamily.getPColumnForColumnName(colName);
+                                if (theColumn != null) {
+                                    throw new AmbiguousColumnException(colName);
+                                }
+                                theTableRef = tableRef;
+                                theColumnFamily = columnFamily;
+                                theColumn = column;
+                            } catch (MetaDataEntityNotFoundException e1) {
+                                // Ignore and keep searching the remaining tables.
+                            }
+                        }
+                        if (theColumn == null) { 
+                            throw new ColumnNotFoundException(colName);
+                        }
+                    }
+                    ColumnFamilyRef cfRef = new ColumnFamilyRef(theTableRef, theColumnFamily);
+                    return new ColumnRef(cfRef.getTableRef(), theColumn.getPosition());
+                }
+            }
+        }
+
+        @Override
+        public PSchema resolveSchema(String schemaName) throws SQLException {
+            // TODO: schema resolution is not implemented here; always returns null.
+            return null;
+        }
+
+        @Override
+        public List<PSchema> getSchemas() {
+            // TODO: schema resolution is not implemented here; always returns null.
+            return null;
+        }
+    }
+    
+    private static class ProjectedTableColumnResolver extends MultiTableColumnResolver {
+        private final boolean isIndex;
+        private final List<TableRef> theTableRefs;
+        private final Map<ColumnRef, Integer> columnRefMap;
+        private ProjectedTableColumnResolver(PTable projectedTable, PhoenixConnection conn, Map<String, UDFParseNode> udfParseNodes) throws SQLException {
+            super(conn, 0, udfParseNodes, null);
+            Preconditions.checkArgument(projectedTable.getType() == PTableType.PROJECTED);
+            this.isIndex = projectedTable.getIndexType() == IndexType.LOCAL
+                    || IndexUtil.isGlobalIndex(projectedTable);
+            this.columnRefMap = new HashMap<ColumnRef, Integer>();
+            long ts = Long.MAX_VALUE;
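+            // Register each distinct source table of the projected columns, tracking the
+            // lowest lower-bound timestamp seen so it can be used for the combined TableRef.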
+            for (int i = projectedTable.getBucketNum() == null ? 0 : 1; i < projectedTable.getColumns().size(); i++) {
+                PColumn column = projectedTable.getColumns().get(i);
+                ColumnRef colRef = ((ProjectedColumn) column).getSourceColumnRef();
+                TableRef tableRef = colRef.getTableRef();
+                if (!tables.contains(tableRef)) {
+                    String alias = tableRef.getTableAlias();
+                    if (alias != null) {
+                        this.tableMap.put(alias, tableRef);
+                    }
+                    String name = tableRef.getTable().getName().getString();
+                    if (alias == null || !alias.equals(name)) {
+                        tableMap.put(name, tableRef);
+                    }
+                    tables.add(tableRef);
+                    if (tableRef.getLowerBoundTimeStamp() < ts) {
+                        ts = tableRef.getLowerBoundTimeStamp();
+                    }
+                }
+                this.columnRefMap.put(new ColumnRef(tableRef, colRef.getColumnPosition()), column.getPosition());
+            }
+            this.theTableRefs = ImmutableList.of(new TableRef(ParseNodeFactory.createTempAlias(), projectedTable, ts, false));
+            
+        }
+        
+        @Override
+        public List<TableRef> getTables() {
+            return theTableRefs;
+        }
+        
+        @Override
+        public ColumnRef resolveColumn(String schemaName, String tableName, String colName) throws SQLException {
+            ColumnRef colRef;
+            try {
+                colRef = super.resolveColumn(schemaName, tableName, colName);
+            } catch (ColumnNotFoundException e) {
+                // This could be a ColumnRef for index data column.
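+                // If the index can serve uncovered queries, try resolving the column
+                // against the parent data table instead.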
+                TableRef tableRef = isIndex ? super.getTables().get(0)
+                        : super.resolveTable(schemaName, tableName);
+                if (IndexUtil.shouldIndexBeUsedForUncoveredQuery(tableRef)) {
+                    try {
+                        TableRef parentTableRef = super.resolveTable(
+                                tableRef.getTable().getSchemaName().getString(),
+                                tableRef.getTable().getParentTableName().getString());
+                        colRef = new ColumnRef(parentTableRef,
+                                IndexUtil.getDataColumnFamilyName(colName),
+                                IndexUtil.getDataColumnName(colName));
+                    } catch (TableNotFoundException te) {
+                        throw e;
+                    }
+                } else {
+                    throw e;
+                }
+            }
+            Integer position = columnRefMap.get(colRef);
+            if (position == null)
+                throw new ColumnNotFoundException(schemaName, tableName, null, colName);
+            
+            return new ColumnRef(theTableRefs.get(0), position);
+        }
+    }
+}
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/GroupByCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/GroupByCompiler.java
new file mode 100644
index 0000000000..695ba0d8e2
--- /dev/null
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/GroupByCompiler.java
@@ -0,0 +1,476 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compile;
+
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+
+import net.jcip.annotations.Immutable;
+
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.phoenix.compile.ExplainPlanAttributes
+    .ExplainPlanAttributesBuilder;
+import org.apache.phoenix.compile.OrderPreservingTracker.Info;
+import org.apache.phoenix.compile.OrderPreservingTracker.Ordering;
+import org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants;
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.exception.SQLExceptionInfo;
+import org.apache.phoenix.expression.CoerceExpression;
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.parse.AliasedNode;
+import org.apache.phoenix.parse.DistinctCountParseNode;
+import org.apache.phoenix.parse.HintNode.Hint;
+import org.apache.phoenix.parse.ParseNode;
+import org.apache.phoenix.parse.SelectStatement;
+import org.apache.phoenix.schema.AmbiguousColumnException;
+import org.apache.phoenix.schema.ColumnNotFoundException;
+import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.schema.types.PVarbinary;
+import org.apache.phoenix.util.IndexUtil;
+
+import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList;
+import org.apache.phoenix.thirdparty.com.google.common.collect.Lists;
+
+/**
+ * Validates the GROUP BY clause and builds a {@link GroupBy} instance to encapsulate
+ * the group by expressions.
+ *
+ * @since 0.1
+ */
+public class GroupByCompiler {
+    @Immutable
+    public static class GroupBy {
+        private final List<Expression> expressions;
+        private final List<Expression> keyExpressions;
+        private final boolean isOrderPreserving;
+        private final int orderPreservingColumnCount;
+        private final boolean isUngroupedAggregate;
+        private final List<Info> orderPreservingTrackInfos;
+        public static final GroupByCompiler.GroupBy EMPTY_GROUP_BY = new GroupBy(new GroupByBuilder()) {
+            @Override
+            public GroupBy compile(StatementContext context, QueryPlan innerQueryPlan, Expression whereExpression) throws SQLException {
+                return this;
+            }
+            
+            @Override
+            public void explain(List<String> planSteps, Integer limit) {
+            }
+
+            @Override
+            public void explain(List<String> planSteps, Integer limit,
+                    ExplainPlanAttributesBuilder explainPlanAttributesBuilder) {
+            }
+
+            @Override
+            public String getScanAttribName() {
+                return null;
+            }
+        };
+
+        public static final GroupByCompiler.GroupBy UNGROUPED_GROUP_BY =
+                new GroupBy(new GroupByBuilder()
+                        .setIsOrderPreserving(true)
+                        .setIsUngroupedAggregate(true)) {
+            @Override
+            public GroupBy compile(StatementContext context, QueryPlan innerQueryPlan, Expression whereExpression) throws SQLException {
+                return this;
+            }
+
+            @Override
+            public void explain(List<String> planSteps, Integer limit) {
+                planSteps.add("    SERVER AGGREGATE INTO SINGLE ROW");
+            }
+
+            @Override
+            public void explain(List<String> planSteps, Integer limit,
+                    ExplainPlanAttributesBuilder explainPlanAttributesBuilder) {
+                planSteps.add("    SERVER AGGREGATE INTO SINGLE ROW");
+                if (explainPlanAttributesBuilder != null) {
+                    explainPlanAttributesBuilder.setServerAggregate(
+                        "SERVER AGGREGATE INTO SINGLE ROW");
+                }
+            }
+
+            @Override
+            public String getScanAttribName() {
+                return BaseScannerRegionObserverConstants.UNGROUPED_AGG;
+            }
+        };
+        
+        private GroupBy(GroupByBuilder builder) {
+            this.expressions = ImmutableList.copyOf(builder.expressions);
+            this.keyExpressions = builder.expressions == builder.keyExpressions ? 
+                    this.expressions : builder.keyExpressions == null ? null :
+                        ImmutableList.copyOf(builder.keyExpressions);
+            this.isOrderPreserving = builder.isOrderPreserving;
+            this.orderPreservingColumnCount = builder.orderPreservingColumnCount;
+            this.isUngroupedAggregate = builder.isUngroupedAggregate;
+            this.orderPreservingTrackInfos = builder.orderPreservingTrackInfos;
+        }
+        
+        public List<Expression> getExpressions() {
+            return expressions;
+        }
+        
+        public List<Expression> getKeyExpressions() {
+            return keyExpressions;
+        }
+        
+        public String getScanAttribName() {
+            if (isUngroupedAggregate) {
+                return BaseScannerRegionObserverConstants.UNGROUPED_AGG;
+            } else if (isOrderPreserving) {
+                return BaseScannerRegionObserverConstants.KEY_ORDERED_GROUP_BY_EXPRESSIONS;
+            } else {
+                return BaseScannerRegionObserverConstants.UNORDERED_GROUP_BY_EXPRESSIONS;
+            }
+        }
+        
+        public boolean isEmpty() {
+            return expressions.isEmpty();
+        }
+        
+        public boolean isOrderPreserving() {
+            return isOrderPreserving;
+        }
+        
+        public boolean isUngroupedAggregate() {
+            return isUngroupedAggregate;
+        }
+
+        public int getOrderPreservingColumnCount() {
+            return orderPreservingColumnCount;
+        }
+
+        public List<Info> getOrderPreservingTrackInfos() {
+            return orderPreservingTrackInfos;
+        }
+
+        public GroupBy compile(StatementContext context, QueryPlan innerQueryPlan, Expression whereExpression) throws SQLException {
+            boolean isOrderPreserving = this.isOrderPreserving;
+            int orderPreservingColumnCount = 0;
+            if (isOrderPreserving) {
+                OrderPreservingTracker tracker = new OrderPreservingTracker(
+                        context,
+                        GroupBy.EMPTY_GROUP_BY,
+                        Ordering.UNORDERED,
+                        expressions.size(),
+                        null,
+                        innerQueryPlan,
+                        whereExpression);
+                for (int i = 0; i < expressions.size(); i++) {
+                    Expression expression = expressions.get(i);
+                    tracker.track(expression);
+                }
+                
+                // This is true if the GROUP BY is composed of only PK columns. We further check here that
+                // there are no "gaps" in the PK columns positions used (i.e. we start with the first PK
+                // column and use each subsequent one in PK order).
+                isOrderPreserving = tracker.isOrderPreserving();
+                orderPreservingColumnCount = tracker.getOrderPreservingColumnCount();
+                if (isOrderPreserving) {
+                    // Reorder the GROUP BY expressions to follow the PK column order.
+                    List<Info> orderPreservingTrackInfos = tracker.getOrderPreservingTrackInfos();
+                    List<Expression> newExpressions = Info.extractExpressions(orderPreservingTrackInfos);
+                    assert newExpressions.size() == expressions.size();
+                    return new GroupBy.GroupByBuilder(this)
+                               .setIsOrderPreserving(isOrderPreserving)
+                               .setOrderPreservingColumnCount(orderPreservingColumnCount)
+                               .setExpressions(newExpressions)
+                               .setKeyExpressions(newExpressions)
+                               .setOrderPreservingTrackInfos(orderPreservingTrackInfos)
+                               .build();
+                }
+            }
+
+            if (isUngroupedAggregate) {
+                return new GroupBy.GroupByBuilder(this)
+                           .setIsOrderPreserving(isOrderPreserving)
+                           .setOrderPreservingColumnCount(orderPreservingColumnCount)
+                           .build();
+            }
+            List<Expression> expressions = Lists.newArrayListWithExpectedSize(this.expressions.size());
+            List<Expression> keyExpressions = expressions;
+            List<Pair<Integer,Expression>> groupBys = Lists.newArrayListWithExpectedSize(this.expressions.size());
+            for (int i = 0; i < this.expressions.size(); i++) {
+                Expression expression = this.expressions.get(i);
+                groupBys.add(new Pair<Integer,Expression>(i,expression));
+            }
+            /*
+             * If we're not ordered along the PK axis, our coprocessor needs to collect all distinct groups within
+             * a region, sort them, and hold on to them until the scan completes.
+             * Put fixed length nullables at the end, so that we can represent null by the absence of the trailing
+             * value in the group by key. If there is more than one, we'll need to convert the ones not at the end
+             * into a Decimal so that we can use an empty byte array as our representation for null (which correctly
+             * maintains the sort order). We convert the Decimal back to the appropriate type (Integer or Long) when
+             * it's retrieved from the result set.
+             * 
+             * More specifically, order into the following buckets:
+             *   1) non nullable fixed width
+             *   2) variable width
+             *   3) nullable fixed width
+             * Within each bucket, order based on the column position in the schema. Putting the fixed width values
+             * in the beginning optimizes access to subsequent values.
+             */
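+            /*
+             * For example (hypothetical expressions): with i INTEGER NOT NULL, vc VARCHAR
+             * and d DATE (nullable), GROUP BY vc, d, i is reordered by the sort below to
+             * (i, vc, d): non-nullable fixed width first, then variable width, then
+             * nullable fixed width.
+             */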
+            Collections.sort(groupBys, new Comparator<Pair<Integer,Expression>>() {
+                @Override
+                public int compare(Pair<Integer,Expression> gb1, Pair<Integer,Expression> gb2) {
+                    Expression e1 = gb1.getSecond();
+                    Expression e2 = gb2.getSecond();
+                    PDataType t1 = e1.getDataType();
+                    PDataType t2 = e2.getDataType();
+                    boolean isFixed1 = t1.isFixedWidth();
+                    boolean isFixed2 = t2.isFixedWidth();
+                    boolean isFixedNullable1 = e1.isNullable() && isFixed1;
+                    boolean isFixedNullable2 = e2.isNullable() && isFixed2;
+                    boolean oae1 = onlyAtEndType(e1);
+                    boolean oae2 = onlyAtEndType(e2);
+                    if (oae1 == oae2) {
+                        if (isFixedNullable1 == isFixedNullable2) {
+                            if (isFixed1 == isFixed2) {
+                                // Not strictly necessary, but forces the order to match the schema
+                                // column order (with PK columns before value columns).
+                                //return o1.getColumnPosition() - o2.getColumnPosition();
+                                return gb1.getFirst() - gb2.getFirst();
+                            } else if (isFixed1) {
+                                return -1;
+                            } else {
+                                return 1;
+                            }
+                        } else if (isFixedNullable1) {
+                            return 1;
+                        } else {
+                            return -1;
+                        }
+                    } else if (oae1) {
+                        return 1;
+                    } else {
+                        return -1;
+                    }
+                }
+            });
+            boolean foundOnlyAtEndType = false;
+            for (Pair<Integer,Expression> groupBy : groupBys) {
+                Expression e = groupBy.getSecond();
+                if (onlyAtEndType(e)) {
+                    if (foundOnlyAtEndType) {
+                        throw new SQLExceptionInfo.Builder(SQLExceptionCode.UNSUPPORTED_GROUP_BY_EXPRESSIONS)
+                        .setMessage(e.toString()).build().buildException();
+                    }
+                    foundOnlyAtEndType = true;
+                }
+                expressions.add(e);
+            }
+            for (int i = expressions.size()-2; i >= 0; i--) {
+                Expression expression = expressions.get(i);
+                PDataType keyType = getGroupByDataType(expression);
+                if (keyType == expression.getDataType()) {
+                    continue;
+                }
+                // Copy expressions only when keyExpressions will be different than expressions
+                if (keyExpressions == expressions) {
+                    keyExpressions = new ArrayList<Expression>(expressions);
+                }
+                // Wrap the expression in one that coerces it to the required type. This
+                // gives us a way of expressing null as an empty key when more than one
+                // fixed-width, nullable type is used in a GROUP BY clause.
+                keyExpressions.set(i, CoerceExpression.create(expression, keyType));
+            }
+
+            return new GroupBy.GroupByBuilder()
+                    .setIsOrderPreserving(isOrderPreserving)
+                    .setExpressions(expressions)
+                    .setKeyExpressions(keyExpressions)
+                    .build();
+        }
+        
+        public static class GroupByBuilder {
+            private boolean isOrderPreserving;
+            private int orderPreservingColumnCount;
+            private List<Expression> expressions = Collections.emptyList();
+            private List<Expression> keyExpressions = Collections.emptyList();
+            private boolean isUngroupedAggregate;
+            private List<Info> orderPreservingTrackInfos = Collections.emptyList();
+
+            public GroupByBuilder() {
+            }
+            
+            public GroupByBuilder(GroupBy groupBy) {
+                this.isOrderPreserving = groupBy.isOrderPreserving;
+                this.orderPreservingColumnCount = groupBy.orderPreservingColumnCount;
+                this.expressions = groupBy.expressions;
+                this.keyExpressions = groupBy.keyExpressions;
+                this.isUngroupedAggregate = groupBy.isUngroupedAggregate;
+            }
+            
+            public GroupByBuilder setExpressions(List<Expression> expressions) {
+                this.expressions = expressions;
+                return this;
+            }
+            
+            public GroupByBuilder setKeyExpressions(List<Expression> keyExpressions) {
+                this.keyExpressions = keyExpressions;
+                return this;
+            }
+            
+            public GroupByBuilder setIsOrderPreserving(boolean isOrderPreserving) {
+                this.isOrderPreserving = isOrderPreserving;
+                return this;
+            }
+
+            public GroupByBuilder setIsUngroupedAggregate(boolean isUngroupedAggregate) {
+                this.isUngroupedAggregate = isUngroupedAggregate;
+                return this;
+            }
+
+            public GroupByBuilder setOrderPreservingColumnCount(int orderPreservingColumnCount) {
+                this.orderPreservingColumnCount = orderPreservingColumnCount;
+                return this;
+            }
+
+            public GroupByBuilder setOrderPreservingTrackInfos(List<Info> orderPreservingTrackInfos) {
+                this.orderPreservingTrackInfos = orderPreservingTrackInfos;
+                return this;
+            }
+
+            public GroupBy build() {
+                return new GroupBy(this);
+            }
+        }
+
+        public void explain(List<String> planSteps, Integer limit) {
+            explainUtil(planSteps, limit, null);
+        }
+
+        private void explainUtil(List<String> planSteps, Integer limit,
+                ExplainPlanAttributesBuilder explainPlanAttributesBuilder) {
+            String serverAggregate;
+            if (isUngroupedAggregate) {
+                serverAggregate = "SERVER AGGREGATE INTO SINGLE ROW";
+            } else {
+                String groupLimit = limit == null ? "" : (" LIMIT " + limit
+                    + " GROUP" + (limit == 1 ? "" : "S"));
+                if (isOrderPreserving) {
+                    serverAggregate = "SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY "
+                        + getExpressions() + groupLimit;
+                } else {
+                    serverAggregate = "SERVER AGGREGATE INTO DISTINCT ROWS BY "
+                        + getExpressions() + groupLimit;
+                }
+            }
+            planSteps.add("    " + serverAggregate);
+            if (explainPlanAttributesBuilder != null) {
+                explainPlanAttributesBuilder.setServerAggregate(serverAggregate);
+            }
+        }
+
+        public void explain(List<String> planSteps, Integer limit,
+                ExplainPlanAttributesBuilder explainPlanAttributesBuilder) {
+            explainUtil(planSteps, limit, explainPlanAttributesBuilder);
+        }
+    }
+
+    /**
+     * Validates and compiles the GROUP BY clause of the given statement.
+     * @param context query context kept between compilation of different query clauses
+     * @param statement SQL statement being compiled
+     * @return the {@link GroupBy} instance encapsulating the group by clause
+     * @throws ColumnNotFoundException if a column name could not be resolved
+     * @throws AmbiguousColumnException if an unaliased column name is ambiguous across multiple tables
+     */
+    public static GroupBy compile(StatementContext context, SelectStatement statement) throws SQLException {
+        List<ParseNode> groupByNodes = statement.getGroupBy();
+        /**
+         * A DISTINCT query can use an aggregate plan if there is no GROUP BY.
+         * Otherwise, we need to insert a step after the merge that dedups.
+         * ORDER BY is only allowed on columns in the SELECT DISTINCT list.
+         */
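+        /*
+         * For example (hypothetical queries): SELECT DISTINCT a, b FROM t is compiled as
+         * if it were SELECT a, b FROM t GROUP BY a, b, while SELECT COUNT(DISTINCT a)
+         * FROM t is compiled as an ungrouped aggregate keyed on a.
+         */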
+        boolean isUngroupedAggregate = false;
+        if (groupByNodes.isEmpty()) {
+            if (statement.isAggregate()) {
+                // do not optimize if
+                // 1. we were asked not to optimize
+                // 2. there's any HAVING clause
+                // TODO: PHOENIX-2989 suggests some ways to optimize the latter case
+                if (statement.getHint().hasHint(Hint.RANGE_SCAN) ||
+                        statement.getHaving() != null) {
+                    return GroupBy.UNGROUPED_GROUP_BY;
+                }
+                groupByNodes = Lists.newArrayListWithExpectedSize(statement.getSelect().size());
+                for (AliasedNode aliasedNode : statement.getSelect()) {
+                    if (aliasedNode.getNode() instanceof DistinctCountParseNode) {
+                        // only add children of DistinctCount nodes
+                        groupByNodes.addAll(aliasedNode.getNode().getChildren());
+                    } else {
+                        // if we found anything else, do not attempt any further optimization
+                        return GroupBy.UNGROUPED_GROUP_BY;
+                    }
+                }
+                isUngroupedAggregate = true;
+            } else if (statement.isDistinct()) {
+                groupByNodes = Lists.newArrayListWithExpectedSize(statement.getSelect().size());
+                for (AliasedNode aliasedNode : statement.getSelect()) {
+                    // for DISTINCT, add all select expressions as group by conditions
+                    groupByNodes.add(aliasedNode.getNode());
+                }
+            } else {
+                return GroupBy.EMPTY_GROUP_BY;
+            }
+        }
+
+        // Accumulate expressions in GROUP BY
+        ExpressionCompiler compiler =
+                new ExpressionCompiler(context, GroupBy.EMPTY_GROUP_BY);
+        List<Expression> expressions = Lists.newArrayListWithExpectedSize(groupByNodes.size());
+        for (int i = 0; i < groupByNodes.size(); i++) {
+            ParseNode node = groupByNodes.get(i);
+            Expression expression = node.accept(compiler);
+            if (!expression.isStateless()) {
+                if (compiler.isAggregate()) {
+                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.AGGREGATE_IN_GROUP_BY)
+                        .setMessage(expression.toString()).build().buildException();
+                }
+                expressions.add(expression);
+            }
+            compiler.reset();
+        }
+        
+        if (expressions.isEmpty()) {
+            return GroupBy.EMPTY_GROUP_BY;
+        }
+        GroupBy groupBy = new GroupBy.GroupByBuilder()
+                .setIsOrderPreserving(OrderByCompiler.isTrackOrderByPreserving(statement))
+                .setExpressions(expressions).setKeyExpressions(expressions)
+                .setIsUngroupedAggregate(isUngroupedAggregate).build();
+        return groupBy;
+    }
+    
+    private static boolean onlyAtEndType(Expression expression) {
+        // Due to the encoding schema of these types, they may only be
+        // used once in a group by and are located at the end of the
+        // group by row key.
+        PDataType type = getGroupByDataType(expression);
+        return type.isArrayType() || type == PVarbinary.INSTANCE;
+    }
+    
+    private static PDataType getGroupByDataType(Expression expression) {
+        return IndexUtil.getIndexColumnDataType(expression.isNullable(), expression.getDataType());
+    }
+    
+    private GroupByCompiler() {
+    }
+}
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/HavingCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/HavingCompiler.java
similarity index 100%
rename from phoenix-core/src/main/java/org/apache/phoenix/compile/HavingCompiler.java
rename to phoenix-core-client/src/main/java/org/apache/phoenix/compile/HavingCompiler.java
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/IndexExpressionCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/IndexExpressionCompiler.java
similarity index 100%
rename from phoenix-core/src/main/java/org/apache/phoenix/compile/IndexExpressionCompiler.java
rename to phoenix-core-client/src/main/java/org/apache/phoenix/compile/IndexExpressionCompiler.java
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/IndexStatementRewriter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/IndexStatementRewriter.java
similarity index 100%
rename from phoenix-core/src/main/java/org/apache/phoenix/compile/IndexStatementRewriter.java
rename to phoenix-core-client/src/main/java/org/apache/phoenix/compile/IndexStatementRewriter.java
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/JoinCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
new file mode 100644
index 0000000000..c3295d2b0f
--- /dev/null
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
@@ -0,0 +1,1589 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compile;
+
+import static org.apache.phoenix.query.QueryConstants.BASE_TABLE_BASE_COLUMN_COUNT;
+import static org.apache.phoenix.schema.PTable.ImmutableStorageScheme.ONE_CELL_PER_COLUMN;
+import static org.apache.phoenix.schema.PTable.QualifierEncodingScheme.NON_ENCODED_QUALIFIERS;
+
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.ListIterator;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList;
+
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.phoenix.compile.OrderByCompiler.OrderBy;
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.exception.SQLExceptionInfo;
+import org.apache.phoenix.expression.AndExpression;
+import org.apache.phoenix.expression.CoerceExpression;
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.expression.LiteralExpression;
+import org.apache.phoenix.expression.function.MinAggregateFunction;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixStatement;
+import org.apache.phoenix.parse.AliasedNode;
+import org.apache.phoenix.parse.AndBooleanParseNodeVisitor;
+import org.apache.phoenix.parse.AndParseNode;
+import org.apache.phoenix.parse.AndRewriterBooleanParseNodeVisitor;
+import org.apache.phoenix.parse.BindTableNode;
+import org.apache.phoenix.parse.ColumnDef;
+import org.apache.phoenix.parse.ColumnParseNode;
+import org.apache.phoenix.parse.ComparisonParseNode;
+import org.apache.phoenix.parse.ConcreteTableNode;
+import org.apache.phoenix.parse.DerivedTableNode;
+import org.apache.phoenix.parse.EqualParseNode;
+import org.apache.phoenix.parse.HintNode.Hint;
+import org.apache.phoenix.parse.JoinTableNode;
+import org.apache.phoenix.parse.JoinTableNode.JoinType;
+import org.apache.phoenix.parse.NamedTableNode;
+import org.apache.phoenix.parse.OrderByNode;
+import org.apache.phoenix.parse.ParseNode;
+import org.apache.phoenix.parse.ParseNodeFactory;
+import org.apache.phoenix.parse.SelectStatement;
+import org.apache.phoenix.parse.StatelessTraverseAllParseNodeVisitor;
+import org.apache.phoenix.parse.TableName;
+import org.apache.phoenix.parse.TableNode;
+import org.apache.phoenix.parse.TableNodeVisitor;
+import org.apache.phoenix.parse.TableWildcardParseNode;
+import org.apache.phoenix.schema.ColumnNotFoundException;
+import org.apache.phoenix.schema.ColumnRef;
+import org.apache.phoenix.schema.IndexUncoveredDataColumnRef;
+import org.apache.phoenix.schema.MetaDataEntityNotFoundException;
+import org.apache.phoenix.schema.PColumn;
+import org.apache.phoenix.schema.PNameFactory;
+import org.apache.phoenix.schema.PName;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTable.IndexType;
+import org.apache.phoenix.schema.PTableImpl;
+import org.apache.phoenix.schema.PTableType;
+import org.apache.phoenix.schema.ProjectedColumn;
+import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.schema.TableRef;
+import org.apache.phoenix.schema.types.PBoolean;
+import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.schema.types.PDate;
+import org.apache.phoenix.schema.types.PDecimal;
+import org.apache.phoenix.schema.types.PDouble;
+import org.apache.phoenix.schema.types.PInteger;
+import org.apache.phoenix.schema.types.PLong;
+import org.apache.phoenix.schema.types.PSmallint;
+import org.apache.phoenix.schema.types.PTimestamp;
+import org.apache.phoenix.schema.types.PTinyint;
+import org.apache.phoenix.schema.types.PVarbinary;
+import org.apache.phoenix.schema.types.PVarchar;
+import org.apache.phoenix.util.EncodedColumnsUtil;
+import org.apache.phoenix.util.IndexUtil;
+import org.apache.phoenix.util.ParseNodeUtil;
+import org.apache.phoenix.util.ParseNodeUtil.RewriteResult;
+import org.apache.phoenix.util.SchemaUtil;
+
+import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions;
+import org.apache.phoenix.thirdparty.com.google.common.collect.Lists;
+import org.apache.phoenix.thirdparty.com.google.common.collect.Sets;
+
+
+public class JoinCompiler {
+
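+    /**
+     * The physical join strategies considered by the compiler: a hash join building the
+     * hash table from the left or the right side, or a sort-merge join.
+     */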
+    public enum Strategy {
+        HASH_BUILD_LEFT,
+        HASH_BUILD_RIGHT,
+        SORT_MERGE,
+    }
+
+    public enum ColumnRefType {
+        JOINLOCAL,
+        GENERAL,
+    }
+
+    private final PhoenixStatement phoenixStatement;
+    /**
+     * The original join sql for current {@link JoinCompiler}.
+     */
+    private final SelectStatement originalJoinSelectStatement;
+    private final ColumnResolver origResolver;
+    private final boolean useStarJoin;
+    private final Map<ColumnRef, ColumnRefType> columnRefs;
+    private final Map<ColumnRef, ColumnParseNode> columnNodes;
+    private final boolean useSortMergeJoin;
+
+    private JoinCompiler(PhoenixStatement statement, SelectStatement select, ColumnResolver resolver) {
+        this.phoenixStatement = statement;
+        this.originalJoinSelectStatement = select;
+        this.origResolver = resolver;
+        this.useStarJoin = !select.getHint().hasHint(Hint.NO_STAR_JOIN);
+        this.columnRefs = new HashMap<ColumnRef, ColumnRefType>();
+        this.columnNodes = new HashMap<ColumnRef, ColumnParseNode>();
+        this.useSortMergeJoin = select.getHint().hasHint(Hint.USE_SORT_MERGE_JOIN);
+    }
+
+    /**
+     * Compiles the join query into a {@link JoinTable}. After this method is called, the
+     * inner state of the resolver argument may be changed by
+     * {@link FromCompiler#refreshDerivedTableNode} as part of SQL optimization;
+     * see also {@link Table#pruneSubselectAliasedNodes()}.
+     * @param statement the statement being compiled
+     * @param select the join select statement
+     * @param resolver the column resolver for the select statement
+     * @return the compiled {@link JoinTable}
+     * @throws SQLException
+     */
+    public static JoinTable compile(PhoenixStatement statement, SelectStatement select, ColumnResolver resolver) throws SQLException {
+        JoinCompiler compiler = new JoinCompiler(statement, select, resolver);
+        JoinTableConstructor constructor = compiler.new JoinTableConstructor();
+        Pair<Table, List<JoinSpec>> res = select.getFrom().accept(constructor);
+        JoinTable joinTable = res.getSecond() == null ? compiler.new JoinTable(res.getFirst()) : compiler.new JoinTable(res.getFirst(), res.getSecond());
+        if (select.getWhere() != null) {
+            joinTable.pushDownFilter(select.getWhere());
+        }
+
+        ColumnRefParseNodeVisitor generalRefVisitor = new ColumnRefParseNodeVisitor(resolver, statement.getConnection());
+        ColumnRefParseNodeVisitor joinLocalRefVisitor = new ColumnRefParseNodeVisitor(resolver, statement.getConnection());
+
+        joinTable.pushDownColumnRefVisitors(generalRefVisitor, joinLocalRefVisitor);
+
+        ParseNodeUtil.applyParseNodeVisitor(select, generalRefVisitor, false);
+
+        compiler.columnNodes.putAll(joinLocalRefVisitor.getColumnRefMap());
+        compiler.columnNodes.putAll(generalRefVisitor.getColumnRefMap());
+
+        for (ColumnRef ref : generalRefVisitor.getColumnRefMap().keySet()) {
+            compiler.columnRefs.put(ref, ColumnRefType.GENERAL);
+        }
+        for (ColumnRef ref : joinLocalRefVisitor.getColumnRefMap().keySet()) {
+            if (!compiler.columnRefs.containsKey(ref))
+                compiler.columnRefs.put(ref, ColumnRefType.JOINLOCAL);
+        }
+
+        /**
+         * After the {@link ColumnRefParseNodeVisitor}s have been pushed down, prune the
+         * columns of each {@link JoinCompiler.Table} for which
+         * {@link JoinCompiler.Table#isSubselect()} is true.
+         */
+        joinTable.pruneSubselectAliasedNodes();
+        return joinTable;
+    }
+
+    private class JoinTableConstructor implements TableNodeVisitor<Pair<Table, List<JoinSpec>>> {
+
+        private TableRef resolveTable(String alias, TableName name) throws SQLException {
+            if (alias != null)
+                return origResolver.resolveTable(null, alias);
+
+            return origResolver.resolveTable(name.getSchemaName(), name.getTableName());
+        }
+
+        @Override
+        public Pair<Table, List<JoinSpec>> visit(BindTableNode boundTableNode) throws SQLException {
+            TableRef tableRef = resolveTable(boundTableNode.getAlias(), boundTableNode.getName());
+            boolean isWildCard = isWildCardSelectForTable(originalJoinSelectStatement.getSelect(), tableRef, origResolver);
+            Table table = new Table(boundTableNode, isWildCard, Collections.<ColumnDef>emptyList(), boundTableNode.getTableSamplingRate(), tableRef);
+            return new Pair<Table, List<JoinSpec>>(table, null);
+        }
+
+        @Override
+        public Pair<Table, List<JoinSpec>> visit(JoinTableNode joinNode) throws SQLException {
+            Pair<Table, List<JoinSpec>> lhs = joinNode.getLHS().accept(this);
+            Pair<Table, List<JoinSpec>> rhs = joinNode.getRHS().accept(this);
+            JoinTable joinTable = rhs.getSecond() == null ? new JoinTable(rhs.getFirst()) : new JoinTable(rhs.getFirst(), rhs.getSecond());
+            List<JoinSpec> joinSpecs = lhs.getSecond();
+            if (joinSpecs == null) {
+                joinSpecs = new ArrayList<JoinSpec>();
+            }
+            joinSpecs.add(new JoinSpec(joinNode.getType(), joinNode.getOnNode(), joinTable, joinNode.isSingleValueOnly(), origResolver));
+
+            return new Pair<Table, List<JoinSpec>>(lhs.getFirst(), joinSpecs);
+        }
+
+        @Override
+        public Pair<Table, List<JoinSpec>> visit(NamedTableNode namedTableNode)
+                throws SQLException {
+            TableRef tableRef = resolveTable(namedTableNode.getAlias(), namedTableNode.getName());
+            boolean isWildCard = isWildCardSelectForTable(originalJoinSelectStatement.getSelect(), tableRef, origResolver);
+            Table table = new Table(namedTableNode, isWildCard, namedTableNode.getDynamicColumns(), namedTableNode.getTableSamplingRate(), tableRef);
+            return new Pair<Table, List<JoinSpec>>(table, null);
+        }
+
+        @Override
+        public Pair<Table, List<JoinSpec>> visit(DerivedTableNode subselectNode)
+                throws SQLException {
+            TableRef tableRef = resolveTable(subselectNode.getAlias(), null);
+            boolean isWildCard = isWildCardSelectForTable(originalJoinSelectStatement.getSelect(), tableRef, origResolver);
+            Table table = new Table(subselectNode, isWildCard, tableRef);
+            return new Pair<Table, List<JoinSpec>>(table, null);
+        }
+    }
+
+    public class JoinTable {
+        private final Table leftTable;
+        private final List<JoinSpec> joinSpecs;
+        private List<ParseNode> postFilters;
+        private final List<Table> allTables;
+        private final List<TableRef> allTableRefs;
+        private final boolean allLeftJoin;
+        private final boolean isPrefilterAccepted;
+        private final List<JoinSpec> prefilterAcceptedTables;
+
+        private JoinTable(Table table) {
+            this.leftTable = table;
+            this.joinSpecs = Collections.<JoinSpec>emptyList();
+            this.postFilters = Collections.EMPTY_LIST;
+            this.allTables = Collections.<Table>singletonList(table);
+            this.allTableRefs = Collections.<TableRef>singletonList(table.getTableRef());
+            this.allLeftJoin = false;
+            this.isPrefilterAccepted = true;
+            this.prefilterAcceptedTables = Collections.<JoinSpec>emptyList();
+        }
+
+        private JoinTable(Table table, List<JoinSpec> joinSpecs) {
+            this.leftTable = table;
+            this.joinSpecs = joinSpecs;
+            this.postFilters = new ArrayList<ParseNode>();
+            this.allTables = new ArrayList<Table>();
+            this.allTableRefs = new ArrayList<TableRef>();
+            this.allTables.add(table);
+            boolean allLeftJoin = true;
+            int lastRightJoinIndex = -1;
+            boolean hasFullJoin = false;
+            for (int i = 0; i < joinSpecs.size(); i++) {
+                JoinSpec joinSpec = joinSpecs.get(i);
+                this.allTables.addAll(joinSpec.getRhsJoinTable().getAllTables());
+                allLeftJoin = allLeftJoin && joinSpec.getType() == JoinType.Left;
+                hasFullJoin = hasFullJoin || joinSpec.getType() == JoinType.Full;
+                if (joinSpec.getType() == JoinType.Right) {
+                    lastRightJoinIndex = i;
+                }
+            }
+            for (Table t : this.allTables) {
+                this.allTableRefs.add(t.getTableRef());
+            }
+            this.allLeftJoin = allLeftJoin;
+            this.isPrefilterAccepted = !hasFullJoin && lastRightJoinIndex == -1;
+            this.prefilterAcceptedTables = new ArrayList<JoinSpec>();
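+            // Collect the join specs whose right-hand side may accept pushed-down
+            // prefilters: Left, Anti and Full joins are skipped, and only joins at or
+            // after the last Right join are considered.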
+            for (int i = lastRightJoinIndex == -1 ? 0 : lastRightJoinIndex; i < joinSpecs.size(); i++) {
+                JoinSpec joinSpec = joinSpecs.get(i);
+                if (joinSpec.getType() != JoinType.Left && joinSpec.getType() != JoinType.Anti && joinSpec.getType() != JoinType.Full) {
+                    prefilterAcceptedTables.add(joinSpec);
+                }
+            }
+        }
+
+        public Table getLeftTable() {
+            return leftTable;
+        }
+
+        public List<JoinSpec> getJoinSpecs() {
+            return joinSpecs;
+        }
+
+        public List<Table> getAllTables() {
+            return allTables;
+        }
+
+        public List<TableRef> getAllTableRefs() {
+            return allTableRefs;
+        }
+
+        public List<TableRef> getLeftTableRef() {
+            return Collections.<TableRef>singletonList(leftTable.getTableRef());
+        }
+
+        public boolean isAllLeftJoin() {
+            return allLeftJoin;
+        }
+
+        public SelectStatement getOriginalJoinSelectStatement() {
+            return originalJoinSelectStatement;
+        }
+
+        public ColumnResolver getOriginalResolver() {
+            return origResolver;
+        }
+
+        public Map<ColumnRef, ColumnRefType> getColumnRefs() {
+            return columnRefs;
+        }
+
+        public ParseNode getPostFiltersCombined() {
+            return combine(postFilters);
+        }
+
+        public void addPostJoinFilter(ParseNode parseNode) {
+            if (this.postFilters == Collections.EMPTY_LIST) {
+                this.postFilters = new ArrayList<ParseNode>();
+            }
+            this.postFilters.add(parseNode);
+        }
+
+        public void addLeftTableFilter(ParseNode parseNode) throws SQLException {
+            if (isPrefilterAccepted) {
+                leftTable.addFilter(parseNode);
+            } else {
+                addPostJoinFilter(parseNode);
+            }
+        }
+
+        public List<JoinSpec> getPrefilterAcceptedJoinSpecs() {
+            return this.prefilterAcceptedTables;
+        }
+
+        /**
+         * Tries to decompose the filter and push single-table conjuncts down to their
+         * respective tables. For example, given hypothetical tables t1 and t2, in
+         * WHERE t1.a = 1 AND t1.b = t2.b the condition t1.a = 1 can be pushed down to t1.
+         * @param filter the filter parse node to decompose
+         * @throws SQLException
+         */
+        public void pushDownFilter(ParseNode filter) throws SQLException {
+            if (joinSpecs.isEmpty()) {
+                leftTable.addFilter(filter);
+                return;
+            }
+
+            WhereNodeVisitor visitor = new WhereNodeVisitor(
+                    origResolver,
+                    this,
+                    phoenixStatement.getConnection());
+            filter.accept(visitor);
+        }
+
+        public void pushDownColumnRefVisitors(
+                ColumnRefParseNodeVisitor generalRefVisitor,
+                ColumnRefParseNodeVisitor joinLocalRefVisitor) throws SQLException {
+            for (ParseNode node : leftTable.getPostFilterParseNodes()) {
+                node.accept(generalRefVisitor);
+            }
+            for (ParseNode node : postFilters) {
+                node.accept(generalRefVisitor);
+            }
+            for (JoinSpec joinSpec : joinSpecs) {
+                JoinTable joinTable = joinSpec.getRhsJoinTable();
+                boolean hasSubJoin = !joinTable.getJoinSpecs().isEmpty();
+                for (EqualParseNode node : joinSpec.getOnConditions()) {
+                    node.getLHS().accept(generalRefVisitor);
+                    if (hasSubJoin) {
+                        node.getRHS().accept(generalRefVisitor);
+                    } else {
+                        node.getRHS().accept(joinLocalRefVisitor);
+                    }
+                }
+                joinTable.pushDownColumnRefVisitors(generalRefVisitor, joinLocalRefVisitor);
+            }
+        }
+
+        /**
+         * Prunes the columns of each {@link JoinCompiler.Table} for which
+         * {@link JoinCompiler.Table#isSubselect()} is true.
+         * @throws SQLException
+         */
+        public void pruneSubselectAliasedNodes() throws SQLException {
+            this.leftTable.pruneSubselectAliasedNodes();
+            for (JoinSpec joinSpec : joinSpecs) {
+                JoinTable rhsJoinTablesContext = joinSpec.getRhsJoinTable();
+                rhsJoinTablesContext.pruneSubselectAliasedNodes();
+            }
+        }
+
+        public Expression compilePostFilterExpression(StatementContext context) throws SQLException {
+            List<ParseNode> filtersCombined = Lists.<ParseNode> newArrayList(postFilters);
+            return JoinCompiler.compilePostFilterExpression(context, filtersCombined);
+        }
+
+        /**
+         * Returns a list of all applicable join strategies. The order of the strategies in the
+         * returned list is based on the static rules below; however, the caller can decide on
+         * an optimal join strategy by evaluating and comparing the costs.
+         * 1. If the USE_SORT_MERGE_JOIN hint is specified,
+         *    return a singleton list containing only SORT_MERGE.
+         * 2. If the query 1) matches the pattern "A LEFT/INNER/SEMI/ANTI JOIN B"; or
+         *       2) matches the pattern "A LEFT/INNER/SEMI/ANTI JOIN B (LEFT/INNER/SEMI/ANTI JOIN C)+"
+         *          and the NO_STAR_JOIN hint is not specified,
+         *    add HASH_BUILD_RIGHT to the returned list.
+         * 3. If the query matches the pattern "A RIGHT/INNER JOIN B", where B is either a named
+         *    table reference or a flat sub-query,
+         *    add HASH_BUILD_LEFT to the returned list.
+         * 4. Add SORT_MERGE to the returned list.
+         * @throws SQLException
+         */
+        public List<Strategy> getApplicableJoinStrategies() throws SQLException {
+            List<Strategy> strategies = Lists.newArrayList();
+            if (useSortMergeJoin) {
+                strategies.add(Strategy.SORT_MERGE);
+            } else {
+                if (getStarJoinVector() != null) {
+                    strategies.add(Strategy.HASH_BUILD_RIGHT);
+                }
+                JoinSpec lastJoinSpec = joinSpecs.get(joinSpecs.size() - 1);
+                JoinType type = lastJoinSpec.getType();
+                if ((type == JoinType.Right || type == JoinType.Inner)
+                        && lastJoinSpec.getRhsJoinTable().getJoinSpecs().isEmpty()
+                        && lastJoinSpec.getRhsJoinTable().getLeftTable().isCouldPushToServerAsHashJoinProbeSide()) {
+                    strategies.add(Strategy.HASH_BUILD_LEFT);
+                }
+                strategies.add(Strategy.SORT_MERGE);
+            }
+
+            return strategies;
+        }
+
+        /**
+         * Returns a boolean vector indicating whether the evaluation of join expressions
+         * can be evaluated at an early stage if the input JoinSpec can be taken as a
+         * star join. Otherwise returns null.
+         * @return a boolean vector for a star join; or null for non star join.
+         * @throws SQLException
+         */
+        public boolean[] getStarJoinVector() throws SQLException {
+            int count = joinSpecs.size();
+            if (!leftTable.isCouldPushToServerAsHashJoinProbeSide() ||
+                    (!useStarJoin
+                            && count > 1
+                            && joinSpecs.get(count - 1).getType() != JoinType.Left
+                            && joinSpecs.get(count - 1).getType() != JoinType.Semi
+                            && joinSpecs.get(count - 1).getType() != JoinType.Anti
+                            && !joinSpecs.get(count - 1).isSingleValueOnly()))
+                return null;
+
+            boolean[] vector = new boolean[count];
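+            // A join qualifies for early evaluation only if its ON condition depends solely
+            // on the leftmost (probe-side) table; any other dependency clears its flag.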
+            for (int i = 0; i < count; i++) {
+                JoinSpec joinSpec = joinSpecs.get(i);
+                if (joinSpec.getType() != JoinType.Left
+                        && joinSpec.getType() != JoinType.Inner
+                        && joinSpec.getType() != JoinType.Semi
+                        && joinSpec.getType() != JoinType.Anti)
+                    return null;
+                vector[i] = true;
+                Iterator<TableRef> iter = joinSpec.getDependentTableRefs().iterator();
+                while (vector[i] && iter.hasNext()) {
+                    TableRef tableRef = iter.next();
+                    if (!tableRef.equals(leftTable.getTableRef())) {
+                        vector[i] = false;
+                    }
+                }
+            }
+
+            return vector;
+        }
+
+        /**
+         * Creates a new {@link JoinTable} that excludes the last {@link JoinSpec},
+         * and tries to push {@link #postFilters} down to the new {@link JoinTable}.
+         * @param phoenixConnection the connection used when pushing down post filters
+         * @return the new sub {@link JoinTable}
+         * @throws SQLException
+         */
+        public JoinTable createSubJoinTable(
+                PhoenixConnection phoenixConnection) throws SQLException {
+            assert joinSpecs.size() > 0;
+            JoinTable newJoinTablesContext = joinSpecs.size() > 1 ?
+                    new JoinTable(leftTable, joinSpecs.subList(0, joinSpecs.size() - 1)) :
+                    new JoinTable(leftTable);
+            JoinType rightmostJoinType = joinSpecs.get(joinSpecs.size() - 1).getType();
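+            /**
+             * For RIGHT or FULL joins the rows of the new sub JoinTable may be
+             * null-extended by the join, so postFilters cannot be pushed below it.
+             */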
+            if(rightmostJoinType == JoinType.Right || rightmostJoinType == JoinType.Full) {
+                return newJoinTablesContext;
+            }
+
+            if(this.postFilters.isEmpty()) {
+                return newJoinTablesContext;
+            }
+
+            PushDownPostFilterParseNodeVisitor pushDownPostFilterNodeVisitor =
+                    new PushDownPostFilterParseNodeVisitor(
+                            JoinCompiler.this.origResolver,
+                            newJoinTablesContext,
+                            phoenixConnection);
+            int index = 0;
+            List<ParseNode> newPostFilterParseNodes = null;
+            for(ParseNode postFilterParseNode : this.postFilters) {
+                ParseNode newPostFilterParseNode =
+                        postFilterParseNode.accept(pushDownPostFilterNodeVisitor);
+                if(newPostFilterParseNode != postFilterParseNode &&
+                   newPostFilterParseNodes == null) {
+                    newPostFilterParseNodes =
+                            new ArrayList<ParseNode>(this.postFilters.subList(0, index));
+                }
+                if(newPostFilterParseNodes != null && newPostFilterParseNode != null) {
+                    newPostFilterParseNodes.add(newPostFilterParseNode);
+                }
+                index++;
+            }
+            if(newPostFilterParseNodes != null) {
+                this.postFilters = newPostFilterParseNodes;
+            }
+            return newJoinTablesContext;
+        }
+
+        public SelectStatement getAsSingleSubquery(SelectStatement query, boolean asSubquery) throws SQLException {
+            assert (isCouldPushToServerAsHashJoinProbeSide(query));
+
+            if (asSubquery)
+                return query;
+
+            return NODE_FACTORY.select(originalJoinSelectStatement, query.getFrom(), query.getWhere());
+        }
+
+        public boolean hasPostReference() {
+            for (Table table : allTables) {
+                if (table.isWildCardSelect()) {
+                    return true;
+                }
+            }
+
+            for (Map.Entry<ColumnRef, ColumnRefType> e : columnRefs.entrySet()) {
+                if (e.getValue() == ColumnRefType.GENERAL &&
+                    allTableRefs.contains(e.getKey().getTableRef())) {
+                    return true;
+                }
+            }
+
+            return false;
+        }
+
+        public boolean hasFilters() {
+           if (!postFilters.isEmpty())
+               return true;
+
+           if (isPrefilterAccepted && leftTable.hasFilters())
+               return true;
+
+           for (JoinSpec joinSpec : prefilterAcceptedTables) {
+               if (joinSpec.getRhsJoinTable().hasFilters())
+                   return true;
+           }
+
+           return false;
+        }
+    }
+
+    public class JoinSpec {
+        private final JoinType type;
+        private final List<EqualParseNode> onConditions;
+        private final JoinTable rhsJoinTable;
+        private final boolean singleValueOnly;
+        private Set<TableRef> dependentTableRefs;
+        private OnNodeVisitor onNodeVisitor;
+
+        private JoinSpec(JoinType type, ParseNode onNode, JoinTable joinTable,
+                boolean singleValueOnly, ColumnResolver resolver) throws SQLException {
+            this.type = type;
+            this.onConditions = new ArrayList<EqualParseNode>();
+            this.rhsJoinTable = joinTable;
+            this.singleValueOnly = singleValueOnly;
+            this.dependentTableRefs = new HashSet<TableRef>();
+            this.onNodeVisitor = new OnNodeVisitor(resolver, this, phoenixStatement.getConnection());
+            if (onNode != null) {
+                this.pushDownOnCondition(onNode);
+            }
+        }
+
+        /**
+         * <pre>
+         * 1.in {@link JoinSpec} ctor,try to push the filter in join on clause to where clause,
+         *   eg. for "a join b on a.id = b.id and b.code = 1 where a.name is not null", try to
+         *   push "b.code =1" in join on clause to where clause.
+         * 2.in{@link WhereNodeVisitor#visitLeave(ComparisonParseNode, List)}, for inner join,
+         *   try to push the join on condition in where clause to join on clause,
+         *   eg. for "a join b on a.id = b.id where a.name = b.name", try to push "a.name=b.name"
+         *   in where clause to join on clause.
+         * </pre>
+         * @param node
+         * @throws SQLException
+         */
+        public void pushDownOnCondition(ParseNode node) throws SQLException {
+            node.accept(onNodeVisitor);
+        }
+
+        public JoinType getType() {
+            return type;
+        }
+
+        public List<EqualParseNode> getOnConditions() {
+            return onConditions;
+        }
+
+        public JoinTable getRhsJoinTable() {
+            return rhsJoinTable;
+        }
+
+        public List<TableRef> getRhsJoinTableRefs() {
+            return this.rhsJoinTable.getAllTableRefs();
+        }
+
+        public void pushDownFilterToRhsJoinTable(ParseNode parseNode) throws SQLException {
+            this.rhsJoinTable.pushDownFilter(parseNode);
+        }
+
+        public void addOnCondition(EqualParseNode equalParseNode) {
+            this.onConditions.add(equalParseNode);
+        }
+
+        public void addDependentTableRefs(Collection<TableRef> tableRefs) {
+            this.dependentTableRefs.addAll(tableRefs);
+        }
+
+        public boolean isSingleValueOnly() {
+            return singleValueOnly;
+        }
+
+        public Set<TableRef> getDependentTableRefs() {
+            return dependentTableRefs;
+        }
+
+        public Pair<List<Expression>, List<Expression>> compileJoinConditions(StatementContext lhsCtx, StatementContext rhsCtx, Strategy strategy) throws SQLException {
+            if (onConditions.isEmpty()) {
+                return new Pair<List<Expression>, List<Expression>>(
+                        Collections.<Expression> singletonList(LiteralExpression.newConstant(1)),
+                        Collections.<Expression> singletonList(LiteralExpression.newConstant(1)));
+            }
+
+            List<Pair<Expression, Expression>> compiled = Lists.<Pair<Expression, Expression>> newArrayListWithExpectedSize(onConditions.size());
+            ExpressionCompiler lhsCompiler = new ExpressionCompiler(lhsCtx);
+            ExpressionCompiler rhsCompiler = new ExpressionCompiler(rhsCtx);
+            for (EqualParseNode condition : onConditions) {
+                lhsCompiler.reset();
+                Expression left = condition.getLHS().accept(lhsCompiler);
+                rhsCompiler.reset();
+                Expression right = condition.getRHS().accept(rhsCompiler);
+                PDataType toType = getCommonType(left.getDataType(), right.getDataType());
+                SortOrder toSortOrder = strategy == Strategy.SORT_MERGE ? SortOrder.ASC : (strategy == Strategy.HASH_BUILD_LEFT ? right.getSortOrder() : left.getSortOrder());
+                if (left.getDataType() != toType || left.getSortOrder() != toSortOrder) {
+                    left = CoerceExpression.create(left, toType, toSortOrder, left.getMaxLength());
+                }
+                if (right.getDataType() != toType || right.getSortOrder() != toSortOrder) {
+                    right = CoerceExpression.create(right, toType, toSortOrder, right.getMaxLength());
+                }
+                compiled.add(new Pair<Expression, Expression>(left, right));
+            }
+            // TODO PHOENIX-4618:
+            // For Strategy.SORT_MERGE, we probably need to re-order the join keys based on the
+            // specific ordering required by the join's parent, or re-order them the following way
+            // to align with group-by expressions' re-ordering.
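+            // For the hash join strategies, order the join keys so that fixed-width
+            // non-nullable keys come first, then variable-width keys, and fixed-width
+            // nullable keys last, presumably because a fixed-width slot cannot mark a
+            // null inside the composite key encoding.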
+            if (strategy != Strategy.SORT_MERGE) {
+                Collections.sort(compiled, new Comparator<Pair<Expression, Expression>>() {
+                    @Override
+                    public int compare(Pair<Expression, Expression> o1, Pair<Expression, Expression> o2) {
+                        Expression e1 = o1.getFirst();
+                        Expression e2 = o2.getFirst();
+                        boolean isFixed1 = e1.getDataType().isFixedWidth();
+                        boolean isFixed2 = e2.getDataType().isFixedWidth();
+                        boolean isFixedNullable1 = e1.isNullable() && isFixed1;
+                        boolean isFixedNullable2 = e2.isNullable() && isFixed2;
+                        if (isFixedNullable1 == isFixedNullable2) {
+                            if (isFixed1 == isFixed2) {
+                                return 0;
+                            } else if (isFixed1) {
+                                return -1;
+                            } else {
+                                return 1;
+                            }
+                        } else if (isFixedNullable1) {
+                            return 1;
+                        } else {
+                            return -1;
+                        }
+                    }
+                });
+            }
+            List<Expression> lConditions = Lists.<Expression> newArrayListWithExpectedSize(compiled.size());
+            List<Expression> rConditions = Lists.<Expression> newArrayListWithExpectedSize(compiled.size());
+            for (Pair<Expression, Expression> pair : compiled) {
+                lConditions.add(pair.getFirst());
+                rConditions.add(pair.getSecond());
+            }
+
+            return new Pair<List<Expression>, List<Expression>>(lConditions, rConditions);
+        }
+
+        private PDataType getCommonType(PDataType lType, PDataType rType) throws SQLException {
+            if (lType == rType)
+                return lType;
+
+            if (!lType.isComparableTo(rType))
+                throw new SQLExceptionInfo.Builder(SQLExceptionCode.TYPE_MISMATCH)
+                    .setMessage("On-clause LHS expression and RHS expression must be comparable. LHS type: " + lType + ", RHS type: " + rType)
+                    .build().buildException();
+
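+            // Walk the coercion ladder from the narrowest numeric type upwards, then
+            // temporal, character and boolean types; returning the LHS type where
+            // possible preserves UNSIGNED variants. VARBINARY is the final catch-all.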
+            if (lType.isCoercibleTo(PTinyint.INSTANCE)
+                && (rType == null || rType.isCoercibleTo(PTinyint.INSTANCE))) {
+              return lType; // to preserve UNSIGNED type
+            }
+            if (lType.isCoercibleTo(PSmallint.INSTANCE)
+                && (rType == null || rType.isCoercibleTo(PSmallint.INSTANCE))) {
+              return lType; // to preserve UNSIGNED type
+            }
+            if (lType.isCoercibleTo(PInteger.INSTANCE)
+                && (rType == null || rType.isCoercibleTo(PInteger.INSTANCE))) {
+              return lType; // to preserve UNSIGNED type
+            }
+            if (lType.isCoercibleTo(PLong.INSTANCE)
+                && (rType == null || rType.isCoercibleTo(PLong.INSTANCE))) {
+              return lType; // to preserve UNSIGNED type
+            }
+            if (lType.isCoercibleTo(PDouble.INSTANCE)
+                && (rType == null || rType.isCoercibleTo(PDouble.INSTANCE))) {
+              return lType; // to preserve UNSIGNED type
+            }
+            if (lType.isCoercibleTo(PDecimal.INSTANCE)
+                && (rType == null || rType.isCoercibleTo(PDecimal.INSTANCE))) {
+              return PDecimal.INSTANCE;
+            }
+            if (lType.isCoercibleTo(PDate.INSTANCE)
+                && (rType == null || rType.isCoercibleTo(PDate.INSTANCE))) {
+              return lType;
+            }
+            if (lType.isCoercibleTo(PTimestamp.INSTANCE)
+                && (rType == null || rType.isCoercibleTo(PTimestamp.INSTANCE))) {
+              return lType;
+            }
+            if (lType.isCoercibleTo(PVarchar.INSTANCE)
+                && (rType == null || rType.isCoercibleTo(PVarchar.INSTANCE))) {
+              return PVarchar.INSTANCE;
+            }
+            if (lType.isCoercibleTo(PBoolean.INSTANCE)
+                && (rType == null || rType.isCoercibleTo(PBoolean.INSTANCE))) {
+              return PBoolean.INSTANCE;
+            }
+            return PVarbinary.INSTANCE;
+        }
+    }
+
+    public class Table {
+        private TableNode tableNode;
+        private final boolean isWildcard;
+        private final List<ColumnDef> dynamicColumns;
+        private final Double tableSamplingRate;
+        private SelectStatement subselectStatement;
+        private TableRef tableRef;
+        /**
+         * Filters that can serve as this {@link Table}'s where conditions.
+         * Note: for {@link #isSubselect()}, each added preFilterParseNode
+         * is first rewritten by
+         * {@link SubselectRewriter#rewritePreFilterForSubselect}.
+         */
+        private final List<ParseNode> preFilterParseNodes;
+        /**
+         * Only meaningful for {@link #isSubselect()}.
+         * {@link #postFilterParseNodes} cannot serve as this
+         * {@link Table}'s where conditions; they must be applied as filters after
+         * {@link #getSelectStatementByApplyPreFiltersForSubselect()}
+         * is executed.
+         */
+        private final List<ParseNode> postFilterParseNodes;
+        /**
+         * Determined by {@link SubselectRewriter#isFilterCanPushDownToSelect}.
+         * Only meaningful for {@link #isSubselect()}.
+         */
+        private final boolean filterCanPushDownToSubselect;
+
+        private Table(TableNode tableNode, boolean isWildcard, List<ColumnDef> dynamicColumns,
+                      Double tableSamplingRate, TableRef tableRef) {
+            this.tableNode = tableNode;
+            this.isWildcard = isWildcard;
+            this.dynamicColumns = dynamicColumns;
+            this.tableSamplingRate = tableSamplingRate;
+            this.subselectStatement = null;
+            this.tableRef = tableRef;
+            this.preFilterParseNodes = new ArrayList<ParseNode>();
+            this.postFilterParseNodes = Collections.<ParseNode>emptyList();
+            this.filterCanPushDownToSubselect = false;
+        }
+
+        private Table(DerivedTableNode tableNode, boolean isWildcard, TableRef tableRef) throws SQLException {
+            this.tableNode = tableNode;
+            this.isWildcard = isWildcard;
+            this.dynamicColumns = Collections.<ColumnDef>emptyList();
+            this.tableSamplingRate = ConcreteTableNode.DEFAULT_TABLE_SAMPLING_RATE;
+            this.subselectStatement = SubselectRewriter.flatten(tableNode.getSelect(), phoenixStatement.getConnection());
+            this.tableRef = tableRef;
+            this.preFilterParseNodes = new ArrayList<ParseNode>();
+            this.postFilterParseNodes = new ArrayList<ParseNode>();
+            this.filterCanPushDownToSubselect = SubselectRewriter.isFilterCanPushDownToSelect(subselectStatement);
+        }
+
+        public TableNode getTableNode() {
+            return tableNode;
+        }
+
+        public List<ColumnDef> getDynamicColumns() {
+            return dynamicColumns;
+        }
+        
+        public Double getTableSamplingRate() {
+            return tableSamplingRate;
+        }
+
+        public boolean isSubselect() {
+            return subselectStatement != null;
+        }
+
+        public SelectStatement getSubselectStatement() {
+            return this.subselectStatement;
+        }
+
+        /**
+         * Prunes unreferenced columns if {@link #isSubselect()}.
+         * Note: if some columns are pruned, the {@link JoinCompiler#origResolver} should be refreshed.
+         * @throws SQLException
+         */
+        public void pruneSubselectAliasedNodes() throws SQLException {
+            if(!this.isSubselect()) {
+                return;
+            }
+            Set<String> referencedColumnNames = this.getReferencedColumnNames();
+            SelectStatement newSubselectStatement =
+                    SubselectRewriter.pruneSelectAliasedNodes(
+                            this.subselectStatement,
+                            referencedColumnNames,
+                            phoenixStatement.getConnection());
+            if(!newSubselectStatement.getSelect().equals(this.subselectStatement.getSelect())) {
+                /**
+                 * The columns are pruned, so {@link ColumnResolver} should be refreshed.
+                 */
+                DerivedTableNode newDerivedTableNode =
+                        NODE_FACTORY.derivedTable(this.tableNode.getAlias(), newSubselectStatement);
+                TableRef newTableRef =
+                        FromCompiler.refreshDerivedTableNode(origResolver, newDerivedTableNode);
+                assert newTableRef != null;
+                this.subselectStatement = newSubselectStatement;
+                this.tableRef = newTableRef;
+                this.tableNode = newDerivedTableNode;
+            }
+        }
+
+        /**
+         * Collect the referenced columns of this {@link Table}
+         * according to {@link JoinCompiler#columnNodes}.
+         * @return
+         * @throws SQLException
+         */
+        private Set<String> getReferencedColumnNames() throws SQLException {
+            assert(this.isSubselect());
+            if (isWildCardSelect()) {
+                return null;
+            }
+            Set<String> referencedColumnNames = new HashSet<String>();
+            for (Map.Entry<ColumnRef, ColumnParseNode> entry : columnNodes.entrySet()) {
+                if (tableRef.equals(entry.getKey().getTableRef())) {
+                    ColumnParseNode columnParseNode = entry.getValue();
+                    String normalizedColumnName = SchemaUtil.getNormalizedColumnName(columnParseNode);
+                    referencedColumnNames.add(normalizedColumnName);
+                }
+            }
+            return referencedColumnNames;
+        }
+
+        /**
+         * Returns all the basic select nodes, no aggregation.
+         */
+        public List<AliasedNode> getSelectAliasedNodes() {
+            if (isWildCardSelect()) {
+                return Collections.singletonList(NODE_FACTORY.aliasedNode(null, NODE_FACTORY.wildcard()));
+            }
+
+            List<AliasedNode> ret = new ArrayList<AliasedNode>();
+            for (Map.Entry<ColumnRef, ColumnParseNode> entry : columnNodes.entrySet()) {
+                if (tableRef.equals(entry.getKey().getTableRef())) {
+                    ret.add(NODE_FACTORY.aliasedNode(null, entry.getValue()));
+                }
+            }
+            if (ret.isEmpty()) {
+                ret.add(NODE_FACTORY.aliasedNode(null, NODE_FACTORY.literal(1)));
+            }
+            return ret;
+        }
+
+        public List<ParseNode> getPreFilterParseNodes() {
+            return preFilterParseNodes;
+        }
+
+        public List<ParseNode> getPostFilterParseNodes() {
+            return postFilterParseNodes;
+        }
+
+        public TableRef getTableRef() {
+            return tableRef;
+        }
+
+        public void addFilter(ParseNode filter) throws SQLException {
+            if (!isSubselect() || filterCanPushDownToSubselect) {
+                this.addPreFilter(filter);
+            } else {
+                postFilterParseNodes.add(filter);
+            }
+        }
+
+        /**
+         * If {@link #isSubselect()}, preFilterParseNode is first rewritten by
+         * {@link SubselectRewriter#rewritePreFilterForSubselect}.
+         * @param preFilterParseNode
+         * @throws SQLException
+         */
+        private void addPreFilter(ParseNode preFilterParseNode) throws SQLException {
+            if(this.isSubselect()) {
+                preFilterParseNode =
+                        SubselectRewriter.rewritePreFilterForSubselect(
+                                preFilterParseNode,
+                                this.subselectStatement,
+                                tableNode.getAlias());
+            }
+            preFilterParseNodes.add(preFilterParseNode);
+        }
+
+        public ParseNode getCombinedPreFilterParseNodes() {
+            return combine(preFilterParseNodes);
+        }
+
+        /**
+         * Get this {@link Table}'s new {@link SelectStatement} by applying {@link #preFilterParseNodes},
+         * {@link #postFilterParseNodes} and additional newOrderByNodes.
+         * @param newOrderByNodes
+         * @return
+         * @throws SQLException
+         */
+        public SelectStatement getAsSubquery(List<OrderByNode> newOrderByNodes) throws SQLException {
+            if (isSubselect()) {
+                return SubselectRewriter.applyOrderByAndPostFilters(
+                        this.getSelectStatementByApplyPreFiltersForSubselect(),
+                        newOrderByNodes,
+                        tableNode.getAlias(),
+                        postFilterParseNodes);
+            }
+
+            /**
+             * For a flat table, {@link #postFilterParseNodes} is empty, because filters can safely be pushed down as
+             * {@link #preFilterParseNodes}.
+             */
+            assert postFilterParseNodes == null || postFilterParseNodes.isEmpty();
+            return NODE_FACTORY.select(
+                    tableNode,
+                    originalJoinSelectStatement.getHint(),
+                    false,
+                    getSelectAliasedNodes(),
+                    getCombinedPreFilterParseNodes(),
+                    null,
+                    null,
+                    newOrderByNodes,
+                    null,
+                    null,
+                    0,
+                    false,
+                    originalJoinSelectStatement.hasSequence(),
+                    Collections.<SelectStatement> emptyList(),
+                    originalJoinSelectStatement.getUdfParseNodes());
+        }
+
+        public SelectStatement getAsSubqueryForOptimization(boolean applyGroupByOrOrderBy) throws SQLException {
+            assert (!isSubselect());
+
+            SelectStatement query = getAsSubquery(null);
+            if (!applyGroupByOrOrderBy)
+                return query;
+
+            boolean addGroupBy = false;
+            boolean addOrderBy = false;
+            if (originalJoinSelectStatement.getGroupBy() != null && !originalJoinSelectStatement.getGroupBy().isEmpty()) {
+                ColumnRefParseNodeVisitor groupByVisitor = new ColumnRefParseNodeVisitor(origResolver, phoenixStatement.getConnection());
+                for (ParseNode node : originalJoinSelectStatement.getGroupBy()) {
+                    node.accept(groupByVisitor);
+                }
+                Set<TableRef> set = groupByVisitor.getTableRefSet();
+                if (set.size() == 1 && tableRef.equals(set.iterator().next())) {
+                    addGroupBy = true;
+                }
+            } else if (originalJoinSelectStatement.getOrderBy() != null && !originalJoinSelectStatement.getOrderBy().isEmpty()) {
+                ColumnRefParseNodeVisitor orderByVisitor = new ColumnRefParseNodeVisitor(origResolver, phoenixStatement.getConnection());
+                for (OrderByNode node : originalJoinSelectStatement.getOrderBy()) {
+                    node.getNode().accept(orderByVisitor);
+                }
+                Set<TableRef> set = orderByVisitor.getTableRefSet();
+                if (set.size() == 1 && tableRef.equals(set.iterator().next())) {
+                    addOrderBy = true;
+                }
+            }
+
+            if (!addGroupBy && !addOrderBy)
+                return query;
+
+            List<AliasedNode> selectList = query.getSelect();
+            if (addGroupBy) {
+                assert (!isWildCardSelect());
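+                // Wrap each projected node in MIN(...) so the select list remains a
+                // valid aggregate projection once the original statement's GROUP BY is
+                // applied to this subquery.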
+                selectList = new ArrayList<AliasedNode>(query.getSelect().size());
+                for (AliasedNode aliasedNode : query.getSelect()) {
+                    ParseNode node = NODE_FACTORY.function(
+                            MinAggregateFunction.NAME, Collections.singletonList(aliasedNode.getNode()));
+                    selectList.add(NODE_FACTORY.aliasedNode(null, node));
+                }
+            }
+
+            return NODE_FACTORY.select(query.getFrom(), query.getHint(), query.isDistinct(), selectList,
+                    query.getWhere(), addGroupBy ? originalJoinSelectStatement.getGroupBy() : query.getGroupBy(),
+                    addGroupBy ? null : query.getHaving(), addOrderBy ? originalJoinSelectStatement.getOrderBy() : query.getOrderBy(),
+                    query.getLimit(), query.getOffset(), query.getBindCount(), addGroupBy, query.hasSequence(),
+                    query.getSelects(), query.getUdfParseNodes());
+        }
+
+        public boolean hasFilters() {
+            return isSubselect() ?
+                   (!postFilterParseNodes.isEmpty() || subselectStatement.getWhere() != null || subselectStatement.getHaving() != null) :
+                    !preFilterParseNodes.isEmpty();
+        }
+
+        /**
+         * Check whether this {@link Table} can be pushed to the RegionServer's
+         * {@link HashJoinRegionScanner} as the probe side of a hash join.
+         * @return
+         * @throws SQLException
+         */
+        public boolean isCouldPushToServerAsHashJoinProbeSide() throws SQLException {
+            /**
+             * If {@link #postFilterParseNodes} is not empty, this {@link Table}
+             * must apply {@link #postFilterParseNodes} before the join, so it
+             * cannot be pushed to the server.
+             */
+            if(this.postFilterParseNodes != null && !this.postFilterParseNodes.isEmpty()) {
+                return false;
+            }
+
+            SelectStatement selectStatementToUse = this.getAsSubquery(null);
+            RewriteResult rewriteResult =
+                    ParseNodeUtil.rewrite(selectStatementToUse, phoenixStatement.getConnection());
+            return JoinCompiler.isCouldPushToServerAsHashJoinProbeSide(
+                    rewriteResult.getRewrittenSelectStatement());
+        }
+
+        /**
+         * Get this {@link Table}'s new {@link SelectStatement}, applying only
+         * {@link #preFilterParseNodes}; only meaningful for {@link #isSubselect()}.
+         * @return
+         */
+        private SelectStatement getSelectStatementByApplyPreFiltersForSubselect() {
+            return SubselectRewriter.applyPreFiltersForSubselect(
+                    subselectStatement,
+                    preFilterParseNodes,
+                    tableNode.getAlias());
+
+        }
+
+        protected boolean isWildCardSelect() {
+            return isWildcard;
+        }
+
+        public void projectColumns(Scan scan) {
+            assert(!isSubselect());
+            if (isWildCardSelect()) {
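+                // A wildcard select needs every column family, so drop any column
+                // family projection from the scan.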
+                scan.getFamilyMap().clear();
+                return;
+            }
+            for (ColumnRef columnRef : columnRefs.keySet()) {
+                if (columnRef.getTableRef().equals(tableRef)
+                        && !SchemaUtil.isPKColumn(columnRef.getColumn())
+                        && !(columnRef instanceof LocalIndexColumnRef)) {
+                    EncodedColumnsUtil.setColumns(columnRef.getColumn(), tableRef.getTable(), scan);
+                }
+            }
+        }
+
+        public PTable createProjectedTable(boolean retainPKColumns, StatementContext context) throws SQLException {
+            assert(!isSubselect());
+            List<ColumnRef> sourceColumns = new ArrayList<ColumnRef>();
+            PTable table = tableRef.getTable();
+            if (retainPKColumns) {
+                for (PColumn column : table.getPKColumns()) {
+                    sourceColumns.add(new ColumnRef(tableRef, column.getPosition()));
+                }
+            }
+            if (isWildCardSelect()) {
+                for (PColumn column : table.getColumns()) {
+                    if (!retainPKColumns || !SchemaUtil.isPKColumn(column)) {
+                        sourceColumns.add(new ColumnRef(tableRef, column.getPosition()));
+                    }
+                }
+            } else {
+                for (Map.Entry<ColumnRef, ColumnRefType> e : columnRefs.entrySet()) {
+                    ColumnRef columnRef = e.getKey();
+                    if (columnRef.getTableRef().equals(tableRef)
+                            && (!retainPKColumns || !SchemaUtil.isPKColumn(columnRef.getColumn()))) {
+                        if (columnRef instanceof LocalIndexColumnRef) {
+                            sourceColumns.add(new IndexUncoveredDataColumnRef(context, tableRef,
+                                    IndexUtil.getIndexColumnName(columnRef.getColumn())));
+                        } else {
+                            sourceColumns.add(columnRef);
+                        }
+                    }
+                }
+            }
+
+            return TupleProjectionCompiler.createProjectedTable(tableRef, sourceColumns, retainPKColumns);
+        }
+
+        public PTable createProjectedTable(RowProjector rowProjector) throws SQLException {
+            assert(isSubselect());
+            TableRef tableRef = FromCompiler.getResolverForCompiledDerivedTable(phoenixStatement.getConnection(), this.tableRef, rowProjector).getTables().get(0);
+            List<ColumnRef> sourceColumns = new ArrayList<ColumnRef>();
+            PTable table = tableRef.getTable();
+            for (PColumn column : table.getColumns()) {
+                sourceColumns.add(new ColumnRef(tableRef, column.getPosition()));
+            }
+            return TupleProjectionCompiler.createProjectedTable(tableRef, sourceColumns, false);
+        }
+    }
+
+    /**
+     * Push down {@link JoinTable#postFilters} of the outermost JoinTable to
+     * {@link JoinTable#postFilters} of a sub JoinTable.
+     */
+    private static class PushDownPostFilterParseNodeVisitor extends AndRewriterBooleanParseNodeVisitor {
+        private ColumnRefParseNodeVisitor columnRefParseNodeVisitor;
+        /**
+         * The sub JoinTable that accepts the pushed-down postFilters.
+         */
+        private JoinTable joinTable;
+
+        public PushDownPostFilterParseNodeVisitor(
+                ColumnResolver resolver,
+                JoinTable joinTablesContext,
+                PhoenixConnection connection) {
+            super(NODE_FACTORY);
+            this.joinTable = joinTablesContext;
+            this.columnRefParseNodeVisitor = new ColumnRefParseNodeVisitor(resolver, connection);
+        }
+
+        @Override
+        protected ParseNode leaveBooleanNode(
+                ParseNode parentParseNode, List<ParseNode> childParseNodes) throws SQLException {
+            columnRefParseNodeVisitor.reset();
+            parentParseNode.accept(columnRefParseNodeVisitor);
+            ColumnRefParseNodeVisitor.ColumnRefType columnRefType =
+                    columnRefParseNodeVisitor.getContentType(
+                            this.joinTable.getAllTableRefs());
+            if(columnRefType == ColumnRefParseNodeVisitor.ColumnRefType.NONE ||
+               columnRefType == ColumnRefParseNodeVisitor.ColumnRefType.SELF_ONLY){
+                this.joinTable.postFilters.add(parentParseNode);
+                return null;
+            }
+            return parentParseNode;
+        }
+    }
+
+    private static class WhereNodeVisitor extends AndBooleanParseNodeVisitor<Void> {
+        private ColumnRefParseNodeVisitor columnRefVisitor;
+        private JoinTable joinTable;
+
+        public WhereNodeVisitor(
+                ColumnResolver resolver,
+                JoinTable joinTablesContext,
+                PhoenixConnection connection) {
+            this.joinTable = joinTablesContext;
+            this.columnRefVisitor = new ColumnRefParseNodeVisitor(resolver, connection);
+        }
+
+        @Override
+        protected Void leaveBooleanNode(ParseNode node,
+                List<Void> l) throws SQLException {
+            columnRefVisitor.reset();
+            node.accept(columnRefVisitor);
+            ColumnRefParseNodeVisitor.ColumnRefType type =
+                    columnRefVisitor.getContentType(this.joinTable.getLeftTableRef());
+            switch (type) {
+            case NONE:
+            case SELF_ONLY:
+                this.joinTable.addLeftTableFilter(node);
+                break;
+            case FOREIGN_ONLY:
+                JoinTable matched = null;
+                for (JoinSpec joinSpec : this.joinTable.getPrefilterAcceptedJoinSpecs()) {
+                    if (columnRefVisitor.getContentType(
+                            joinSpec.getRhsJoinTable().getAllTableRefs()) ==
+                        ColumnRefParseNodeVisitor.ColumnRefType.SELF_ONLY) {
+                        matched = joinSpec.getRhsJoinTable();
+                        break;
+                    }
+                }
+                if (matched != null) {
+                    matched.pushDownFilter(node);
+                } else {
+                    this.joinTable.addPostJoinFilter(node);
+                }
+                break;
+            default:
+                this.joinTable.addPostJoinFilter(node);
+                break;
+            }
+            return null;
+        }
+
+        @Override
+        protected Void leaveNonBooleanNode(ParseNode node, List<Void> l) throws SQLException {
+            return null;
+        }
+
+        @Override
+        public Void visitLeave(AndParseNode node, List<Void> l) throws SQLException {
+            return null;
+        }
+
+        @Override
+        public Void visitLeave(ComparisonParseNode node, List<Void> l)
+                throws SQLException {
+            if (!(node instanceof EqualParseNode))
+                return leaveBooleanNode(node, l);
+
+            List<JoinSpec> prefilterAcceptedJoinSpecs =
+                    this.joinTable.getPrefilterAcceptedJoinSpecs();
+            ListIterator<JoinSpec> iter =
+                    prefilterAcceptedJoinSpecs.listIterator(prefilterAcceptedJoinSpecs.size());
+            while (iter.hasPrevious()) {
+                JoinSpec joinSpec = iter.previous();
+                if (joinSpec.getType() != JoinType.Inner || joinSpec.isSingleValueOnly()) {
+                    continue;
+                }
+
+                try {
+                    joinSpec.pushDownOnCondition(node);
+                    return null;
+                } catch (SQLException e) {
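+                    // The node is not a valid ON condition for this join (e.g. the
+                    // ambiguous-join-condition check failed); try the next JoinSpec,
+                    // or fall back to treating it as a plain boolean filter.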
+                }
+            }
+
+            return leaveBooleanNode(node, l);
+        }
+    }
+
+    private static class OnNodeVisitor extends AndBooleanParseNodeVisitor<Void> {
+        private final ColumnRefParseNodeVisitor columnRefVisitor;
+        private final JoinSpec joinSpec;
+
+        public OnNodeVisitor(
+                ColumnResolver resolver, JoinSpec joinSpec, PhoenixConnection connection) {
+            this.joinSpec = joinSpec;
+            this.columnRefVisitor = new ColumnRefParseNodeVisitor(resolver, connection);
+        }
+
+        @Override
+        protected Void leaveBooleanNode(ParseNode node,
+                List<Void> l) throws SQLException {
+            columnRefVisitor.reset();
+            node.accept(columnRefVisitor);
+            ColumnRefParseNodeVisitor.ColumnRefType type =
+                    columnRefVisitor.getContentType(this.joinSpec.getRhsJoinTableRefs());
+            if (type == ColumnRefParseNodeVisitor.ColumnRefType.NONE
+                    || type == ColumnRefParseNodeVisitor.ColumnRefType.SELF_ONLY) {
+                this.joinSpec.pushDownFilterToRhsJoinTable(node);
+            } else {
+                throwAmbiguousJoinConditionException();
+            }
+            return null;
+        }
+
+        @Override
+        protected Void leaveNonBooleanNode(ParseNode node, List<Void> l) throws SQLException {
+            return null;
+        }
+
+        @Override
+        public Void visitLeave(AndParseNode node, List<Void> l) throws SQLException {
+            return null;
+        }
+
+        @Override
+        public Void visitLeave(ComparisonParseNode node, List<Void> l)
+                throws SQLException {
+            if (!(node instanceof EqualParseNode))
+                return leaveBooleanNode(node, l);
+            columnRefVisitor.reset();
+            node.getLHS().accept(columnRefVisitor);
+            ColumnRefParseNodeVisitor.ColumnRefType lhsType =
+                    columnRefVisitor.getContentType(this.joinSpec.getRhsJoinTableRefs());
+            Set<TableRef> lhsTableRefSet = Sets.newHashSet(columnRefVisitor.getTableRefSet());
+            columnRefVisitor.reset();
+            node.getRHS().accept(columnRefVisitor);
+            ColumnRefParseNodeVisitor.ColumnRefType rhsType =
+                    columnRefVisitor.getContentType(this.joinSpec.getRhsJoinTableRefs());
+            Set<TableRef> rhsTableRefSet = Sets.newHashSet(columnRefVisitor.getTableRefSet());
+            if ((lhsType == ColumnRefParseNodeVisitor.ColumnRefType.SELF_ONLY || lhsType == ColumnRefParseNodeVisitor.ColumnRefType.NONE)
+                    && (rhsType == ColumnRefParseNodeVisitor.ColumnRefType.SELF_ONLY || rhsType == ColumnRefParseNodeVisitor.ColumnRefType.NONE)) {
+                this.joinSpec.pushDownFilterToRhsJoinTable(node);
+            } else if (lhsType == ColumnRefParseNodeVisitor.ColumnRefType.FOREIGN_ONLY
+                    && rhsType == ColumnRefParseNodeVisitor.ColumnRefType.SELF_ONLY) {
+                this.joinSpec.addOnCondition((EqualParseNode) node);
+                this.joinSpec.addDependentTableRefs(lhsTableRefSet);
+            } else if (rhsType == ColumnRefParseNodeVisitor.ColumnRefType.FOREIGN_ONLY
+                    && lhsType == ColumnRefParseNodeVisitor.ColumnRefType.SELF_ONLY) {
+                this.joinSpec.addOnCondition(NODE_FACTORY.equal(node.getRHS(), node.getLHS()));
+                this.joinSpec.addDependentTableRefs(rhsTableRefSet);
+            } else {
+                throwAmbiguousJoinConditionException();
+            }
+            return null;
+        }
+
+        /*
+         * Conditions in the ON clause can only be:
+         * 1) an equal test between a self table expression and a foreign
+         *    table expression.
+         * 2) a boolean condition referencing the self table only.
+         * Otherwise, it can be ambiguous.
+         */
+        public void throwAmbiguousJoinConditionException() throws SQLException {
+            throw new SQLExceptionInfo.Builder(SQLExceptionCode.AMBIGUOUS_JOIN_CONDITION).build().buildException();
+        }
+    }
+
+    private static class LocalIndexColumnRef extends ColumnRef {
+        private final TableRef indexTableRef;
+
+        public LocalIndexColumnRef(TableRef tableRef, String familyName,
+                String columnName, TableRef indexTableRef) throws MetaDataEntityNotFoundException {
+            super(tableRef, familyName, columnName);
+            this.indexTableRef = indexTableRef;
+        }
+
+        @Override
+        public TableRef getTableRef() {
+            return indexTableRef;
+        }
+    }
+
+    private static class ColumnRefParseNodeVisitor extends StatelessTraverseAllParseNodeVisitor {
+        public enum ColumnRefType {NONE, SELF_ONLY, FOREIGN_ONLY, COMPLEX};
+
+        private final ColumnResolver resolver;
+        private final PhoenixConnection connection;
+        private final Set<TableRef> tableRefSet;
+        private final Map<ColumnRef, ColumnParseNode> columnRefMap;
+
+        public ColumnRefParseNodeVisitor(ColumnResolver resolver, PhoenixConnection connection) {
+            this.resolver = resolver;
+            this.tableRefSet = new HashSet<TableRef>();
+            this.columnRefMap = new HashMap<ColumnRef, ColumnParseNode>();
+            this.connection = connection;
+        }
+
+        public void reset() {
+            this.tableRefSet.clear();
+            this.columnRefMap.clear();
+        }
+
+        @Override
+        public Void visit(ColumnParseNode node) throws SQLException {
+            ColumnRef columnRef = null;
+            try {
+                columnRef = resolver.resolveColumn(node.getSchemaName(), node.getTableName(), node.getName());
+            } catch (ColumnNotFoundException e) {
+                // This could be an IndexUncoveredDataColumnRef. If so, the table name must have
+                // been appended by the IndexStatementRewriter, and we can convert it into a
+                // LocalIndexColumnRef against the parent data table.
+                TableRef tableRef = resolver.resolveTable(node.getSchemaName(), node.getTableName());
+                if (IndexUtil.shouldIndexBeUsedForUncoveredQuery(tableRef)) {
+                    TableRef parentTableRef = FromCompiler.getResolver(
+                            NODE_FACTORY.namedTable(null, TableName.create(tableRef.getTable()
+                                    .getSchemaName().getString(), tableRef.getTable()
+                                    .getParentTableName().getString())), connection).resolveTable(
+                            tableRef.getTable().getSchemaName().getString(),
+                            tableRef.getTable().getParentTableName().getString());
+                    columnRef = new LocalIndexColumnRef(parentTableRef,
+                            IndexUtil.getDataColumnFamilyName(node.getName()),
+                            IndexUtil.getDataColumnName(node.getName()), tableRef);
+                } else {
+                    throw e;
+                }
+            }
+            columnRefMap.put(columnRef, node);
+            tableRefSet.add(columnRef.getTableRef());
+            return null;
+        }
+
+        public Set<TableRef> getTableRefSet() {
+            return tableRefSet;
+        }
+
+        public Map<ColumnRef, ColumnParseNode> getColumnRefMap() {
+            return columnRefMap;
+        }
+
+        public ColumnRefType getContentType(List<TableRef> selfTableRefs) {
+            if (tableRefSet.isEmpty())
+                return ColumnRefType.NONE;
+
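+            // SELF_ONLY if every referenced table is one of selfTableRefs, FOREIGN_ONLY
+            // if none is, and COMPLEX as soon as both self and foreign references appear.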
+            ColumnRefType ret = ColumnRefType.NONE;
+            for (TableRef tRef : tableRefSet) {
+                boolean isSelf = selfTableRefs.contains(tRef);
+                switch (ret) {
+                case NONE:
+                    ret = isSelf ? ColumnRefType.SELF_ONLY : ColumnRefType.FOREIGN_ONLY;
+                    break;
+                case SELF_ONLY:
+                    ret = isSelf ? ColumnRefType.SELF_ONLY : ColumnRefType.COMPLEX;
+                    break;
+                case FOREIGN_ONLY:
+                    ret = isSelf ? ColumnRefType.COMPLEX : ColumnRefType.FOREIGN_ONLY;
+                    break;
+                default: // COMPLEX do nothing
+                    break;
+                }
+
+                if (ret == ColumnRefType.COMPLEX) {
+                    break;
+                }
+            }
+
+            return ret;
+        }
+    }
+
+    // for creation of new statements
+    private static final ParseNodeFactory NODE_FACTORY = new ParseNodeFactory();
+
+    /**
+     * Check whether the given {@link SelectStatement} can be pushed to the RegionServer's
+     * {@link HashJoinRegionScanner} as the probe side of a hash join.
+     * Note: the {@link SelectStatement} parameter must have been rewritten by
+     * {@link ParseNodeUtil#rewrite} before this method is called.
+     * The {@link SelectStatement} parameter may contain non-correlated subqueries;
+     * correlated subqueries are rewritten as joins by {@link ParseNodeUtil#rewrite}.
+     * Note: the {@link SelectStatement} may also have an {@link OrderBy}, but we
+     * can ignore the {@link OrderBy} because ordering is not guaranteed
+     * after the join.
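+     * For example, "SELECT a, b FROM t WHERE c = 1" qualifies, while
+     * "SELECT a FROM t LIMIT 10" or "SELECT DISTINCT a FROM t" does not.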
+     * @param selectStatement
+     * @return
+     */
+    private static boolean isCouldPushToServerAsHashJoinProbeSide(SelectStatement selectStatement) {
+        return !selectStatement.isJoin()
+                && !selectStatement.isAggregate()
+                && !selectStatement.isDistinct()
+                && !(selectStatement.getFrom() instanceof DerivedTableNode)
+                && selectStatement.getLimit() == null
+                && selectStatement.getOffset() == null;
+    }
+
+    private static ParseNode combine(List<ParseNode> nodes) {
+        if (nodes.isEmpty())
+            return null;
+
+        if (nodes.size() == 1)
+            return nodes.get(0);
+
+        return NODE_FACTORY.and(nodes);
+    }
+
+    private boolean isWildCardSelectForTable(List<AliasedNode> select, TableRef tableRef, ColumnResolver resolver) throws SQLException {
+        for (AliasedNode aliasedNode : select) {
+            ParseNode node = aliasedNode.getNode();
+            if (node instanceof TableWildcardParseNode) {
+                TableName tableName = ((TableWildcardParseNode) node).getTableName();
+                if (tableRef.equals(resolver.resolveTable(tableName.getSchemaName(), tableName.getTableName()))) {
+                    return true;
+                }
+
+            }
+        }
+        return false;
+    }
+
+    private static Expression compilePostFilterExpression(StatementContext context, List<ParseNode> postFilters) throws SQLException {
+        if (postFilters.isEmpty())
+            return null;
+
+        ExpressionCompiler expressionCompiler = new ExpressionCompiler(context);
+        List<Expression> expressions = new ArrayList<Expression>(postFilters.size());
+        for (ParseNode postFilter : postFilters) {
+            expressionCompiler.reset();
+            Expression expression = postFilter.accept(expressionCompiler);
+            expressions.add(expression);
+        }
+
+        if (expressions.size() == 1)
+            return expressions.get(0);
+
+        return AndExpression.create(expressions);
+    }
+
+    public static PTable joinProjectedTables(PTable left, PTable right, JoinType type) throws SQLException {
+        Preconditions.checkArgument(left.getType() == PTableType.PROJECTED);
+        Preconditions.checkArgument(right.getType() == PTableType.PROJECTED);
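+        // Merge the two projected column lists: for FULL joins the LHS columns are
+        // re-created as nullable, since either side may be null-extended; RHS non-PK
+        // columns are appended with positions shifted past the LHS columns.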
+        List<PColumn> merged = Lists.<PColumn> newArrayList();
+        int startingPosition = left.getBucketNum() == null ? 0 : 1;
+        if (type == JoinType.Full) {
+            for (int i = startingPosition; i < left.getColumns().size(); i++) {
+                PColumn c  = left.getColumns().get(i);
+                merged.add(new ProjectedColumn(c.getName(), c.getFamilyName(),
+                        c.getPosition(), true, ((ProjectedColumn) c).getSourceColumnRef(), SchemaUtil.isPKColumn(c) ? null : c.getName().getBytes()));
+            }
+        } else {
+            merged.addAll(left.getColumns());
+            if (left.getBucketNum() != null) {
+                merged.remove(0);
+            }
+        }
+        int position = merged.size() + startingPosition;
+        for (PColumn c : right.getColumns()) {
+            if (!SchemaUtil.isPKColumn(c)) {
+                PColumn column = new ProjectedColumn(c.getName(), c.getFamilyName(), 
+                        position++, type == JoinType.Inner ? c.isNullable() : true, 
+                        ((ProjectedColumn) c).getSourceColumnRef(), c.getName().getBytes());
+                merged.add(column);
+            }
+        }
+        return new PTableImpl.Builder()
+                .setType(left.getType())
+                .setState(left.getIndexState())
+                .setTimeStamp(left.getTimeStamp())
+                .setIndexDisableTimestamp(left.getIndexDisableTimestamp())
+                .setSequenceNumber(left.getSequenceNumber())
+                .setImmutableRows(left.isImmutableRows())
+                .setDisableWAL(PTable.DEFAULT_DISABLE_WAL)
+                .setMultiTenant(left.isMultiTenant())
+                .setStoreNulls(left.getStoreNulls())
+                .setViewType(left.getViewType())
+                .setViewIndexIdType(left.getviewIndexIdType())
+                .setViewIndexId(left.getViewIndexId())
+                .setIndexType(left.getIndexType())
+                .setTransactionProvider(left.getTransactionProvider())
+                .setUpdateCacheFrequency(left.getUpdateCacheFrequency())
+                .setNamespaceMapped(left.isNamespaceMapped())
+                .setAutoPartitionSeqName(left.getAutoPartitionSeqName())
+                .setAppendOnlySchema(left.isAppendOnlySchema())
+                .setImmutableStorageScheme(ONE_CELL_PER_COLUMN)
+                .setQualifierEncodingScheme(NON_ENCODED_QUALIFIERS)
+                .setBaseColumnCount(BASE_TABLE_BASE_COLUMN_COUNT)
+                .setEncodedCQCounter(PTable.EncodedCQCounter.NULL_COUNTER)
+                .setUseStatsForParallelization(left.useStatsForParallelization())
+                .setExcludedColumns(ImmutableList.of())
+                .setTenantId(left.getTenantId())
+                .setSchemaName(left.getSchemaName())
+                .setTableName(PNameFactory.newName(SchemaUtil.getTableName(left.getName().getString(),
+                        right.getName().getString())))
+                .setPkName(left.getPKName())
+                .setRowKeyOrderOptimizable(left.rowKeyOrderOptimizable())
+                .setBucketNum(left.getBucketNum())
+                .setIndexes(left.getIndexes() == null ? Collections.emptyList() : left.getIndexes())
+                .setParentSchemaName(left.getParentSchemaName())
+                .setParentTableName(left.getParentTableName())
+                .setPhysicalNames(ImmutableList.<PName>of())
+                .setColumns(merged)
+                .build();
+    }
+
+}
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/KeyPart.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/KeyPart.java
similarity index 100%
rename from phoenix-core/src/main/java/org/apache/phoenix/compile/KeyPart.java
rename to phoenix-core-client/src/main/java/org/apache/phoenix/compile/KeyPart.java
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/LimitCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/LimitCompiler.java
similarity index 100%
rename from phoenix-core/src/main/java/org/apache/phoenix/compile/LimitCompiler.java
rename to phoenix-core-client/src/main/java/org/apache/phoenix/compile/LimitCompiler.java
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/ListJarsQueryPlan.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ListJarsQueryPlan.java
similarity index 100%
rename from phoenix-core/src/main/java/org/apache/phoenix/compile/ListJarsQueryPlan.java
rename to phoenix-core-client/src/main/java/org/apache/phoenix/compile/ListJarsQueryPlan.java
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/MutatingParallelIteratorFactory.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/MutatingParallelIteratorFactory.java
similarity index 100%
rename from phoenix-core/src/main/java/org/apache/phoenix/compile/MutatingParallelIteratorFactory.java
rename to phoenix-core-client/src/main/java/org/apache/phoenix/compile/MutatingParallelIteratorFactory.java
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/MutationPlan.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/MutationPlan.java
similarity index 100%
rename from phoenix-core/src/main/java/org/apache/phoenix/compile/MutationPlan.java
rename to phoenix-core-client/src/main/java/org/apache/phoenix/compile/MutationPlan.java
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/OffsetCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/OffsetCompiler.java
similarity index 100%
rename from phoenix-core/src/main/java/org/apache/phoenix/compile/OffsetCompiler.java
rename to phoenix-core-client/src/main/java/org/apache/phoenix/compile/OffsetCompiler.java
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/OpenStatementCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/OpenStatementCompiler.java
similarity index 100%
rename from phoenix-core/src/main/java/org/apache/phoenix/compile/OpenStatementCompiler.java
rename to phoenix-core-client/src/main/java/org/apache/phoenix/compile/OpenStatementCompiler.java
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/OrderByCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/OrderByCompiler.java
similarity index 100%
rename from phoenix-core/src/main/java/org/apache/phoenix/compile/OrderByCompiler.java
rename to phoenix-core-client/src/main/java/org/apache/phoenix/compile/OrderByCompiler.java
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/OrderPreservingTracker.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/OrderPreservingTracker.java
similarity index 100%
rename from phoenix-core/src/main/java/org/apache/phoenix/compile/OrderPreservingTracker.java
rename to phoenix-core-client/src/main/java/org/apache/phoenix/compile/OrderPreservingTracker.java
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/PostDDLCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/PostDDLCompiler.java
new file mode 100644
index 0000000000..e7f8275738
--- /dev/null
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/PostDDLCompiler.java
@@ -0,0 +1,372 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compile;
+
+import java.sql.SQLException;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.phoenix.cache.ServerCacheClient.ServerCache;
+import org.apache.phoenix.compile.GroupByCompiler.GroupBy;
+import org.apache.phoenix.compile.OrderByCompiler.OrderBy;
+import org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants;
+import org.apache.phoenix.execute.AggregatePlan;
+import org.apache.phoenix.execute.MutationState;
+import org.apache.phoenix.iterate.ResultIterator;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixStatement;
+import org.apache.phoenix.jdbc.PhoenixStatement.Operation;
+import org.apache.phoenix.parse.PFunction;
+import org.apache.phoenix.parse.PSchema;
+import org.apache.phoenix.parse.SelectStatement;
+import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.schema.AmbiguousColumnException;
+import org.apache.phoenix.schema.ColumnFamilyNotFoundException;
+import org.apache.phoenix.schema.ColumnNotFoundException;
+import org.apache.phoenix.schema.ColumnRef;
+import org.apache.phoenix.schema.FunctionNotFoundException;
+import org.apache.phoenix.schema.PColumn;
+import org.apache.phoenix.schema.PColumnFamily;
+import org.apache.phoenix.schema.SchemaNotFoundException;
+import org.apache.phoenix.schema.TableNotFoundException;
+import org.apache.phoenix.schema.TableRef;
+import org.apache.phoenix.schema.tuple.Tuple;
+import org.apache.phoenix.schema.types.PLong;
+import org.apache.phoenix.util.EncodedColumnsUtil;
+import org.apache.phoenix.util.ScanUtil;
+import org.apache.phoenix.util.TransactionUtil;
+
+import org.apache.phoenix.thirdparty.com.google.common.collect.Lists;
+
+
+/**
+ * 
+ * Class that compiles a plan to update data values after a DDL command
+ * executes.
+ *
+ * TODO: get rid of this ugly code and just go through the standard APIs.
+ * The only time we may still need this is to manage updating the empty
+ * key value, as we sometimes need to "go back through time" to adjust
+ * this.
+ * 
+ * @since 0.1
+ */
+public class PostDDLCompiler {
+    private final PhoenixConnection connection;
+    private final Scan scan;
+
+    public PostDDLCompiler(PhoenixConnection connection) {
+        this(connection, new Scan());
+    }
+
+    public PostDDLCompiler(PhoenixConnection connection, Scan scan) {
+        this.connection = connection;
+        this.scan = scan;
+        scan.setAttribute(BaseScannerRegionObserverConstants.UNGROUPED_AGG, QueryConstants.TRUE);
+    }
+
+    public MutationPlan compile(final List<TableRef> tableRefs, final byte[] emptyCF, final List<byte[]> projectCFs, final List<PColumn> deleteList,
+            final long timestamp) throws SQLException {
+        PhoenixStatement statement = new PhoenixStatement(connection);
+        final StatementContext context = new StatementContext(
+                statement,
+            new MultipleTableRefColumnResolver(tableRefs),
+                scan,
+                new SequenceManager(statement));
+        return new PostDDLMutationPlan(context, tableRefs, timestamp, emptyCF, deleteList, projectCFs);
+    }
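+
+    // Hypothetical usage sketch (tableRef and droppedColumns are assumed to come
+    // from the surrounding DDL code; this is illustrative, not prescriptive):
+    //
+    //   PostDDLCompiler compiler = new PostDDLCompiler(connection);
+    //   MutationPlan plan = compiler.compile(
+    //       Collections.singletonList(tableRef),
+    //       null /* emptyCF */, null /* projectCFs */,
+    //       droppedColumns, HConstants.LATEST_TIMESTAMP);
+    //   MutationState state = plan.execute();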
+
+    private static class MultipleTableRefColumnResolver implements ColumnResolver {
+
+        private final List<TableRef> tableRefs;
+
+        public MultipleTableRefColumnResolver(List<TableRef> tableRefs) {
+            this.tableRefs = tableRefs;
+        }
+
+        @Override
+        public List<TableRef> getTables() {
+            return tableRefs;
+        }
+
+        @Override
+        public TableRef resolveTable(String schemaName, String tableName) throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public ColumnRef resolveColumn(String schemaName, String tableName, String colName)
+                throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public List<PFunction> getFunctions() {
+            return Collections.<PFunction>emptyList();
+        }
+
+        @Override
+        public PFunction resolveFunction(String functionName)
+            throws SQLException {
+            throw new FunctionNotFoundException(functionName);
+        }
+
+        @Override
+        public boolean hasUDFs() {
+            return false;
+        }
+
+        @Override
+        public PSchema resolveSchema(String schemaName) throws SQLException {
+            throw new SchemaNotFoundException(schemaName);
+        }
+
+        @Override
+        public List<PSchema> getSchemas() {
+            throw new UnsupportedOperationException();
+        }
+
+    }
+
+    private class PostDDLMutationPlan extends BaseMutationPlan {
+
+        private final StatementContext context;
+        private final List<TableRef> tableRefs;
+        private final long timestamp;
+        private final byte[] emptyCF;
+        private final List<PColumn> deleteList;
+        private final List<byte[]> projectCFs;
+
+        public PostDDLMutationPlan(StatementContext context, List<TableRef> tableRefs, long timestamp, byte[] emptyCF, List<PColumn> deleteList, List<byte[]> projectCFs) {
+            super(context, Operation.UPSERT);
+            this.context = context;
+            this.tableRefs = tableRefs;
+            this.timestamp = timestamp;
+            this.emptyCF = emptyCF;
+            this.deleteList = deleteList;
+            this.projectCFs = projectCFs;
+        }
+
+        @Override
+        public MutationState execute() throws SQLException {
+            if (tableRefs.isEmpty()) {
+                return new MutationState(0, 1000, connection);
+            }
+            boolean wasAutoCommit = connection.getAutoCommit();
+            try {
+                connection.setAutoCommit(true);
+                SQLException sqlE = null;
+                /*
+                 * Handles:
+                 * 1) deletion of all rows for a DROP TABLE and, subsequently, deletion of all rows for a DROP INDEX;
+                 * 2) deletion of all column values for an ALTER TABLE DROP COLUMN;
+                 * 3) updating the necessary rows to have an empty KV;
+                 * 4) updating table stats.
+                 */
+                long totalMutationCount = 0;
+                for (final TableRef tableRef : tableRefs) {
+                    Scan scan = ScanUtil.newScan(context.getScan());
+                    SelectStatement select = SelectStatement.COUNT_ONE;
+                    // We need a column resolver bound to this specific tableRef
+                    ColumnResolver resolver = new SingleTableRefColumnResolver(tableRef);
+                    PhoenixStatement statement = new PhoenixStatement(connection);
+                    StatementContext context = new StatementContext(statement, resolver, scan, new SequenceManager(statement));
+                    long ts = timestamp;
+                    // FIXME: DDL operations aren't transactional, so we're basing the timestamp on a server timestamp.
+                    // Not sure what the fix should be. We don't need conflict detection nor filtering of invalid transactions
+                    // in this case, so maybe this is ok.
+                    if (ts != HConstants.LATEST_TIMESTAMP && tableRef.getTable().isTransactional()) {
+                        ts = TransactionUtil.convertToNanoseconds(ts);
+                    }
+                    ScanUtil.setTimeRange(scan, scan.getTimeRange().getMin(), ts);
+                    if (emptyCF != null) {
+                        scan.setAttribute(BaseScannerRegionObserverConstants.EMPTY_CF, emptyCF);
+                        scan.setAttribute(BaseScannerRegionObserverConstants.EMPTY_COLUMN_QUALIFIER, EncodedColumnsUtil.getEmptyKeyValueInfo(tableRef.getTable()).getFirst());
+                    }
+                    ServerCache cache = null;
+                    try {
+                        if (deleteList != null) {
+                            if (deleteList.isEmpty()) {
+                                scan.setAttribute(BaseScannerRegionObserverConstants.DELETE_AGG, QueryConstants.TRUE);
+                                // In the case of a row deletion, add index metadata so mutable secondary indexing works
+                                /* TODO: we currently manually run a scan to delete the index data here
+                                ImmutableBytesWritable ptr = context.getTempPtr();
+                                tableRef.getTable().getIndexMaintainers(ptr);
+                                if (ptr.getLength() > 0) {
+                                    IndexMetaDataCacheClient client = new IndexMetaDataCacheClient(connection, tableRef);
+                                    cache = client.addIndexMetadataCache(context.getScanRanges(), ptr);
+                                    byte[] uuidValue = cache.getId();
+                                    scan.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
+                                }
+                                */
+                            } else {
+                                // In the case of the empty key value column family changing, do not send the index
+                                // metadata, as we're currently managing this from the client. It's possible for the
+                                // data empty column family to stay the same, while the index empty column family
+                                // changes.
+                                PColumn column = deleteList.get(0);
+                                byte[] cq = column.getColumnQualifierBytes();
+                                if (emptyCF == null) {
+                                    scan.addColumn(column.getFamilyName().getBytes(), cq);
+                                }
+                                scan.setAttribute(BaseScannerRegionObserverConstants.DELETE_CF, column.getFamilyName().getBytes());
+                                scan.setAttribute(BaseScannerRegionObserverConstants.DELETE_CQ, cq);
+                            }
+                        }
+                        List<byte[]> columnFamilies = Lists.newArrayListWithExpectedSize(tableRef.getTable().getColumnFamilies().size());
+                        if (projectCFs == null) {
+                            for (PColumnFamily family : tableRef.getTable().getColumnFamilies()) {
+                                columnFamilies.add(family.getName().getBytes());
+                            }
+                        } else {
+                            for (byte[] projectCF : projectCFs) {
+                                columnFamilies.add(projectCF);
+                            }
+                        }
+                        // Need to project all column families into the scan, since we haven't yet created our empty key value
+                        RowProjector projector = ProjectionCompiler.compile(context, SelectStatement.COUNT_ONE, GroupBy.EMPTY_GROUP_BY);
+                        context.getAggregationManager().compile(context, GroupBy.EMPTY_GROUP_BY);
+                        // Explicitly project these column families and don't project the empty key value,
+                        // since at this point we haven't added the empty key value everywhere.
+                        if (columnFamilies != null) {
+                            scan.getFamilyMap().clear();
+                            for (byte[] family : columnFamilies) {
+                                scan.addFamily(family);
+                            }
+                            projector = new RowProjector(projector, false);
+                        }
+                        // Ignore exceptions due to not being able to resolve any view columns,
+                        // as this just means the view is invalid. Continue on and try to perform
+                        // any other Post DDL operations.
+                        try {
+                            // Since dropping a VIEW does not affect the underlying data, we do
+                            // not need to pass through the view statement here.
+                            WhereCompiler.compile(context, select); // Push where clause into scan
+                        } catch (ColumnFamilyNotFoundException | ColumnNotFoundException
+                                | AmbiguousColumnException e) {
+                            continue;
+                        }
+                        QueryPlan plan = new AggregatePlan(context, select, tableRef, projector, null, null,
+                                OrderBy.EMPTY_ORDER_BY, null, GroupBy.EMPTY_GROUP_BY, null, null);
+                        try {
+                            ResultIterator iterator = plan.iterator();
+                            try {
+                                Tuple row = iterator.next();
+                                ImmutableBytesWritable ptr = context.getTempPtr();
+                                totalMutationCount += (Long)projector.getColumnProjector(0).getValue(row, PLong.INSTANCE, ptr);
+                            } catch (SQLException e) {
+                                sqlE = e;
+                            } finally {
+                                try {
+                                    iterator.close();
+                                } catch (SQLException e) {
+                                    if (sqlE == null) {
+                                        sqlE = e;
+                                    } else {
+                                        sqlE.setNextException(e);
+                                    }
+                                } finally {
+                                    if (sqlE != null) {
+                                        throw sqlE;
+                                    }
+                                }
+                            }
+                        } catch (TableNotFoundException e) {
+                            // Ignore and continue, as HBase throws when the table hasn't been written to
+                            // FIXME: Remove if this is fixed in 0.96
+                        }
+                    } finally {
+                        if (cache != null) { // Remove server cache if there is one
+                            cache.close();
+                        }
+                    }
+
+                }
+                final long count = totalMutationCount;
+                return new MutationState(1, 1000, connection) {
+                    @Override
+                    public long getUpdateCount() {
+                        return count;
+                    }
+                };
+            } finally {
+                if (!wasAutoCommit) connection.setAutoCommit(wasAutoCommit);
+            }
+        }
+
+        private class SingleTableRefColumnResolver implements ColumnResolver {
+            private final TableRef tableRef;
+
+            public SingleTableRefColumnResolver(TableRef tableRef) {
+                this.tableRef = tableRef;
+            }
+
+            @Override
+            public List<TableRef> getTables() {
+                return Collections.singletonList(tableRef);
+            }
+
+            @Override
+            public List<PFunction> getFunctions() {
+                return Collections.emptyList();
+            }
+
+            @Override
+            public TableRef resolveTable(String schemaName, String tableName)
+                    throws SQLException {
+                throw new UnsupportedOperationException();
+            }
+
+            @Override
+            public ColumnRef resolveColumn(String schemaName, String tableName, String colName) throws SQLException {
+                PColumn column = tableName != null
+                        ? tableRef.getTable().getColumnFamily(tableName).getPColumnForColumnName(colName)
+                        : tableRef.getTable().getColumnForColumnName(colName);
+                return new ColumnRef(tableRef, column.getPosition());
+            }
+
+            @Override
+            public PFunction resolveFunction(String functionName) throws SQLException {
+                throw new UnsupportedOperationException();
+            }
+
+            @Override
+            public boolean hasUDFs() {
+                return false;
+            }
+
+            @Override
+            public List<PSchema> getSchemas() {
+                throw new UnsupportedOperationException();
+            }
+
+            @Override
+            public PSchema resolveSchema(String schemaName) throws SQLException {
+                throw new SchemaNotFoundException(schemaName);
+            }
+        }
+    }
+}
\ No newline at end of file
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/PostIndexDDLCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/PostIndexDDLCompiler.java
similarity index 100%
rename from phoenix-core/src/main/java/org/apache/phoenix/compile/PostIndexDDLCompiler.java
rename to phoenix-core-client/src/main/java/org/apache/phoenix/compile/PostIndexDDLCompiler.java
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/PostLocalIndexDDLCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/PostLocalIndexDDLCompiler.java
new file mode 100644
index 0000000000..01cf620c8d
--- /dev/null
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/PostLocalIndexDDLCompiler.java
@@ -0,0 +1,133 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compile;
+
+import java.sql.SQLException;
+import java.util.List;
+
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants;
+import org.apache.phoenix.execute.MutationState;
+import org.apache.phoenix.hbase.index.covered.update.ColumnReference;
+import org.apache.phoenix.index.IndexMaintainer;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixStatement;
+import org.apache.phoenix.jdbc.PhoenixStatement.Operation;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTable.ImmutableStorageScheme;
+import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.schema.TableRef;
+import org.apache.phoenix.schema.tuple.Tuple;
+import org.apache.phoenix.schema.types.PLong;
+import org.apache.phoenix.util.ByteUtil;
+
+import org.apache.phoenix.thirdparty.com.google.common.collect.Lists;
+
+/**
+ * For local indexes, we optimize the initial index population by *not* sending
+ * Puts over the wire for the index rows, as we don't need to do that. Instead,
+ * we tap into our region observer to generate the index rows based on the data
+ * rows as we scan.
+ */
+public class PostLocalIndexDDLCompiler {
+    private final PhoenixConnection connection;
+    private final String tableName;
+
+    public PostLocalIndexDDLCompiler(PhoenixConnection connection, String tableName) {
+        this.connection = connection;
+        this.tableName = tableName;
+    }
+
+    public MutationPlan compile(PTable index) throws SQLException {
+        try (final PhoenixStatement statement = new PhoenixStatement(connection)) {
+            String query = "SELECT count(*) FROM " + tableName;
+            final QueryPlan plan = statement.compileQuery(query);
+            TableRef tableRef = plan.getTableRef();
+            Scan scan = plan.getContext().getScan();
+            ImmutableBytesWritable ptr = new ImmutableBytesWritable();
+            final PTable dataTable = tableRef.getTable();
+            List<PTable> indexes = Lists.newArrayListWithExpectedSize(1);
+            for (PTable indexTable : dataTable.getIndexes()) {
+                if (indexTable.getKey().equals(index.getKey())) {
+                    index = indexTable;
+                    break;
+                }
+            }
+            // Only build newly created index.
+            indexes.add(index);
+            IndexMaintainer.serialize(dataTable, ptr, indexes, plan.getContext().getConnection());
+            // Set attribute on scan that UngroupedAggregateRegionObserver will switch on.
+            // We'll detect that this attribute was set on the server side and write the index
+            // rows per region as a result. The value of the attribute will be our persisted
+            // index maintainers.
+            // Define the LOCAL_INDEX_BUILD as a new static in BaseScannerRegionObserver
+            scan.setAttribute(BaseScannerRegionObserverConstants.LOCAL_INDEX_BUILD_PROTO, ByteUtil.copyKeyBytesIfNecessary(ptr));
+            // By default, we'd use a FirstKeyOnly filter as nothing else needs to be projected for count(*).
+            // However, in this case, we need to project all of the data columns that contribute to the index.
+            IndexMaintainer indexMaintainer = index.getIndexMaintainer(dataTable, connection);
+            for (ColumnReference columnRef : indexMaintainer.getAllColumns()) {
+                if (index.getImmutableStorageScheme() == ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS) {
+                    scan.addFamily(columnRef.getFamily());
+                } else {
+                    scan.addColumn(columnRef.getFamily(), columnRef.getQualifier());
+                }
+            }
+            if (dataTable.isTransactional()) {
+                scan.setAttribute(BaseScannerRegionObserverConstants.TX_STATE, connection.getMutationState().encodeTransaction());
+            }
+
+            // Go through MutationPlan abstraction so that we can create local indexes
+            // with a connectionless connection (which makes testing easier).
+            return new PostLocalIndexDDLMutationPlan(plan, dataTable);
+        }
+    }
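+
+    // Hypothetical usage sketch (localIndex and the table name are assumed):
+    //
+    //   PostLocalIndexDDLCompiler compiler =
+    //       new PostLocalIndexDDLCompiler(connection, "MY_SCHEMA.MY_TABLE");
+    //   MutationPlan plan = compiler.compile(localIndex);
+    //   MutationState state = plan.execute(); // triggers the server-side index build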
+
+    private class PostLocalIndexDDLMutationPlan extends BaseMutationPlan {
+
+        private final QueryPlan plan;
+        private final PTable dataTable;
+
+        private PostLocalIndexDDLMutationPlan(QueryPlan plan, PTable dataTable) {
+            super(plan.getContext(), Operation.UPSERT);
+            this.plan = plan;
+            this.dataTable = dataTable;
+        }
+
+        @Override
+        public MutationState execute() throws SQLException {
+            connection.getMutationState().commitDDLFence(dataTable);
+            Tuple tuple = plan.iterator().next();
+            long rowCount = 0;
+            if (tuple != null) {
+                Cell kv = tuple.getValue(0);
+                ImmutableBytesWritable tmpPtr = new ImmutableBytesWritable(kv.getValueArray(),
+                        kv.getValueOffset(), kv.getValueLength());
+                // A single Cell will be returned with the count(*) - we decode that here
+                rowCount = PLong.INSTANCE.getCodec().decodeLong(tmpPtr, SortOrder.getDefault());
+            }
+            // The contract is to return a MutationState that contains the number of rows modified.
+            // In this case, it's the number of rows in the data table, which corresponds to the
+            // number of index rows that were added.
+            return new MutationState(0, 0, connection, rowCount);
+        }
+    }
+}
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ProjectionCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ProjectionCompiler.java
new file mode 100644
index 0000000000..5bca06cf98
--- /dev/null
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ProjectionCompiler.java
@@ -0,0 +1,796 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compile;
+
+import static org.apache.phoenix.query.QueryServices.WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB;
+import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB;
+
+import java.io.ByteArrayOutputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableSet;
+
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.io.WritableUtils;
+import org.apache.phoenix.compile.GroupByCompiler.GroupBy;
+import org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants;
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.exception.SQLExceptionInfo;
+import org.apache.phoenix.expression.BaseTerminalExpression;
+import org.apache.phoenix.expression.CoerceExpression;
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.expression.KeyValueColumnExpression;
+import org.apache.phoenix.expression.LiteralExpression;
+import org.apache.phoenix.expression.ProjectedColumnExpression;
+import org.apache.phoenix.expression.SingleCellColumnExpression;
+import org.apache.phoenix.expression.function.ArrayIndexFunction;
+import org.apache.phoenix.expression.visitor.ExpressionVisitor;
+import org.apache.phoenix.expression.visitor.ProjectedColumnExpressionVisitor;
+import org.apache.phoenix.expression.visitor.ReplaceArrayFunctionExpressionVisitor;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.parse.AliasedNode;
+import org.apache.phoenix.parse.BindParseNode;
+import org.apache.phoenix.parse.ColumnParseNode;
+import org.apache.phoenix.parse.FamilyWildcardParseNode;
+import org.apache.phoenix.parse.FunctionParseNode;
+import org.apache.phoenix.parse.ParseNode;
+import org.apache.phoenix.parse.PhoenixRowTimestampParseNode;
+import org.apache.phoenix.parse.SelectStatement;
+import org.apache.phoenix.parse.SequenceValueParseNode;
+import org.apache.phoenix.parse.TableName;
+import org.apache.phoenix.parse.TableWildcardParseNode;
+import org.apache.phoenix.parse.WildcardParseNode;
+import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.schema.AmbiguousColumnException;
+import org.apache.phoenix.schema.ArgumentTypeMismatchException;
+import org.apache.phoenix.schema.ColumnFamilyNotFoundException;
+import org.apache.phoenix.schema.ColumnNotFoundException;
+import org.apache.phoenix.schema.ColumnRef;
+import org.apache.phoenix.schema.IndexUncoveredDataColumnRef;
+import org.apache.phoenix.schema.KeyValueSchema;
+import org.apache.phoenix.schema.KeyValueSchema.KeyValueSchemaBuilder;
+import org.apache.phoenix.schema.PColumn;
+import org.apache.phoenix.schema.PColumnFamily;
+import org.apache.phoenix.schema.PDatum;
+import org.apache.phoenix.schema.PName;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTable.ImmutableStorageScheme;
+import org.apache.phoenix.schema.PTableKey;
+import org.apache.phoenix.schema.PTableType;
+import org.apache.phoenix.schema.ProjectedColumn;
+import org.apache.phoenix.schema.RowKeySchema;
+import org.apache.phoenix.schema.TableNotFoundException;
+import org.apache.phoenix.schema.TableRef;
+import org.apache.phoenix.schema.ValueBitSet;
+import org.apache.phoenix.schema.tuple.Tuple;
+import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.util.ByteUtil;
+import org.apache.phoenix.util.IndexUtil;
+import org.apache.phoenix.util.SchemaUtil;
+import org.apache.phoenix.util.SizedUtil;
+
+import org.apache.phoenix.thirdparty.com.google.common.collect.Lists;
+
+
+/**
+ * Class that iterates through expressions in the SELECT clause and adds projected
+ * columns to the scan.
+ *
+ * @since 0.1
+ */
+public class ProjectionCompiler {
+    private static final Expression NULL_EXPRESSION = LiteralExpression.newConstant(null);
+    private ProjectionCompiler() {
+    }
+    
+    private static void projectColumnFamily(PTable table, Scan scan, byte[] family) {
+        // Will project all columns for the given CF
+        scan.addFamily(family);
+    }
+    
+    public static RowProjector compile(StatementContext context, SelectStatement statement, GroupBy groupBy) throws SQLException  {
+        boolean wildcardIncludesDynamicCols = context.getConnection().getQueryServices()
+                .getConfiguration().getBoolean(WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB,
+                        DEFAULT_WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB);
+        return compile(context, statement, groupBy, Collections.<PColumn>emptyList(),
+                // Pass null expression because we don't want empty key value to be projected
+                NULL_EXPRESSION,
+                wildcardIncludesDynamicCols);
+    }
+    
+    private static int getMinPKOffset(PTable table, PName tenantId) {
+        // In SELECT *, don't include tenant column or index ID column for tenant connection
+        int posOffset = table.getBucketNum() == null ? 0 : 1;
+        if (table.isMultiTenant() && tenantId != null) {
+            posOffset++;
+        }
+        if (table.getViewIndexId() != null) {
+            posOffset++;
+        }
+        return posOffset;
+    }
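+
+    // Worked example: for a salted, multi-tenant view index queried over a
+    // tenant-specific connection, the offset skips the salt byte, the tenant ID
+    // column, and the view index ID column, so this method returns 3.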
+    
+    private static void projectAllTableColumns(StatementContext context, TableRef tableRef, boolean resolveColumn, List<Expression> projectedExpressions, List<ExpressionProjector> projectedColumns, List<? extends PDatum> targetColumns) throws SQLException {
+        ColumnResolver resolver = context.getResolver();
+        PTable table = tableRef.getTable();
+        int projectedOffset = projectedExpressions.size();
+        int posOffset = table.getBucketNum() == null ? 0 : 1;
+        int minPKOffset = getMinPKOffset(table, context.getConnection().getTenantId());
+        for (int i = posOffset, j = posOffset; i < table.getColumns().size(); i++) {
+            PColumn column = table.getColumns().get(i);
+            // Skip tenant ID column (which may not be the first column, but is the first PK column)
+            if (SchemaUtil.isPKColumn(column) && j++ < minPKOffset) {
+                posOffset++;
+                continue;
+            }
+            ColumnRef ref = new ColumnRef(tableRef,i);
+            String colName = ref.getColumn().getName().getString();
+            String tableAlias = tableRef.getTableAlias();
+            if (resolveColumn) {
+                try {
+                    if (tableAlias != null) {
+                        ref = resolver.resolveColumn(null, tableAlias, colName);
+                    } else {
+                        String schemaName = table.getSchemaName().getString();
+                        ref = resolver.resolveColumn(schemaName.length() == 0 ? null : schemaName, table.getTableName().getString(), colName);
+                    }
+                    // The freshly resolved column's family had better be the same as the original one.
+                    // If not, trigger the disambiguation logic. Also see PTableImpl.getColumnForColumnName(...)
+                    if (column.getFamilyName() != null && !column.getFamilyName().equals(ref.getColumn().getFamilyName())) {
+                        throw new AmbiguousColumnException();
+                    }
+                } catch (AmbiguousColumnException e) {
+                    if (column.getFamilyName() != null) {
+                        ref = resolver.resolveColumn(tableAlias != null ? tableAlias : table.getTableName().getString(), column.getFamilyName().getString(), colName);
+                    } else {
+                        throw e;
+                    }
+                }
+            }
+            Expression expression = ref.newColumnExpression();
+            expression = coerceIfNecessary(i-posOffset+projectedOffset, targetColumns, expression);
+            ImmutableBytesWritable ptr = context.getTempPtr();
+            if (IndexUtil.getViewConstantValue(column, ptr)) {
+                expression = LiteralExpression.newConstant(
+                        column.getDataType().toObject(ptr, column.getSortOrder()),
+                        expression.getDataType(),
+                        column.getSortOrder());
+            }
+            projectedExpressions.add(expression);
+            boolean isCaseSensitive = !SchemaUtil.normalizeIdentifier(colName).equals(colName);
+            projectedColumns.add(new ExpressionProjector(colName, colName, tableRef.getTableAlias() == null ? table.getName().getString() : tableRef.getTableAlias(), expression, isCaseSensitive));
+        }
+    }
+    
+    private static void projectAllIndexColumns(StatementContext context, TableRef tableRef, boolean resolveColumn, List<Expression> projectedExpressions, List<ExpressionProjector> projectedColumns, List<? extends PDatum> targetColumns) throws SQLException {
+        ColumnResolver resolver = context.getResolver();
+        PTable index = tableRef.getTable();
+        int projectedOffset = projectedExpressions.size();
+        PhoenixConnection conn = context.getConnection();
+        PName tenantId = conn.getTenantId();
+        String dataTableName = index.getParentName().getString();
+        PTable dataTable = null;
+        try {
+            dataTable = conn.getTable(new PTableKey(tenantId, dataTableName));
+        } catch (TableNotFoundException e) {
+            if (tenantId != null) {
+                // Check with null tenantId
+                dataTable = conn.getTable(new PTableKey(null, dataTableName));
+            }
+            else {
+                throw e;
+            }
+        }
+        int tableOffset = dataTable.getBucketNum() == null ? 0 : 1;
+        int minTablePKOffset = getMinPKOffset(dataTable, tenantId);
+        int minIndexPKOffset = getMinPKOffset(index, tenantId);
+        if (!IndexUtil.shouldIndexBeUsedForUncoveredQuery(tableRef)) {
+            if (index.getColumns().size()-minIndexPKOffset != dataTable.getColumns().size()-minTablePKOffset) {
+                // The optimizer will end up not using this, so just throw
+                String schemaNameStr = dataTable.getSchemaName() == null ? null : dataTable.getSchemaName().getString();
+                String tableNameStr = dataTable.getTableName() == null ? null : dataTable.getTableName().getString();
+                throw new ColumnNotFoundException(schemaNameStr, tableNameStr, null, WildcardParseNode.INSTANCE.toString());
+            }
+        }
+        // At this point, the index table is either fully covered, or we are projecting uncovered
+        // columns. The easy thing would be to just call projectAllTableColumns on the projected
+        // table, but its columns are not in the same order as the data columns, so we have to
+        // map them to the data column order.
+        TableRef projectedTableRef =
+                new TableRef(resolver.getTables().get(0), tableRef.getTableAlias());
+        for (int i = tableOffset, j = tableOffset; i < dataTable.getColumns().size(); i++) {
+            PColumn column = dataTable.getColumns().get(i);
+            // Skip tenant ID column (which may not be the first column, but is the first PK column)
+            if (SchemaUtil.isPKColumn(column) && j++ < minTablePKOffset) {
+                tableOffset++;
+                continue;
+            }
+            PColumn dataTableColumn = dataTable.getColumns().get(i);
+            String indexColName = IndexUtil.getIndexColumnName(dataTableColumn);
+            PColumn indexColumn = null;
+            ColumnRef ref = null;
+            try {
+                indexColumn = index.getColumnForColumnName(indexColName);
+                // TODO: could we do this more efficiently than catching the exception?
+            } catch (ColumnNotFoundException e) {
+                if (IndexUtil.shouldIndexBeUsedForUncoveredQuery(tableRef)) {
+                    // Projected columns have the same name as in the data table
+                    String familyName =
+                            dataTableColumn.getFamilyName() == null ? null
+                                    : dataTableColumn.getFamilyName().getString();
+                    ref =
+                            resolver.resolveColumn(familyName,
+                                tableRef.getTableAlias() == null
+                                        ? tableRef.getTable().getName().getString()
+                                        : tableRef.getTableAlias(),
+                                indexColName);
+                    indexColumn = ref.getColumn();
+                } else {
+                    throw e;
+                }
+            }
+            ref = new ColumnRef(projectedTableRef, indexColumn.getPosition());
+            String colName = dataTableColumn.getName().getString();
+            String tableAlias = tableRef.getTableAlias();
+            if (resolveColumn) {
+                try {
+                    if (tableAlias != null) {
+                        ref = resolver.resolveColumn(null, tableAlias, indexColName);
+                    } else {
+                        String schemaName = index.getSchemaName().getString();
+                        ref = resolver.resolveColumn(schemaName.length() == 0 ? null : schemaName, index.getTableName().getString(), indexColName);
+                    }
+                } catch (AmbiguousColumnException e) {
+                    if (indexColumn.getFamilyName() != null) {
+                        ref = resolver.resolveColumn(tableAlias != null ? tableAlias : index.getTableName().getString(), indexColumn.getFamilyName().getString(), indexColName);
+                    } else {
+                        throw e;
+                    }
+                }
+            }
+            Expression expression = ref.newColumnExpression();
+            expression = coerceIfNecessary(i-tableOffset+projectedOffset, targetColumns, expression);
+            // We do not need to check if the column is a viewConstant, because view constants never
+            // appear as a column in an index
+            projectedExpressions.add(expression);
+            boolean isCaseSensitive = !SchemaUtil.normalizeIdentifier(colName).equals(colName);
+            ExpressionProjector projector = new ExpressionProjector(colName, colName, tableRef.getTableAlias() == null ? dataTable.getName().getString() : tableRef.getTableAlias(), expression, isCaseSensitive);
+            projectedColumns.add(projector);
+        }
+    }
+    
+    private static void projectTableColumnFamily(StatementContext context, String cfName, TableRef tableRef, boolean resolveColumn, List<Expression> projectedExpressions, List<ExpressionProjector> projectedColumns) throws SQLException {
+        PTable table = tableRef.getTable();
+        PColumnFamily pfamily = table.getColumnFamily(cfName);
+        for (PColumn column : pfamily.getColumns()) {
+            ColumnRef ref = new ColumnRef(tableRef, column.getPosition());
+            if (resolveColumn) {
+                ref = context.getResolver().resolveColumn(table.getTableName().getString(), cfName, column.getName().getString());
+            }
+            Expression expression = ref.newColumnExpression();
+            projectedExpressions.add(expression);
+            String colName = column.getName().toString();
+            boolean isCaseSensitive = !SchemaUtil.normalizeIdentifier(colName).equals(colName);
+            projectedColumns.add(new ExpressionProjector(colName, colName, tableRef.getTableAlias() == null ?
+                    table.getName().getString() : tableRef.getTableAlias(), expression, isCaseSensitive));
+        }
+    }
+
+    private static void projectIndexColumnFamily(StatementContext context, String cfName, TableRef tableRef, boolean resolveColumn, List<Expression> projectedExpressions, List<ExpressionProjector> projectedColumns) throws SQLException {
+        ColumnResolver resolver = context.getResolver();
+        PTable index = tableRef.getTable();
+        PhoenixConnection conn = context.getConnection();
+        String dataTableName = index.getParentName().getString();
+        PTable dataTable = conn.getTable(new PTableKey(conn.getTenantId(), dataTableName));
+        PColumnFamily pfamily = dataTable.getColumnFamily(cfName);
+        TableRef projectedTableRef =
+                new TableRef(resolver.getTables().get(0), tableRef.getTableAlias());
+        PTable projectedIndex = projectedTableRef.getTable();
+        for (PColumn column : pfamily.getColumns()) {
+            String indexColName = IndexUtil.getIndexColumnName(column);
+            PColumn indexColumn = null;
+            ColumnRef ref = null;
+            String indexColumnFamily = null;
+            try {
+                indexColumn = index.getColumnForColumnName(indexColName);
+                ref = new ColumnRef(projectedTableRef, indexColumn.getPosition());
+                indexColumnFamily =
+                        indexColumn.getFamilyName() == null ? null
+                                : indexColumn.getFamilyName().getString();
+            } catch (ColumnNotFoundException e) {
+                if (IndexUtil.shouldIndexBeUsedForUncoveredQuery(tableRef)) {
+                    try {
+                        // Projected columns have the same name as in the data table
+                        String colName = column.getName().getString();
+                        String familyName =
+                                column.getFamilyName() == null ? null
+                                        : column.getFamilyName().getString();
+                        resolver.resolveColumn(familyName,
+                            tableRef.getTableAlias() == null
+                                    ? tableRef.getTable().getName().getString()
+                                    : tableRef.getTableAlias(),
+                            indexColName);
+                        indexColumn = projectedIndex.getColumnForColumnName(colName);
+                    } catch (ColumnFamilyNotFoundException c) {
+                        throw e;
+                    }
+                } else {
+                    throw e;
+                }
+            }
+            if (resolveColumn) {
+                ref =
+                        resolver.resolveColumn(index.getTableName().getString(), indexColumnFamily,
+                            indexColName);
+            }
+            Expression expression = ref.newColumnExpression();
+            projectedExpressions.add(expression);
+            String colName = column.getName().toString();
+            boolean isCaseSensitive = !SchemaUtil.normalizeIdentifier(colName).equals(colName);
+            projectedColumns.add(new ExpressionProjector(colName, colName,
+                    tableRef.getTableAlias() == null ? dataTable.getName().getString()
+                            : tableRef.getTableAlias(),
+                    expression, isCaseSensitive));
+        }
+    }
+    
+    private static Expression coerceIfNecessary(int index, List<? extends PDatum> targetColumns, Expression expression) throws SQLException {
+        if (index < targetColumns.size()) {
+            PDatum targetColumn = targetColumns.get(index);
+            if (targetColumn.getDataType() != expression.getDataType()) {
+                PDataType targetType = targetColumn.getDataType();
+                // Check if the coercion is allowed, using the more relaxed isCastableTo check, since we promote
+                // INTEGER to LONG during expression evaluation and then convert back to INTEGER on UPSERT SELECT
+                // (and we don't have an actual value we can specifically check against).
+                if (expression.getDataType() != null && !expression.getDataType().isCastableTo(targetType)) {
+                    throw new ArgumentTypeMismatchException(targetType, expression.getDataType(), "column: " + targetColumn);
+                }
+                expression = CoerceExpression.create(expression, targetType, targetColumn.getSortOrder(), targetColumn.getMaxLength());
+            }
+        }
+        return expression;
+    }
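+
+    // Illustrative sketch: when an UPSERT SELECT targets an INTEGER column but the
+    // projected expression evaluates as LONG, the code above wraps the expression
+    // roughly like this (targetColumn assumed to describe the INTEGER target):
+    //
+    //   expression = CoerceExpression.create(expression, PInteger.INSTANCE,
+    //       targetColumn.getSortOrder(), targetColumn.getMaxLength());
+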
+    /**
+     * Builds the projection for the scan
+     * @param context query context kept between compilation of different query clauses
+     * @param statement the statement being compiled
+     * @param groupBy compiled GROUP BY clause
+     * @param targetColumns list of columns, parallel to aliasedNodes, that are being set for an
+     * UPSERT SELECT statement. Used to coerce expression types to the expected target type.
+     * @param where the where clause expression
+     * @param wildcardIncludesDynamicCols true if wildcard queries should include dynamic columns
+     * @return projector used to access row values during scan
+     * @throws SQLException 
+     */
+    public static RowProjector compile(StatementContext context, SelectStatement statement,
+            GroupBy groupBy, List<? extends PDatum> targetColumns, Expression where,
+            boolean wildcardIncludesDynamicCols) throws SQLException {
+        List<KeyValueColumnExpression> arrayKVRefs = new ArrayList<>();
+        List<ProjectedColumnExpression> arrayProjectedColumnRefs = new ArrayList<>();
+        List<Expression> arrayKVFuncs = new ArrayList<>();
+        List<Expression> arrayOldFuncs = new ArrayList<>();
+        Map<Expression, Integer> arrayExpressionCounts = new HashMap<>();
+        List<AliasedNode> aliasedNodes = statement.getSelect();
+        // Set up the projected columns in the Scan
+        SelectClauseVisitor selectVisitor = new SelectClauseVisitor(context, groupBy, arrayKVRefs,
+                arrayKVFuncs, arrayExpressionCounts, arrayProjectedColumnRefs, arrayOldFuncs,
+                statement);
+        List<ExpressionProjector> projectedColumns = new ArrayList<>();
+        ColumnResolver resolver = context.getResolver();
+        TableRef tableRef = context.getCurrentTable();
+        PTable table = tableRef.getTable();
+        boolean resolveColumn = !tableRef.equals(resolver.getTables().get(0));
+        boolean isWildcard = false;
+        Scan scan = context.getScan();
+        int index = 0;
+        List<Expression> projectedExpressions = Lists.newArrayListWithExpectedSize(aliasedNodes.size());
+        List<byte[]> projectedFamilies = Lists.newArrayListWithExpectedSize(aliasedNodes.size());
+        for (AliasedNode aliasedNode : aliasedNodes) {
+            ParseNode node = aliasedNode.getNode();
+            // TODO: visitor?
+            if (node instanceof WildcardParseNode) {
+                if (statement.isAggregate()) {
+                    ExpressionCompiler.throwNonAggExpressionInAggException(node.toString());
+                }
+                if (tableRef == TableRef.EMPTY_TABLE_REF) {
+                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.NO_TABLE_SPECIFIED_FOR_WILDCARD_SELECT).build().buildException();
+                }
+                isWildcard = true;
+                if (tableRef.getTable().getType() == PTableType.INDEX && ((WildcardParseNode)node).isRewrite()) {
+                    projectAllIndexColumns(context, tableRef, resolveColumn, projectedExpressions, projectedColumns, targetColumns);
+                } else {
+                    projectAllTableColumns(context, tableRef, resolveColumn, projectedExpressions, projectedColumns, targetColumns);
+                }
+            } else if (node instanceof TableWildcardParseNode) {
+                TableName tName = ((TableWildcardParseNode) node).getTableName();
+                TableRef tRef = resolver.resolveTable(tName.getSchemaName(), tName.getTableName());
+                if (tRef.equals(tableRef)) {
+                    isWildcard = true;
+                }
+                if (tRef.getTable().getType() == PTableType.INDEX && ((TableWildcardParseNode)node).isRewrite()) {
+                    projectAllIndexColumns(context, tRef, true, projectedExpressions, projectedColumns, targetColumns);
+                } else {
+                    projectAllTableColumns(context, tRef, true, projectedExpressions, projectedColumns, targetColumns);
+                }
+            } else if (node instanceof  FamilyWildcardParseNode) {
+                if (tableRef == TableRef.EMPTY_TABLE_REF) {
+                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.NO_TABLE_SPECIFIED_FOR_WILDCARD_SELECT).build().buildException();
+                }
+                // Project everything for SELECT cf.*
+                String cfName = ((FamilyWildcardParseNode) node).getName();
+                // Delay projecting to scan, as when any other column in the column family gets
+                // added to the scan, it overwrites that we want to project the entire column
+                // family. Instead, we do the projection at the end.
+                // TODO: consider having a ScanUtil.addColumn and ScanUtil.addFamily to work
+                // around this, as this code depends on this function being the last place where
+                // columns are projected (which is currently true, but could change).
+                projectedFamilies.add(Bytes.toBytes(cfName));
+                if (tableRef.getTable().getType() == PTableType.INDEX && ((FamilyWildcardParseNode)node).isRewrite()) {
+                    projectIndexColumnFamily(context, cfName, tableRef, resolveColumn, projectedExpressions, projectedColumns);
+                } else {
+                    projectTableColumnFamily(context, cfName, tableRef, resolveColumn, projectedExpressions, projectedColumns);
+                }
+            } else {
+                if (node instanceof PhoenixRowTimestampParseNode) {
+                    if (statement.isAggregate()) {
+                        ExpressionCompiler.throwNonAggExpressionInAggException(node.toString());
+                    }
+                }
+                Expression expression = node.accept(selectVisitor);
+                projectedExpressions.add(expression);
+                expression = coerceIfNecessary(index, targetColumns, expression);
+                if (node instanceof BindParseNode) {
+                    context.getBindManager().addParamMetaData((BindParseNode)node, expression);
+                }
+                if (!node.isStateless()) {
+                    if (!selectVisitor.isAggregate() && statement.isAggregate()) {
+                        ExpressionCompiler.throwNonAggExpressionInAggException(expression.toString());
+                    }
+                }
+
+                String tableName = tableRef.getTableAlias() == null ?
+                        (table.getName() == null ?
+                                "" :
+                                table.getName().getString()) :
+                        tableRef.getTableAlias();
+                String colName = SchemaUtil.normalizeIdentifier(aliasedNode.getNode().getAlias());
+                String name = colName == null ? expression.toString() : colName;
+                boolean isCaseSensitive = aliasedNode.getAlias() != null ?
+                        aliasedNode.isCaseSensitve() :
+                        (colName != null ?
+                                SchemaUtil.isCaseSensitive(aliasedNode.getNode().getAlias()) :
+                                selectVisitor.isCaseSensitive);
+                if (null != aliasedNode.getAlias()){
+                    projectedColumns.add(new ExpressionProjector(name, aliasedNode.getAlias(), tableName, expression, isCaseSensitive));
+                } else {
+                    projectedColumns.add(new ExpressionProjector(name, name, tableName, expression, isCaseSensitive));
+                }
+            }
+
+            selectVisitor.reset();
+            index++;
+        }
+
+        for (int i = arrayProjectedColumnRefs.size() - 1; i >= 0; i--) {
+            Expression expression = arrayProjectedColumnRefs.get(i);
+            Integer count = arrayExpressionCounts.get(expression);
+            if (count != 0) {
+                arrayKVRefs.remove(i);
+                arrayKVFuncs.remove(i);
+                arrayOldFuncs.remove(i);
+            }
+        }
+
+        if (arrayKVFuncs.size() > 0 && arrayKVRefs.size() > 0) {
+            serailizeArrayIndexInformationAndSetInScan(context, arrayKVFuncs, arrayKVRefs);
+            KeyValueSchemaBuilder builder = new KeyValueSchemaBuilder(0);
+            for (Expression expression : arrayKVRefs) {
+                builder.addField(expression);
+            }
+            KeyValueSchema kvSchema = builder.build();
+            ValueBitSet arrayIndexesBitSet = ValueBitSet.newInstance(kvSchema);
+            builder = new KeyValueSchemaBuilder(0);
+            for (Expression expression : arrayKVFuncs) {
+                builder.addField(expression);
+            }
+            KeyValueSchema arrayIndexesSchema = builder.build();
+
+            Map<Expression, Expression> replacementMap = new HashMap<>();
+            for (int i = 0; i < arrayOldFuncs.size(); i++) {
+                Expression function = arrayKVFuncs.get(i);
+                replacementMap.put(arrayOldFuncs.get(i), new ArrayIndexExpression(i, function.getDataType(), arrayIndexesBitSet, arrayIndexesSchema));
+            }
+
+            ReplaceArrayFunctionExpressionVisitor visitor = new ReplaceArrayFunctionExpressionVisitor(replacementMap);
+            for (int i = 0; i < projectedColumns.size(); i++) {
+                ExpressionProjector projector = projectedColumns.get(i);
+                projectedColumns.set(i, new ExpressionProjector(projector.getName(),
+                        projector.getLabel(),
+                        tableRef.getTableAlias() == null ? (table.getName() == null ? "" : table.getName().getString()) : tableRef.getTableAlias(), projector.getExpression().accept(visitor), projector.isCaseSensitive()));
+            }
+        }
+
+        boolean isProjectEmptyKeyValue = false;
+        // Don't project known/declared column families into the scan if we want to support
+        // surfacing dynamic columns in wildcard queries
+        if (isWildcard && !wildcardIncludesDynamicCols) {
+            projectAllColumnFamilies(table, scan);
+        } else {
+            isProjectEmptyKeyValue = where == null || LiteralExpression.isTrue(where) || where.requiresFinalEvaluation();
+            for (byte[] family : projectedFamilies) {
+                try {
+                    if (table.getColumnFamily(family) != null) {
+                        projectColumnFamily(table, scan, family);
+                    }
+                } catch (ColumnFamilyNotFoundException e) {
+                    if (!IndexUtil.shouldIndexBeUsedForUncoveredQuery(tableRef)) {
+                        throw e;
+                    }
+                }
+            }
+        }
+        
+        // TODO make estimatedByteSize more accurate by counting the joined columns.
+        int estimatedKeySize = table.getRowKeySchema().getEstimatedValueLength();
+        int estimatedByteSize = 0;
+        for (Map.Entry<byte[],NavigableSet<byte[]>> entry : scan.getFamilyMap().entrySet()) {
+            try {
+                PColumnFamily family = table.getColumnFamily(entry.getKey());
+                if (entry.getValue() == null) {
+                    for (PColumn column : family.getColumns()) {
+                        Integer maxLength = column.getMaxLength();
+                        int byteSize = column.getDataType().isFixedWidth() ? maxLength == null ? column.getDataType().getByteSize() : maxLength : RowKeySchema.ESTIMATED_VARIABLE_LENGTH_SIZE;
+                        estimatedByteSize += SizedUtil.KEY_VALUE_SIZE + estimatedKeySize + byteSize;
+                    }
+                } else {
+                    for (byte[] cq : entry.getValue()) {
+                        PColumn column = family.getPColumnForColumnQualifier(cq);
+                        // Skip if an EMPTY_COLUMN is in the projection list: since the table
+                        // column list does not contain the EMPTY_COLUMN, no value is returned.
+                        if (column == null) {
+                            continue;
+                        }
+                        Integer maxLength = column.getMaxLength();
+                        int byteSize = column.getDataType().isFixedWidth() ? maxLength == null ? column.getDataType().getByteSize() : maxLength : RowKeySchema.ESTIMATED_VARIABLE_LENGTH_SIZE;
+                        estimatedByteSize += SizedUtil.KEY_VALUE_SIZE + estimatedKeySize + byteSize;
+                    }
+                }
+            } catch (ColumnFamilyNotFoundException e) {
+                // Ignore as this can happen for local indexes when the data table has a column family, but there are no covered columns in the family
+            }
+        }
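+        // Worked example of the estimate above: a fixed-width INTEGER column
+        // (4 bytes) with an estimated key size of 20 bytes contributes
+        // SizedUtil.KEY_VALUE_SIZE + 20 + 4 to estimatedByteSize.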
+        return new RowProjector(projectedColumns, Math.max(estimatedKeySize, estimatedByteSize),
+                isProjectEmptyKeyValue, resolver.hasUDFs(), isWildcard,
+                wildcardIncludesDynamicCols);
+    }
+
+    private static void projectAllColumnFamilies(PTable table, Scan scan) {
+        // Will project all known/declared column families
+        scan.getFamilyMap().clear();
+        for (PColumnFamily family : table.getColumnFamilies()) {
+            scan.addFamily(family.getName().getBytes());
+        }
+    }
+
+    // A replaced ArrayIndex function that retrieves the exact array value returned from the server
+    static class ArrayIndexExpression extends BaseTerminalExpression {
+        private final int position;
+        private final PDataType type;
+        private final ValueBitSet arrayIndexesBitSet;
+        private final KeyValueSchema arrayIndexesSchema;
+
+        public ArrayIndexExpression(int position, PDataType type, ValueBitSet arrayIndexesBitSet, KeyValueSchema arrayIndexesSchema) {
+            this.position = position;
+            this.type =  type;
+            this.arrayIndexesBitSet = arrayIndexesBitSet;
+            this.arrayIndexesSchema = arrayIndexesSchema;
+        }
+
+        @Override
+        public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) {
+            if (!tuple.getValue(QueryConstants.ARRAY_VALUE_COLUMN_FAMILY,
+                    QueryConstants.ARRAY_VALUE_COLUMN_QUALIFIER, ptr)) {
+                return false;
+            }
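+            // Walk the serialized array-index schema: OR the value bit set into ours,
+            // position the iterator at our index, then read the element (if present).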
+            int maxOffset = ptr.getOffset() + ptr.getLength();
+            arrayIndexesBitSet.or(ptr);
+            arrayIndexesSchema.iterator(ptr, position, arrayIndexesBitSet);
+            Boolean hasValue = arrayIndexesSchema.next(ptr, position, maxOffset, arrayIndexesBitSet);
+            arrayIndexesBitSet.clear();
+            if (hasValue == null) {
+                ptr.set(ByteUtil.EMPTY_BYTE_ARRAY);
+            }
+            return true;
+        }
+
+        @Override
+        public PDataType getDataType() {
+            return this.type;
+        }
+
+        @Override
+        public <T> T accept(ExpressionVisitor<T> visitor) {
+            // TODO Auto-generated method stub
+            return null;
+        }
+    }
+    private static void serailizeArrayIndexInformationAndSetInScan(StatementContext context, List<Expression> arrayKVFuncs,
+            List<KeyValueColumnExpression> arrayKVRefs) {
+        ByteArrayOutputStream stream = new ByteArrayOutputStream();
+        try {
+            DataOutputStream output = new DataOutputStream(stream);
+            // Write the arrayKVRef count followed by the KeyValue expressions that feed
+            // the array index functions
+            WritableUtils.writeVInt(output, arrayKVRefs.size());
+            for (Expression expression : arrayKVRefs) {
+                expression.write(output);
+            }
+            // Then write the number of array index functions followed by the expressions
+            // themselves
+            WritableUtils.writeVInt(output, arrayKVFuncs.size());
+            for (Expression expression : arrayKVFuncs) {
+                expression.write(output);
+            }
+
+        } catch (IOException e) {
+            throw new RuntimeException(e);
+        } finally {
+            try {
+                stream.close();
+            } catch (IOException e) {
+                throw new RuntimeException(e);
+            }
+        }
+        context.getScan().setAttribute(BaseScannerRegionObserverConstants.SPECIFIC_ARRAY_INDEX, stream.toByteArray());
+    }
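+
+    // A minimal reader sketch for the attribute written above (hypothetical, for
+    // illustration only; the actual deserializer lives in the server-side scanner
+    // coprocessor and mirrors this vint-prefixed layout):
+    //   DataInputStream in = new DataInputStream(new ByteArrayInputStream(
+    //       scan.getAttribute(BaseScannerRegionObserverConstants.SPECIFIC_ARRAY_INDEX)));
+    //   int refCount = WritableUtils.readVInt(in);  // number of array KeyValue refs
+    //   // ...read refCount expressions, then a vint count of array index functions...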
+
+    private static class SelectClauseVisitor extends ExpressionCompiler {
+
+        /**
+         * Track whether or not the projection expression is case sensitive. We use this
+         * information to determine whether or not we normalize the column name passed in.
+         */
+        private boolean isCaseSensitive;
+        private int elementCount;
+        private List<KeyValueColumnExpression> arrayKVRefs;
+        private List<Expression> arrayKVFuncs;
+        private List<Expression> arrayOldFuncs;
+        private List<ProjectedColumnExpression> arrayProjectedColumnRefs;
+        private Map<Expression, Integer> arrayExpressionCounts;
+        private SelectStatement statement; 
+        
+        private SelectClauseVisitor(StatementContext context, GroupBy groupBy,
+                List<KeyValueColumnExpression> arrayKVRefs, List<Expression> arrayKVFuncs,
+                Map<Expression, Integer> arrayExpressionCounts,
+                List<ProjectedColumnExpression> arrayProjectedColumnRefs,
+                List<Expression> arrayOldFuncs, SelectStatement statement) {
+            super(context, groupBy);
+            this.arrayKVRefs = arrayKVRefs;
+            this.arrayKVFuncs = arrayKVFuncs;
+            this.arrayOldFuncs = arrayOldFuncs;
+            this.arrayExpressionCounts = arrayExpressionCounts;
+            this.arrayProjectedColumnRefs = arrayProjectedColumnRefs;
+            this.statement = statement;
+            reset();
+        }
+
+        @Override
+        public void reset() {
+            super.reset();
+            elementCount = 0;
+            isCaseSensitive = true;
+        }
+        
+        @Override
+        protected ColumnRef resolveColumn(ColumnParseNode node) throws SQLException {
+            ColumnRef ref = super.resolveColumn(node);
+            isCaseSensitive = isCaseSensitive && node.isCaseSensitive();
+            return ref;
+        }
+
+        @Override
+        public Expression visit(ColumnParseNode node) throws SQLException {
+            Expression expression = super.visit(node);
+            if (expression.getDataType().isArrayType()) {
+                Integer count = arrayExpressionCounts.get(expression);
+                arrayExpressionCounts.put(expression, count != null ? (count + 1) : 1);
+            }
+            return expression;
+        }
+        
+        @Override
+        public void addElement(List<Expression> l, Expression element) {
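+            // Case sensitivity is only preserved while the select list has a single
+            // element; any further element forces normalization of the projected name.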
+            elementCount++;
+            isCaseSensitive &= elementCount == 1;
+            super.addElement(l, element);
+        }
+        
+        @Override
+        public Expression visit(SequenceValueParseNode node) throws SQLException {
+            if (aggregateFunction != null) {
+                throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_USE_OF_NEXT_VALUE_FOR)
+                .setSchemaName(node.getTableName().getSchemaName())
+                .setTableName(node.getTableName().getTableName()).build().buildException();
+            }
+            return context.getSequenceManager().newSequenceReference(node);
+        }
+        
+        @Override
+        public Expression visitLeave(FunctionParseNode node, final List<Expression> children) throws SQLException {
+
+            // This rewrite is not needed for a GROUP BY clause containing an array,
+            // hence the !statement.isAggregate() check below
+            if (!statement.isAggregate() && ArrayIndexFunction.NAME.equals(node.getName()) && children.get(0) instanceof ProjectedColumnExpression) {
+                 final List<KeyValueColumnExpression> indexKVs = Lists.newArrayList();
+                 final List<ProjectedColumnExpression> indexProjectedColumns = Lists.newArrayList();
+                 final List<Expression> copyOfChildren = new ArrayList<>(children);
+                 // Create an anonymous visitor to find references to arrays in a generic way
+                 children.get(0).accept(new ProjectedColumnExpressionVisitor() {
+                     @Override
+                     public Void visit(ProjectedColumnExpression expression) {
+                         if (expression.getDataType().isArrayType()) {
+                             indexProjectedColumns.add(expression);
+                             PColumn col = expression.getColumn();
+                             // Hack-ish: for covered columns with local indexes we defer to the server.
+                             if (col instanceof ProjectedColumn && ((ProjectedColumn) col)
+                                     .getSourceColumnRef() instanceof IndexUncoveredDataColumnRef) {
+                                 return null;
+                             }
+                             PTable table = context.getCurrentTable().getTable();
+                             KeyValueColumnExpression keyValueColumnExpression;
+                             if (table.getImmutableStorageScheme() != ImmutableStorageScheme.ONE_CELL_PER_COLUMN) {
+                                keyValueColumnExpression =
+                                        new SingleCellColumnExpression(col,
+                                                col.getName().getString(),
+                                                table.getEncodingScheme(),
+                                                table.getImmutableStorageScheme());
+                             } else {
+                                 keyValueColumnExpression = new KeyValueColumnExpression(col);
+                             }
+                             indexKVs.add(keyValueColumnExpression);
+                             copyOfChildren.set(0, keyValueColumnExpression);
+                             Integer count = arrayExpressionCounts.get(expression);
+                             arrayExpressionCounts.put(expression, count != null ? (count - 1) : -1);
+                         }
+                         return null;
+                     }
+                 });
+
+                 Expression func = super.visitLeave(node, children);
+                 // Add the KeyValue references that are of array type
+                 if (!indexKVs.isEmpty()) {
+                    arrayKVRefs.addAll(indexKVs);
+                    arrayProjectedColumnRefs.addAll(indexProjectedColumns);
+                    Expression funcModified = super.visitLeave(node, copyOfChildren);
+                    // Track the array index function also
+                    arrayKVFuncs.add(funcModified);
+                    arrayOldFuncs.add(func);
+                }
+                return func;
+            } else {
+                return super.visitLeave(node, children);
+            }
+        }
+    }
+}
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/QueryCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/QueryCompiler.java
new file mode 100644
index 0000000000..168b404379
--- /dev/null
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/QueryCompiler.java
@@ -0,0 +1,814 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compile;
+
+import static org.apache.phoenix.query.QueryServices.WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB;
+import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB;
+
+import java.sql.SQLException;
+import java.sql.SQLFeatureNotSupportedException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.phoenix.thirdparty.com.google.common.base.Optional;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.phoenix.compile.GroupByCompiler.GroupBy;
+import org.apache.phoenix.compile.JoinCompiler.JoinSpec;
+import org.apache.phoenix.compile.JoinCompiler.JoinTable;
+import org.apache.phoenix.compile.JoinCompiler.Table;
+import org.apache.phoenix.compile.OrderByCompiler.OrderBy;
+import org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants;
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.exception.SQLExceptionInfo;
+import org.apache.phoenix.execute.AggregatePlan;
+import org.apache.phoenix.execute.BaseQueryPlan;
+import org.apache.phoenix.execute.ClientAggregatePlan;
+import org.apache.phoenix.execute.ClientScanPlan;
+import org.apache.phoenix.execute.HashJoinPlan;
+import org.apache.phoenix.execute.HashJoinPlan.HashSubPlan;
+import org.apache.phoenix.execute.HashJoinPlan.WhereClauseSubPlan;
+import org.apache.phoenix.execute.LiteralResultIterationPlan;
+import org.apache.phoenix.execute.ScanPlan;
+import org.apache.phoenix.execute.SortMergeJoinPlan;
+import org.apache.phoenix.execute.TupleProjectionPlan;
+import org.apache.phoenix.execute.TupleProjector;
+import org.apache.phoenix.execute.UnionPlan;
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.expression.LiteralExpression;
+import org.apache.phoenix.expression.RowValueConstructorExpression;
+import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
+import org.apache.phoenix.iterate.ParallelIteratorFactory;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixStatement;
+import org.apache.phoenix.join.HashJoinInfo;
+import org.apache.phoenix.optimize.Cost;
+import org.apache.phoenix.parse.AliasedNode;
+import org.apache.phoenix.parse.EqualParseNode;
+import org.apache.phoenix.parse.HintNode.Hint;
+import org.apache.phoenix.parse.JoinTableNode.JoinType;
+import org.apache.phoenix.parse.OrderByNode;
+import org.apache.phoenix.parse.ParseNode;
+import org.apache.phoenix.parse.ParseNodeFactory;
+import org.apache.phoenix.parse.SQLParser;
+import org.apache.phoenix.parse.SelectStatement;
+import org.apache.phoenix.parse.SubqueryParseNode;
+import org.apache.phoenix.parse.TableNode;
+import org.apache.phoenix.query.ConnectionQueryServices;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.query.QueryServicesOptions;
+import org.apache.phoenix.schema.AmbiguousColumnException;
+import org.apache.phoenix.schema.ColumnNotFoundException;
+import org.apache.phoenix.schema.PDatum;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.RowValueConstructorOffsetNotCoercibleException;
+import org.apache.phoenix.schema.TableNotFoundException;
+import org.apache.phoenix.schema.TableRef;
+import org.apache.phoenix.util.EnvironmentEdgeManager;
+import org.apache.phoenix.util.ParseNodeUtil;
+import org.apache.phoenix.util.ParseNodeUtil.RewriteResult;
+import org.apache.phoenix.util.QueryUtil;
+import org.apache.phoenix.util.ScanUtil;
+
+import org.apache.phoenix.thirdparty.com.google.common.collect.Lists;
+import org.apache.phoenix.thirdparty.com.google.common.collect.Sets;
+
+
+/**
+ * Class used to build an executable query plan.
+ *
+ * @since 0.1
+ */
+public class QueryCompiler {
+    private static final ParseNodeFactory NODE_FACTORY = new ParseNodeFactory();
+    private final PhoenixStatement statement;
+    private final Scan scan;
+    private final Scan originalScan;
+    private final ColumnResolver resolver;
+    private final BindManager bindManager;
+    private final SelectStatement select;
+    private final List<? extends PDatum> targetColumns;
+    private final ParallelIteratorFactory parallelIteratorFactory;
+    private final SequenceManager sequenceManager;
+    private final boolean projectTuples;
+    private final boolean noChildParentJoinOptimization;
+    private final boolean usePersistentCache;
+    private final boolean optimizeSubquery;
+    private final Map<TableRef, QueryPlan> dataPlans;
+    private final boolean costBased;
+
+    public QueryCompiler(PhoenixStatement statement, SelectStatement select, ColumnResolver resolver, boolean projectTuples, boolean optimizeSubquery, Map<TableRef, QueryPlan> dataPlans) throws SQLException {
+        this(statement, select, resolver, Collections.<PDatum>emptyList(), null, new SequenceManager(statement), projectTuples, optimizeSubquery, dataPlans);
+    }
+
+    public QueryCompiler(PhoenixStatement statement, SelectStatement select, ColumnResolver resolver, BindManager bindManager, boolean projectTuples, boolean optimizeSubquery, Map<TableRef, QueryPlan> dataPlans) throws SQLException {
+        this(statement, select, resolver, bindManager, Collections.<PDatum>emptyList(), null, new SequenceManager(statement), projectTuples, optimizeSubquery, dataPlans);
+    }
+
+    public QueryCompiler(PhoenixStatement statement, SelectStatement select, ColumnResolver resolver, List<? extends PDatum> targetColumns, ParallelIteratorFactory parallelIteratorFactory, SequenceManager sequenceManager, boolean projectTuples, boolean optimizeSubquery, Map<TableRef, QueryPlan> dataPlans) throws SQLException {
+        this(statement, select, resolver, new BindManager(statement.getParameters()), targetColumns, parallelIteratorFactory, sequenceManager, projectTuples, optimizeSubquery, dataPlans);
+    }
+
+    public QueryCompiler(PhoenixStatement statement, SelectStatement select, ColumnResolver resolver, BindManager bindManager, List<? extends PDatum> targetColumns, ParallelIteratorFactory parallelIteratorFactory, SequenceManager sequenceManager, boolean projectTuples, boolean optimizeSubquery, Map<TableRef, QueryPlan> dataPlans) throws SQLException {
+        this.statement = statement;
+        this.select = select;
+        this.resolver = resolver;
+        this.bindManager = bindManager;
+        this.scan = new Scan();
+        this.targetColumns = targetColumns;
+        this.parallelIteratorFactory = parallelIteratorFactory;
+        this.sequenceManager = sequenceManager;
+        this.projectTuples = projectTuples;
+        this.noChildParentJoinOptimization = select.getHint().hasHint(Hint.NO_CHILD_PARENT_JOIN_OPTIMIZATION) || select.getHint().hasHint(Hint.USE_PERSISTENT_CACHE);
+        this.usePersistentCache = select.getHint().hasHint(Hint.USE_PERSISTENT_CACHE);
+        ConnectionQueryServices services = statement.getConnection().getQueryServices();
+        this.costBased = services.getProps().getBoolean(QueryServices.COST_BASED_OPTIMIZER_ENABLED, QueryServicesOptions.DEFAULT_COST_BASED_OPTIMIZER_ENABLED);
+        scan.setLoadColumnFamiliesOnDemand(true);
+        if (select.getHint().hasHint(Hint.NO_CACHE)) {
+            scan.setCacheBlocks(false);
+        }
+
+        scan.setCaching(statement.getFetchSize());
+        this.originalScan = ScanUtil.newScan(scan);
+        this.optimizeSubquery = optimizeSubquery;
+        this.dataPlans = dataPlans == null ? Collections.<TableRef, QueryPlan>emptyMap() : dataPlans;
+    }
+
+    public QueryCompiler(PhoenixStatement statement, SelectStatement select, ColumnResolver resolver, List<? extends PDatum> targetColumns, ParallelIteratorFactory parallelIteratorFactory, SequenceManager sequenceManager) throws SQLException {
+        this(statement, select, resolver, targetColumns, parallelIteratorFactory, sequenceManager, true, false, null);
+    }
+
+    /**
+     * Builds an executable query plan from a parsed SQL statement
+     * @return executable query plan
+     * @throws SQLException if mismatched types are found, bind values do not match binds,
+     * or invalid function arguments are encountered.
+     * @throws SQLFeatureNotSupportedException if an unsupported construct is encountered
+     * @throws TableNotFoundException if the table name cannot be found in the schema
+     * @throws ColumnNotFoundException if a column name cannot be resolved
+     * @throws AmbiguousColumnException if an unaliased column name is ambiguous across multiple tables
+     */
+    public QueryPlan compile() throws SQLException {
+        verifySCN();
+        QueryPlan plan;
+        if (select.isUnion()) {
+            plan = compileUnionAll(select);
+        } else {
+            plan = compileSelect(select);
+        }
+        return plan;
+    }
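+
+    // A minimal usage sketch (hypothetical; assumes a PhoenixStatement pstmt and its
+    // PhoenixConnection pconn, with the resolver built for the parsed query):
+    //   SelectStatement select = new SQLParser("SELECT * FROM T").parseQuery();
+    //   ColumnResolver resolver = FromCompiler.getResolverForQuery(select, pconn);
+    //   QueryPlan plan = new QueryCompiler(pstmt, select, resolver, true, false, null)
+    //       .compile();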
+
+    private void verifySCN() throws SQLException {
+        PhoenixConnection conn = statement.getConnection();
+        if (conn.isRunningUpgrade()) {
+            // PHOENIX-6179 : if upgrade is going on, we don't need to
+            // perform MaxLookBackAge check
+            return;
+        }
+        Long scn = conn.getSCN();
+        if (scn == null) {
+            return;
+        }
+        long maxLookBackAgeInMillis =
+            BaseScannerRegionObserverConstants.getMaxLookbackInMillis(conn.getQueryServices().getConfiguration());
+        long now = EnvironmentEdgeManager.currentTimeMillis();
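+        // Example: with maxLookBackAge = 1h, an SCN older than (now - 1h) points past the
+        // retained row history, so the query is rejected below.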
+        if (maxLookBackAgeInMillis > 0 && now - maxLookBackAgeInMillis > scn){
+            throw new SQLExceptionInfo.Builder(
+                SQLExceptionCode.CANNOT_QUERY_TABLE_WITH_SCN_OLDER_THAN_MAX_LOOKBACK_AGE)
+                .build().buildException();
+        }
+    }
+
+    public QueryPlan compileUnionAll(SelectStatement select) throws SQLException { 
+        List<SelectStatement> unionAllSelects = select.getSelects();
+        List<QueryPlan> plans = new ArrayList<QueryPlan>();
+
+        for (int i = 0; i < unionAllSelects.size(); i++) {
+            SelectStatement subSelect = unionAllSelects.get(i);
+            // Push down order-by and limit into sub-selects.
+            if (!select.getOrderBy().isEmpty() || select.getLimit() != null) {
+                if (select.getOffset() == null) {
+                    subSelect = NODE_FACTORY.select(subSelect, select.getOrderBy(), select.getLimit(), null);
+                } else {
+                    subSelect = NODE_FACTORY.select(subSelect, select.getOrderBy(), null, null);
+                }
+            }
+            QueryPlan subPlan = compileSubquery(subSelect, true);
+            plans.add(subPlan);
+        }
+        TableRef tableRef = UnionCompiler.contructSchemaTable(statement, plans,
+            select.hasWildcard() ? null : select.getSelect());
+        ColumnResolver resolver = FromCompiler.getResolver(tableRef);
+        StatementContext context = new StatementContext(statement, resolver, bindManager, scan, sequenceManager);
+        QueryPlan plan = compileSingleFlatQuery(
+                context,
+                select,
+                false,
+                false,
+                null,
+                false,
+                true);
+        plan = new UnionPlan(context, select, tableRef, plan.getProjector(), plan.getLimit(),
+            plan.getOffset(), plan.getOrderBy(), GroupBy.EMPTY_GROUP_BY, plans,
+            context.getBindManager().getParameterMetaData());
+        return plan;
+    }
+
+    public QueryPlan compileSelect(SelectStatement select) throws SQLException {
+        StatementContext context = new StatementContext(statement, resolver, bindManager, scan, sequenceManager);
+        if (select.isJoin()) {
+            JoinTable joinTable = JoinCompiler.compile(statement, select, context.getResolver());
+            return compileJoinQuery(context, joinTable, false, false, null);
+        } else {
+            return compileSingleQuery(context, select, false, true);
+        }
+    }
+
+    /**
+     * Recursively calls compileJoinQuery() for join queries down to the leaf JoinTable nodes.
+     * For a leaf node, calls compileSingleFlatQuery() or compileSubquery(); otherwise:
+     *      1) If option COST_BASED_OPTIMIZER_ENABLED is on and stats are available, return the
+     *         join plan with the best cost. Note that the "best" plan is only locally optimal,
+     *         and might or might not be globally optimal.
+     *      2) Otherwise, return the join plan compiled with the default strategy.
+     * @see JoinCompiler.JoinTable#getApplicableJoinStrategies()
+     */
+    protected QueryPlan compileJoinQuery(StatementContext context, JoinTable joinTable, boolean asSubquery, boolean projectPKColumns, List<OrderByNode> orderBy) throws SQLException {
+        if (joinTable.getJoinSpecs().isEmpty()) {
+            Table table = joinTable.getLeftTable();
+            SelectStatement subquery = table.getAsSubquery(orderBy);
+            if (!table.isSubselect()) {
+                context.setCurrentTable(table.getTableRef());
+                PTable projectedTable = table.createProjectedTable(!projectPKColumns, context);
+                TupleProjector projector = new TupleProjector(projectedTable);
+                boolean wildcardIncludesDynamicCols = context.getConnection().getQueryServices()
+                        .getConfiguration().getBoolean(WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB,
+                                DEFAULT_WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB);
+                TupleProjector.serializeProjectorIntoScan(context.getScan(), projector,
+                        wildcardIncludesDynamicCols);
+                context.setResolver(FromCompiler.getResolverForProjectedTable(projectedTable, context.getConnection(), subquery.getUdfParseNodes()));
+                table.projectColumns(context.getScan());
+                return compileSingleFlatQuery(
+                        context,
+                        subquery,
+                        asSubquery,
+                        !asSubquery,
+                        null,
+                        true,
+                        false);
+            }
+            QueryPlan plan = compileSubquery(subquery, false);
+            PTable projectedTable = table.createProjectedTable(plan.getProjector());
+            context.setResolver(FromCompiler.getResolverForProjectedTable(projectedTable, context.getConnection(), subquery.getUdfParseNodes()));
+            return new TupleProjectionPlan(
+                    plan,
+                    new TupleProjector(plan.getProjector()),
+                    context,
+                    null);
+        }
+
+        List<JoinCompiler.Strategy> strategies = joinTable.getApplicableJoinStrategies();
+        assert strategies.size() > 0;
+        if (!costBased || strategies.size() == 1) {
+            return compileJoinQuery(
+                    strategies.get(0), context, joinTable, asSubquery, projectPKColumns, orderBy);
+        }
+
+        QueryPlan bestPlan = null;
+        Cost bestCost = null;
+        for (JoinCompiler.Strategy strategy : strategies) {
+            StatementContext newContext = new StatementContext(
+                    context.getStatement(), context.getResolver(), context.getBindManager(), new Scan(), context.getSequenceManager());
+            QueryPlan plan = compileJoinQuery(
+                    strategy, newContext, joinTable, asSubquery, projectPKColumns, orderBy);
+            Cost cost = plan.getCost();
+            if (bestPlan == null || cost.compareTo(bestCost) < 0) {
+                bestPlan = plan;
+                bestCost = cost;
+            }
+        }
+        context.setResolver(bestPlan.getContext().getResolver());
+        context.setCurrentTable(bestPlan.getContext().getCurrentTable());
+        return bestPlan;
+    }
+
+    protected QueryPlan compileJoinQuery(JoinCompiler.Strategy strategy, StatementContext context, JoinTable joinTable, boolean asSubquery, boolean projectPKColumns, List<OrderByNode> orderBy) throws SQLException {
+        byte[] emptyByteArray = new byte[0];
+        List<JoinSpec> joinSpecs = joinTable.getJoinSpecs();
+        boolean wildcardIncludesDynamicCols = context.getConnection().getQueryServices()
+                .getConfiguration().getBoolean(WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB,
+                        DEFAULT_WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB);
+        switch (strategy) {
+            case HASH_BUILD_RIGHT: {
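+                // Scan the left-hand side table and broadcast each right-hand side as a
+                // hash table (star-join shape); sub-plans are compiled per join spec.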
+                boolean[] starJoinVector = joinTable.getStarJoinVector();
+                Table table = joinTable.getLeftTable();
+                PTable initialProjectedTable;
+                TableRef tableRef;
+                SelectStatement query;
+                TupleProjector tupleProjector;
+                if (!table.isSubselect()) {
+                    context.setCurrentTable(table.getTableRef());
+                    initialProjectedTable = table.createProjectedTable(!projectPKColumns, context);
+                    tableRef = table.getTableRef();
+                    table.projectColumns(context.getScan());
+                    query = joinTable.getAsSingleSubquery(table.getAsSubquery(orderBy), asSubquery);
+                    tupleProjector = new TupleProjector(initialProjectedTable);
+                } else {
+                    SelectStatement subquery = table.getAsSubquery(orderBy);
+                    QueryPlan plan = compileSubquery(subquery, false);
+                    initialProjectedTable = table.createProjectedTable(plan.getProjector());
+                    tableRef = plan.getTableRef();
+                    context.getScan().setFamilyMap(plan.getContext().getScan().getFamilyMap());
+                    query = joinTable.getAsSingleSubquery((SelectStatement) plan.getStatement(), asSubquery);
+                    tupleProjector = new TupleProjector(plan.getProjector());
+                }
+                context.setCurrentTable(tableRef);
+                PTable projectedTable = initialProjectedTable;
+                int count = joinSpecs.size();
+                ImmutableBytesPtr[] joinIds = new ImmutableBytesPtr[count];
+                List<Expression>[] joinExpressions = new List[count];
+                JoinType[] joinTypes = new JoinType[count];
+                PTable[] tables = new PTable[count];
+                int[] fieldPositions = new int[count];
+                StatementContext[] subContexts = new StatementContext[count];
+                QueryPlan[] subPlans = new QueryPlan[count];
+                HashSubPlan[] hashPlans = new HashSubPlan[count];
+                fieldPositions[0] = projectedTable.getColumns().size() - projectedTable.getPKColumns().size();
+                for (int i = 0; i < count; i++) {
+                    JoinSpec joinSpec = joinSpecs.get(i);
+                    Scan subScan = ScanUtil.newScan(originalScan);
+                    subContexts[i] = new StatementContext(statement, context.getResolver(), context.getBindManager(), subScan, new SequenceManager(statement));
+                    subPlans[i] = compileJoinQuery(
+                            subContexts[i],
+                            joinSpec.getRhsJoinTable(),
+                            true,
+                            true,
+                            null);
+                    boolean hasPostReference = joinSpec.getRhsJoinTable().hasPostReference();
+                    if (hasPostReference) {
+                        tables[i] = subContexts[i].getResolver().getTables().get(0).getTable();
+                        projectedTable = JoinCompiler.joinProjectedTables(projectedTable, tables[i], joinSpec.getType());
+                    } else {
+                        tables[i] = null;
+                    }
+                }
+                for (int i = 0; i < count; i++) {
+                    JoinSpec joinSpec = joinSpecs.get(i);
+                    context.setResolver(FromCompiler.getResolverForProjectedTable(projectedTable, context.getConnection(), query.getUdfParseNodes()));
+                    joinIds[i] = new ImmutableBytesPtr(emptyByteArray); // place-holder
+                    Pair<List<Expression>, List<Expression>> joinConditions = joinSpec.compileJoinConditions(context, subContexts[i], strategy);
+                    joinExpressions[i] = joinConditions.getFirst();
+                    List<Expression> hashExpressions = joinConditions.getSecond();
+                    Pair<Expression, Expression> keyRangeExpressions = new Pair<Expression, Expression>(null, null);
+                    boolean optimized = getKeyExpressionCombinations(
+                            keyRangeExpressions,
+                            context,
+                            joinTable.getOriginalJoinSelectStatement(),
+                            tableRef,
+                            joinSpec.getType(),
+                            joinExpressions[i],
+                            hashExpressions);
+                    Expression keyRangeLhsExpression = keyRangeExpressions.getFirst();
+                    Expression keyRangeRhsExpression = keyRangeExpressions.getSecond();
+                    joinTypes[i] = joinSpec.getType();
+                    if (i < count - 1) {
+                        fieldPositions[i + 1] = fieldPositions[i] + (tables[i] == null ? 0 : (tables[i].getColumns().size() - tables[i].getPKColumns().size()));
+                    }
+                    hashPlans[i] = new HashSubPlan(i, subPlans[i], optimized ? null : hashExpressions, joinSpec.isSingleValueOnly(), usePersistentCache, keyRangeLhsExpression, keyRangeRhsExpression);
+                }
+                TupleProjector.serializeProjectorIntoScan(context.getScan(), tupleProjector,
+                        wildcardIncludesDynamicCols);
+                QueryPlan plan = compileSingleFlatQuery(
+                        context,
+                        query,
+                        asSubquery,
+                        !asSubquery && joinTable.isAllLeftJoin(),
+                        null, true, false);
+                Expression postJoinFilterExpression = joinTable.compilePostFilterExpression(context);
+                Integer limit = null;
+                Integer offset = null;
+                if (!query.isAggregate() && !query.isDistinct() && query.getOrderBy().isEmpty()) {
+                    limit = plan.getLimit();
+                    offset = plan.getOffset();
+                }
+                HashJoinInfo joinInfo = new HashJoinInfo(projectedTable, joinIds, joinExpressions, joinTypes,
+                        starJoinVector, tables, fieldPositions, postJoinFilterExpression, QueryUtil.getOffsetLimit(limit, offset));
+                return HashJoinPlan.create(joinTable.getOriginalJoinSelectStatement(), plan, joinInfo, hashPlans);
+            }
+            case HASH_BUILD_LEFT: {
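+                // Build the hash table from the left-hand side join tree instead and scan
+                // the right-hand side; a Right join is mirrored into a Left join.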
+                JoinSpec lastJoinSpec = joinSpecs.get(joinSpecs.size() - 1);
+                JoinType type = lastJoinSpec.getType();
+                JoinTable rhsJoinTable = lastJoinSpec.getRhsJoinTable();
+                Table rhsTable = rhsJoinTable.getLeftTable();
+                JoinTable lhsJoin = joinTable.createSubJoinTable(statement.getConnection());
+                Scan subScan = ScanUtil.newScan(originalScan);
+                StatementContext lhsCtx = new StatementContext(statement, context.getResolver(), context.getBindManager(), subScan, new SequenceManager(statement));
+                QueryPlan lhsPlan = compileJoinQuery(lhsCtx, lhsJoin, true, true, null);
+                PTable rhsProjTable;
+                TableRef rhsTableRef;
+                SelectStatement rhs;
+                TupleProjector tupleProjector;
+                if (!rhsTable.isSubselect()) {
+                    context.setCurrentTable(rhsTable.getTableRef());
+                    rhsProjTable = rhsTable.createProjectedTable(!projectPKColumns, context);
+                    rhsTableRef = rhsTable.getTableRef();
+                    rhsTable.projectColumns(context.getScan());
+                    rhs = rhsJoinTable.getAsSingleSubquery(rhsTable.getAsSubquery(orderBy), asSubquery);
+                    tupleProjector = new TupleProjector(rhsProjTable);
+                } else {
+                    SelectStatement subquery = rhsTable.getAsSubquery(orderBy);
+                    QueryPlan plan = compileSubquery(subquery, false);
+                    rhsProjTable = rhsTable.createProjectedTable(plan.getProjector());
+                    rhsTableRef = plan.getTableRef();
+                    context.getScan().setFamilyMap(plan.getContext().getScan().getFamilyMap());
+                    rhs = rhsJoinTable.getAsSingleSubquery((SelectStatement) plan.getStatement(), asSubquery);
+                    tupleProjector = new TupleProjector(plan.getProjector());
+                }
+                context.setCurrentTable(rhsTableRef);
+                context.setResolver(FromCompiler.getResolverForProjectedTable(rhsProjTable, context.getConnection(), rhs.getUdfParseNodes()));
+                ImmutableBytesPtr[] joinIds = new ImmutableBytesPtr[]{new ImmutableBytesPtr(emptyByteArray)};
+                Pair<List<Expression>, List<Expression>> joinConditions = lastJoinSpec.compileJoinConditions(lhsCtx, context, strategy);
+                List<Expression> joinExpressions = joinConditions.getSecond();
+                List<Expression> hashExpressions = joinConditions.getFirst();
+                boolean needsMerge = lhsJoin.hasPostReference();
+                PTable lhsTable = needsMerge ? lhsCtx.getResolver().getTables().get(0).getTable() : null;
+                int fieldPosition = needsMerge ? rhsProjTable.getColumns().size() - rhsProjTable.getPKColumns().size() : 0;
+                PTable projectedTable = needsMerge ? JoinCompiler.joinProjectedTables(rhsProjTable, lhsTable, type == JoinType.Right ? JoinType.Left : type) : rhsProjTable;
+                TupleProjector.serializeProjectorIntoScan(context.getScan(), tupleProjector,
+                        wildcardIncludesDynamicCols);
+                context.setResolver(FromCompiler.getResolverForProjectedTable(projectedTable, context.getConnection(), rhs.getUdfParseNodes()));
+                QueryPlan rhsPlan = compileSingleFlatQuery(
+                        context,
+                        rhs,
+                        asSubquery,
+                        !asSubquery && type == JoinType.Right,
+                        null,
+                        true,
+                        false);
+                Expression postJoinFilterExpression = joinTable.compilePostFilterExpression(context);
+                Integer limit = null;
+                Integer offset = null;
+                if (!rhs.isAggregate() && !rhs.isDistinct() && rhs.getOrderBy().isEmpty()) {
+                    limit = rhsPlan.getLimit();
+                    offset = rhsPlan.getOffset();
+                }
+                HashJoinInfo joinInfo = new HashJoinInfo(projectedTable, joinIds, new List[]{joinExpressions},
+                        new JoinType[]{type == JoinType.Right ? JoinType.Left : type}, new boolean[]{true},
+                        new PTable[]{lhsTable}, new int[]{fieldPosition}, postJoinFilterExpression, QueryUtil.getOffsetLimit(limit, offset));
+                boolean usePersistentCache = joinTable.getOriginalJoinSelectStatement().getHint().hasHint(Hint.USE_PERSISTENT_CACHE);
+                Pair<Expression, Expression> keyRangeExpressions = new Pair<Expression, Expression>(null, null);
+                getKeyExpressionCombinations(
+                        keyRangeExpressions,
+                        context,
+                        joinTable.getOriginalJoinSelectStatement(),
+                        rhsTableRef,
+                        type,
+                        joinExpressions,
+                        hashExpressions);
+                return HashJoinPlan.create(
+                        joinTable.getOriginalJoinSelectStatement(),
+                        rhsPlan,
+                        joinInfo,
+                        new HashSubPlan[]{
+                                new HashSubPlan(
+                                        0,
+                                        lhsPlan,
+                                        hashExpressions,
+                                        false,
+                                        usePersistentCache,
+                                        keyRangeExpressions.getFirst(),
+                                        keyRangeExpressions.getSecond())});
+            }
+            case SORT_MERGE: {
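+                // Sort both sides on the join keys and merge client-side; a Right join is
+                // rewritten as a Left join with the sides swapped first.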
+                JoinTable lhsJoin =  joinTable.createSubJoinTable(statement.getConnection());
+                JoinSpec lastJoinSpec = joinSpecs.get(joinSpecs.size() - 1);
+                JoinType type = lastJoinSpec.getType();
+                JoinTable rhsJoin = lastJoinSpec.getRhsJoinTable();
+                if (type == JoinType.Right) {
+                    JoinTable temp = lhsJoin;
+                    lhsJoin = rhsJoin;
+                    rhsJoin = temp;
+                }
+
+                List<EqualParseNode> joinConditionNodes = lastJoinSpec.getOnConditions();
+                List<OrderByNode> lhsOrderBy = Lists.<OrderByNode>newArrayListWithExpectedSize(joinConditionNodes.size());
+                List<OrderByNode> rhsOrderBy = Lists.<OrderByNode>newArrayListWithExpectedSize(joinConditionNodes.size());
+                for (EqualParseNode condition : joinConditionNodes) {
+                    lhsOrderBy.add(NODE_FACTORY.orderBy(type == JoinType.Right ? condition.getRHS() : condition.getLHS(), false, true));
+                    rhsOrderBy.add(NODE_FACTORY.orderBy(type == JoinType.Right ? condition.getLHS() : condition.getRHS(), false, true));
+                }
+
+                Scan lhsScan = ScanUtil.newScan(originalScan);
+                StatementContext lhsCtx = new StatementContext(statement, context.getResolver(), context.getBindManager(), lhsScan, new SequenceManager(statement));
+                boolean preserveRowkey = !projectPKColumns && type != JoinType.Full;
+                QueryPlan lhsPlan = compileJoinQuery(lhsCtx, lhsJoin, true, !preserveRowkey, lhsOrderBy);
+                PTable lhsProjTable = lhsCtx.getResolver().getTables().get(0).getTable();
+
+                Scan rhsScan = ScanUtil.newScan(originalScan);
+                StatementContext rhsCtx = new StatementContext(statement, context.getResolver(), context.getBindManager(), rhsScan, new SequenceManager(statement));
+                QueryPlan rhsPlan = compileJoinQuery(rhsCtx, rhsJoin, true, true, rhsOrderBy);
+                PTable rhsProjTable = rhsCtx.getResolver().getTables().get(0).getTable();
+
+                Pair<List<Expression>, List<Expression>> joinConditions = lastJoinSpec.compileJoinConditions(type == JoinType.Right ? rhsCtx : lhsCtx, type == JoinType.Right ? lhsCtx : rhsCtx, strategy);
+                List<Expression> lhsKeyExpressions = type == JoinType.Right ? joinConditions.getSecond() : joinConditions.getFirst();
+                List<Expression> rhsKeyExpressions = type == JoinType.Right ? joinConditions.getFirst() : joinConditions.getSecond();
+
+                boolean needsMerge = rhsJoin.hasPostReference();
+                int fieldPosition = needsMerge ? lhsProjTable.getColumns().size() - lhsProjTable.getPKColumns().size() : 0;
+                PTable projectedTable = needsMerge ? JoinCompiler.joinProjectedTables(lhsProjTable, rhsProjTable, type == JoinType.Right ? JoinType.Left : type) : lhsProjTable;
+
+                ColumnResolver resolver = FromCompiler.getResolverForProjectedTable(projectedTable, context.getConnection(), joinTable.getOriginalJoinSelectStatement().getUdfParseNodes());
+                TableRef tableRef = resolver.getTables().get(0);
+                StatementContext subCtx = new StatementContext(statement, resolver, context.getBindManager(), ScanUtil.newScan(originalScan), new SequenceManager(statement));
+                subCtx.setCurrentTable(tableRef);
+                QueryPlan innerPlan = new SortMergeJoinPlan(
+                        subCtx,
+                        joinTable.getOriginalJoinSelectStatement(),
+                        tableRef,
+                        type == JoinType.Right ? JoinType.Left : type,
+                        lhsPlan,
+                        rhsPlan,
+                        new Pair<List<Expression>,List<Expression>>(lhsKeyExpressions, rhsKeyExpressions),
+                        rhsKeyExpressions,
+                        projectedTable,
+                        lhsProjTable,
+                        needsMerge ? rhsProjTable : null,
+                        fieldPosition,
+                        lastJoinSpec.isSingleValueOnly(),
+                        new Pair<List<OrderByNode>,List<OrderByNode>>(lhsOrderBy, rhsOrderBy));
+                context.setCurrentTable(tableRef);
+                context.setResolver(resolver);
+                TableNode from = NODE_FACTORY.namedTable(tableRef.getTableAlias(), NODE_FACTORY.table(tableRef.getTable().getSchemaName().getString(), tableRef.getTable().getTableName().getString()));
+                ParseNode where = joinTable.getPostFiltersCombined();
+                SelectStatement select = asSubquery ?
+                        NODE_FACTORY.select(
+                                from,
+                                joinTable.getOriginalJoinSelectStatement().getHint(),
+                                false,
+                                Collections.<AliasedNode>emptyList(),
+                                where,
+                                null,
+                                null,
+                                orderBy,
+                                null,
+                                null,
+                                0,
+                                false,
+                                joinTable.getOriginalJoinSelectStatement().hasSequence(),
+                                Collections.<SelectStatement>emptyList(),
+                                joinTable.getOriginalJoinSelectStatement().getUdfParseNodes()) :
+                         NODE_FACTORY.select(
+                                 joinTable.getOriginalJoinSelectStatement(),
+                                 from,
+                                 where);
+
+                return compileSingleFlatQuery(
+                        context,
+                        select,
+                        asSubquery,
+                        false,
+                        innerPlan,
+                        true,
+                        false);
+            }
+            default:
+                throw new IllegalArgumentException("Invalid join strategy '" + strategy + "'");
+        }
+    }
+
+    private boolean getKeyExpressionCombinations(Pair<Expression, Expression> combination, StatementContext context, SelectStatement select, TableRef table, JoinType type, final List<Expression> joinExpressions, final List<Expression> hashExpressions) throws SQLException {
+        if ((type != JoinType.Inner && type != JoinType.Semi) || this.noChildParentJoinOptimization)
+            return false;
+
+        Scan scanCopy = ScanUtil.newScan(context.getScan());
+        StatementContext contextCopy = new StatementContext(statement, context.getResolver(), context.getBindManager(), scanCopy, new SequenceManager(statement));
+        contextCopy.setCurrentTable(table);
+        List<Expression> lhsCombination = Lists.<Expression> newArrayList();
+        boolean complete = WhereOptimizer.getKeyExpressionCombination(lhsCombination, contextCopy, select, joinExpressions);
+        if (lhsCombination.isEmpty())
+            return false;
+
+        List<Expression> rhsCombination = Lists.newArrayListWithExpectedSize(lhsCombination.size());
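+        // Pair each chosen LHS key expression with its corresponding RHS hash expression
+        // by object identity, keeping order so the RVCs built below line up.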
+        for (int i = 0; i < lhsCombination.size(); i++) {
+            Expression lhs = lhsCombination.get(i);
+            for (int j = 0; j < joinExpressions.size(); j++) {
+                if (lhs == joinExpressions.get(j)) {
+                    rhsCombination.add(hashExpressions.get(j));
+                    break;
+                }
+            }
+        }
+
+        if (lhsCombination.size() == 1) {
+            combination.setFirst(lhsCombination.get(0));
+            combination.setSecond(rhsCombination.get(0));
+        } else {
+            combination.setFirst(new RowValueConstructorExpression(lhsCombination, false));
+            combination.setSecond(new RowValueConstructorExpression(rhsCombination, false));
+        }
+
+        return type == JoinType.Semi && complete;
+    }
+
+    protected QueryPlan compileSubquery(
+            SelectStatement subquerySelectStatement,
+            boolean pushDownMaxRows) throws SQLException {
+        PhoenixConnection phoenixConnection = this.statement.getConnection();
+        RewriteResult rewriteResult =
+                ParseNodeUtil.rewrite(subquerySelectStatement, phoenixConnection);
+        int maxRows = this.statement.getMaxRows();
+        this.statement.setMaxRows(pushDownMaxRows ? maxRows : 0); // overwrite maxRows to avoid its impact on inner queries.
+        QueryPlan queryPlan = new QueryCompiler(
+                this.statement,
+                rewriteResult.getRewrittenSelectStatement(),
+                rewriteResult.getColumnResolver(),
+                bindManager,
+                false,
+                optimizeSubquery,
+                null).compile();
+        if (optimizeSubquery) {
+            queryPlan = statement.getConnection().getQueryServices().getOptimizer().optimize(
+                    statement,
+                    queryPlan);
+        }
+        this.statement.setMaxRows(maxRows); // restore maxRows.
+        return queryPlan;
+    }
+
+    protected QueryPlan compileSingleQuery(StatementContext context, SelectStatement select, boolean asSubquery, boolean allowPageFilter) throws SQLException{
+        SelectStatement innerSelect = select.getInnerSelectStatement();
+        if (innerSelect == null) {
+            return compileSingleFlatQuery(context, select, asSubquery, allowPageFilter, null, false, false);
+        }
+
+        if ((innerSelect.getOffset() != null && !innerSelect.getOffset().isIntegerOffset())
+                || (select.getOffset() != null && !select.getOffset().isIntegerOffset())) {
+            throw new SQLException("RVC Offset not allowed with subqueries.");
+        }
+
+        QueryPlan innerPlan = compileSubquery(innerSelect, false);
+        RowProjector innerQueryPlanRowProjector = innerPlan.getProjector();
+        TupleProjector tupleProjector = new TupleProjector(innerQueryPlanRowProjector);
+
+        // Replace the original resolver and table with those having compiled type info.
+        TableRef tableRef = context.getResolver().getTables().get(0);
+        ColumnResolver resolver = FromCompiler.getResolverForCompiledDerivedTable(statement.getConnection(), tableRef, innerQueryPlanRowProjector);
+        context.setResolver(resolver);
+        tableRef = resolver.getTables().get(0);
+        context.setCurrentTable(tableRef);
+        innerPlan = new TupleProjectionPlan(innerPlan, tupleProjector, context, null);
+
+        return compileSingleFlatQuery(context, select, asSubquery, allowPageFilter, innerPlan, false, false);
+    }
+
+    protected QueryPlan compileSingleFlatQuery(
+            StatementContext context,
+            SelectStatement select,
+            boolean asSubquery,
+            boolean allowPageFilter,
+            QueryPlan innerPlan,
+            boolean inJoin,
+            boolean inUnion) throws SQLException {
+        boolean isApplicable = true;
+        PTable projectedTable = null;
+        if (this.projectTuples) {
+            projectedTable = TupleProjectionCompiler.createProjectedTable(select, context);
+            if (projectedTable != null) {
+                context.setResolver(FromCompiler.getResolverForProjectedTable(projectedTable, context.getConnection(), select.getUdfParseNodes()));
+            }
+        }
+        
+        ColumnResolver resolver = context.getResolver();
+        TableRef tableRef = context.getCurrentTable();
+        PTable table = tableRef.getTable();
+
+        ParseNode viewWhere = null;
+        if (table.getViewStatement() != null) {
+            viewWhere = new SQLParser(table.getViewStatement()).parseQuery().getWhere();
+        }
+        Integer limit = LimitCompiler.compile(context, select);
+
+        CompiledOffset compiledOffset = null;
+        Integer offset = null;
+        try {
+            compiledOffset = OffsetCompiler.getOffsetCompiler().compile(context, select, inJoin, inUnion);
+            offset = compiledOffset.getIntegerOffset().orNull();
+        } catch (RowValueConstructorOffsetNotCoercibleException e) {
+            // The current plan is not executable
+            compiledOffset = new CompiledOffset(Optional.<Integer>absent(), Optional.<byte[]>absent());
+            isApplicable = false;
+        }
+
+        GroupBy groupBy = GroupByCompiler.compile(context, select);
+        // Optimize the HAVING clause by finding any group by expressions that can be moved
+        // to the WHERE clause
+        select = HavingCompiler.rewrite(context, select, groupBy);
+        Expression having = HavingCompiler.compile(context, select, groupBy);
+        // Don't pass groupBy when building the where clause expression, because we do not want to
+        // wrap these expressions as group by key expressions since they are pre-filtered, not
+        // post-filtered.
+        if (innerPlan == null && !tableRef.equals(resolver.getTables().get(0))) {
+            context.setResolver(FromCompiler.getResolver(context.getConnection(), tableRef, select.getUdfParseNodes()));
+        }
+        Set<SubqueryParseNode> subqueries = Sets.<SubqueryParseNode> newHashSet();
+        Expression where = WhereCompiler.compile(context, select, viewWhere, subqueries, compiledOffset.getByteOffset());
+        // Recompile GROUP BY now that we've figured out our ScanRanges so we know
+        // definitively whether or not we'll traverse in row key order.
+        groupBy = groupBy.compile(context, innerPlan, where);
+        context.setResolver(resolver); // recover resolver
+        boolean wildcardIncludesDynamicCols = context.getConnection().getQueryServices()
+                .getConfiguration().getBoolean(WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB,
+                        DEFAULT_WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB);
+        RowProjector projector = ProjectionCompiler.compile(context, select, groupBy,
+                asSubquery ? Collections.emptyList() : targetColumns, where,
+                wildcardIncludesDynamicCols);
+        OrderBy orderBy = OrderByCompiler.compile(
+                context,
+                select,
+                groupBy,
+                limit,
+                compiledOffset,
+                projector,
+                innerPlan,
+                where);
+        context.getAggregationManager().compile(context, groupBy);
+        // Final step is to build the query plan
+        if (!asSubquery) {
+            int maxRows = statement.getMaxRows();
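+            // Clamp the compiled LIMIT with the JDBC Statement.setMaxRows() value so the
+            // top-level plan never returns more rows than the client requested.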
+            if (maxRows > 0) {
+                if (limit != null) {
+                    limit = Math.min(limit, maxRows);
+                } else {
+                    limit = maxRows;
+                }
+            }
+        }
+
+        if (projectedTable != null) {
+            TupleProjector.serializeProjectorIntoScan(context.getScan(),
+                    new TupleProjector(projectedTable), wildcardIncludesDynamicCols &&
+                            projector.projectDynColsInWildcardQueries());
+        }
+        
+        QueryPlan plan = innerPlan;
+        QueryPlan dataPlan = dataPlans.get(tableRef);
+        if (plan == null) {
+            ParallelIteratorFactory parallelIteratorFactory = asSubquery ? null : this.parallelIteratorFactory;
+            plan = select.getFrom() == null
+                    ? new LiteralResultIterationPlan(context, select, tableRef, projector, limit, offset, orderBy,
+                            parallelIteratorFactory)
+                    : (select.isAggregate() || select.isDistinct()
+                            ? new AggregatePlan(context, select, tableRef, projector, limit, offset, orderBy,
+                                    parallelIteratorFactory, groupBy, having, dataPlan)
+                            : new ScanPlan(context, select, tableRef, projector, limit, offset, orderBy,
+                                    parallelIteratorFactory, allowPageFilter, dataPlan, compiledOffset.getByteOffset()));
+        }
+        SelectStatement planSelect = asSubquery ? select : this.select;
+        if (!subqueries.isEmpty()) {
+            int count = subqueries.size();
+            WhereClauseSubPlan[] subPlans = new WhereClauseSubPlan[count];
+            int i = 0;
+            for (SubqueryParseNode subqueryNode : subqueries) {
+                SelectStatement stmt = subqueryNode.getSelectNode();
+                subPlans[i++] = new WhereClauseSubPlan(compileSubquery(stmt, false), stmt, subqueryNode.expectSingleRow());
+            }
+            plan = HashJoinPlan.create(planSelect, plan, null, subPlans);
+        }
+
+        if (innerPlan != null) {
+            if (LiteralExpression.isTrue(where)) {
+                where = null; // we do not pass "true" as filter
+            }
+            plan = select.isAggregate() || select.isDistinct()
+                    ? new ClientAggregatePlan(context, planSelect, tableRef, projector, limit, offset, where, orderBy,
+                            groupBy, having, plan)
+                    : new ClientScanPlan(context, planSelect, tableRef, projector, limit, offset, where, orderBy, plan);
+
+        }
+
+        if (plan instanceof BaseQueryPlan) {
+            ((BaseQueryPlan) plan).setApplicable(isApplicable);
+        }
+        return plan;
+    }
+}
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryPlan.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/QueryPlan.java
similarity index 100%
rename from phoenix-core/src/main/java/org/apache/phoenix/compile/QueryPlan.java
rename to phoenix-core-client/src/main/java/org/apache/phoenix/compile/QueryPlan.java
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/RVCOffsetCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/RVCOffsetCompiler.java
similarity index 100%
rename from phoenix-core/src/main/java/org/apache/phoenix/compile/RVCOffsetCompiler.java
rename to phoenix-core-client/src/main/java/org/apache/phoenix/compile/RVCOffsetCompiler.java
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/RowProjector.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/RowProjector.java
similarity index 100%
rename from phoenix-core/src/main/java/org/apache/phoenix/compile/RowProjector.java
rename to phoenix-core-client/src/main/java/org/apache/phoenix/compile/RowProjector.java
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ScanRanges.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ScanRanges.java
new file mode 100644
index 0000000000..e9de7b75a6
--- /dev/null
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ScanRanges.java
@@ -0,0 +1,785 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compile;
+
+import static org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants.SCAN_ACTUAL_START_ROW;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.phoenix.thirdparty.com.google.common.base.Optional;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.Scan;
... 197629 lines suppressed ...